From bf8d71e82c27eae1e47bde411054f5258df29146 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko <adam.ierymenko@zerotier.com>
Date: Thu, 17 Nov 2016 16:20:41 -0800
Subject: Add notion of upstream that is separate from root in Topology, etc.

---
 node/IncomingPacket.cpp | 10 ++----
 node/Packet.hpp         | 19 ++++++++---
 node/Topology.cpp       | 87 ++++++++++++++++++++++++++++++++++---------------
 node/Topology.hpp       | 37 +++++++++++----------
 4 files changed, 98 insertions(+), 55 deletions(-)

(limited to 'node')

diff --git a/node/IncomingPacket.cpp b/node/IncomingPacket.cpp
index bde5df71..c6346346 100644
--- a/node/IncomingPacket.cpp
+++ b/node/IncomingPacket.cpp
@@ -160,7 +160,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
 
 			case Packet::ERROR_IDENTITY_COLLISION:
 				// FIXME: for federation this will need a payload with a signature or something.
-				if (RR->topology->isRoot(peer->identity()))
+				if (RR->topology->isUpstream(peer->identity()))
 					RR->node->postEvent(ZT_EVENT_FATAL_ERROR_IDENTITY_COLLISION);
 				break;
 
@@ -508,11 +508,7 @@ bool IncomingPacket::_doWHOIS(const RuntimeEnvironment *RR,const SharedPtr<Peer>
 				id.serialize(outp,false);
 				++count;
 			} else {
-				// If I am not the root and don't know this identity, ask upstream. Downstream
-				// peer may re-request in the future and if so we will be able to provide it.
-				if (!RR->topology->amRoot())
-					RR->sw->requestWhois(addr);
-
+				RR->sw->requestWhois(addr);
 #ifdef ZT_ENABLE_CLUSTER
 				// Distribute WHOIS queries across a cluster if we do not know the ID.
 				// This may result in duplicate OKs to the querying peer, which is fine.
@@ -666,7 +662,7 @@ bool IncomingPacket::_doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<P
 			Packet outp(peer->address(),RR->identity.address(),Packet::VERB_OK);
 			outp.append((uint8_t)Packet::VERB_EXT_FRAME);
 			outp.append((uint64_t)packetId());
diff --git a/node/Packet.hpp b/node/Packet.hpp
index a8738884..7a742aad 100644
--- a/node/Packet.hpp
+++ b/node/Packet.hpp
@@ -617,10 +617,8 @@ public:
 		 *   <[1] protocol address length (4 for IPv4, 16 for IPv6)>
 		 *   <[...] protocol address (network byte order)>
 		 *
-		 * This is sent by a relaying node to initiate NAT traversal between two
-		 * peers that are communicating by way of indirect relay. The relay will
-		 * send this to both peers at the same time on a periodic basis, telling
-		 * each where it might find the other on the network.
+		 * An upstream node can send this to inform both sides of a relay of
+		 * information they might use to establish a direct connection.
 		 *
 		 * Upon receipt a peer sends HELLO to establish a direct link.
 		 *
@@ -1051,7 +1049,18 @@ public:
 		 * OK or ERROR and has no special semantics outside of whatever the user
 		 * (via the ZeroTier core API) chooses to give it.
 		 */
-		VERB_USER_MESSAGE = 0x14
+		VERB_USER_MESSAGE = 0x14,
+
+		/**
+		 * Information related to federation and mesh-like behavior:
+		 *   <[2] 16-bit length of Dictionary>
+		 *   <[...] topology definition info Dictionary>
+		 *
+		 * This message can carry information that can be used to define topology
+		 * and implement "mesh-like" behavior. It can optionally generate OK or
+		 * ERROR, and these carry the same payload.
+		 */
+		VERB_TOPOLOGY_HINT = 0x15
 	};
 
 	/**
diff --git a/node/Topology.cpp b/node/Topology.cpp
index 12a7cc0b..48ced7c5 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -111,9 +111,8 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
 	{
 		Mutex::Lock _l(_lock);
 		const SharedPtr<Peer> *const ap = _peers.get(zta);
-		if (ap) {
+		if (ap)
 			return *ap;
-		}
 	}
 
 	try {
@@ -158,7 +157,7 @@ void Topology::saveIdentity(const Identity &id)
 	}
 }
 
-SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
+SharedPtr<Peer> Topology::getUpstreamPeer(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
 {
 	const uint64_t now = RR->node->now();
 	Mutex::Lock _l(_lock);
@@ -189,22 +188,25 @@ SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCou
 		const SharedPtr<Peer> *bestOverall = (const SharedPtr<Peer> *)0;
 		const SharedPtr<Peer> *bestNotAvoid = (const SharedPtr<Peer> *)0;
 
-		for(std::vector< SharedPtr<Peer> >::const_iterator r(_rootPeers.begin());r!=_rootPeers.end();++r) {
-			bool avoiding = false;
-			for(unsigned int i=0;i<avoidCount;++i) {
-				if (avoid[i] == (*r)->address()) {
-					avoiding = true;
-					break;
+		for(std::vector<Address>::const_iterator a(_upstreamAddresses.begin());a!=_upstreamAddresses.end();++a) {
+			const SharedPtr<Peer> *const p = _peers.get(*a);
+			if (p) {
+				bool avoiding = false;
+				for(unsigned int i=0;i<avoidCount;++i) {
+					if (avoid[i] == (*p)->address()) {
+						avoiding = true;
+						break;
+					}
+				}
+				const unsigned int q = (*p)->relayQuality(now);
+				if (q <= bestQualityOverall) {
+					bestQualityOverall = q;
+					bestOverall = &(*p);
+				}
+				if ((!avoiding)&&(q <= bestQualityNotAvoid)) {
+					bestQualityNotAvoid = q;
+					bestNotAvoid = &(*p);
 				}
-			}
-			const unsigned int q = (*r)->relayQuality(now);
-			if (q <= bestQualityOverall) {
-				bestQualityOverall = q;
-				bestOverall = &(*r);
-			}
-			if ((!avoiding)&&(q <= bestQualityNotAvoid)) {
-				bestQualityNotAvoid = q;
-				bestNotAvoid = &(*r);
 			}
 		}
 
@@ -219,9 +221,34 @@ SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCou
 	return SharedPtr<Peer>();
 }
 
+bool Topology::isRoot(const Identity &id) const
+{
+	Mutex::Lock _l(_lock);
+	return (std::find(_rootAddresses.begin(),_rootAddresses.end(),id.address()) != _rootAddresses.end());
+}
+
 bool Topology::isUpstream(const Identity &id) const
 {
-	return isRoot(id);
+	Mutex::Lock _l(_lock);
+	return (std::find(_upstreamAddresses.begin(),_upstreamAddresses.end(),id.address()) != _upstreamAddresses.end());
+}
+
+void Topology::setUpstream(const Address &a,bool upstream)
+{
+	Mutex::Lock _l(_lock);
+	if (std::find(_rootAddresses.begin(),_rootAddresses.end(),a) == _rootAddresses.end()) {
+		if (upstream) {
+			if (std::find(_upstreamAddresses.begin(),_upstreamAddresses.end(),a) == _upstreamAddresses.end())
+				_upstreamAddresses.push_back(a);
+		} else {
+			std::vector<Address> ua;
+			for(std::vector<Address>::iterator i(_upstreamAddresses.begin());i!=_upstreamAddresses.end();++i) {
+				if (a != *i)
+					ua.push_back(*i);
+			}
+			_upstreamAddresses.swap(ua);
+		}
+	}
 }
 
 bool Topology::worldUpdateIfValid(const World &newWorld)
@@ -249,7 +276,7 @@ void Topology::clean(uint64_t now)
 		Address *a = (Address *)0;
 		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
 		while (i.next(a,p)) {
-			if ( (!(*p)->isAlive(now)) && (std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end()) )
+			if ( (!(*p)->isAlive(now)) && (std::find(_upstreamAddresses.begin(),_upstreamAddresses.end(),*a) == _upstreamAddresses.end()) )
 				_peers.erase(*a);
 		}
 	}
@@ -280,25 +307,33 @@ Identity Topology::_getIdentity(const Address &zta)
 void Topology::_setWorld(const World &newWorld)
 {
 	// assumed _lock is locked (or in constructor)
+
+	std::vector<Address> ua;
+	for(std::vector<Address>::iterator a(_upstreamAddresses.begin());a!=_upstreamAddresses.end();++a) {
+		if (std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())
+			ua.push_back(*a);
+	}
+
 	_world = newWorld;
-	_amRoot = false;
 	_rootAddresses.clear();
-	_rootPeers.clear();
+	_amRoot = false;
+
 	for(std::vector<World::Root>::const_iterator r(_world.roots().begin());r!=_world.roots().end();++r) {
 		_rootAddresses.push_back(r->identity.address());
+		if (std::find(ua.begin(),ua.end(),r->identity.address()) == ua.end())
+			ua.push_back(r->identity.address());
 		if (r->identity.address() == RR->identity.address()) {
 			_amRoot = true;
 		} else {
 			SharedPtr<Peer> *rp = _peers.get(r->identity.address());
-			if (rp) {
-				_rootPeers.push_back(*rp);
-			} else {
+			if (!rp) {
 				SharedPtr<Peer> newrp(new Peer(RR,RR->identity,r->identity));
 				_peers.set(r->identity.address(),newrp);
-				_rootPeers.push_back(newrp);
 			}
 		}
 	}
+
+	_upstreamAddresses.swap(ua);
 }
 
 } // namespace ZeroTier
diff --git a/node/Topology.hpp b/node/Topology.hpp
index e63766cb..573d5ca2 100644
--- a/node/Topology.hpp
+++ b/node/Topology.hpp
@@ -125,35 +125,27 @@ public:
 	void saveIdentity(const Identity &id);
 
 	/**
-	 * Get the current favorite root server
+	 * Get the current best upstream peer
 	 *
 	 * @return Root server with lowest latency or NULL if none
 	 */
-	inline SharedPtr<Peer> getBestRoot() { return getBestRoot((const Address *)0,0,false); }
+	inline SharedPtr<Peer> getUpstreamPeer() { return getUpstreamPeer((const Address *)0,0,false); }
 
 	/**
-	 * Get the best root server, avoiding root servers listed in an array
-	 *
-	 * This will get the best root server (lowest latency, etc.) but will
-	 * try to avoid the listed root servers, only using them if no others
-	 * are available.
+	 * Get the current best upstream peer, avoiding those in the supplied avoid list
 	 *
 	 * @param avoid Nodes to avoid
 	 * @param avoidCount Number of nodes to avoid
 	 * @param strictAvoid If false, consider avoided root servers anyway if no non-avoid root servers are available
 	 * @return Root server or NULL if none available
 	 */
-	SharedPtr<Peer> getBestRoot(const Address *avoid,unsigned int avoidCount,bool strictAvoid);
+	SharedPtr<Peer> getUpstreamPeer(const Address *avoid,unsigned int avoidCount,bool strictAvoid);
 
 	/**
 	 * @param id Identity to check
 	 * @return True if this is a designated root server in this world
 	 */
-	inline bool isRoot(const Identity &id) const
-	{
-		Mutex::Lock _l(_lock);
-		return (std::find(_rootAddresses.begin(),_rootAddresses.end(),id.address()) != _rootAddresses.end());
-	}
+	bool isRoot(const Identity &id) const;
 
 	/**
 	 * @param id Identity to check
@@ -161,6 +153,16 @@ public:
 	 */
 	bool isUpstream(const Identity &id) const;
 
+	/**
+	 * Set whether or not an address is upstream
+	 *
+	 * If the address is a root this does nothing, since roots are fixed.
+	 *
+	 * @param a Target address
+	 * @param upstream New upstream status
+	 */
+	void setUpstream(const Address &a,bool upstream);
+
 	/**
 	 * @return Vector of root server addresses
 	 */
@@ -175,7 +177,8 @@ public:
 	 */
 	inline std::vector<Address> upstreamAddresses() const
 	{
-		return rootAddresses();
+		Mutex::Lock _l(_lock);
+		return _upstreamAddresses;
 	}
 
 	/**
@@ -342,9 +345,9 @@ private:
 	Hashtable< Address,SharedPtr<Peer> > _peers;
 	Hashtable< Path::HashKey,SharedPtr<Path> > _paths;
 
-	std::vector< Address > _rootAddresses;
-	std::vector< SharedPtr<Peer> > _rootPeers;
-	bool _amRoot;
+	std::vector< Address > _upstreamAddresses; // includes roots
+	std::vector< Address > _rootAddresses; // only roots
+	bool _amRoot; // am I a root?
 
 	Mutex _lock;
 };
--
cgit v1.2.3
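
A minimal standalone sketch of the root/upstream bookkeeping this commit introduces: roots always remain part of the upstream set, setUpstream() can only add or remove non-root addresses, and the world-update path rebuilds the upstream list while preserving non-root upstreams. The "MiniTopology" class, the setRoots() helper, and the plain uint64_t stand-ins for ZeroTier addresses below are hypothetical illustration names, not the actual ZeroTier types, and no locking is modeled.

// Illustration only -- approximates the invariants of the patch, not ZeroTier code.
#include <algorithm>
#include <cassert>
#include <stdint.h>
#include <vector>

class MiniTopology
{
public:
	bool isRoot(uint64_t a) const { return (std::find(_rootAddresses.begin(),_rootAddresses.end(),a) != _rootAddresses.end()); }
	bool isUpstream(uint64_t a) const { return (std::find(_upstreamAddresses.begin(),_upstreamAddresses.end(),a) != _upstreamAddresses.end()); }

	// Mirrors Topology::setUpstream(): roots are fixed, so only non-root
	// addresses can be added to or removed from the upstream set.
	void setUpstream(uint64_t a,bool upstream)
	{
		if (isRoot(a))
			return;
		if (upstream) {
			if (!isUpstream(a))
				_upstreamAddresses.push_back(a);
		} else {
			_upstreamAddresses.erase(std::remove(_upstreamAddresses.begin(),_upstreamAddresses.end(),a),_upstreamAddresses.end());
		}
	}

	// Mirrors Topology::_setWorld(): keep any non-root upstreams, install the
	// new root list, then make sure every root is also listed as upstream.
	void setRoots(const std::vector<uint64_t> &roots)
	{
		std::vector<uint64_t> ua;
		for(std::vector<uint64_t>::const_iterator a(_upstreamAddresses.begin());a!=_upstreamAddresses.end();++a) {
			if (!isRoot(*a))
				ua.push_back(*a);
		}
		_rootAddresses = roots;
		for(std::vector<uint64_t>::const_iterator r(roots.begin());r!=roots.end();++r) {
			if (std::find(ua.begin(),ua.end(),*r) == ua.end())
				ua.push_back(*r);
		}
		_upstreamAddresses.swap(ua);
	}

private:
	std::vector<uint64_t> _upstreamAddresses; // includes roots
	std::vector<uint64_t> _rootAddresses; // only roots
};

int main()
{
	MiniTopology t;
	t.setRoots(std::vector<uint64_t>(1,0x1111));
	t.setUpstream(0x2222,true);  // a federated upstream that is not a root
	assert(t.isRoot(0x1111)&&t.isUpstream(0x1111));
	assert((!t.isRoot(0x2222))&&t.isUpstream(0x2222));
	t.setUpstream(0x1111,false); // no-op: roots cannot be removed from the upstream set
	assert(t.isUpstream(0x1111));
	return 0;
}

Because the roots are always contained in _upstreamAddresses, call sites can move from isRoot()/getBestRoot() to isUpstream()/getUpstreamPeer() without losing the existing root behavior, which is what the IncomingPacket.cpp and Topology.cpp hunks above do.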