From a8bd8fff933b4fcc21db2be936232fff1b4f95d8 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Tue, 14 Oct 2014 16:38:27 -0700
Subject: Make several changes to eliminate potential deadlock or recursive lock conditions, and add back rescan of multicast groups on network startup.

---
 node/Network.cpp  |  8 ++++++--
 node/Topology.cpp | 33 ++++++++++++++++-----------------
 node/Topology.hpp | 43 +++++++++++++++++++++++------------------
 3 files changed, 47 insertions(+), 37 deletions(-)

diff --git a/node/Network.cpp b/node/Network.cpp
index 3949066c..e11d7d1a 100644
--- a/node/Network.cpp
+++ b/node/Network.cpp
@@ -118,12 +118,13 @@ public:
 	AnnounceMulticastGroupsToPeersWithActiveDirectPaths(const RuntimeEnvironment *renv,Network *nw) :
 		RR(renv),
 		_now(Utils::now()),
-		_network(nw)
+		_network(nw),
+		_supernodeAddresses(renv->topology->supernodeAddresses())
 	{}
 
 	inline void operator()(Topology &t,const SharedPtr<Peer> &p)
 	{
-		if ( ( (p->hasActiveDirectPath(_now)) && (_network->isAllowed(p->address())) ) || (t.isSupernode(p->address())) ) {
+		if ( ( (p->hasActiveDirectPath(_now)) && (_network->isAllowed(p->address())) ) || (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),p->address()) != _supernodeAddresses.end()) ) {
 			Packet outp(p->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
 
 			std::set<MulticastGroup> mgs(_network->multicastGroups());
@@ -151,6 +152,7 @@ private:
 	const RuntimeEnvironment *RR;
 	uint64_t _now;
 	Network *_network;
+	std::vector<Address> _supernodeAddresses;
 };
 
 bool Network::rescanMulticastGroups()
@@ -527,6 +529,8 @@ void Network::threadMain()
 			t->setEnabled(_enabled);
 		}
 	}
+
+	rescanMulticastGroups();
 }
 
 void Network::_CBhandleTapData(void *arg,const MAC &from,const MAC &to,unsigned int etherType,const Buffer<4096> &data)
diff --git a/node/Topology.cpp b/node/Topology.cpp
index 586ce996..45300ed2 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -25,8 +25,6 @@
  * LLC. Start here: http://www.zerotier.com/
  */
 
-#include
-
 #include "Constants.hpp"
 #include "Defaults.hpp"
 #include "Topology.hpp"
@@ -51,7 +49,7 @@ Topology::~Topology()
 
 void Topology::setSupernodes(const std::map< Identity,std::vector< std::pair<InetAddress,bool> > > &sn)
 {
-	Mutex::Lock _l(_supernodes_m);
+	Mutex::Lock _l(_lock);
 
 	if (_supernodes == sn)
 		return; // no change
@@ -62,18 +60,20 @@ void Topology::setSupernodes(const std::map< Identity,std::vector< std::pair
 	for(std::map< Identity,std::vector< std::pair<InetAddress,bool> > >::const_iterator i(sn.begin());i!=sn.end();++i) {
-		if (i->first != RR->identity) {
-			SharedPtr<Peer> p(getPeer(i->first.address()));
+		if (i->first != RR->identity) { // do not add self as a peer
+			SharedPtr<Peer> &p = _activePeers[i->first.address()];
 			if (!p)
-				p = addPeer(SharedPtr<Peer>(new Peer(RR->identity,i->first)));
+				p = SharedPtr<Peer>(new Peer(RR->identity,i->first));
 			for(std::vector< std::pair<InetAddress,bool> >::const_iterator j(i->second.begin());j!=i->second.end();++j)
 				p->addPath(Path(j->first,(j->second) ? Path::PATH_TYPE_TCP_OUT : Path::PATH_TYPE_UDP,true));
 			p->use(now);
 			_supernodePeers.push_back(p);
 		}
-		_supernodeAddresses.insert(i->first.address());
+		_supernodeAddresses.push_back(i->first.address());
 	}
 
+	std::sort(_supernodeAddresses.begin(),_supernodeAddresses.end());
+
 	_amSupernode = (_supernodes.find(RR->identity) != _supernodes.end());
 }
 
@@ -107,7 +107,7 @@ SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
 	}
 
 	uint64_t now = Utils::now();
-	Mutex::Lock _l(_activePeers_m);
+	Mutex::Lock _l(_lock);
 	SharedPtr<Peer> p(_activePeers.insert(std::pair< Address,SharedPtr<Peer> >(peer->address(),peer)).first->second);
 	p->use(now);
 
@@ -124,7 +124,7 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
 	}
 
 	uint64_t now = Utils::now();
-	Mutex::Lock _l(_activePeers_m);
+	Mutex::Lock _l(_lock);
 
 	SharedPtr<Peer> &ap = _activePeers[zta];
 
@@ -151,7 +151,7 @@ SharedPtr<Peer> Topology::getBestSupernode(const Address *avoid,unsigned int avo
 {
 	SharedPtr<Peer> bestSupernode;
 	uint64_t now = Utils::now();
-	Mutex::Lock _l(_supernodes_m);
+	Mutex::Lock _l(_lock);
 
 	if (_amSupernode) {
 		/* If I am a supernode, the "best" supernode is the one whose address
 		 * is numerically greater than mine (with wrap at top of list). This
 		 * causes packets searching for a route to pretty much literally
 		 * circumnavigate the globe rather than bouncing between just two. */
 		if (_supernodeAddresses.size() > 1) { // gotta be one other than me for this to work
-			std::set<Address>::const_iterator sna(_supernodeAddresses.find(RR->identity.address()));
+			std::vector<Address>::const_iterator sna(std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),RR->identity.address()));
 			if (sna != _supernodeAddresses.end()) { // sanity check -- _amSupernode should've been false in this case
 				for(;;) {
 					if (++sna == _supernodeAddresses.end())
 						sna = _supernodeAddresses.begin(); // wrap around at end
 					if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
-						SharedPtr<Peer> p(getPeer(*sna));
-						if ((p)&&(p->hasActiveDirectPath(now))) {
-							bestSupernode = p;
+						std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.find(*sna));
+						if ((p != _activePeers.end())&&(p->second->hasActiveDirectPath(now))) {
+							bestSupernode = p->second;
 							break;
 						}
 					}
@@ -247,10 +247,9 @@ keep_searching_for_supernodes:
 
 void Topology::clean(uint64_t now)
 {
-	Mutex::Lock _l(_activePeers_m);
-	Mutex::Lock _l2(_supernodes_m);
+	Mutex::Lock _l(_lock);
 	for(std::map< Address,SharedPtr<Peer> >::iterator p(_activePeers.begin());p!=_activePeers.end();) {
-		if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(!_supernodeAddresses.count(p->second->address()))) {
+		if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),p->first) == _supernodeAddresses.end())) {
 			_activePeers.erase(p++);
 		} else {
 			p->second->clean(now);
diff --git a/node/Topology.hpp b/node/Topology.hpp
index f58d7486..e3e573ed 100644
--- a/node/Topology.hpp
+++ b/node/Topology.hpp
@@ -32,9 +32,9 @@
 #include
 #include
-#include
 #include
 #include
+#include
 
 #include "Constants.hpp"
@@ -102,7 +102,7 @@ public:
 	 */
 	inline std::vector< SharedPtr<Peer> > supernodePeers() const
 	{
-		Mutex::Lock _l(_supernodes_m);
+		Mutex::Lock _l(_lock);
 		return _supernodePeers;
 	}
 
@@ -111,7 +111,7 @@ public:
 	 */
 	inline unsigned int numSupernodes() const
 	{
-		Mutex::Lock _l(_supernodes_m);
+		Mutex::Lock _l(_lock);
 		return _supernodePeers.size();
 	}
 
@@ -146,16 +146,16 @@ public:
 	inline bool isSupernode(const Address &zta) const
 		throw()
 	{
-		Mutex::Lock _l(_supernodes_m);
-		return (_supernodeAddresses.count(zta) > 0);
+		Mutex::Lock _l(_lock);
+		return (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),zta) != _supernodeAddresses.end());
 	}
 
 	/**
-	 * @return Set of supernode addresses
+	 * @return Vector of supernode addresses
 	 */
-	inline std::set<Address> supernodeAddresses() const
+	inline std::vector<Address> supernodeAddresses() const
 	{
-		Mutex::Lock _l(_supernodes_m);
+		Mutex::Lock _l(_lock);
 		return _supernodeAddresses;
 	}
 
@@ -175,13 +175,17 @@ public:
 	 * Note: explicitly template this by reference if you want the object
 	 * passed by reference instead of copied.
 	 *
+	 * Warning: be careful not to use features in these that call any other
+	 * methods of Topology that may lock _lock, otherwise a recursive lock
+	 * and deadlock or lock corruption may occur.
+	 *
 	 * @param f Function to apply
 	 * @tparam F Function or function object type
 	 */
 	template<typename F>
	inline void eachPeer(F f)
 	{
-		Mutex::Lock _l(_activePeers_m);
+		Mutex::Lock _l(_lock);
 		for(std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.begin());p!=_activePeers.end();++p)
 			f(*this,p->second);
 	}
@@ -192,13 +196,17 @@ public:
 	 * Note: explicitly template this by reference if you want the object
 	 * passed by reference instead of copied.
 	 *
+	 * Warning: be careful not to use features in these that call any other
+	 * methods of Topology that may lock _lock, otherwise a recursive lock
+	 * and deadlock or lock corruption may occur.
+	 *
 	 * @param f Function to apply
 	 * @tparam F Function or function object type
 	 */
 	template<typename F>
	inline void eachSupernodePeer(F f)
 	{
-		Mutex::Lock _l(_supernodes_m);
+		Mutex::Lock _l(_lock);
 		for(std::vector< SharedPtr<Peer> >::const_iterator p(_supernodePeers.begin());p!=_supernodePeers.end();++p)
 			f(*this,*p);
 	}
@@ -227,7 +235,7 @@ public:
 		 *
 		 * Note that we measure ping time from time of last receive rather
 		 * than time of last send in order to only count full round trips. */
-		if ( (!_supernodeAddresses.count(p->address())) &&
+		if ( (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),p->address()) == _supernodeAddresses.end()) &&
 		     ((_now - p->lastFrame()) < ZT_PEER_PATH_ACTIVITY_TIMEOUT) &&
 		     ((_now - p->lastDirectReceive()) >= ZT_PEER_DIRECT_PING_DELAY) ) {
 			p->sendPing(RR,_now);
@@ -236,7 +244,7 @@ private:
 	uint64_t _now;
-	std::set<Address> _supernodeAddresses;
+	std::vector<Address> _supernodeAddresses;
 	const RuntimeEnvironment *RR;
 };
@@ -305,7 +313,7 @@ public:
 			Packet outp(p->address(),RR->identity.address(),Packet::VERB_NOP);
 			outp.armor(p->key(),false); // no need to encrypt a NOP
 
-			if (_supernodeAddresses.count(p->address())) {
+			if (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),p->address()) != _supernodeAddresses.end()) {
 				// Send NOP directly to supernodes
 				p->send(RR,outp.data(),outp.size(),_now);
 			} else {
@@ -320,7 +328,7 @@ public:
 private:
 	uint64_t _now;
 	SharedPtr<Peer> _supernode;
-	std::set<Address> _supernodeAddresses;
+	std::vector<Address> _supernodeAddresses;
 	const RuntimeEnvironment *RR;
 };
 
@@ -362,12 +370,11 @@ private:
 	std::string _idCacheBase;
 
 	std::map< Address,SharedPtr<Peer> > _activePeers;
-	Mutex _activePeers_m;
-
 	std::map< Identity,std::vector< std::pair<InetAddress,bool> > > _supernodes;
-	std::set< Address > _supernodeAddresses;
+	std::vector< Address > _supernodeAddresses;
 	std::vector< SharedPtr<Peer> > _supernodePeers;
-	Mutex _supernodes_m;
+
+	Mutex _lock;
 
 	// Set to true if my identity is in _supernodes
 	volatile bool _amSupernode;
-- 
cgit v1.2.3
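
The core of this change is that Topology's two mutexes (_activePeers_m and _supernodes_m) are collapsed into a single non-recursive _lock, and anything run by eachPeer()/eachSupernodePeer() must no longer call back into Topology (which would re-acquire _lock). Instead, callers such as AnnounceMulticastGroupsToPeersWithActiveDirectPaths copy supernodeAddresses() in their constructor and test membership against that snapshot with std::find. The sketch below illustrates the same pattern outside of ZeroTier; the Registry class and its add()/snapshot()/forEach() methods are invented for illustration and are not part of the ZeroTier code base.

#include <algorithm>
#include <cstdint>
#include <mutex>
#include <vector>

// Hypothetical stand-in for Topology: a small registry guarded by one
// non-recursive mutex. Calling query methods from inside forEach()'s
// callback would re-lock the same mutex and deadlock -- the hazard this
// commit removes from Topology.
class Registry {
public:
	void add(uint64_t id) {
		std::lock_guard<std::mutex> l(_lock);
		_ids.push_back(id);
		std::sort(_ids.begin(), _ids.end()); // keep sorted, like _supernodeAddresses
	}

	// Returns a copy so the caller can consult it without holding _lock.
	std::vector<uint64_t> snapshot() const {
		std::lock_guard<std::mutex> l(_lock);
		return _ids;
	}

	// Applies f to each id while _lock is held; f must not call back into
	// Registry (mirrors the warning added to eachPeer()/eachSupernodePeer()).
	template <typename F>
	void forEach(F f) const {
		std::lock_guard<std::mutex> l(_lock);
		for (uint64_t id : _ids)
			f(id);
	}

private:
	std::vector<uint64_t> _ids;
	mutable std::mutex _lock;
};

int main() {
	Registry reg;
	reg.add(3);
	reg.add(1);
	reg.add(2);

	// Safe pattern used by the patch: take a snapshot first, then iterate
	// and do membership checks against the snapshot, never against reg.
	const std::vector<uint64_t> special = reg.snapshot();
	reg.forEach([&](uint64_t id) {
		const bool isSpecial =
			std::find(special.begin(), special.end(), id) != special.end();
		(void)isSpecial; // e.g. choose a different send path here
	});
	return 0;
}

Copying the address list costs one small vector copy per call, which is cheap for a handful of supernode addresses, and a linear std::find over a short sorted vector is simpler than keeping a std::set. More importantly, the callback never touches the locked object, so the single coarse _lock cannot be re-entered.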