| author | Adam Ierymenko <adam.ierymenko@gmail.com> | 2015-12-17 10:53:07 -0800 |
|---|---|---|
| committer | Adam Ierymenko <adam.ierymenko@gmail.com> | 2015-12-17 10:53:07 -0800 |
| commit | 2160164e8c20d9e5cb5fb266ca69c040b7881252 (patch) | |
| tree | bec468a0ccb80633c69c67c6041cbf09b5bd4495 /node/Peer.cpp | |
| parent | 3137f43da9289d65f47249535233afab5d7f6c68 (diff) | |
| download | infinitytier-2160164e8c20d9e5cb5fb266ca69c040b7881252.tar.gz infinitytier-2160164e8c20d9e5cb5fb266ca69c040b7881252.zip | |
(1) Get rid of path sorting and just scan the paths instead, since sorting may have been a premature optimization that introduced a regression and path instability in a few edge cases. (2) Do not attempt to contact remote paths received via PUSH_DIRECT_PATH if we already have that path and it is already active (dumb, should have done this originally).
Diffstat (limited to 'node/Peer.cpp')
| -rw-r--r-- | node/Peer.cpp | 60 |
|---|---|---|
1 file changed, 17 insertions(+), 43 deletions(-)
```diff
diff --git a/node/Peer.cpp b/node/Peer.cpp
index 31e9c27d..891827b4 100644
--- a/node/Peer.cpp
+++ b/node/Peer.cpp
@@ -182,7 +182,6 @@ void Peer::received(
 		slot->setClusterSuboptimal(suboptimalPath);
 #endif
 		_numPaths = np;
-		_sortPaths(now);
 	}
 
 #ifdef ZT_ENABLE_CLUSTER
@@ -362,7 +361,6 @@ bool Peer::resetWithinScope(const RuntimeEnvironment *RR,InetAddress::IpScope sc
 		++x;
 	}
 	_numPaths = y;
-	_sortPaths(now);
 	return (y < np);
 }
 
@@ -501,58 +499,34 @@ void Peer::clean(const RuntimeEnvironment *RR,uint64_t now)
 	}
 }
 
-struct _SortPathsByQuality
-{
-	uint64_t _now;
-	_SortPathsByQuality(const uint64_t now) : _now(now) {}
-	inline bool operator()(const Path &a,const Path &b) const
-	{
-		const uint64_t qa = (
-			((uint64_t)a.active(_now) << 63) |
-			(((uint64_t)(a.preferenceRank() & 0xfff)) << 51) |
-			((uint64_t)a.lastReceived() & 0x7ffffffffffffULL) );
-		const uint64_t qb = (
-			((uint64_t)b.active(_now) << 63) |
-			(((uint64_t)(b.preferenceRank() & 0xfff)) << 51) |
-			((uint64_t)b.lastReceived() & 0x7ffffffffffffULL) );
-		return (qb < qa); // invert sense to sort in descending order
-	}
-};
-void Peer::_sortPaths(const uint64_t now)
-{
-	// assumes _lock is locked
-	_lastPathSort = now;
-	std::sort(&(_paths[0]),&(_paths[_numPaths]),_SortPathsByQuality(now));
-}
-
 Path *Peer::_getBestPath(const uint64_t now)
 {
 	// assumes _lock is locked
-	if ((now - _lastPathSort) >= ZT_PEER_PATH_SORT_INTERVAL)
-		_sortPaths(now);
-	if (_paths[0].active(now)) {
-		return &(_paths[0]);
-	} else {
-		_sortPaths(now);
-		if (_paths[0].active(now))
-			return &(_paths[0]);
+	Path *bestPath = (Path *)0;
+	uint64_t bestPathScore = 0;
+	for(unsigned int i=0;i<_numPaths;++i) {
+		const uint64_t score = _paths[i].score();
+		if ((score >= bestPathScore)&&(_paths[i].active(now))) {
+			bestPathScore = score;
+			bestPath = &(_paths[i]);
+		}
 	}
-	return (Path *)0;
+	return bestPath;
 }
 
 Path *Peer::_getBestPath(const uint64_t now,int inetAddressFamily)
 {
 	// assumes _lock is locked
-	if ((now - _lastPathSort) >= ZT_PEER_PATH_SORT_INTERVAL)
-		_sortPaths(now);
-	for(int k=0;k<2;++k) { // try once, and if it fails sort and try one more time
-		for(unsigned int i=0;i<_numPaths;++i) {
-			if ((_paths[i].active(now))&&((int)_paths[i].address().ss_family == inetAddressFamily))
-				return &(_paths[i]);
+	Path *bestPath = (Path *)0;
+	uint64_t bestPathScore = 0;
+	for(unsigned int i=0;i<_numPaths;++i) {
+		const uint64_t score = _paths[i].score();
+		if (((int)_paths[i].address().ss_family == inetAddressFamily)&&(score >= bestPathScore)&&(_paths[i].active(now))) {
+			bestPathScore = score;
+			bestPath = &(_paths[i]);
 		}
-		_sortPaths(now);
 	}
-	return (Path *)0;
+	return bestPath;
 }
 
 } // namespace ZeroTier
```
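The removed comparator packed each path's state into a single 64-bit sort key (active flag in bit 63, a 12-bit preference rank in bits 51-62, and the last-receive timestamp masked to the low 51 bits) and periodically re-sorted the path array; the replacement takes one linear pass and keeps the highest-scoring path that is currently active, never reordering the array. Below is a minimal standalone sketch of that scan, assuming nothing beyond what the diff shows: `PathStub` and its fields are hypothetical stand-ins for the real `Path`, `Path::score()`, and `Path::active()`.

```cpp
// Hedged sketch of the scan-based selection in the new Peer::_getBestPath().
// PathStub is a hypothetical stand-in, not the real ZeroTier Path class.
#include <cstdint>
#include <cstdio>

struct PathStub {
	uint64_t scoreValue; // stand-in for whatever Path::score() computes
	bool activeFlag;     // stand-in for Path::active(now)

	uint64_t score() const { return scoreValue; }
	bool active(uint64_t /*now*/) const { return activeFlag; }
};

// Same shape as the diff's new logic: one pass over the array, remembering
// the live path with the highest score. No sort, so array order is stable.
static PathStub *getBestPath(PathStub *paths,unsigned int numPaths,uint64_t now)
{
	PathStub *bestPath = nullptr;
	uint64_t bestPathScore = 0;
	for(unsigned int i=0;i<numPaths;++i) {
		const uint64_t score = paths[i].score();
		if ((score >= bestPathScore)&&(paths[i].active(now))) {
			bestPathScore = score;
			bestPath = &(paths[i]);
		}
	}
	return bestPath;
}

int main()
{
	PathStub paths[3] = {
		{ 10, true  },  // active, low score
		{ 99, false },  // highest score but inactive -> skipped
		{ 42, true  }   // best live score -> selected
	};
	PathStub *best = getBestPath(paths,3,0);
	std::printf("best score: %llu\n",(unsigned long long)(best ? best->score() : 0));
	return 0;
}
```

Two side effects of this shape are worth noting: the `>=` comparison means a later array entry wins a score tie, and because nothing is ever reordered, a path's index stays fixed across calls, which appears to be the path-instability issue the commit message mentions.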
