From 3947807b1ffc844f62eeec7dd0fe552d280fe807 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Thu, 27 Aug 2015 15:36:13 -0700
Subject: A simple and fast Hashtable, tested but not yet integrated with anything.

---
 node/Hashtable.hpp | 252 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 252 insertions(+)
 create mode 100644 node/Hashtable.hpp

diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
new file mode 100644
index 00000000..bba1fa2b
--- /dev/null
+++ b/node/Hashtable.hpp
@@ -0,0 +1,252 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ *
+ */
+
+#ifndef ZT_HASHTABLE_HPP
+#define ZT_HASHTABLE_HPP
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <stdexcept>
+
+namespace ZeroTier {
+
+/**
+ * A minimal hash table implementation for the ZeroTier core
+ *
+ * This is not a drop-in replacement for STL containers, and has several
+ * limitations. It's designed to be small and fast for use in the
+ * ZeroTier core.
+ *
+ * Pairs of values can also be used as a key. In this case the first and
+ * second element of the pair's hash codes are XORed.
+ */
+template<typename K,typename V>
+class Hashtable
+{
+private:
+    struct _Bucket
+    {
+        _Bucket(const K &k,const V &v) :
+            k(k),
+            v(v) {}
+        _Bucket *next;
+        K k;
+        V v;
+    };
+
+public:
+    /**
+     * A simple forward iterator (different from STL)
+     *
+     * It's safe to erase the last key, but not others. Don't use set() since that
+     * may rehash and invalidate the iterator. Note that erasing the key will destroy
+     * the targets of the pointers returned by next().
+     */
+    class Iterator
+    {
+    public:
+        /**
+         * @param ht Hash table to iterate over
+         */
+        Iterator(Hashtable &ht) :
+            _idx(0),
+            _ht(&ht),
+            _b(ht._t[0])
+        {
+        }
+
+        /**
+         * @param kptr Pointer to set to point to next key
+         * @param vptr Pointer to set to point to next value
+         * @return True if kptr and vptr are set, false if no more entries
+         */
+        inline bool next(K *&kptr,V *&vptr)
+        {
+            for(;;) {
+                if (_b) {
+                    kptr = &(_b->k);
+                    vptr = &(_b->v);
+                    _b = _b->next;
+                    return true;
+                }
+                ++_idx;
+                if (_idx >= _ht->_bc)
+                    return false;
+                _b = _ht->_t[_idx];
+            }
+        }
+
+    private:
+        unsigned long _idx;
+        Hashtable *_ht;
+        Hashtable::_Bucket *_b;
+    };
+    friend class Hashtable::Iterator;
+
+    /**
+     * @param bc Initial capacity in buckets (default: 128, must be nonzero)
+     */
+    Hashtable(unsigned long bc = 128) :
+        _t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * bc))),
+        _bc(bc),
+        _s(0)
+    {
+        if (!_t)
+            throw std::bad_alloc();
+        for(unsigned long i=0;i<bc;++i)
+            _t[i] = (_Bucket *)0;
+    }
+
+    ~Hashtable()
+    {
+        clear();
+        ::free(_t);
+    }
+
+    /**
+     * Erase all entries
+     */
+    inline void clear()
+    {
+        if (_s) {
+            for(unsigned long i=0;i<_bc;++i) {
+                _Bucket *b = _t[i];
+                while (b) {
+                    _Bucket *const nb = b->next;
+                    delete b;
+                    b = nb;
+                }
+                _t[i] = (_Bucket *)0;
+            }
+            _s = 0;
+        }
+    }
+
+    /**
+     * @param k Key
+     * @return Pointer to value or NULL if not found
+     */
+    inline V *get(const K &k)
+    {
+        _Bucket *b = _t[_hc(k) % _bc];
+        while (b) {
+            if (b->k == k)
+                return &(b->v);
+            b = b->next;
+        }
+        return (V *)0;
+    }
+    inline const V *get(const K &k) const { return const_cast<Hashtable *>(this)->get(k); }
+
+    /**
+     * @param k Key
+     * @return True if value was present
+     */
+    inline bool erase(const K &k)
+    {
+        const unsigned long bidx = _hc(k) % _bc;
+        _Bucket *lastb = (_Bucket *)0;
+        _Bucket *b = _t[bidx];
+        while (b) {
+            if (b->k == k) {
+                if (lastb)
+                    lastb->next = b->next;
+                else _t[bidx] = b->next;
+                delete b;
+                --_s;
+                return true;
+            }
+            lastb = b;
+            b = b->next;
+        }
+        return false;
+    }
+
+    /**
+     * @param k Key
+     * @param v Value
+     */
+    inline void set(const K &k,const V &v)
+    {
+        if (_s >= _bc) {
+            const unsigned long nc = _bc * 2;
+            _Bucket **nt = reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * nc));
+            if (nt) {
+                for(unsigned long i=0;i<nc;++i)
+                    nt[i] = (_Bucket *)0;
+                for(unsigned long i=0;i<_bc;++i) {
+                    _Bucket *b = _t[i];
+                    while (b) {
+                        _Bucket *const nb = b->next;
+                        const unsigned long nidx = _hc(b->k) % nc;
+                        b->next = nt[nidx];
+                        nt[nidx] = b;
+                        b = nb;
+                    }
+                }
+                ::free(_t);
+                _t = nt;
+                _bc = nc;
+            }
+        }
+        const unsigned long bidx = _hc(k) % _bc;
+        _Bucket *const b = new _Bucket(k,v);
+        b->next = _t[bidx];
+        _t[bidx] = b;
+        ++_s;
+    }
+
+    /**
+     * @return Number of entries
+     */
+    inline unsigned long size() const throw() { return _s; }
+
+    /**
+     * @return True if table is empty
+     */
+    inline bool empty() const throw() { return (_s == 0); }
+
+private:
+    template<typename O>
+    static inline unsigned long _hc(const O &obj)
+    {
+        return obj.hashCode();
+    }
+    static inline unsigned long _hc(const uint64_t i)
+    {
+        // NOTE: this is fine for network IDs, but might be bad for other kinds
+        // of IDs if they are not evenly or randomly distributed.
+        return (unsigned long)((i ^ (i >> 32)) * 2654435761ULL);
+    }
+
+    _Bucket **_t;
+    unsigned long _bc;
+    unsigned long _s;
+};
+
+} // namespace ZeroTier
+
+#endif
--
cgit v1.2.3
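A usage sketch for reference (not part of any patch in this log): it exercises set(), get(), erase() and the forward Iterator exactly as defined in this first commit. The main() harness and the include path are illustrative assumptions, not repository code.

    // Sketch: basic use of the commit-1 API with uint64_t keys.
    #include "node/Hashtable.hpp" // path assumed relative to repo root

    #include <cstdio>

    int main()
    {
        ZeroTier::Hashtable<uint64_t,unsigned int> ht(128); // 128 initial buckets

        ht.set(0x1122334455667788ULL,1);
        ht.set(0x8877665544332211ULL,2);

        unsigned int *v = ht.get(0x1122334455667788ULL);
        if (v)
            printf("got %u\n",*v); // -> got 1

        // Forward iteration; next() returns false when the table is exhausted.
        ZeroTier::Hashtable<uint64_t,unsigned int>::Iterator i(ht);
        uint64_t *k = (uint64_t *)0;
        unsigned int *val = (unsigned int *)0;
        while (i.next(k,val))
            printf("%llx -> %u\n",(unsigned long long)*k,*val);

        ht.erase(0x8877665544332211ULL);
        printf("size: %lu\n",ht.size()); // -> size: 1

        return 0;
    }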
From b11ffc9635204f11daac4f20596dc4e3da687eee Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Thu, 27 Aug 2015 16:17:21 -0700
Subject: Integrate Hashtable into Multicaster, where @mwarning found heaviest std::map() overhead.

---
 node/Hashtable.hpp      | 97 ++++++++++++++++++++++++++++++++++---------------
 node/MAC.hpp            |  2 +
 node/MulticastGroup.hpp |  2 +
 node/Multicaster.cpp    | 56 +++++++++++++++------------
 node/Multicaster.hpp    | 17 ++++++++-
 5 files changed, 117 insertions(+), 57 deletions(-)

diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index bba1fa2b..c997d54f 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -19,7 +19,6 @@
  *
  * ZeroTier may be used and distributed under the terms of the GPLv3, which
  * are available at: http://www.gnu.org/licenses/gpl-3.0.html
- *
  */
 
 #ifndef ZT_HASHTABLE_HPP
@@ -39,9 +38,6 @@ namespace ZeroTier {
  * This is not a drop-in replacement for STL containers, and has several
  * limitations. It's designed to be small and fast for use in the
  * ZeroTier core.
- *
- * Pairs of values can also be used as a key. In this case the first and
- * second element of the pair's hash codes are XORed.
  */
 template<typename K,typename V>
 class Hashtable
@@ -49,12 +45,11 @@ class Hashtable
 private:
     struct _Bucket
     {
-        _Bucket(const K &k,const V &v) :
-            k(k),
-            v(v) {}
-        _Bucket *next;
+        _Bucket(const K &k,const V &v) : k(k),v(v) {}
+        _Bucket(const K &k) : k(k),v() {}
         K k;
         V v;
+        _Bucket *next;
     };
 
 public:
@@ -188,35 +183,56 @@ public:
     /**
      * @param k Key
      * @param v Value
+     * @return Reference to value in table
      */
-    inline void set(const K &k,const V &v)
+    inline V &set(const K &k,const V &v)
     {
-        if (_s >= _bc) {
-            const unsigned long nc = _bc * 2;
-            _Bucket **nt = reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * nc));
-            if (nt) {
-                for(unsigned long i=0;i<nc;++i)
-                    nt[i] = (_Bucket *)0;
-                for(unsigned long i=0;i<_bc;++i) {
-                    _Bucket *b = _t[i];
-                    while (b) {
-                        _Bucket *const nb = b->next;
-                        const unsigned long nidx = _hc(b->k) % nc;
-                        b->next = nt[nidx];
-                        nt[nidx] = b;
-                        b = nb;
-                    }
-                }
-                ::free(_t);
-                _t = nt;
-                _bc = nc;
+        const unsigned long bidx = _hc(k) % _bc;
+
+        _Bucket *b = _t[bidx];
+        while (b) {
+            if (b->k == k) {
+                b->v = v;
+                return b->v;
             }
+            b = b->next;
         }
+
+        if (_s >= _bc)
+            _grow();
+
+        b = new _Bucket(k,v);
+        b->next = _t[bidx];
+        _t[bidx] = b;
+        ++_s;
+
+        return b->v;
+    }
+
+    /**
+     * @param k Key
+     * @return Value, possibly newly created
+     */
+    inline V &operator[](const K &k)
+    {
         const unsigned long bidx = _hc(k) % _bc;
-        _Bucket *const b = new _Bucket(k,v);
+
+        _Bucket *b = _t[bidx];
+        while (b) {
+            if (b->k == k)
+                return b->v;
+            b = b->next;
+        }
+
+        if (_s >= _bc)
+            _grow();
+
+        b = new _Bucket(k);
         b->next = _t[bidx];
         _t[bidx] = b;
         ++_s;
+
+        return b->v;
     }
 
     /**
@@ -242,6 +258,29 @@ private:
         return (unsigned long)((i ^ (i >> 32)) * 2654435761ULL);
     }
 
+    inline void _grow()
+    {
+        const unsigned long nc = _bc * 2;
+        _Bucket **nt = reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * nc));
+        if (nt) {
+            for(unsigned long i=0;i<nc;++i)
+                nt[i] = (_Bucket *)0;
+            for(unsigned long i=0;i<_bc;++i) {
+                _Bucket *b = _t[i];
+                while (b) {
+                    _Bucket *const nb = b->next;
+                    const unsigned long nidx = _hc(b->k) % nc;
+                    b->next = nt[nidx];
+                    nt[nidx] = b;
+                    b = nb;
+                }
+            }
+            ::free(_t);
+            _t = nt;
+            _bc = nc;
+        }
+    }
+
     _Bucket **_t;
     unsigned long _bc;
     unsigned long _s;
diff --git a/node/MAC.hpp b/node/MAC.hpp
index 442a7a2e..619b7195 100644
--- a/node/MAC.hpp
+++ b/node/MAC.hpp
@@ -242,6 +242,8 @@ public:
      */
     inline unsigned int size() const throw() { return 6; }
 
+    inline unsigned long hashCode() const throw() { return (unsigned long)_m; }
+
     inline MAC &operator=(const MAC &m)
         throw()
     {
diff --git a/node/MulticastGroup.hpp b/node/MulticastGroup.hpp
index 61fb55f2..fad433b5 100644
--- a/node/MulticastGroup.hpp
+++ b/node/MulticastGroup.hpp
@@ -141,6 +141,8 @@ public:
      */
     inline uint32_t adi() const throw() { return _adi; }
 
+    inline unsigned long hashCode() const throw() { return (_mac.hashCode() ^ (unsigned long)_adi); }
+
     inline bool operator==(const MulticastGroup &g) const throw() { return ((_mac == g._mac)&&(_adi == g._adi)); }
     inline bool operator!=(const MulticastGroup &g) const throw() { return ((_mac != g._mac)||(_adi != g._adi)); }
     inline bool operator<(const MulticastGroup &g) const throw()
diff --git a/node/Multicaster.cpp b/node/Multicaster.cpp
index 489c170b..07792737 100644
--- a/node/Multicaster.cpp
+++ b/node/Multicaster.cpp
@@ -41,7 +41,9 @@
 namespace ZeroTier {
 
 Multicaster::Multicaster(const RuntimeEnvironment *renv) :
-    RR(renv)
+    RR(renv),
+    _groups(1024),
+    _groups_m()
 {
 }
 
@@ -54,7 +56,7 @@ void Multicaster::addMultiple(uint64_t now,uint64_t nwid,const MulticastGroup &m
     const unsigned char *p = (const unsigned char *)addresses;
     const unsigned char *e = p + (5 * count);
     Mutex::Lock _l(_groups_m);
-    MulticastGroupStatus &gs = _groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)];
+    MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];
     while (p != e) {
         _add(now,nwid,mg,gs,Address(p,5));
         p += 5;
@@ -64,11 +66,11 @@ void Multicaster::addMultiple(uint64_t now,uint64_t nwid,const MulticastGroup &m
 void Multicaster::remove(uint64_t nwid,const MulticastGroup &mg,const Address &member)
 {
     Mutex::Lock _l(_groups_m);
-    std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::iterator g(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
-    if (g != _groups.end()) {
-        for(std::vector<MulticastGroupMember>::iterator m(g->second.members.begin());m!=g->second.members.end();++m) {
+    MulticastGroupStatus *s = _groups.get(Multicaster::Key(nwid,mg));
+    if (s) {
+        for(std::vector<MulticastGroupMember>::iterator m(s->members.begin());m!=s->members.end();++m) {
             if (m->address == member) {
-                g->second.members.erase(m);
+                s->members.erase(m);
                 break;
             }
         }
@@ -102,18 +104,18 @@ unsigned int Multicaster::gather(const Address &queryingPeer,uint64_t nwid,const
 
     Mutex::Lock _l(_groups_m);
 
-    std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::const_iterator gs(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
-    if ((gs != _groups.end())&&(!gs->second.members.empty())) {
-        totalKnown += (unsigned int)gs->second.members.size();
+    const MulticastGroupStatus *s = _groups.get(Multicaster::Key(nwid,mg));
+    if ((s)&&(!s->members.empty())) {
+        totalKnown += (unsigned int)s->members.size();
 
         // Members are returned in random order so that repeated gather queries
         // will return different subsets of a large multicast group.
         k = 0;
-        while ((added < limit)&&(k < gs->second.members.size())&&((appendTo.size() + ZT_ADDRESS_LENGTH) <= ZT_UDP_DEFAULT_PAYLOAD_MTU)) {
+        while ((added < limit)&&(k < s->members.size())&&((appendTo.size() + ZT_ADDRESS_LENGTH) <= ZT_UDP_DEFAULT_PAYLOAD_MTU)) {
             rptr = (unsigned int)RR->node->prng();
 
 restart_member_scan:
-            a = gs->second.members[rptr % (unsigned int)gs->second.members.size()].address.toInt();
+            a = s->members[rptr % (unsigned int)s->members.size()].address.toInt();
             for(i=0;i<k;++i) {
                 if (picked[i] == a) {
                     ++rptr;
@@ -129,7 +131,7 @@ std::vector<Address> Multicaster::getMembers(uint64_t nwid,const MulticastGroup &mg,unsigned int limit) const
 {
     std::vector<Address> ls;
     Mutex::Lock _l(_groups_m);
-    std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::const_iterator gs(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
-    if (gs == _groups.end())
+    const MulticastGroupStatus *s = _groups.get(Multicaster::Key(nwid,mg));
+    if (!s)
         return ls;
-    for(std::vector<MulticastGroupMember>::const_reverse_iterator m(gs->second.members.rbegin());m!=gs->second.members.rend();++m) {
+    for(std::vector<MulticastGroupMember>::const_reverse_iterator m(s->members.rbegin());m!=s->members.rend();++m) {
         ls.push_back(m->address);
         if (ls.size() >= limit)
             break;
@@ -173,7 +175,7 @@ void Multicaster::send(
     unsigned long *indexes = idxbuf;
 
     Mutex::Lock _l(_groups_m);
-    MulticastGroupStatus &gs = _groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)];
+    MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];
 
     if (!gs.members.empty()) {
         // Allocate a memory buffer if group is monstrous
@@ -291,18 +293,22 @@ void Multicaster::send(
 void Multicaster::clean(uint64_t now)
 {
     Mutex::Lock _l(_groups_m);
-    for(std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::iterator mm(_groups.begin());mm!=_groups.end();) {
-        for(std::list<OutboundMulticast>::iterator tx(mm->second.txQueue.begin());tx!=mm->second.txQueue.end();) {
+
+    Multicaster::Key *k = (Multicaster::Key *)0;
+    MulticastGroupStatus *s = (MulticastGroupStatus *)0;
+    Hashtable<Multicaster::Key,MulticastGroupStatus>::Iterator mm(_groups);
+    while (mm.next(k,s)) {
+        for(std::list<OutboundMulticast>::iterator tx(s->txQueue.begin());tx!=s->txQueue.end();) {
             if ((tx->expired(now))||(tx->atLimit()))
-                mm->second.txQueue.erase(tx++);
+                s->txQueue.erase(tx++);
             else ++tx;
         }
 
         unsigned long count = 0;
         {
-            std::vector<MulticastGroupMember>::iterator reader(mm->second.members.begin());
+            std::vector<MulticastGroupMember>::iterator reader(s->members.begin());
             std::vector<MulticastGroupMember>::iterator writer(reader);
-            while (reader != mm->second.members.end()) {
+            while (reader != s->members.end()) {
                 if ((now - reader->timestamp) < ZT_MULTICAST_LIKE_EXPIRE) {
                     *writer = *reader;
                     ++writer;
@@ -313,13 +319,11 @@ void Multicaster::clean(uint64_t now)
         }
 
         if (count) {
-            mm->second.members.resize(count);
-            ++mm;
-        } else if (mm->second.txQueue.empty()) {
-            _groups.erase(mm++);
+            s->members.resize(count);
+        } else if (s->txQueue.empty()) {
+            _groups.erase(*k);
         } else {
-            mm->second.members.clear();
-            ++mm;
+            s->members.clear();
         }
     }
 }
diff --git a/node/Multicaster.hpp b/node/Multicaster.hpp
index 0dd199f9..898c4db7 100644
--- a/node/Multicaster.hpp
+++ b/node/Multicaster.hpp
@@ -36,6 +36,7 @@
 #include <list>
 
 #include "Constants.hpp"
+#include "Hashtable.hpp"
 #include "Address.hpp"
 #include "MAC.hpp"
 #include "MulticastGroup.hpp"
@@ -56,6 +57,18 @@ class Packet;
 class Multicaster : NonCopyable
 {
 private:
+    struct Key
+    {
+        Key() : nwid(0),mg() {}
+        Key(uint64_t n,const MulticastGroup &g) : nwid(n),mg(g) {}
+
+        uint64_t nwid;
+        MulticastGroup mg;
+
+        inline bool operator==(const Key &k) const throw() { return ((nwid == k.nwid)&&(mg == k.mg)); }
+        inline unsigned long hashCode() const throw() { return (mg.hashCode() ^ (unsigned long)(nwid ^ (nwid >> 32))); }
+    };
+
     struct MulticastGroupMember
     {
         MulticastGroupMember() {}
@@ -89,7 +102,7 @@ public:
     inline void add(uint64_t now,uint64_t nwid,const MulticastGroup &mg,const Address &member)
     {
         Mutex::Lock _l(_groups_m);
-        _add(now,nwid,mg,_groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)],member);
+        _add(now,nwid,mg,_groups[Multicaster::Key(nwid,mg)],member);
     }
 
     /**
@@ -181,7 +194,7 @@ private:
     void _add(uint64_t now,uint64_t nwid,const MulticastGroup &mg,MulticastGroupStatus &gs,const Address &member);
 
     const RuntimeEnvironment *RR;
-    std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus > _groups;
+    Hashtable<Multicaster::Key,MulticastGroupStatus> _groups;
     Mutex _groups_m;
 };
--
cgit v1.2.3
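A sketch of the key convention this commit relies on (not part of the patch): any struct used as a key needs operator== and a hashCode() method, as Multicaster::Key shows above. The GroupKey type and main() harness below are illustrative inventions.

    // Sketch: a composite struct key, modeled on Multicaster::Key.
    #include "node/Hashtable.hpp" // path assumed relative to repo root

    #include <cstdio>
    #include <stdint.h>

    struct GroupKey
    {
        GroupKey() : nwid(0),adi(0) {}
        GroupKey(uint64_t n,uint32_t a) : nwid(n),adi(a) {}

        uint64_t nwid; // network ID
        uint32_t adi;  // additional distinguishing information

        inline bool operator==(const GroupKey &k) const throw() { return ((nwid == k.nwid)&&(adi == k.adi)); }
        inline unsigned long hashCode() const throw() { return ((unsigned long)adi ^ (unsigned long)(nwid ^ (nwid >> 32))); }
    };

    int main()
    {
        ZeroTier::Hashtable<GroupKey,int> groups(1024);
        groups.set(GroupKey(0x8056c2e21c000001ULL,0),7);
        int *v = groups.get(GroupKey(0x8056c2e21c000001ULL,0));
        printf("%d\n",(v) ? *v : -1); // -> 7
        return 0;
    }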
From da9a720c3fc2d69e35f393fbb96a716599ac0a6f Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Thu, 3 Sep 2015 17:33:06 -0700
Subject: Hash table bug fix, and add copy constructor and assignment operator for principle of least surprise.

---
 node/Hashtable.hpp | 74 +++++++++++++++++++++++++++++++++++++++++++++++++-----
 selftest.cpp       | 39 ++++++++++++++++++++++++----
 2 files changed, 102 insertions(+), 11 deletions(-)

diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index c997d54f..5076751d 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -29,6 +29,7 @@
 #include <stdlib.h>
 
 #include <stdexcept>
+#include <vector>
 
 namespace ZeroTier {
 
@@ -47,9 +48,11 @@ private:
     {
         _Bucket(const K &k,const V &v) : k(k),v(v) {}
         _Bucket(const K &k) : k(k),v() {}
+        _Bucket(const _Bucket &b) : k(b.k),v(b.v) {}
+        inline _Bucket &operator=(const _Bucket &b) { k = b.k; v = b.v; return *this; }
         K k;
         V v;
-        _Bucket *next;
+        _Bucket *next; // must be set manually for each _Bucket
     };
 
 public:
@@ -115,12 +118,47 @@ public:
             _t[i] = (_Bucket *)0;
     }
 
+    Hashtable(const Hashtable &ht) :
+        _t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * ht._bc))),
+        _bc(ht._bc),
+        _s(ht._s)
+    {
+        if (!_t)
+            throw std::bad_alloc();
+        for(unsigned long i=0;i<_bc;++i)
+            _t[i] = (_Bucket *)0;
+        for(unsigned long i=0;i<_bc;++i) {
+            const _Bucket *b = ht._t[i];
+            while (b) {
+                _Bucket *nb = new _Bucket(*b);
+                nb->next = _t[i];
+                _t[i] = nb;
+                b = b->next;
+            }
+        }
+    }
+
     ~Hashtable()
     {
-        clear();
+        this->clear();
         ::free(_t);
     }
 
+    inline Hashtable &operator=(const Hashtable &ht)
+    {
+        this->clear();
+        if (ht._s) {
+            for(unsigned long i=0;i<ht._bc;++i) {
+                const _Bucket *b = ht._t[i];
+                while (b) {
+                    this->set(b->k,b->v);
+                    b = b->next;
+                }
+            }
+        }
+        return *this;
+    }
+
     /**
      * Erase all entries
      */
@@ -140,6 +178,24 @@
         }
     }
 
+    /**
+     * @return Vector of all keys
+     */
+    inline typename std::vector<K> keys()
+    {
+        typename std::vector<K> k;
+        if (_s) {
+            for(unsigned long i=0;i<_bc;++i) {
+                _Bucket *b = _t[i];
+                while (b) {
+                    k.push_back(b->k);
+                    b = b->next;
+                }
+            }
+        }
+        return k;
+    }
+
     /**
      * @param k Key
      * @return Pointer to value or NULL if not found
@@ -187,7 +243,8 @@ public:
      */
     inline V &set(const K &k,const V &v)
     {
-        const unsigned long bidx = _hc(k) % _bc;
+        const unsigned long h = _hc(k);
+        unsigned long bidx = h % _bc;
 
         _Bucket *b = _t[bidx];
         while (b) {
@@ -198,8 +255,10 @@ public:
             b = b->next;
         }
 
-        if (_s >= _bc)
+        if (_s >= _bc) {
             _grow();
+            bidx = h % _bc;
+        }
 
         b = new _Bucket(k,v);
         b->next = _t[bidx];
@@ -215,7 +274,8 @@ public:
      */
     inline V &operator[](const K &k)
     {
-        const unsigned long bidx = _hc(k) % _bc;
+        const unsigned long h = _hc(k);
+        unsigned long bidx = h % _bc;
 
         _Bucket *b = _t[bidx];
         while (b) {
@@ -224,8 +284,10 @@ public:
             b = b->next;
         }
 
-        if (_s >= _bc)
+        if (_s >= _bc) {
             _grow();
+            bidx = h % _bc;
+        }
 
         b = new _Bucket(k);
         b->next = _t[bidx];
diff --git a/selftest.cpp b/selftest.cpp
index 15afe52e..5e3b620b 100644
--- a/selftest.cpp
+++ b/selftest.cpp
@@ -581,31 +581,60 @@ static int testOther()
 {
     std::cout << "[other] Testing Hashtable... "; std::cout.flush();
     {
-        Hashtable<uint64_t,std::string> ht(128);
+        Hashtable<uint64_t,std::string> ht;
+        Hashtable<uint64_t,std::string> ht2;
         std::map<uint64_t,std::string> ref; // assume std::map works correctly :)
         for(int x=0;x<2;++x) {
             for(int i=0;i<25000;++i) {
                 uint64_t k = rand();
                 while ((k == 0)||(ref.count(k) > 0))
                     ++k;
-                std::string v;
+                std::string v("!");
                 for(int j=0;j<(int)(k % 64);++j)
                     v.push_back("0123456789"[rand() % 10]);
                 ht.set(k,v);
                 ref[k] = v;
             }
             if (ht.size() != ref.size()) {
-                std::cout << "FAILED! (size mismatch)" << std::endl;
+                std::cout << "FAILED! (size mismatch, original)" << std::endl;
+                return -1;
+            }
+            ht2 = ht;
+            Hashtable<uint64_t,std::string> ht3(ht2);
+            if (ht2.size() != ref.size()) {
+                std::cout << "FAILED! (size mismatch, assigned)" << std::endl;
+                return -1;
+            }
+            if (ht3.size() != ref.size()) {
+                std::cout << "FAILED! (size mismatch, copied)" << std::endl;
                 return -1;
             }
             for(std::map<uint64_t,std::string>::iterator i(ref.begin());i!=ref.end();++i) {
                 std::string *v = ht.get(i->first);
                 if (!v) {
-                    std::cout << "FAILED! (key not found)" << std::endl;
+                    std::cout << "FAILED! (key " << i->first << " not found, original)" << std::endl;
+                    return -1;
+                }
+                if (*v != i->second) {
+                    std::cout << "FAILED! (key " << i->first << " not equal, original)" << std::endl;
+                    return -1;
+                }
+                v = ht2.get(i->first);
+                if (!v) {
+                    std::cout << "FAILED! (key " << i->first << " not found, assigned)" << std::endl;
+                    return -1;
+                }
+                if (*v != i->second) {
+                    std::cout << "FAILED! (key " << i->first << " not equal, assigned)" << std::endl;
+                    return -1;
+                }
+                v = ht3.get(i->first);
+                if (!v) {
+                    std::cout << "FAILED! (key " << i->first << " not found, copied)" << std::endl;
                     return -1;
                 }
                 if (*v != i->second) {
-                    std::cout << "FAILED! (key not equal)" << std::endl;
+                    std::cout << "FAILED! (key " << i->first << " not equal, copied)" << std::endl;
                     return -1;
                 }
             }
--
cgit v1.2.3

From cfd101c9b85b20e5911445998a6f040089e3e414 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Fri, 4 Sep 2015 11:50:12 -0700
Subject: Add entries() to go with keys() for future use.

---
 node/Hashtable.hpp | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index 5076751d..6f7541c4 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -30,6 +30,8 @@
 
 #include <stdexcept>
 #include <vector>
+#include <utility>
+#include <algorithm>
 
 namespace ZeroTier {
 
@@ -196,6 +198,24 @@ public:
         return k;
     }
 
+    /**
+     * @return Vector of all entries (pairs of K,V)
+     */
+    inline typename std::vector< std::pair<K,V> > entries()
+    {
+        typename std::vector< std::pair<K,V> > k;
+        if (_s) {
+            for(unsigned long i=0;i<_bc;++i) {
+                _Bucket *b = _t[i];
+                while (b) {
+                    k.push_back(std::pair<K,V>(b->k,b->v));
+                    b = b->next;
+                }
+            }
+        }
+        return k;
+    }
+
     /**
      * @param k Key
      * @return Pointer to value or NULL if not found
--
cgit v1.2.3
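A sketch of what keys() and entries() give you (not part of the patch): both return by-value snapshots, so the table can be mutated freely afterwards. The harness below is illustrative only.

    // Sketch: snapshot the table's contents via keys() and entries().
    #include "node/Hashtable.hpp" // path assumed relative to repo root

    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    int main()
    {
        ZeroTier::Hashtable<uint64_t,std::string> ht;
        ht.set(1ULL,"one");
        ht.set(2ULL,"two");

        std::vector<uint64_t> ks(ht.keys());
        std::vector< std::pair<uint64_t,std::string> > es(ht.entries());

        for(unsigned long i=0;i<es.size();++i)
            printf("%llu=%s\n",(unsigned long long)es[i].first,es[i].second.c_str());

        printf("%lu keys, %lu entries\n",(unsigned long)ks.size(),(unsigned long)es.size());
        return 0;
    }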
From 3a959a7763b44ffcddce557167169150a28b9059 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Fri, 4 Sep 2015 12:14:21 -0700
Subject: Swap out std::map<> for Hashtable<> for main peer database in Topology. (ongoing std::map-ectomy)

---
 node/Address.hpp   | 14 +++++++++++---
 node/Hashtable.hpp |  6 ++++--
 node/Node.cpp      |  5 +++--
 node/Topology.cpp  | 18 ++++++++++--------
 node/Topology.hpp  | 14 +++++++++-----
 5 files changed, 37 insertions(+), 20 deletions(-)

diff --git a/node/Address.hpp b/node/Address.hpp
index 137e4f4f..0b38ec62 100644
--- a/node/Address.hpp
+++ b/node/Address.hpp
@@ -166,6 +166,15 @@ public:
         return _a;
     }
 
+    /**
+     * @return Hash code for use with Hashtable
+     */
+    inline unsigned long hashCode() const
+        throw()
+    {
+        return (unsigned long)_a;
+    }
+
     /**
      * @return Hexadecimal string
      */
@@ -197,11 +206,11 @@ public:
 
     /**
      * Check if this address is reserved
-     * 
+     *
      * The all-zero null address and any address beginning with 0xff are
      * reserved. (0xff is reserved for future use to designate possibly
      * longer addresses, addresses based on IPv6 innards, etc.)
-     * 
+     *
      * @return True if address is reserved and may not be used
      */
     inline bool isReserved() const
@@ -230,4 +239,3 @@ private:
 } // namespace ZeroTier
 
 #endif
-
diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index 6f7541c4..84b5be0e 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -183,10 +183,11 @@ public:
     /**
      * @return Vector of all keys
      */
-    inline typename std::vector<K> keys()
+    inline typename std::vector<K> keys() const
     {
         typename std::vector<K> k;
         if (_s) {
+            k.reserve(_s);
             for(unsigned long i=0;i<_bc;++i) {
                 _Bucket *b = _t[i];
                 while (b) {
@@ -201,10 +202,11 @@ public:
     /**
      * @return Vector of all entries (pairs of K,V)
      */
-    inline typename std::vector< std::pair<K,V> > entries()
+    inline typename std::vector< std::pair<K,V> > entries() const
     {
         typename std::vector< std::pair<K,V> > k;
         if (_s) {
+            k.reserve(_s);
             for(unsigned long i=0;i<_bc;++i) {
                 _Bucket *b = _t[i];
                 while (b) {
diff --git a/node/Node.cpp b/node/Node.cpp
index 534c085d..c8c50d66 100644
--- a/node/Node.cpp
+++ b/node/Node.cpp
@@ -355,7 +355,8 @@ void Node::status(ZT1_NodeStatus *status) const
 
 ZT1_PeerList *Node::peers() const
 {
-    std::map< Address,SharedPtr<Peer> > peers(RR->topology->allPeers());
+    std::vector< std::pair< Address,SharedPtr<Peer> > > peers(RR->topology->allPeers());
+    std::sort(peers.begin(),peers.end());
 
     char *buf = (char *)::malloc(sizeof(ZT1_PeerList) + (sizeof(ZT1_Peer) * peers.size()));
     if (!buf)
@@ -364,7 +365,7 @@ ZT1_PeerList *Node::peers() const
     pl->peers = (ZT1_Peer *)(buf + sizeof(ZT1_PeerList));
     pl->peerCount = 0;
 
-    for(std::map< Address,SharedPtr<Peer> >::iterator pi(peers.begin());pi!=peers.end();++pi) {
+    for(std::vector< std::pair< Address,SharedPtr<Peer> > >::iterator pi(peers.begin());pi!=peers.end();++pi) {
         ZT1_Peer *p = &(pl->peers[pl->peerCount++]);
         p->address = pi->second->address().toInt();
         p->lastUnicastFrame = pi->second->lastUnicastFrame();
diff --git a/node/Topology.cpp b/node/Topology.cpp
index b255080e..25a92acd 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -103,7 +103,7 @@ SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
     const uint64_t now = RR->node->now();
     Mutex::Lock _l(_lock);
 
-    SharedPtr<Peer> p(_activePeers.insert(std::pair< Address,SharedPtr<Peer> >(peer->address(),peer)).first->second);
+    SharedPtr<Peer> &p = _activePeers.set(peer->address(),peer);
     p->use(now);
     _saveIdentity(p->identity());
 
@@ -160,9 +160,9 @@ SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCou
                 if (++sna == _rootAddresses.end())
                     sna = _rootAddresses.begin(); // wrap around at end
                 if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
-                    std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.find(*sna));
-                    if ((p != _activePeers.end())&&(p->second->hasActiveDirectPath(now))) {
-                        bestRoot = p->second;
+                    SharedPtr<Peer> *p = _activePeers.get(*sna);
+                    if ((p)&&((*p)->hasActiveDirectPath(now))) {
+                        bestRoot = *p;
                         break;
                     }
                 }
@@ -249,10 +249,12 @@ bool Topology::isRoot(const Identity &id) const
 void Topology::clean(uint64_t now)
 {
     Mutex::Lock _l(_lock);
-    for(std::map< Address,SharedPtr<Peer> >::iterator p(_activePeers.begin());p!=_activePeers.end();) {
-        if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->first) == _rootAddresses.end())) {
-            _activePeers.erase(p++);
-        } else ++p;
+    Hashtable< Address,SharedPtr<Peer> >::Iterator i(_activePeers);
+    Address *a = (Address *)0;
+    SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+    while (i.next(a,p)) {
+        if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end()))
+            _activePeers.erase(*a);
     }
 }
 
diff --git a/node/Topology.hpp b/node/Topology.hpp
index 1c5cca00..3066b50c 100644
--- a/node/Topology.hpp
+++ b/node/Topology.hpp
@@ -44,6 +44,7 @@
 #include "Mutex.hpp"
 #include "InetAddress.hpp"
 #include "Dictionary.hpp"
+#include "Hashtable.hpp"
 
 namespace ZeroTier {
 
@@ -163,17 +164,20 @@ public:
     inline void eachPeer(F f)
     {
         Mutex::Lock _l(_lock);
-        for(std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.begin());p!=_activePeers.end();++p)
-            f(*this,p->second);
+        Hashtable< Address,SharedPtr<Peer> >::Iterator i(_activePeers);
+        Address *a = (Address *)0;
+        SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+        while (i.next(a,p))
+            f(*this,*p);
     }
 
     /**
     * @return All currently active peers by address
     */
-    inline std::map< Address,SharedPtr<Peer> > allPeers() const
+    inline std::vector< std::pair< Address,SharedPtr<Peer> > > allPeers() const
     {
         Mutex::Lock _l(_lock);
-        return _activePeers;
+        return _activePeers.entries();
     }
 
     /**
@@ -190,7 +194,7 @@ private:
 
     const RuntimeEnvironment *RR;
 
-    std::map< Address,SharedPtr<Peer> > _activePeers;
+    Hashtable< Address,SharedPtr<Peer> > _activePeers;
     std::map< Identity,std::vector<InetAddress> > _roots;
     std::vector< Address > _rootAddresses;
     std::vector< SharedPtr<Peer> > _rootPeers;
--
cgit v1.2.3
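A sketch of the expiry idiom Topology::clean() now uses (not part of the patch): per the Iterator's documented contract, it is safe to erase the key just returned by next(), and only that key. The function below is an illustrative reduction.

    // Sketch: erase-while-iterating, the one mutation the Iterator permits.
    #include "node/Hashtable.hpp" // path assumed relative to repo root

    void expire(ZeroTier::Hashtable<uint64_t,uint64_t> &lastSeen,uint64_t now,uint64_t timeout)
    {
        ZeroTier::Hashtable<uint64_t,uint64_t>::Iterator i(lastSeen);
        uint64_t *k = (uint64_t *)0;
        uint64_t *ts = (uint64_t *)0;
        while (i.next(k,ts)) {
            if ((now - *ts) > timeout)
                lastSeen.erase(*k); // *k and *ts point into the freed bucket after this; call next() before touching them again
        }
    }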
From 7b8ce1605781f14d909e0aa099455b86d738c60a Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Fri, 4 Sep 2015 13:42:19 -0700
Subject: Another std::map<> dies.

---
 node/Hashtable.hpp | 35 +++++++++++++++++++++++++++++++++++
 node/Network.cpp   | 21 ++++++++++++---------
 node/Network.hpp   |  3 ++-
 node/Peer.hpp      |  1 +
 4 files changed, 50 insertions(+), 10 deletions(-)

diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index 84b5be0e..59a57726 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -199,6 +199,26 @@ public:
         return k;
     }
 
+    /**
+     * Append all keys (in unspecified order) to the supplied vector or list
+     *
+     * @param v Vector, list, or other compliant container
+     * @tparam Type of V (generally inferred)
+     */
+    template<typename C>
+    inline void appendKeys(C &v) const
+    {
+        if (_s) {
+            for(unsigned long i=0;i<_bc;++i) {
+                _Bucket *b = _t[i];
+                while (b) {
+                    v.push_back(b->k);
+                    b = b->next;
+                }
+            }
+        }
+    }
+
     /**
      * @return Vector of all entries (pairs of K,V)
      */
@@ -234,6 +254,21 @@ public:
     }
     inline const V *get(const K &k) const { return const_cast<Hashtable *>(this)->get(k); }
 
+    /**
+     * @param k Key to check
+     * @return True if key is present
+     */
+    inline bool contains(const K &k) const
+    {
+        _Bucket *b = _t[_hc(k) % _bc];
+        while (b) {
+            if (b->k == k)
+                return true;
+            b = b->next;
+        }
+        return false;
+    }
+
     /**
      * @param k Key
      * @return True if value was present
diff --git a/node/Network.cpp b/node/Network.cpp
index 39042fab..8317cad9 100644
--- a/node/Network.cpp
+++ b/node/Network.cpp
@@ -147,7 +147,7 @@ bool Network::subscribedToMulticastGroup(const MulticastGroup &mg,bool includeBr
     if (std::binary_search(_myMulticastGroups.begin(),_myMulticastGroups.end(),mg))
         return true;
     else if (includeBridgedGroups)
-        return (_multicastGroupsBehindMe.find(mg) != _multicastGroupsBehindMe.end());
+        return _multicastGroupsBehindMe.contains(mg);
     else return false;
 }
 
@@ -373,10 +373,14 @@ void Network::clean()
     }
 
     // Clean learned multicast groups if we haven't heard from them in a while
-    for(std::map<MulticastGroup,uint64_t>::iterator mg(_multicastGroupsBehindMe.begin());mg!=_multicastGroupsBehindMe.end();) {
-        if ((now - mg->second) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
-            _multicastGroupsBehindMe.erase(mg++);
-        else ++mg;
+    {
+        Hashtable< MulticastGroup,uint64_t >::Iterator i(_multicastGroupsBehindMe);
+        MulticastGroup *mg = (MulticastGroup *)0;
+        uint64_t *ts = (uint64_t *)0;
+        while (i.next(mg,ts)) {
+            if ((now - *ts) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
+                _multicastGroupsBehindMe.erase(*mg);
+        }
     }
 }
 
@@ -408,8 +412,8 @@ void Network::learnBridgeRoute(const MAC &mac,const Address &addr)
 void Network::learnBridgedMulticastGroup(const MulticastGroup &mg,uint64_t now)
 {
     Mutex::Lock _l(_lock);
-    unsigned long tmp = (unsigned long)_multicastGroupsBehindMe.size();
-    _multicastGroupsBehindMe[mg] = now;
+    const unsigned long tmp = (unsigned long)_multicastGroupsBehindMe.size();
+    _multicastGroupsBehindMe.set(mg,now);
     if (tmp != _multicastGroupsBehindMe.size())
         _announceMulticastGroups();
 }
@@ -510,8 +514,7 @@ std::vector<MulticastGroup> Network::_allMulticastGroups() const
     std::vector<MulticastGroup> mgs;
     mgs.reserve(_myMulticastGroups.size() + _multicastGroupsBehindMe.size() + 1);
     mgs.insert(mgs.end(),_myMulticastGroups.begin(),_myMulticastGroups.end());
-    for(std::map< MulticastGroup,uint64_t >::const_iterator i(_multicastGroupsBehindMe.begin());i!=_multicastGroupsBehindMe.end();++i)
-        mgs.push_back(i->first);
+    _multicastGroupsBehindMe.appendKeys(mgs);
     if ((_config)&&(_config->enableBroadcast()))
         mgs.push_back(Network::BROADCAST);
     std::sort(mgs.begin(),mgs.end());
diff --git a/node/Network.hpp b/node/Network.hpp
index d7320d46..47d2efc0 100644
--- a/node/Network.hpp
+++ b/node/Network.hpp
@@ -40,6 +40,7 @@
 
 #include "Constants.hpp"
 #include "NonCopyable.hpp"
+#include "Hashtable.hpp"
 #include "Address.hpp"
 #include "Mutex.hpp"
 #include "SharedPtr.hpp"
@@ -359,7 +360,7 @@ private:
     volatile bool _portInitialized;
 
     std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to including those behind us (updated periodically)
-    std::map< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups bridged to us and when we last saw activity on each
+    Hashtable< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups bridged to us and when we last saw activity on each
 
     std::map<MAC,Address> _remoteBridgeRoutes; // remote addresses where given MACs are reachable
diff --git a/node/Peer.hpp b/node/Peer.hpp
index 283e3f33..ef436cd9 100644
--- a/node/Peer.hpp
+++ b/node/Peer.hpp
@@ -40,6 +40,7 @@
 #include "../include/ZeroTierOne.h"
 
 #include "RuntimeEnvironment.hpp"
+#include "CertificateOfMembership.hpp"
 #include "RemotePath.hpp"
 #include "Address.hpp"
 #include "Utils.hpp"
--
cgit v1.2.3
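A sketch of the two helpers this commit adds (not part of the patch): contains() avoids the get()-and-test dance, and appendKeys() appends into an existing container the way _allMulticastGroups() now does. The function names below are illustrative.

    // Sketch: contains() and appendKeys() on a const table.
    #include "node/Hashtable.hpp" // path assumed relative to repo root

    #include <stdint.h>
    #include <vector>

    std::vector<uint64_t> allIds(const ZeroTier::Hashtable<uint64_t,int> &behindMe,const std::vector<uint64_t> &mine)
    {
        std::vector<uint64_t> ids(mine);
        ids.reserve(mine.size() + behindMe.size());
        behindMe.appendKeys(ids); // keys arrive in unspecified order
        return ids;
    }

    bool knows(const ZeroTier::Hashtable<uint64_t,int> &behindMe,uint64_t id)
    {
        return behindMe.contains(id);
    }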
From 0ab3e49be91ed7a8723c8b58750aef77c01e8d08 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Fri, 4 Sep 2015 14:44:22 -0700
Subject: Starting in on Switch... kill map in defrag queue, which will probably improve performance pretty decently under high load with lots of peers.

---
 node/Hashtable.hpp |  9 ++++++---
 node/Switch.cpp    | 49 ++++++++++++++++++++++++-----------------------
 node/Switch.hpp    |  4 +++-
 3 files changed, 35 insertions(+), 27 deletions(-)

diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index 59a57726..29c54838 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -372,9 +372,12 @@ private:
     }
     static inline unsigned long _hc(const uint64_t i)
     {
-        // NOTE: this is fine for network IDs, but might be bad for other kinds
-        // of IDs if they are not evenly or randomly distributed.
-        return (unsigned long)((i ^ (i >> 32)) * 2654435761ULL);
+        /* NOTE: this assumes that 'i' is evenly distributed, which is the case for
+         * packet IDs and network IDs -- the two use cases in ZT for uint64_t keys.
+         * These values are also greater than 0xffffffff so they'll map onto a full
+         * bucket count just fine no matter what happens. Normally you'd want to
+         * hash an integer key index in a hash table. */
+        return (unsigned long)i;
     }
 
     inline void _grow()
diff --git a/node/Switch.cpp b/node/Switch.cpp
index 989f497a..3dcb7002 100644
--- a/node/Switch.cpp
+++ b/node/Switch.cpp
@@ -531,11 +531,14 @@ unsigned long Switch::doTimerTasks(uint64_t now)
     {   // Time out packets that didn't get all their fragments.
         Mutex::Lock _l(_defragQueue_m);
-        for(std::map< uint64_t,DefragQueueEntry >::iterator i(_defragQueue.begin());i!=_defragQueue.end();) {
-            if ((now - i->second.creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) {
+        Hashtable< uint64_t,DefragQueueEntry >::Iterator i(_defragQueue);
+        uint64_t *packetId = (uint64_t *)0;
+        DefragQueueEntry *qe = (DefragQueueEntry *)0;
+        while (i.next(packetId,qe)) {
+            if ((now - qe->creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) {
                 TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",i->first);
-                _defragQueue.erase(i++);
-            } else ++i;
+                _defragQueue.erase(*packetId);
+            }
         }
     }
 
@@ -577,32 +580,31 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void
             // seeing a Packet::Fragment?
 
             Mutex::Lock _l(_defragQueue_m);
-            std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));
+            DefragQueueEntry &dq = _defragQueue[pid];
 
-            if (dqe == _defragQueue.end()) {
+            if (!dq.creationTime) {
                 // We received a Packet::Fragment without its head, so queue it and wait
-                DefragQueueEntry &dq = _defragQueue[pid];
                 dq.creationTime = RR->node->now();
                 dq.frags[fno - 1] = fragment;
                 dq.totalFragments = tf; // total fragment count is known
                 dq.haveFragments = 1 << fno; // we have only this fragment
                 //TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());
-            } else if (!(dqe->second.haveFragments & (1 << fno))) {
+            } else if (!(dq.haveFragments & (1 << fno))) {
                 // We have other fragments and maybe the head, so add this one and check
-                dqe->second.frags[fno - 1] = fragment;
-                dqe->second.totalFragments = tf;
+                dq.frags[fno - 1] = fragment;
+                dq.totalFragments = tf;
                 //TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());
 
-                if (Utils::countBits(dqe->second.haveFragments |= (1 << fno)) == tf) {
+                if (Utils::countBits(dq.haveFragments |= (1 << fno)) == tf) {
                     // We have all fragments -- assemble and process full Packet
                     //TRACE("packet %.16llx is complete, assembling and processing...",pid);
 
-                    SharedPtr<IncomingPacket> packet(dqe->second.frag0);
+                    SharedPtr<IncomingPacket> packet(dq.frag0);
                     for(unsigned int f=1;f<tf;++f)
-                        packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
-                    _defragQueue.erase(dqe);
+                        packet->append(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength());
+                    _defragQueue.erase(pid); // dq no longer valid after this
 
                     if (!packet->tryDecode(RR)) {
                         Mutex::Lock _l(_rxQueue_m);
@@ -645,26 +647,27 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat
             uint64_t pid = packet->packetId();
             Mutex::Lock _l(_defragQueue_m);
-            std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));
+            DefragQueueEntry &dq = _defragQueue[pid];
 
-            if (dqe == _defragQueue.end()) {
+            if (!dq.creationTime) {
                 // If we have no other fragments yet, create an entry and save the head
-                DefragQueueEntry &dq = _defragQueue[pid];
+
                 dq.creationTime = RR->node->now();
                 dq.frag0 = packet;
                 dq.totalFragments = 0; // 0 == unknown, waiting for Packet::Fragment
                 dq.haveFragments = 1; // head is first bit (left to right)
                 //TRACE("fragment (0/?) of %.16llx from %s",pid,fromAddr.toString().c_str());
-            } else if (!(dqe->second.haveFragments & 1)) {
+            } else if (!(dq.haveFragments & 1)) {
                 // If we have other fragments but no head, see if we are complete with the head
-                if ((dqe->second.totalFragments)&&(Utils::countBits(dqe->second.haveFragments |= 1) == dqe->second.totalFragments)) {
+
+                if ((dq.totalFragments)&&(Utils::countBits(dq.haveFragments |= 1) == dq.totalFragments)) {
                     // We have all fragments -- assemble and process full Packet
                     //TRACE("packet %.16llx is complete, assembling and processing...",pid);
 
                     // packet already contains head, so append fragments
-                    for(unsigned int f=1;f<dqe->second.totalFragments;++f)
-                        packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
-                    _defragQueue.erase(dqe);
+                    for(unsigned int f=1;f<dq.totalFragments;++f)
+                        packet->append(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength());
+                    _defragQueue.erase(pid); // dq no longer valid after this
 
                     if (!packet->tryDecode(RR)) {
                         Mutex::Lock _l(_rxQueue_m);
@@ -672,7 +675,7 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat
                     }
                 } else {
                     // Still waiting on more fragments, so queue the head
-                    dqe->second.frag0 = packet;
+                    dq.frag0 = packet;
                 }
             } // else this is a duplicate head, ignore
         } else {
diff --git a/node/Switch.hpp b/node/Switch.hpp
index ac85606e..a1b36014 100644
--- a/node/Switch.hpp
+++ b/node/Switch.hpp
@@ -45,6 +45,7 @@
 #include "Network.hpp"
 #include "SharedPtr.hpp"
 #include "IncomingPacket.hpp"
+#include "Hashtable.hpp"
 
 /* Ethernet frame types that might be relevant to us */
 #define ZT_ETHERTYPE_IPV4 0x0800
@@ -199,13 +200,14 @@ private:
     // Packet defragmentation queue -- comes before RX queue in path
     struct DefragQueueEntry
     {
+        DefragQueueEntry() : creationTime(0),totalFragments(0),haveFragments(0) {}
         uint64_t creationTime;
         SharedPtr<IncomingPacket> frag0;
         Packet::Fragment frags[ZT_MAX_PACKET_FRAGMENTS - 1];
         unsigned int totalFragments; // 0 if only frag0 received, waiting for frags
         uint32_t haveFragments; // bit mask, LSB to MSB
     };
-    std::map< uint64_t,DefragQueueEntry > _defragQueue;
+    Hashtable< uint64_t,DefragQueueEntry > _defragQueue;
     Mutex _defragQueue_m;
 
     // ZeroTier-layer RX queue of incoming packets in the process of being decoded
--
cgit v1.2.3
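A sketch of the find-or-create idiom the defrag queue now leans on (not part of the patch): operator[] default-constructs a missing entry, so a zeroed field doubles as the "newly created" flag and one lookup replaces find()-then-insert. Entry and touch() below are illustrative.

    // Sketch: one-lookup find-or-create via operator[], as in the Switch code.
    #include "node/Hashtable.hpp" // path assumed relative to repo root

    #include <stdint.h>

    struct Entry
    {
        Entry() : creationTime(0),count(0) {}
        uint64_t creationTime;
        unsigned int count;
    };

    void touch(ZeroTier::Hashtable<uint64_t,Entry> &table,uint64_t id,uint64_t now)
    {
        Entry &e = table[id];    // creates a default-constructed Entry if absent
        if (!e.creationTime)     // zero means we just created it
            e.creationTime = now;
        ++e.count;
        // As in the Switch code, 'e' must not be used after any erase() or
        // set() on the table that might delete the bucket or rehash.
    }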
From 3dba016a9354d9c50743877988c8d928d25f7a2b Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Fri, 4 Sep 2015 15:21:22 -0700
Subject: Almost done... very few std::map<>s remaining in any spot that matters.

---
 node/Hashtable.hpp |  5 +++--
 node/Switch.cpp    | 38 ++++++++++++++++++--------------------
 node/Switch.hpp    | 19 ++++++++++++++++++-
 3 files changed, 39 insertions(+), 23 deletions(-)

diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index 29c54838..bcc111e3 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -39,8 +39,9 @@ namespace ZeroTier {
  * A minimal hash table implementation for the ZeroTier core
  *
  * This is not a drop-in replacement for STL containers, and has several
- * limitations. It's designed to be small and fast for use in the
- * ZeroTier core.
+ * limitations. Keys can be uint64_t or an object, and if the latter they
+ * must implement a method called hashCode() that returns an unsigned long
+ * value that is evenly distributed.
  */
 template<typename K,typename V>
 class Hashtable
diff --git a/node/Switch.cpp b/node/Switch.cpp
index 7d6f8094..d5ee3e23 100644
--- a/node/Switch.cpp
+++ b/node/Switch.cpp
@@ -309,31 +309,18 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force)
 
     const uint64_t now = RR->node->now();
 
-    std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
-    if (!(cg.first))
-        return false;
-
-    if (cg.first.ipScope() != cg.second.ipScope())
-        return false;
-
-    // Addresses are sorted in key for last unite attempt map for order
-    // invariant lookup: (p1,p2) == (p2,p1)
-    Array<Address,2> uniteKey;
-    if (p1 >= p2) {
-        uniteKey[0] = p2;
-        uniteKey[1] = p1;
-    } else {
-        uniteKey[0] = p1;
-        uniteKey[1] = p2;
-    }
     {
         Mutex::Lock _l(_lastUniteAttempt_m);
-        std::map< Array< Address,2 >,uint64_t >::const_iterator e(_lastUniteAttempt.find(uniteKey));
-        if ((!force)&&(e != _lastUniteAttempt.end())&&((now - e->second) < ZT_MIN_UNITE_INTERVAL))
+        uint64_t &luts = _lastUniteAttempt[_LastUniteKey(p1,p2)];
+        if (((now - luts) < ZT_MIN_UNITE_INTERVAL)&&(!force))
             return false;
-        else _lastUniteAttempt[uniteKey] = now;
+        luts = now;
     }
 
+    std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
+    if ((!(cg.first))||(cg.first.ipScope() != cg.second.ipScope()))
+        return false;
+
     TRACE("unite: %s(%s) <> %s(%s)",p1.toString().c_str(),cg.second.toString().c_str(),p2.toString().c_str(),cg.first.toString().c_str());
 
     /* Tell P1 where to find P2 and vice versa, sending the packets to P1 and
@@ -543,6 +530,17 @@ unsigned long Switch::doTimerTasks(uint64_t now)
         }
     }
 
+    {   // Remove really old last unite attempt entries to keep table size controlled
+        Mutex::Lock _l(_lastUniteAttempt_m);
+        Hashtable< _LastUniteKey,uint64_t >::Iterator i(_lastUniteAttempt);
+        _LastUniteKey *k = (_LastUniteKey *)0;
+        uint64_t *v = (uint64_t *)0;
+        while (i.next(k,v)) {
+            if ((now - *v) >= (ZT_MIN_UNITE_INTERVAL * 16))
+                _lastUniteAttempt.erase(*k);
+        }
+    }
+
     return nextDelay;
 }
diff --git a/node/Switch.hpp b/node/Switch.hpp
index 0791681f..2d83b70c 100644
--- a/node/Switch.hpp
+++ b/node/Switch.hpp
@@ -235,7 +235,24 @@ private:
     Mutex _txQueue_m;
 
     // Tracks sending of VERB_RENDEZVOUS to relaying peers
-    std::map< Array< Address,2 >,uint64_t > _lastUniteAttempt; // key is always sorted in ascending order, for set-like behavior
+    struct _LastUniteKey
+    {
+        _LastUniteKey() : x(0),y(0) {}
+        _LastUniteKey(const Address &a1,const Address &a2)
+        {
+            if (a1 > a2) {
+                x = a2.toInt();
+                y = a1.toInt();
+            } else {
+                x = a1.toInt();
+                y = a2.toInt();
+            }
+        }
+        inline unsigned long hashCode() const throw() { return ((unsigned long)x ^ (unsigned long)y); }
+        inline bool operator==(const _LastUniteKey &k) const throw() { return ((x == k.x)&&(y == k.y)); }
+        uint64_t x,y;
+    };
+    Hashtable< _LastUniteKey,uint64_t > _lastUniteAttempt; // key is always sorted in ascending order, for set-like behavior
     Mutex _lastUniteAttempt_m;
 
     // Active attempts to contact remote peers, including state of multi-phase NAT traversal
--
cgit v1.2.3
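A sketch of the order-invariant key trick _LastUniteKey uses above (not part of the patch): the constructor sorts the pair so (a,b) and (b,a) hash and compare identically. PairKey and the harness below are illustrative stand-ins.

    // Sketch: a normalized pair key, so lookups ignore argument order.
    #include "node/Hashtable.hpp" // path assumed relative to repo root

    #include <cstdio>
    #include <stdint.h>

    struct PairKey
    {
        PairKey() : x(0),y(0) {}
        PairKey(uint64_t a,uint64_t b) : x((a > b) ? b : a),y((a > b) ? a : b) {}
        inline unsigned long hashCode() const throw() { return ((unsigned long)x ^ (unsigned long)y); }
        inline bool operator==(const PairKey &k) const throw() { return ((x == k.x)&&(y == k.y)); }
        uint64_t x,y;
    };

    int main()
    {
        ZeroTier::Hashtable<PairKey,uint64_t> last;
        last.set(PairKey(5,9),1000);
        uint64_t *t = last.get(PairKey(9,5)); // same entry, reversed arguments
        printf("%s\n",(t) ? "hit" : "miss");  // -> hit
        return 0;
    }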
From c1a53a26536d2635118262f5f719795b2e70e5fa Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Fri, 11 Sep 2015 11:45:04 -0700
Subject: ARP cache and responder agent code for use in netcon and iOS.

---
 node/Hashtable.hpp |   5 ++
 node/MAC.hpp       |   6 +++
 osdep/Arp.cpp      | 134 +++++++++++++++++++++++++++++++++++++++++++++
 osdep/Arp.hpp      | 156 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 301 insertions(+)
 create mode 100644 osdep/Arp.cpp
 create mode 100644 osdep/Arp.hpp

diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index bcc111e3..d2b85c15 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -380,6 +380,11 @@ private:
      * hash an integer key index in a hash table. */
         return (unsigned long)i;
     }
+    static inline unsigned long _hc(const uint32_t i)
+    {
+        // In the uint32_t case we use a simple multiplier for hashing to ensure coverage
+        return ((unsigned long)i * (unsigned long)2654435761);
+    }
 
     inline void _grow()
     {
diff --git a/node/MAC.hpp b/node/MAC.hpp
index 619b7195..a9cd43cf 100644
--- a/node/MAC.hpp
+++ b/node/MAC.hpp
@@ -250,6 +250,12 @@ public:
         _m = m._m;
         return *this;
     }
+    inline MAC &operator=(const uint64_t m)
+        throw()
+    {
+        _m = m;
+        return *this;
+    }
 
     inline bool operator==(const MAC &m) const throw() { return (_m == m._m); }
     inline bool operator!=(const MAC &m) const throw() { return (_m != m._m); }
diff --git a/osdep/Arp.cpp b/osdep/Arp.cpp
new file mode 100644
index 00000000..f71dfb54
--- /dev/null
+++ b/osdep/Arp.cpp
@@ -0,0 +1,134 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ *
+ * If you would like to embed ZeroTier into a commercial application or
+ * redistribute it in a modified binary form, please contact ZeroTier Networks
+ * LLC. Start here: http://www.zerotier.com/
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "Arp.hpp"
+#include "OSUtils.hpp"
+
+namespace ZeroTier {
+
+static const uint8_t ARP_REQUEST_HEADER[8] = { 0x00,0x01,0x08,0x00,0x06,0x04,0x00,0x01 };
+static const uint8_t ARP_RESPONSE_HEADER[8] = { 0x00,0x01,0x08,0x00,0x06,0x04,0x00,0x02 };
+
+Arp::Arp() :
+    _cache(256),
+    _lastCleaned(OSUtils::now())
+{
+}
+
+void Arp::addLocal(uint32_t ip,const MAC &mac)
+{
+    _ArpEntry &e = _cache[ip];
+    e.lastQuerySent = 0; // local IP
+    e.lastResponseReceived = 0; // local IP
+    e.mac = mac;
+    e.local = true;
+}
+
+void Arp::remove(uint32_t ip)
+{
+    _cache.erase(ip);
+}
+
+uint32_t Arp::processIncomingArp(const void *arp,unsigned int len,void *response,unsigned int &responseLen,MAC &responseDest)
+{
+    const uint64_t now = OSUtils::now();
+    uint32_t ip = 0;
+
+    responseLen = 0;
+    responseDest.zero();
+
+    if (len > 28) {
+        if (!memcmp(arp,ARP_REQUEST_HEADER,8)) {
+            // Respond to ARP requests for locally-known IPs
+            _ArpEntry *targetEntry = _cache.get(reinterpret_cast<const uint32_t *>(arp)[6]);
+            if ((targetEntry)&&(targetEntry->local)) {
+                memcpy(response,ARP_RESPONSE_HEADER,8);
+                targetEntry->mac.copyTo(reinterpret_cast<uint8_t *>(response) + 8,6);
+                memcpy(reinterpret_cast<uint8_t *>(response) + 14,reinterpret_cast<const uint8_t *>(arp) + 24,4);
+                memcpy(reinterpret_cast<uint8_t *>(response) + 18,reinterpret_cast<const uint8_t *>(arp) + 8,10);
+                responseLen = 28;
+                responseDest.setTo(reinterpret_cast<const uint8_t *>(arp) + 8,6);
+            }
+        } else if (!memcmp(arp,ARP_RESPONSE_HEADER,8)) {
+            // Learn cache entries for remote IPs from relevant ARP replies
+            uint32_t responseIp = 0;
+            memcpy(&responseIp,reinterpret_cast<const uint8_t *>(arp) + 14,4);
+            _ArpEntry *queryEntry = _cache.get(responseIp);
+            if ((queryEntry)&&(!queryEntry->local)&&((now - queryEntry->lastQuerySent) <= ZT_ARP_QUERY_MAX_TTL)) {
+                queryEntry->lastResponseReceived = now;
+                queryEntry->mac.setTo(reinterpret_cast<const uint8_t *>(arp) + 8,6);
+                ip = responseIp;
+            }
+        }
+    }
+
+    if ((now - _lastCleaned) >= ZT_ARP_EXPIRE) {
+        _lastCleaned = now;
+        Hashtable< uint32_t,_ArpEntry >::Iterator i(_cache);
+        uint32_t *k = (uint32_t *)0;
+        _ArpEntry *v = (_ArpEntry *)0;
+        while (i.next(k,v)) {
+            if ((!v->local)&&((now - v->lastResponseReceived) >= ZT_ARP_EXPIRE))
+                _cache.erase(*k);
+        }
+    }
+
+    return ip;
+}
+
+MAC Arp::query(const MAC &localMac,uint32_t ip,void *query,unsigned int &queryLen,MAC &queryDest)
+{
+    const uint64_t now = OSUtils::now();
+
+    _ArpEntry &e = _cache[ip];
+
+    if ( ((e.mac)&&((now - e.lastResponseReceived) >= (ZT_ARP_EXPIRE / 3))) ||
+         ((!e.mac)&&((now - e.lastQuerySent) >= ZT_ARP_QUERY_INTERVAL)) ) {
+        e.lastQuerySent = now;
+
+        uint8_t *q = reinterpret_cast<uint8_t *>(query);
+        memcpy(q,ARP_REQUEST_HEADER,8); q += 8; // ARP request header information, always the same
+        localMac.copyTo(q,6); q += 6; // sending host address
+        memset(q,0,10); q += 10; // sending IP and target media address are ignored in requests
+        memcpy(q,&ip,4); // target IP address for resolution (IP already in big-endian byte order)
+        queryLen = 28;
+        if (e.mac)
+            queryDest = e.mac; // confirmation query, send directly to address holder
+        else queryDest = (uint64_t)0xffffffffffffULL; // broadcast query
+    } else {
+        queryLen = 0;
+        queryDest.zero();
+    }
+
+    return e.mac;
+}
+
+} // namespace ZeroTier
diff --git a/osdep/Arp.hpp b/osdep/Arp.hpp
new file mode 100644
index 00000000..b747cf85
--- /dev/null
+++ b/osdep/Arp.hpp
@@ -0,0 +1,156 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ *
+ * If you would like to embed ZeroTier into a commercial application or
+ * redistribute it in a modified binary form, please contact ZeroTier Networks
+ * LLC. Start here: http://www.zerotier.com/
+ */
+
+#ifndef ZT_ARP_HPP
+#define ZT_ARP_HPP
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "../node/Constants.hpp"
+#include "../node/Hashtable.hpp"
+#include "../node/MAC.hpp"
+
+/**
+ * Maximum possible ARP length
+ *
+ * ARPs are 28 bytes in length, but specify a 128 byte buffer since
+ * some weird extensions we may support in the future can pad them
+ * out to as long as 72 bytes.
+ */
+#define ZT_ARP_BUF_LENGTH 128
+
+/**
+ * Minimum permitted interval between sending ARP queries for a given IP
+ */
+#define ZT_ARP_QUERY_INTERVAL 2000
+
+/**
+ * Maximum time between query and response, otherwise responses are discarded to prevent poisoning
+ */
+#define ZT_ARP_QUERY_MAX_TTL 5000
+
+/**
+ * ARP expiration time
+ */
+#define ZT_ARP_EXPIRE 600000
+
+namespace ZeroTier {
+
+/**
+ * ARP cache and resolver
+ *
+ * To implement ARP:
+ *
+ * (1) Call processIncomingArp() on all ARP packets received and then always
+ * check responseLen after calling. If it is non-zero, send the contents
+ * of response to responseDest.
+ *
+ * (2) Call query() to look up IP addresses, and then check queryLen. If it
+ * is non-zero, send the contents of query to queryDest (usually broadcast).
+ *
+ * Note that either of these functions can technically generate a response or
+ * a query at any time, so their result parameters for sending ARPs should
+ * always be checked.
+ *
+ * This class is not thread-safe and must be guarded if used in multi-threaded
+ * code.
+ */
+class Arp
+{
+public:
+    Arp();
+
+    /**
+     * Set a local IP entry that we should respond to ARPs for
+     *
+     * @param mac Our local MAC address
+     * @param ip IP in big-endian byte order (sin_addr.s_addr)
+     */
+    void addLocal(uint32_t ip,const MAC &mac);
+
+    /**
+     * Delete a local IP entry or a cached ARP entry
+     *
+     * @param ip IP in big-endian byte order (sin_addr.s_addr)
+     */
+    void remove(uint32_t ip);
+
+    /**
+     * Process ARP packets
+     *
+     * For ARP queries, a response is generated and responseLen is set to its
+     * frame payload length in bytes.
+     *
+     * For ARP responses, the cache is populated and the IP address entry that
+     * was learned is returned.
+     *
+     * @param arp ARP frame data
+     * @param len Length of ARP frame (usually 28)
+     * @param response Response buffer -- MUST be a minimum of ZT_ARP_BUF_LENGTH in size
+     * @param responseLen Response length, or set to 0 if no response
+     * @param responseDest Destination of response, or set to null if no response
+     * @return IP address learned or 0 if no new IPs in cache
+     */
+    uint32_t processIncomingArp(const void *arp,unsigned int len,void *response,unsigned int &responseLen,MAC &responseDest);
+
+    /**
+     * Get the MAC corresponding to an IP, generating a query if needed
+     *
+     * This returns a MAC for a remote IP. The local MAC is returned for local
+     * IPs as well. It may also generate a query if the IP is not known or the
+     * entry needs to be refreshed. In this case queryLen will be set to a
+     * non-zero value, so this should always be checked on return even if the
+     * MAC returned is non-null.
+     *
+     * @param localMac Local MAC address of host interface
+     * @param ip IP to look up
+     * @param query Buffer for generated query -- MUST be a minimum of ZT_ARP_BUF_LENGTH in size
+     * @param queryLen Length of generated query, or set to 0 if no query generated
+     * @param queryDest Destination of query, or set to null if no query generated
+     * @return MAC or 0 if no cached entry for this IP
+     */
+    MAC query(const MAC &localMac,uint32_t ip,void *query,unsigned int &queryLen,MAC &queryDest);
+
+private:
+    struct _ArpEntry
+    {
+        _ArpEntry() : lastQuerySent(0),lastResponseReceived(0),mac(),local(false) {}
+        uint64_t lastQuerySent; // Time last query was sent or 0 for local IP
+        uint64_t lastResponseReceived; // Time of last ARP response or 0 for local IP
+        MAC mac; // MAC address of device responsible for IP or null if not known yet
+        bool local; // True if this is a local ARP entry
+    };
+
+    Hashtable< uint32_t,_ArpEntry > _cache;
+    uint64_t _lastCleaned;
+};
+
+} // namespace ZeroTier
+
+#endif
--
cgit v1.2.3

From 4464fa5d392dd930bf847c2dc1c5886398e5d8dd Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Wed, 23 Sep 2015 10:29:05 -0700
Subject: Eliminate another warning.

---
 node/Hashtable.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index d2b85c15..beef1468 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -383,7 +383,7 @@ private:
     static inline unsigned long _hc(const uint32_t i)
     {
         // In the uint32_t case we use a simple multiplier for hashing to ensure coverage
-        return ((unsigned long)i * (unsigned long)2654435761);
+        return ((unsigned long)i * (unsigned long)0x9e3779b1);
     }
 
     inline void _grow()
--
cgit v1.2.3
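Finally, a sketch of the two-call Arp contract described in the header above (not part of any patch): process every inbound ARP frame and transmit any generated response; ask for resolutions and transmit any generated query. The surrounding I/O helpers are hypothetical.

    // Sketch: driving the Arp class; sendFrame() is an assumed TX path.
    #include "osdep/Arp.hpp" // path assumed relative to repo root

    using namespace ZeroTier;

    void onEthernetFrame(Arp &arp,const void *payload,unsigned int len)
    {
        uint8_t response[ZT_ARP_BUF_LENGTH];
        unsigned int responseLen = 0;
        MAC responseDest;

        uint32_t learned = arp.processIncomingArp(payload,len,response,responseLen,responseDest);
        if (responseLen > 0) {
            // sendFrame(responseDest,0x0806,response,responseLen); // hypothetical TX path
        }
        (void)learned; // nonzero == a newly learned IP (big-endian s_addr)
    }

    MAC resolve(Arp &arp,const MAC &myMac,uint32_t ip)
    {
        uint8_t query[ZT_ARP_BUF_LENGTH];
        unsigned int queryLen = 0;
        MAC queryDest;
        MAC m(arp.query(myMac,ip,query,queryLen,queryDest));
        if (queryLen > 0) {
            // sendFrame(queryDest,0x0806,query,queryLen); // hypothetical TX path
        }
        return m; // may be null (zero) if not yet resolved
    }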