author     Grant Limberg <glimberg@gmail.com>  2015-09-26 13:47:55 -0700
committer  Grant Limberg <glimberg@gmail.com>  2015-09-26 13:47:55 -0700
commit     e8cdff3eafd8096da22122eabddf57a09fe2bb90 (patch)
tree       d231aa6d9ccccc8ced6e1ead606ce16ff551cab9 /node
parent     53d98343b7b444508259f6f1643e8d6724fb11e9 (diff)
parent     f69454ec9879a0b0a424f743ca144d1123ef7e99 (diff)
download   infinitytier-e8cdff3eafd8096da22122eabddf57a09fe2bb90.tar.gz
           infinitytier-e8cdff3eafd8096da22122eabddf57a09fe2bb90.zip
Merge branch 'adamierymenko-dev' into android-jni-dev
also update for changed function calls that now accept a local address

# Conflicts:
#	include/ZeroTierOne.h
#	java/CMakeLists.txt
#	java/jni/Android.mk
#	java/jni/ZT1_jnicache.cpp
#	java/jni/ZT1_jnilookup.h
#	java/jni/ZT1_jniutils.cpp
#	java/jni/com_zerotierone_sdk_Node.cpp
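In practice the merge threads the receiving (local) interface address through the packet path alongside the remote address, as the hunks below show. A minimal before/after sketch, abbreviated from the IncomingPacket.cpp hunks (presumably so a multihomed node can answer from the interface a packet actually arrived on):

// before this merge: only the remote address identified the path
peer->received(RR,_remoteAddress,hops(),packetId(),verb(),0,Packet::VERB_NOP);
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());

// after this merge: the local interface address is passed through as well
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),verb(),0,Packet::VERB_NOP);
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());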
Diffstat (limited to 'node')
-rw-r--r--  node/Address.hpp            14
-rw-r--r--  node/Constants.hpp           2
-rw-r--r--  node/Defaults.cpp            2
-rw-r--r--  node/Hashtable.hpp         419
-rw-r--r--  node/IncomingPacket.cpp     76
-rw-r--r--  node/IncomingPacket.hpp      5
-rw-r--r--  node/InetAddress.cpp        26
-rw-r--r--  node/InetAddress.hpp        44
-rw-r--r--  node/MAC.hpp                 8
-rw-r--r--  node/MulticastGroup.hpp      2
-rw-r--r--  node/Multicaster.cpp        56
-rw-r--r--  node/Multicaster.hpp        17
-rw-r--r--  node/Network.cpp           173
-rw-r--r--  node/Network.hpp            34
-rw-r--r--  node/NetworkConfig.cpp       6
-rw-r--r--  node/NetworkController.hpp   2
-rw-r--r--  node/Node.cpp              225
-rw-r--r--  node/Node.hpp               67
-rw-r--r--  node/Path.hpp               13
-rw-r--r--  node/Peer.cpp               33
-rw-r--r--  node/Peer.hpp                8
-rw-r--r--  node/RemotePath.hpp          9
-rw-r--r--  node/SHA512.cpp             12
-rw-r--r--  node/SelfAwareness.cpp      37
-rw-r--r--  node/SelfAwareness.hpp      14
-rw-r--r--  node/Switch.cpp            179
-rw-r--r--  node/Switch.hpp             48
-rw-r--r--  node/Topology.cpp           20
-rw-r--r--  node/Topology.hpp           14
29 files changed, 1086 insertions, 479 deletions
diff --git a/node/Address.hpp b/node/Address.hpp
index 137e4f4f..0b38ec62 100644
--- a/node/Address.hpp
+++ b/node/Address.hpp
@@ -167,6 +167,15 @@ public:
}
/**
+ * @return Hash code for use with Hashtable
+ */
+ inline unsigned long hashCode() const
+ throw()
+ {
+ return (unsigned long)_a;
+ }
+
+ /**
* @return Hexadecimal string
*/
inline std::string toString() const
@@ -197,11 +206,11 @@ public:
/**
* Check if this address is reserved
- *
+ *
* The all-zero null address and any address beginning with 0xff are
* reserved. (0xff is reserved for future use to designate possibly
* longer addresses, addresses based on IPv6 innards, etc.)
- *
+ *
* @return True if address is reserved and may not be used
*/
inline bool isReserved() const
@@ -230,4 +239,3 @@ private:
} // namespace ZeroTier
#endif
-
diff --git a/node/Constants.hpp b/node/Constants.hpp
index b7aa9817..4f783550 100644
--- a/node/Constants.hpp
+++ b/node/Constants.hpp
@@ -161,7 +161,7 @@
/**
* Default MTU used for Ethernet tap device
*/
-#define ZT_IF_MTU ZT1_MAX_MTU
+#define ZT_IF_MTU ZT_MAX_MTU
/**
* Maximum number of packet fragments we'll support
diff --git a/node/Defaults.cpp b/node/Defaults.cpp
index e64f3844..b311fb6a 100644
--- a/node/Defaults.cpp
+++ b/node/Defaults.cpp
@@ -75,7 +75,7 @@ static inline std::map< Address,Identity > _mkRootTopologyAuth()
Defaults::Defaults() :
defaultRootTopology((const char *)ZT_DEFAULT_ROOT_TOPOLOGY,ZT_DEFAULT_ROOT_TOPOLOGY_LEN),
rootTopologyAuthorities(_mkRootTopologyAuth()),
- v4Broadcast(((uint32_t)0xffffffff),ZT1_DEFAULT_PORT)
+ v4Broadcast(((uint32_t)0xffffffff),ZT_DEFAULT_PORT)
{
}
diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
new file mode 100644
index 00000000..beef1468
--- /dev/null
+++ b/node/Hashtable.hpp
@@ -0,0 +1,419 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ */
+
+#ifndef ZT_HASHTABLE_HPP
+#define ZT_HASHTABLE_HPP
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <stdexcept>
+#include <vector>
+#include <utility>
+#include <algorithm>
+
+namespace ZeroTier {
+
+/**
+ * A minimal hash table implementation for the ZeroTier core
+ *
+ * This is not a drop-in replacement for STL containers, and has several
+ * limitations. Keys can be uint64_t or an object, and if the latter they
+ * must implement a method called hashCode() that returns an unsigned long
+ * value that is evenly distributed.
+ */
+template<typename K,typename V>
+class Hashtable
+{
+private:
+ struct _Bucket
+ {
+ _Bucket(const K &k,const V &v) : k(k),v(v) {}
+ _Bucket(const K &k) : k(k),v() {}
+ _Bucket(const _Bucket &b) : k(b.k),v(b.v) {}
+ inline _Bucket &operator=(const _Bucket &b) { k = b.k; v = b.v; return *this; }
+ K k;
+ V v;
+ _Bucket *next; // must be set manually for each _Bucket
+ };
+
+public:
+ /**
+ * A simple forward iterator (different from STL)
+ *
+ * It's safe to erase the last key, but not others. Don't use set() since that
+ * may rehash and invalidate the iterator. Note that erasing the key will destroy
+ * the targets of the pointers returned by next().
+ */
+ class Iterator
+ {
+ public:
+ /**
+ * @param ht Hash table to iterate over
+ */
+ Iterator(Hashtable &ht) :
+ _idx(0),
+ _ht(&ht),
+ _b(ht._t[0])
+ {
+ }
+
+ /**
+ * @param kptr Pointer to set to point to next key
+ * @param vptr Pointer to set to point to next value
+ * @return True if kptr and vptr are set, false if no more entries
+ */
+ inline bool next(K *&kptr,V *&vptr)
+ {
+ for(;;) {
+ if (_b) {
+ kptr = &(_b->k);
+ vptr = &(_b->v);
+ _b = _b->next;
+ return true;
+ }
+ ++_idx;
+ if (_idx >= _ht->_bc)
+ return false;
+ _b = _ht->_t[_idx];
+ }
+ }
+
+ private:
+ unsigned long _idx;
+ Hashtable *_ht;
+ Hashtable::_Bucket *_b;
+ };
+ friend class Hashtable::Iterator;
+
+ /**
+ * @param bc Initial capacity in buckets (default: 128, must be nonzero)
+ */
+ Hashtable(unsigned long bc = 128) :
+ _t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * bc))),
+ _bc(bc),
+ _s(0)
+ {
+ if (!_t)
+ throw std::bad_alloc();
+ for(unsigned long i=0;i<bc;++i)
+ _t[i] = (_Bucket *)0;
+ }
+
+ Hashtable(const Hashtable<K,V> &ht) :
+ _t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * ht._bc))),
+ _bc(ht._bc),
+ _s(ht._s)
+ {
+ if (!_t)
+ throw std::bad_alloc();
+ for(unsigned long i=0;i<_bc;++i)
+ _t[i] = (_Bucket *)0;
+ for(unsigned long i=0;i<_bc;++i) {
+ const _Bucket *b = ht._t[i];
+ while (b) {
+ _Bucket *nb = new _Bucket(*b);
+ nb->next = _t[i];
+ _t[i] = nb;
+ b = b->next;
+ }
+ }
+ }
+
+ ~Hashtable()
+ {
+ this->clear();
+ ::free(_t);
+ }
+
+ inline Hashtable &operator=(const Hashtable<K,V> &ht)
+ {
+ this->clear();
+ if (ht._s) {
+ for(unsigned long i=0;i<ht._bc;++i) {
+ const _Bucket *b = ht._t[i];
+ while (b) {
+ this->set(b->k,b->v);
+ b = b->next;
+ }
+ }
+ }
+ return *this;
+ }
+
+ /**
+ * Erase all entries
+ */
+ inline void clear()
+ {
+ if (_s) {
+ for(unsigned long i=0;i<_bc;++i) {
+ _Bucket *b = _t[i];
+ while (b) {
+ _Bucket *const nb = b->next;
+ delete b;
+ b = nb;
+ }
+ _t[i] = (_Bucket *)0;
+ }
+ _s = 0;
+ }
+ }
+
+ /**
+ * @return Vector of all keys
+ */
+ inline typename std::vector<K> keys() const
+ {
+ typename std::vector<K> k;
+ if (_s) {
+ k.reserve(_s);
+ for(unsigned long i=0;i<_bc;++i) {
+ _Bucket *b = _t[i];
+ while (b) {
+ k.push_back(b->k);
+ b = b->next;
+ }
+ }
+ }
+ return k;
+ }
+
+ /**
+ * Append all keys (in unspecified order) to the supplied vector or list
+ *
+ * @param v Vector, list, or other compliant container
+ * @tparam C Type of container (generally inferred)
+ */
+ template<typename C>
+ inline void appendKeys(C &v) const
+ {
+ if (_s) {
+ for(unsigned long i=0;i<_bc;++i) {
+ _Bucket *b = _t[i];
+ while (b) {
+ v.push_back(b->k);
+ b = b->next;
+ }
+ }
+ }
+ }
+
+ /**
+ * @return Vector of all entries (pairs of K,V)
+ */
+ inline typename std::vector< std::pair<K,V> > entries() const
+ {
+ typename std::vector< std::pair<K,V> > k;
+ if (_s) {
+ k.reserve(_s);
+ for(unsigned long i=0;i<_bc;++i) {
+ _Bucket *b = _t[i];
+ while (b) {
+ k.push_back(std::pair<K,V>(b->k,b->v));
+ b = b->next;
+ }
+ }
+ }
+ return k;
+ }
+
+ /**
+ * @param k Key
+ * @return Pointer to value or NULL if not found
+ */
+ inline V *get(const K &k)
+ {
+ _Bucket *b = _t[_hc(k) % _bc];
+ while (b) {
+ if (b->k == k)
+ return &(b->v);
+ b = b->next;
+ }
+ return (V *)0;
+ }
+ inline const V *get(const K &k) const { return const_cast<Hashtable *>(this)->get(k); }
+
+ /**
+ * @param k Key to check
+ * @return True if key is present
+ */
+ inline bool contains(const K &k) const
+ {
+ _Bucket *b = _t[_hc(k) % _bc];
+ while (b) {
+ if (b->k == k)
+ return true;
+ b = b->next;
+ }
+ return false;
+ }
+
+ /**
+ * @param k Key
+ * @return True if value was present
+ */
+ inline bool erase(const K &k)
+ {
+ const unsigned long bidx = _hc(k) % _bc;
+ _Bucket *lastb = (_Bucket *)0;
+ _Bucket *b = _t[bidx];
+ while (b) {
+ if (b->k == k) {
+ if (lastb)
+ lastb->next = b->next;
+ else _t[bidx] = b->next;
+ delete b;
+ --_s;
+ return true;
+ }
+ lastb = b;
+ b = b->next;
+ }
+ return false;
+ }
+
+ /**
+ * @param k Key
+ * @param v Value
+ * @return Reference to value in table
+ */
+ inline V &set(const K &k,const V &v)
+ {
+ const unsigned long h = _hc(k);
+ unsigned long bidx = h % _bc;
+
+ _Bucket *b = _t[bidx];
+ while (b) {
+ if (b->k == k) {
+ b->v = v;
+ return b->v;
+ }
+ b = b->next;
+ }
+
+ if (_s >= _bc) {
+ _grow();
+ bidx = h % _bc;
+ }
+
+ b = new _Bucket(k,v);
+ b->next = _t[bidx];
+ _t[bidx] = b;
+ ++_s;
+
+ return b->v;
+ }
+
+ /**
+ * @param k Key
+ * @return Value, possibly newly created
+ */
+ inline V &operator[](const K &k)
+ {
+ const unsigned long h = _hc(k);
+ unsigned long bidx = h % _bc;
+
+ _Bucket *b = _t[bidx];
+ while (b) {
+ if (b->k == k)
+ return b->v;
+ b = b->next;
+ }
+
+ if (_s >= _bc) {
+ _grow();
+ bidx = h % _bc;
+ }
+
+ b = new _Bucket(k);
+ b->next = _t[bidx];
+ _t[bidx] = b;
+ ++_s;
+
+ return b->v;
+ }
+
+ /**
+ * @return Number of entries
+ */
+ inline unsigned long size() const throw() { return _s; }
+
+ /**
+ * @return True if table is empty
+ */
+ inline bool empty() const throw() { return (_s == 0); }
+
+private:
+ template<typename O>
+ static inline unsigned long _hc(const O &obj)
+ {
+ return obj.hashCode();
+ }
+ static inline unsigned long _hc(const uint64_t i)
+ {
+ /* NOTE: this assumes that 'i' is evenly distributed, which is the case for
+ * packet IDs and network IDs -- the two use cases in ZT for uint64_t keys.
+ * These values are also greater than 0xffffffff so they'll map onto a full
+ * bucket count just fine no matter what happens. Normally you'd want to
+ * hash an integer key index in a hash table. */
+ return (unsigned long)i;
+ }
+ static inline unsigned long _hc(const uint32_t i)
+ {
+ // In the uint32_t case we use a simple multiplier for hashing to ensure coverage
+ return ((unsigned long)i * (unsigned long)0x9e3779b1);
+ }
+
+ inline void _grow()
+ {
+ const unsigned long nc = _bc * 2;
+ _Bucket **nt = reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * nc));
+ if (nt) {
+ for(unsigned long i=0;i<nc;++i)
+ nt[i] = (_Bucket *)0;
+ for(unsigned long i=0;i<_bc;++i) {
+ _Bucket *b = _t[i];
+ while (b) {
+ _Bucket *const nb = b->next;
+ const unsigned long nidx = _hc(b->k) % nc;
+ b->next = nt[nidx];
+ nt[nidx] = b;
+ b = nb;
+ }
+ }
+ ::free(_t);
+ _t = nt;
+ _bc = nc;
+ }
+ }
+
+ _Bucket **_t;
+ unsigned long _bc;
+ unsigned long _s;
+};
+
+} // namespace ZeroTier
+
+#endif
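For context, a minimal usage sketch of the container defined above; the identifiers in this sketch are illustrative and not part of the commit:

#include "Hashtable.hpp"

using namespace ZeroTier;

void hashtableExample()
{
	Hashtable<uint64_t,int> counts(128);          // uint64_t keys hash directly via _hc(uint64_t)
	counts.set(0x1122334455667788ULL,1);          // insert or overwrite
	counts[0x1122334455667788ULL] += 1;           // operator[] default-constructs a value if absent

	int *v = counts.get(0x1122334455667788ULL);   // returns NULL if the key is not present
	if (v) {
		// *v == 2 here
	}

	// Iteration returns pointers into the table. Per the class comment, only the
	// key most recently returned by next() may be erased, and set() must not be
	// called while iterating since it may rehash.
	uint64_t *k = (uint64_t *)0;
	int *val = (int *)0;
	Hashtable<uint64_t,int>::Iterator i(counts);
	while (i.next(k,val)) {
		if (*val == 0)
			counts.erase(*k);
	}
}

Non-integer keys (Address, MAC, MulticastGroup, and Multicaster::Key later in this diff) satisfy the same interface by providing operator== and a hashCode() method.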
diff --git a/node/IncomingPacket.cpp b/node/IncomingPacket.cpp
index b1fda8ef..c94ffe2e 100644
--- a/node/IncomingPacket.cpp
+++ b/node/IncomingPacket.cpp
@@ -69,7 +69,7 @@ bool IncomingPacket::tryDecode(const RuntimeEnvironment *RR)
switch(verb()) {
//case Packet::VERB_NOP:
default: // ignore unknown verbs, but if they pass auth check they are "received"
- peer->received(RR,_remoteAddress,hops(),packetId(),verb(),0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),verb(),0,Packet::VERB_NOP);
return true;
case Packet::VERB_HELLO: return _doHELLO(RR);
case Packet::VERB_ERROR: return _doERROR(RR,peer);
@@ -130,7 +130,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
case Packet::ERROR_IDENTITY_COLLISION:
if (RR->topology->isRoot(peer->identity()))
- RR->node->postEvent(ZT1_EVENT_FATAL_ERROR_IDENTITY_COLLISION);
+ RR->node->postEvent(ZT_EVENT_FATAL_ERROR_IDENTITY_COLLISION);
break;
case Packet::ERROR_NEED_MEMBERSHIP_CERTIFICATE: {
@@ -144,7 +144,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE);
nconf->com().serialize(outp);
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
}
} break;
@@ -165,7 +165,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
default: break;
}
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_ERROR,inRePacketId,inReVerb);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_ERROR,inRePacketId,inReVerb);
} catch (std::exception &ex) {
TRACE("dropped ERROR from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
@@ -224,20 +224,20 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
unsigned char key[ZT_PEER_SECRET_KEY_LENGTH];
if (RR->identity.agree(id,key,ZT_PEER_SECRET_KEY_LENGTH)) {
if (dearmor(key)) { // ensure packet is authentic, otherwise drop
- RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
+ RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
TRACE("rejected HELLO from %s(%s): address already claimed",id.address().toString().c_str(),_remoteAddress.toString().c_str());
Packet outp(id.address(),RR->identity.address(),Packet::VERB_ERROR);
outp.append((unsigned char)Packet::VERB_HELLO);
outp.append(packetId());
outp.append((unsigned char)Packet::ERROR_IDENTITY_COLLISION);
outp.armor(key,true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
} else {
- RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
+ RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
}
} else {
- RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
+ RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
TRACE("rejected HELLO from %s(%s): key agreement failed",id.address().toString().c_str(),_remoteAddress.toString().c_str());
}
@@ -246,7 +246,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
// Identity is the same as the one we already have -- check packet integrity
if (!dearmor(peer->key())) {
- RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
+ RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
return true;
}
@@ -258,7 +258,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
// Check identity proof of work
if (!id.locallyValidate()) {
- RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
+ RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
TRACE("dropped HELLO from %s(%s): identity invalid",id.address().toString().c_str(),_remoteAddress.toString().c_str());
return true;
}
@@ -266,7 +266,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
// Check packet integrity and authentication
SharedPtr<Peer> newPeer(new Peer(RR->identity,id));
if (!dearmor(newPeer->key())) {
- RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
+ RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
return true;
}
@@ -278,7 +278,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
// VALID -- continues here
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_HELLO,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_HELLO,0,Packet::VERB_NOP);
peer->setRemoteVersion(protoVersion,vMajor,vMinor,vRevision);
bool trusted = false;
@@ -316,7 +316,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
}
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
} catch (std::exception &ex) {
TRACE("dropped HELLO from %s(%s): %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
@@ -436,7 +436,7 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
default: break;
}
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_OK,inRePacketId,inReVerb);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_OK,inRePacketId,inReVerb);
} catch (std::exception &ex) {
TRACE("dropped OK from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
@@ -456,7 +456,7 @@ bool IncomingPacket::_doWHOIS(const RuntimeEnvironment *RR,const SharedPtr<Peer>
outp.append(packetId());
queried->identity().serialize(outp,false);
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
} else {
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_ERROR);
outp.append((unsigned char)Packet::VERB_WHOIS);
@@ -464,12 +464,12 @@ bool IncomingPacket::_doWHOIS(const RuntimeEnvironment *RR,const SharedPtr<Peer>
outp.append((unsigned char)Packet::ERROR_OBJ_NOT_FOUND);
outp.append(payload(),ZT_ADDRESS_LENGTH);
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
} else {
TRACE("dropped WHOIS from %s(%s): missing or invalid address",source().toString().c_str(),_remoteAddress.toString().c_str());
}
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_WHOIS,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_WHOIS,0,Packet::VERB_NOP);
} catch ( ... ) {
TRACE("dropped WHOIS from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
@@ -487,8 +487,8 @@ bool IncomingPacket::_doRENDEZVOUS(const RuntimeEnvironment *RR,const SharedPtr<
if ((port > 0)&&((addrlen == 4)||(addrlen == 16))) {
InetAddress atAddr(field(ZT_PROTO_VERB_RENDEZVOUS_IDX_ADDRESS,addrlen),addrlen,port);
TRACE("RENDEZVOUS from %s says %s might be at %s, starting NAT-t",peer->address().toString().c_str(),with.toString().c_str(),atAddr.toString().c_str());
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_RENDEZVOUS,0,Packet::VERB_NOP);
- RR->sw->rendezvous(withPeer,atAddr);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_RENDEZVOUS,0,Packet::VERB_NOP);
+ RR->sw->rendezvous(withPeer,_localAddress,atAddr);
} else {
TRACE("dropped corrupt RENDEZVOUS from %s(%s) (bad address or port)",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
}
@@ -525,7 +525,7 @@ bool IncomingPacket::_doFRAME(const RuntimeEnvironment *RR,const SharedPtr<Peer>
RR->node->putFrame(network->id(),MAC(peer->address(),network->id()),network->mac(),etherType,0,field(ZT_PROTO_VERB_FRAME_IDX_PAYLOAD,payloadLen),payloadLen);
}
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_FRAME,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_FRAME,0,Packet::VERB_NOP);
} else {
TRACE("dropped FRAME from %s(%s): we are not connected to network %.16llx",source().toString().c_str(),_remoteAddress.toString().c_str(),at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID));
}
@@ -602,7 +602,7 @@ bool IncomingPacket::_doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<P
RR->node->putFrame(network->id(),from,to,etherType,0,field(comLen + ZT_PROTO_VERB_EXT_FRAME_IDX_PAYLOAD,payloadLen),payloadLen);
}
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_EXT_FRAME,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_EXT_FRAME,0,Packet::VERB_NOP);
} else {
TRACE("dropped EXT_FRAME from %s(%s): we are not connected to network %.16llx",source().toString().c_str(),_remoteAddress.toString().c_str(),at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID));
}
@@ -623,7 +623,7 @@ bool IncomingPacket::_doMULTICAST_LIKE(const RuntimeEnvironment *RR,const Shared
for(unsigned int ptr=ZT_PACKET_IDX_PAYLOAD;ptr<size();ptr+=18)
RR->mc->add(now,at<uint64_t>(ptr),MulticastGroup(MAC(field(ptr + 8,6),6),at<uint32_t>(ptr + 14)),peer->address());
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_LIKE,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_LIKE,0,Packet::VERB_NOP);
} catch (std::exception &ex) {
TRACE("dropped MULTICAST_LIKE from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
@@ -647,7 +647,7 @@ bool IncomingPacket::_doNETWORK_MEMBERSHIP_CERTIFICATE(const RuntimeEnvironment
}
}
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE,0,Packet::VERB_NOP);
} catch (std::exception &ex) {
TRACE("dropped NETWORK_MEMBERSHIP_CERTIFICATE from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
@@ -662,15 +662,15 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
const uint64_t nwid = at<uint64_t>(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_NETWORK_ID);
const unsigned int metaDataLength = at<uint16_t>(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT_LEN);
const Dictionary metaData((const char *)field(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT,metaDataLength),metaDataLength);
- const uint64_t haveRevision = ((ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength + 8) <= size()) ? at<uint64_t>(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength) : 0ULL;
+ //const uint64_t haveRevision = ((ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength + 8) <= size()) ? at<uint64_t>(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength) : 0ULL;
const unsigned int h = hops();
const uint64_t pid = packetId();
- peer->received(RR,_remoteAddress,h,pid,Packet::VERB_NETWORK_CONFIG_REQUEST,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,h,pid,Packet::VERB_NETWORK_CONFIG_REQUEST,0,Packet::VERB_NOP);
if (RR->localNetworkController) {
Dictionary netconf;
- switch(RR->localNetworkController->doNetworkConfigRequest((h > 0) ? InetAddress() : _remoteAddress,RR->identity,peer->identity(),nwid,metaData,haveRevision,netconf)) {
+ switch(RR->localNetworkController->doNetworkConfigRequest((h > 0) ? InetAddress() : _remoteAddress,RR->identity,peer->identity(),nwid,metaData,netconf)) {
case NetworkController::NETCONF_QUERY_OK: {
const std::string netconfStr(netconf.toString());
@@ -688,7 +688,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
if (outp.size() > ZT_PROTO_MAX_PACKET_LENGTH) {
TRACE("NETWORK_CONFIG_REQUEST failed: internal error: netconf size %u is too large",(unsigned int)netconfStr.length());
} else {
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
}
} break;
@@ -700,7 +700,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
outp.append((unsigned char)Packet::ERROR_OBJ_NOT_FOUND);
outp.append(nwid);
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
} break;
case NetworkController::NETCONF_QUERY_ACCESS_DENIED: {
@@ -710,7 +710,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
outp.append((unsigned char)Packet::ERROR_NETWORK_ACCESS_DENIED_);
outp.append(nwid);
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
} break;
case NetworkController::NETCONF_QUERY_INTERNAL_SERVER_ERROR:
@@ -732,7 +732,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
outp.append((unsigned char)Packet::ERROR_UNSUPPORTED_OPERATION);
outp.append(nwid);
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
} catch (std::exception &exc) {
TRACE("dropped NETWORK_CONFIG_REQUEST from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
@@ -753,7 +753,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REFRESH(const RuntimeEnvironment *RR,cons
nw->requestConfiguration();
ptr += 8;
}
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_CONFIG_REFRESH,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_CONFIG_REFRESH,0,Packet::VERB_NOP);
} catch (std::exception &exc) {
TRACE("dropped NETWORK_CONFIG_REFRESH from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
} catch ( ... ) {
@@ -780,11 +780,11 @@ bool IncomingPacket::_doMULTICAST_GATHER(const RuntimeEnvironment *RR,const Shar
outp.append((uint32_t)mg.adi());
if (RR->mc->gather(peer->address(),nwid,mg,outp,gatherLimit)) {
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
}
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_GATHER,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_GATHER,0,Packet::VERB_NOP);
} catch (std::exception &exc) {
TRACE("dropped MULTICAST_GATHER from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
} catch ( ... ) {
@@ -871,12 +871,12 @@ bool IncomingPacket::_doMULTICAST_FRAME(const RuntimeEnvironment *RR,const Share
outp.append((unsigned char)0x02); // flag 0x02 = contains gather results
if (RR->mc->gather(peer->address(),nwid,to,outp,gatherLimit)) {
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
}
} // else ignore -- not a member of this network
- peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_FRAME,0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_FRAME,0,Packet::VERB_NOP);
} catch (std::exception &exc) {
TRACE("dropped MULTICAST_FRAME from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
} catch ( ... ) {
@@ -905,14 +905,14 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
InetAddress a(field(ptr,4),4,at<uint16_t>(ptr + 4));
if ( ((flags & (0x01 | 0x02)) == 0) && (Path::isAddressValidForPath(a)) ) {
TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
- peer->attemptToContactAt(RR,a,RR->node->now());
+ peer->attemptToContactAt(RR,_localAddress,a,RR->node->now());
}
} break;
case 6: {
InetAddress a(field(ptr,16),16,at<uint16_t>(ptr + 16));
if ( ((flags & (0x01 | 0x02)) == 0) && (Path::isAddressValidForPath(a)) ) {
TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
- peer->attemptToContactAt(RR,a,RR->node->now());
+ peer->attemptToContactAt(RR,_localAddress,a,RR->node->now());
}
} break;
}
@@ -934,7 +934,7 @@ void IncomingPacket::_sendErrorNeedCertificate(const RuntimeEnvironment *RR,cons
outp.append((unsigned char)Packet::ERROR_NEED_MEMBERSHIP_CERTIFICATE);
outp.append(nwid);
outp.armor(peer->key(),true);
- RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
} // namespace ZeroTier
diff --git a/node/IncomingPacket.hpp b/node/IncomingPacket.hpp
index 3bf7737d..d19eb5c6 100644
--- a/node/IncomingPacket.hpp
+++ b/node/IncomingPacket.hpp
@@ -72,13 +72,15 @@ public:
*
* @param data Packet data
* @param len Packet length
+ * @param localAddress Local interface address
* @param remoteAddress Address from which packet came
* @param now Current time
* @throws std::out_of_range Range error processing packet
*/
- IncomingPacket(const void *data,unsigned int len,const InetAddress &remoteAddress,uint64_t now) :
+ IncomingPacket(const void *data,unsigned int len,const InetAddress &localAddress,const InetAddress &remoteAddress,uint64_t now) :
Packet(data,len),
_receiveTime(now),
+ _localAddress(localAddress),
_remoteAddress(remoteAddress),
__refCount()
{
@@ -127,6 +129,7 @@ private:
void _sendErrorNeedCertificate(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer,uint64_t nwid);
uint64_t _receiveTime;
+ InetAddress _localAddress;
InetAddress _remoteAddress;
AtomicCounter __refCount;
};
diff --git a/node/InetAddress.cpp b/node/InetAddress.cpp
index 1942c4cd..e542f0d4 100644
--- a/node/InetAddress.cpp
+++ b/node/InetAddress.cpp
@@ -399,4 +399,30 @@ InetAddress InetAddress::makeIpv6LinkLocal(const MAC &mac)
return InetAddress(sin6);
}
+InetAddress InetAddress::makeIpv6rfc4193(uint64_t nwid,uint64_t zeroTierAddress)
+ throw()
+{
+ InetAddress r;
+ struct sockaddr_in6 *const sin6 = reinterpret_cast<struct sockaddr_in6 *>(&r);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_addr.s6_addr[0] = 0xfd;
+ sin6->sin6_addr.s6_addr[1] = (uint8_t)(nwid >> 56);
+ sin6->sin6_addr.s6_addr[2] = (uint8_t)(nwid >> 48);
+ sin6->sin6_addr.s6_addr[3] = (uint8_t)(nwid >> 40);
+ sin6->sin6_addr.s6_addr[4] = (uint8_t)(nwid >> 32);
+ sin6->sin6_addr.s6_addr[5] = (uint8_t)(nwid >> 24);
+ sin6->sin6_addr.s6_addr[6] = (uint8_t)(nwid >> 16);
+ sin6->sin6_addr.s6_addr[7] = (uint8_t)(nwid >> 8);
+ sin6->sin6_addr.s6_addr[8] = (uint8_t)nwid;
+ sin6->sin6_addr.s6_addr[9] = 0x99;
+ sin6->sin6_addr.s6_addr[10] = 0x93;
+ sin6->sin6_addr.s6_addr[11] = (uint8_t)(zeroTierAddress >> 32);
+ sin6->sin6_addr.s6_addr[12] = (uint8_t)(zeroTierAddress >> 24);
+ sin6->sin6_addr.s6_addr[13] = (uint8_t)(zeroTierAddress >> 16);
+ sin6->sin6_addr.s6_addr[14] = (uint8_t)(zeroTierAddress >> 8);
+ sin6->sin6_addr.s6_addr[15] = (uint8_t)zeroTierAddress;
+ sin6->sin6_port = Utils::hton((uint16_t)88); // /88 includes 0xfd + network ID, discriminating by device ID below that
+ return r;
+}
+
} // namespace ZeroTier
diff --git a/node/InetAddress.hpp b/node/InetAddress.hpp
index e3537ce0..3c05d83b 100644
--- a/node/InetAddress.hpp
+++ b/node/InetAddress.hpp
@@ -375,6 +375,50 @@ struct InetAddress : public sockaddr_storage
*/
static InetAddress makeIpv6LinkLocal(const MAC &mac)
throw();
+
+ /**
+ * Compute private IPv6 unicast address from network ID and ZeroTier address
+ *
+ * This generates a private unicast IPv6 address that is mostly compliant
+ * with the letter of RFC4193 and certainly compliant in spirit.
+ *
+ * RFC4193 specifies a format of:
+ *
+ * | 7 bits |1| 40 bits | 16 bits | 64 bits |
+ * | Prefix |L| Global ID | Subnet ID | Interface ID |
+ *
+ * The 'L' bit is set to 1, yielding an address beginning with 0xfd. Then
+ * the network ID is filled into the global ID, subnet ID, and first byte
+ * of the "interface ID" field. Since the first 40 bits of the network ID
+ * is the unique ZeroTier address of its controller, this makes a very
+ * good random global ID. Since network IDs have 24 more bits, we let it
+ * overflow into the interface ID.
+ *
+ * After that we pad with two bytes: 0x99, 0x93, namely the default ZeroTier
+ * port in hex.
+ *
+ * Finally we fill the remaining 40 bits of the interface ID field with
+ * the 40-bit unique ZeroTier device ID of the network member.
+ *
+ * This yields a valid RFC4193 address with a random global ID, a
+ * meaningful subnet ID, and a unique interface ID, all mappable back onto
+ * ZeroTier space.
+ *
+ * This in turn could allow us, on networks numbered this way, to emulate
+ * IPv6 NDP and eliminate all multicast. This could be beneficial for
+ * small devices and huge networks, e.g. IoT applications.
+ *
+ * The returned address is given an odd prefix length of /88, since within
+ * a given network only the last 40 bits (device ID) are variable. This
+ * is a bit unusual but as far as we know should not cause any problems with
+ * any non-braindead IPv6 stack.
+ *
+ * @param nwid 64-bit network ID
+ * @param zeroTierAddress 40-bit device address (in least significant 40 bits, highest 24 bits ignored)
+ * @return IPv6 private unicast address with /88 netmask
+ */
+ static InetAddress makeIpv6rfc4193(uint64_t nwid,uint64_t zeroTierAddress)
+ throw();
};
} // namespace ZeroTier
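A concrete illustration of the layout documented above, using hypothetical IDs that are not taken from this commit: for network ID 0x8056c2e21c000001 and device address 0x1234567890,

InetAddress a(InetAddress::makeIpv6rfc4193(0x8056c2e21c000001ULL,0x1234567890ULL));
// byte layout:  fd | 80 56 c2 e2 1c 00 00 01 | 99 93 | 12 34 56 78 90
//                  |<------ network ID ----->| port  |<--- device --->|
// which reads as the address fd80:56c2:e21c:0000:0199:9312:3456:7890 with a /88 prefix length.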
diff --git a/node/MAC.hpp b/node/MAC.hpp
index 442a7a2e..a9cd43cf 100644
--- a/node/MAC.hpp
+++ b/node/MAC.hpp
@@ -242,12 +242,20 @@ public:
*/
inline unsigned int size() const throw() { return 6; }
+ inline unsigned long hashCode() const throw() { return (unsigned long)_m; }
+
inline MAC &operator=(const MAC &m)
throw()
{
_m = m._m;
return *this;
}
+ inline MAC &operator=(const uint64_t m)
+ throw()
+ {
+ _m = m;
+ return *this;
+ }
inline bool operator==(const MAC &m) const throw() { return (_m == m._m); }
inline bool operator!=(const MAC &m) const throw() { return (_m != m._m); }
diff --git a/node/MulticastGroup.hpp b/node/MulticastGroup.hpp
index 61fb55f2..fad433b5 100644
--- a/node/MulticastGroup.hpp
+++ b/node/MulticastGroup.hpp
@@ -141,6 +141,8 @@ public:
*/
inline uint32_t adi() const throw() { return _adi; }
+ inline unsigned long hashCode() const throw() { return (_mac.hashCode() ^ (unsigned long)_adi); }
+
inline bool operator==(const MulticastGroup &g) const throw() { return ((_mac == g._mac)&&(_adi == g._adi)); }
inline bool operator!=(const MulticastGroup &g) const throw() { return ((_mac != g._mac)||(_adi != g._adi)); }
inline bool operator<(const MulticastGroup &g) const throw()
diff --git a/node/Multicaster.cpp b/node/Multicaster.cpp
index 489c170b..07792737 100644
--- a/node/Multicaster.cpp
+++ b/node/Multicaster.cpp
@@ -41,7 +41,9 @@
namespace ZeroTier {
Multicaster::Multicaster(const RuntimeEnvironment *renv) :
- RR(renv)
+ RR(renv),
+ _groups(1024),
+ _groups_m()
{
}
@@ -54,7 +56,7 @@ void Multicaster::addMultiple(uint64_t now,uint64_t nwid,const MulticastGroup &m
const unsigned char *p = (const unsigned char *)addresses;
const unsigned char *e = p + (5 * count);
Mutex::Lock _l(_groups_m);
- MulticastGroupStatus &gs = _groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)];
+ MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];
while (p != e) {
_add(now,nwid,mg,gs,Address(p,5));
p += 5;
@@ -64,11 +66,11 @@ void Multicaster::addMultiple(uint64_t now,uint64_t nwid,const MulticastGroup &m
void Multicaster::remove(uint64_t nwid,const MulticastGroup &mg,const Address &member)
{
Mutex::Lock _l(_groups_m);
- std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::iterator g(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
- if (g != _groups.end()) {
- for(std::vector<MulticastGroupMember>::iterator m(g->second.members.begin());m!=g->second.members.end();++m) {
+ MulticastGroupStatus *s = _groups.get(Multicaster::Key(nwid,mg));
+ if (s) {
+ for(std::vector<MulticastGroupMember>::iterator m(s->members.begin());m!=s->members.end();++m) {
if (m->address == member) {
- g->second.members.erase(m);
+ s->members.erase(m);
break;
}
}
@@ -102,18 +104,18 @@ unsigned int Multicaster::gather(const Address &queryingPeer,uint64_t nwid,const
Mutex::Lock _l(_groups_m);
- std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::const_iterator gs(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
- if ((gs != _groups.end())&&(!gs->second.members.empty())) {
- totalKnown += (unsigned int)gs->second.members.size();
+ const MulticastGroupStatus *s = _groups.get(Multicaster::Key(nwid,mg));
+ if ((s)&&(!s->members.empty())) {
+ totalKnown += (unsigned int)s->members.size();
// Members are returned in random order so that repeated gather queries
// will return different subsets of a large multicast group.
k = 0;
- while ((added < limit)&&(k < gs->second.members.size())&&((appendTo.size() + ZT_ADDRESS_LENGTH) <= ZT_UDP_DEFAULT_PAYLOAD_MTU)) {
+ while ((added < limit)&&(k < s->members.size())&&((appendTo.size() + ZT_ADDRESS_LENGTH) <= ZT_UDP_DEFAULT_PAYLOAD_MTU)) {
rptr = (unsigned int)RR->node->prng();
restart_member_scan:
- a = gs->second.members[rptr % (unsigned int)gs->second.members.size()].address.toInt();
+ a = s->members[rptr % (unsigned int)s->members.size()].address.toInt();
for(i=0;i<k;++i) {
if (picked[i] == a) {
++rptr;
@@ -146,10 +148,10 @@ std::vector<Address> Multicaster::getMembers(uint64_t nwid,const MulticastGroup
{
std::vector<Address> ls;
Mutex::Lock _l(_groups_m);
- std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::const_iterator gs(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
- if (gs == _groups.end())
+ const MulticastGroupStatus *s = _groups.get(Multicaster::Key(nwid,mg));
+ if (!s)
return ls;
- for(std::vector<MulticastGroupMember>::const_reverse_iterator m(gs->second.members.rbegin());m!=gs->second.members.rend();++m) {
+ for(std::vector<MulticastGroupMember>::const_reverse_iterator m(s->members.rbegin());m!=s->members.rend();++m) {
ls.push_back(m->address);
if (ls.size() >= limit)
break;
@@ -173,7 +175,7 @@ void Multicaster::send(
unsigned long *indexes = idxbuf;
Mutex::Lock _l(_groups_m);
- MulticastGroupStatus &gs = _groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)];
+ MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];
if (!gs.members.empty()) {
// Allocate a memory buffer if group is monstrous
@@ -291,18 +293,22 @@ void Multicaster::send(
void Multicaster::clean(uint64_t now)
{
Mutex::Lock _l(_groups_m);
- for(std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::iterator mm(_groups.begin());mm!=_groups.end();) {
- for(std::list<OutboundMulticast>::iterator tx(mm->second.txQueue.begin());tx!=mm->second.txQueue.end();) {
+
+ Multicaster::Key *k = (Multicaster::Key *)0;
+ MulticastGroupStatus *s = (MulticastGroupStatus *)0;
+ Hashtable<Multicaster::Key,MulticastGroupStatus>::Iterator mm(_groups);
+ while (mm.next(k,s)) {
+ for(std::list<OutboundMulticast>::iterator tx(s->txQueue.begin());tx!=s->txQueue.end();) {
if ((tx->expired(now))||(tx->atLimit()))
- mm->second.txQueue.erase(tx++);
+ s->txQueue.erase(tx++);
else ++tx;
}
unsigned long count = 0;
{
- std::vector<MulticastGroupMember>::iterator reader(mm->second.members.begin());
+ std::vector<MulticastGroupMember>::iterator reader(s->members.begin());
std::vector<MulticastGroupMember>::iterator writer(reader);
- while (reader != mm->second.members.end()) {
+ while (reader != s->members.end()) {
if ((now - reader->timestamp) < ZT_MULTICAST_LIKE_EXPIRE) {
*writer = *reader;
++writer;
@@ -313,13 +319,11 @@ void Multicaster::clean(uint64_t now)
}
if (count) {
- mm->second.members.resize(count);
- ++mm;
- } else if (mm->second.txQueue.empty()) {
- _groups.erase(mm++);
+ s->members.resize(count);
+ } else if (s->txQueue.empty()) {
+ _groups.erase(*k);
} else {
- mm->second.members.clear();
- ++mm;
+ s->members.clear();
}
}
}
diff --git a/node/Multicaster.hpp b/node/Multicaster.hpp
index 0dd199f9..898c4db7 100644
--- a/node/Multicaster.hpp
+++ b/node/Multicaster.hpp
@@ -36,6 +36,7 @@
#include <list>
#include "Constants.hpp"
+#include "Hashtable.hpp"
#include "Address.hpp"
#include "MAC.hpp"
#include "MulticastGroup.hpp"
@@ -56,6 +57,18 @@ class Packet;
class Multicaster : NonCopyable
{
private:
+ struct Key
+ {
+ Key() : nwid(0),mg() {}
+ Key(uint64_t n,const MulticastGroup &g) : nwid(n),mg(g) {}
+
+ uint64_t nwid;
+ MulticastGroup mg;
+
+ inline bool operator==(const Key &k) const throw() { return ((nwid == k.nwid)&&(mg == k.mg)); }
+ inline unsigned long hashCode() const throw() { return (mg.hashCode() ^ (unsigned long)(nwid ^ (nwid >> 32))); }
+ };
+
struct MulticastGroupMember
{
MulticastGroupMember() {}
@@ -89,7 +102,7 @@ public:
inline void add(uint64_t now,uint64_t nwid,const MulticastGroup &mg,const Address &member)
{
Mutex::Lock _l(_groups_m);
- _add(now,nwid,mg,_groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)],member);
+ _add(now,nwid,mg,_groups[Multicaster::Key(nwid,mg)],member);
}
/**
@@ -181,7 +194,7 @@ private:
void _add(uint64_t now,uint64_t nwid,const MulticastGroup &mg,MulticastGroupStatus &gs,const Address &member);
const RuntimeEnvironment *RR;
- std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus > _groups;
+ Hashtable<Multicaster::Key,MulticastGroupStatus> _groups;
Mutex _groups_m;
};
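The Key struct above shows the contract the new Hashtable expects of non-integer keys: copyable value semantics, operator==, and a hashCode() that folds the key into an evenly distributed unsigned long (here the 64-bit network ID is XORed into the multicast group's hash). A hypothetical key type following the same pattern, purely for illustration:

struct ExampleKey
{
	ExampleKey() : a(0),b(0) {}
	ExampleKey(uint64_t a_,uint32_t b_) : a(a_),b(b_) {}

	uint64_t a;
	uint32_t b;

	inline bool operator==(const ExampleKey &k) const throw() { return ((a == k.a)&&(b == k.b)); }
	inline unsigned long hashCode() const throw() { return (unsigned long)(a ^ (uint64_t)b); }
};
// Hashtable<ExampleKey,SomeValue> then behaves just like Hashtable<Multicaster::Key,MulticastGroupStatus>.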
diff --git a/node/Network.cpp b/node/Network.cpp
index 39042fab..2b24d5f9 100644
--- a/node/Network.cpp
+++ b/node/Network.cpp
@@ -92,7 +92,7 @@ Network::Network(const RuntimeEnvironment *renv,uint64_t nwid) :
com.deserialize2(p,e);
if (!com)
break;
- _membershipCertificates.insert(std::pair< Address,CertificateOfMembership >(com.issuedTo(),com));
+ _certInfo[com.issuedTo()].com = com;
}
}
}
@@ -100,44 +100,47 @@ Network::Network(const RuntimeEnvironment *renv,uint64_t nwid) :
}
if (!_portInitialized) {
- ZT1_VirtualNetworkConfig ctmp;
+ ZT_VirtualNetworkConfig ctmp;
_externalConfig(&ctmp);
- _portError = RR->node->configureVirtualNetworkPort(_id,ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_UP,&ctmp);
+ _portError = RR->node->configureVirtualNetworkPort(_id,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP,&ctmp);
_portInitialized = true;
}
}
Network::~Network()
{
- ZT1_VirtualNetworkConfig ctmp;
+ ZT_VirtualNetworkConfig ctmp;
_externalConfig(&ctmp);
char n[128];
if (_destroyed) {
- RR->node->configureVirtualNetworkPort(_id,ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY,&ctmp);
+ RR->node->configureVirtualNetworkPort(_id,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY,&ctmp);
Utils::snprintf(n,sizeof(n),"networks.d/%.16llx.conf",_id);
RR->node->dataStoreDelete(n);
Utils::snprintf(n,sizeof(n),"networks.d/%.16llx.mcerts",_id);
RR->node->dataStoreDelete(n);
} else {
- RR->node->configureVirtualNetworkPort(_id,ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN,&ctmp);
+ RR->node->configureVirtualNetworkPort(_id,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN,&ctmp);
clean();
- std::string buf("ZTMCD0");
Utils::snprintf(n,sizeof(n),"networks.d/%.16llx.mcerts",_id);
- Mutex::Lock _l(_lock);
- if ((!_config)||(_config->isPublic())||(_membershipCertificates.size() == 0)) {
+ Mutex::Lock _l(_lock);
+ if ((!_config)||(_config->isPublic())||(_certInfo.empty())) {
RR->node->dataStoreDelete(n);
- return;
+ } else {
+ std::string buf("ZTMCD0");
+ Hashtable< Address,_RemoteMemberCertificateInfo >::Iterator i(_certInfo);
+ Address *a = (Address *)0;
+ _RemoteMemberCertificateInfo *ci = (_RemoteMemberCertificateInfo *)0;
+ while (i.next(a,ci)) {
+ if (ci->com)
+ ci->com.serialize2(buf);
+ }
+ RR->node->dataStorePut(n,buf,true);
}
-
- for(std::map<Address,CertificateOfMembership>::iterator c(_membershipCertificates.begin());c!=_membershipCertificates.end();++c)
- c->second.serialize2(buf);
-
- RR->node->dataStorePut(n,buf,true);
}
}
@@ -147,7 +150,7 @@ bool Network::subscribedToMulticastGroup(const MulticastGroup &mg,bool includeBr
if (std::binary_search(_myMulticastGroups.begin(),_myMulticastGroups.end(),mg))
return true;
else if (includeBridgedGroups)
- return (_multicastGroupsBehindMe.find(mg) != _multicastGroupsBehindMe.end());
+ return _multicastGroupsBehindMe.contains(mg);
else return false;
}
@@ -181,7 +184,7 @@ bool Network::applyConfiguration(const SharedPtr<NetworkConfig> &conf)
return false;
try {
if ((conf->networkId() == _id)&&(conf->issuedTo() == RR->identity.address())) {
- ZT1_VirtualNetworkConfig ctmp;
+ ZT_VirtualNetworkConfig ctmp;
bool portInitialized;
{
Mutex::Lock _l(_lock);
@@ -192,7 +195,7 @@ bool Network::applyConfiguration(const SharedPtr<NetworkConfig> &conf)
portInitialized = _portInitialized;
_portInitialized = true;
}
- _portError = RR->node->configureVirtualNetworkPort(_id,(portInitialized) ? ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE : ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_UP,&ctmp);
+ _portError = RR->node->configureVirtualNetworkPort(_id,(portInitialized) ? ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE : ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP,&ctmp);
return true;
} else {
TRACE("ignored invalid configuration for network %.16llx (configuration contains mismatched network ID or issued-to address)",(unsigned long long)_id);
@@ -237,7 +240,7 @@ void Network::requestConfiguration()
if (RR->localNetworkController) {
SharedPtr<NetworkConfig> nconf(config2());
Dictionary newconf;
- switch(RR->localNetworkController->doNetworkConfigRequest(InetAddress(),RR->identity,RR->identity,_id,Dictionary(),(nconf) ? nconf->revision() : (uint64_t)0,newconf)) {
+ switch(RR->localNetworkController->doNetworkConfigRequest(InetAddress(),RR->identity,RR->identity,_id,Dictionary(),newconf)) {
case NetworkController::NETCONF_QUERY_OK:
this->setConfiguration(newconf,true);
return;
@@ -284,11 +287,12 @@ bool Network::validateAndAddMembershipCertificate(const CertificateOfMembership
return false;
Mutex::Lock _l(_lock);
- CertificateOfMembership &old = _membershipCertificates[cert.issuedTo()];
- // Nothing to do if the cert hasn't changed -- we get duplicates due to zealous cert pushing
- if (old == cert)
- return true; // but if it's a duplicate of one we already accepted, return is 'true'
+ {
+ const _RemoteMemberCertificateInfo *ci = _certInfo.get(cert.issuedTo());
+ if ((ci)&&(ci->com == cert))
+ return true; // we already have it
+ }
// Check signature, log and return if cert is invalid
if (cert.signedBy() != controller()) {
@@ -322,9 +326,8 @@ bool Network::validateAndAddMembershipCertificate(const CertificateOfMembership
}
}
- // If we made it past authentication, update cert
- if (cert.revision() != old.revision())
- old = cert;
+ // If we made it past authentication, add or update cert in our cert info store
+ _certInfo[cert.issuedTo()].com = cert;
return true;
}
@@ -333,9 +336,9 @@ bool Network::peerNeedsOurMembershipCertificate(const Address &to,uint64_t now)
{
Mutex::Lock _l(_lock);
if ((_config)&&(!_config->isPublic())&&(_config->com())) {
- uint64_t &lastPushed = _lastPushedMembershipCertificate[to];
- if ((now - lastPushed) > (ZT_NETWORK_AUTOCONF_DELAY / 2)) {
- lastPushed = now;
+ _RemoteMemberCertificateInfo &ci = _certInfo[to];
+ if ((now - ci.lastPushed) > (ZT_NETWORK_AUTOCONF_DELAY / 2)) {
+ ci.lastPushed = now;
return true;
}
}
@@ -352,31 +355,28 @@ void Network::clean()
if ((_config)&&(_config->isPublic())) {
// Open (public) networks do not track certs or cert pushes at all.
- _membershipCertificates.clear();
- _lastPushedMembershipCertificate.clear();
+ _certInfo.clear();
} else if (_config) {
- // Clean certificates that are no longer valid from the cache.
- for(std::map<Address,CertificateOfMembership>::iterator c=(_membershipCertificates.begin());c!=_membershipCertificates.end();) {
- if (_config->com().agreesWith(c->second))
- ++c;
- else _membershipCertificates.erase(c++);
- }
-
- // Clean entries from the last pushed tracking map if they're so old as
- // to be no longer relevant.
- uint64_t forgetIfBefore = now - (ZT_PEER_ACTIVITY_TIMEOUT * 16); // arbitrary reasonable cutoff
- for(std::map<Address,uint64_t>::iterator lp(_lastPushedMembershipCertificate.begin());lp!=_lastPushedMembershipCertificate.end();) {
- if (lp->second < forgetIfBefore)
- _lastPushedMembershipCertificate.erase(lp++);
- else ++lp;
+ // Clean obsolete entries from private network cert info table
+ Hashtable< Address,_RemoteMemberCertificateInfo >::Iterator i(_certInfo);
+ Address *a = (Address *)0;
+ _RemoteMemberCertificateInfo *ci = (_RemoteMemberCertificateInfo *)0;
+ const uint64_t forgetIfBefore = now - (ZT_PEER_ACTIVITY_TIMEOUT * 16); // arbitrary reasonable cutoff
+ while (i.next(a,ci)) {
+ if ((ci->lastPushed < forgetIfBefore)&&(!ci->com.agreesWith(_config->com())))
+ _certInfo.erase(*a);
}
}
// Clean learned multicast groups if we haven't heard from them in a while
- for(std::map<MulticastGroup,uint64_t>::iterator mg(_multicastGroupsBehindMe.begin());mg!=_multicastGroupsBehindMe.end();) {
- if ((now - mg->second) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
- _multicastGroupsBehindMe.erase(mg++);
- else ++mg;
+ {
+ Hashtable< MulticastGroup,uint64_t >::Iterator i(_multicastGroupsBehindMe);
+ MulticastGroup *mg = (MulticastGroup *)0;
+ uint64_t *ts = (uint64_t *)0;
+ while (i.next(mg,ts)) {
+ if ((now - *ts) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
+ _multicastGroupsBehindMe.erase(*mg);
+ }
}
}
@@ -385,22 +385,34 @@ void Network::learnBridgeRoute(const MAC &mac,const Address &addr)
Mutex::Lock _l(_lock);
_remoteBridgeRoutes[mac] = addr;
- // If _remoteBridgeRoutes exceeds sanity limit, trim worst offenders until below -- denial of service circuit breaker
+ // Anti-DOS circuit breaker to prevent nodes from spamming us with absurd numbers of bridge routes
while (_remoteBridgeRoutes.size() > ZT_MAX_BRIDGE_ROUTES) {
- std::map<Address,unsigned long> counts;
+ Hashtable< Address,unsigned long > counts;
Address maxAddr;
unsigned long maxCount = 0;
- for(std::map<MAC,Address>::iterator br(_remoteBridgeRoutes.begin());br!=_remoteBridgeRoutes.end();++br) {
- unsigned long c = ++counts[br->second];
- if (c > maxCount) {
- maxCount = c;
- maxAddr = br->second;
+
+ MAC *m = (MAC *)0;
+ Address *a = (Address *)0;
+
+ // Find the address responsible for the most entries
+ {
+ Hashtable<MAC,Address>::Iterator i(_remoteBridgeRoutes);
+ while (i.next(m,a)) {
+ const unsigned long c = ++counts[*a];
+ if (c > maxCount) {
+ maxCount = c;
+ maxAddr = *a;
+ }
}
}
- for(std::map<MAC,Address>::iterator br(_remoteBridgeRoutes.begin());br!=_remoteBridgeRoutes.end();) {
- if (br->second == maxAddr)
- _remoteBridgeRoutes.erase(br++);
- else ++br;
+
+ // Kill this address from our table, since it's most likely spamming us
+ {
+ Hashtable<MAC,Address>::Iterator i(_remoteBridgeRoutes);
+ while (i.next(m,a)) {
+ if (*a == maxAddr)
+ _remoteBridgeRoutes.erase(*m);
+ }
}
}
}
@@ -408,8 +420,8 @@ void Network::learnBridgeRoute(const MAC &mac,const Address &addr)
void Network::learnBridgedMulticastGroup(const MulticastGroup &mg,uint64_t now)
{
Mutex::Lock _l(_lock);
- unsigned long tmp = (unsigned long)_multicastGroupsBehindMe.size();
- _multicastGroupsBehindMe[mg] = now;
+ const unsigned long tmp = (unsigned long)_multicastGroupsBehindMe.size();
+ _multicastGroupsBehindMe.set(mg,now);
if (tmp != _multicastGroupsBehindMe.size())
_announceMulticastGroups();
}
@@ -419,9 +431,9 @@ void Network::setEnabled(bool enabled)
Mutex::Lock _l(_lock);
if (_enabled != enabled) {
_enabled = enabled;
- ZT1_VirtualNetworkConfig ctmp;
+ ZT_VirtualNetworkConfig ctmp;
_externalConfig(&ctmp);
- _portError = RR->node->configureVirtualNetworkPort(_id,ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE,&ctmp);
+ _portError = RR->node->configureVirtualNetworkPort(_id,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE,&ctmp);
}
}
@@ -432,24 +444,24 @@ void Network::destroy()
_destroyed = true;
}
-ZT1_VirtualNetworkStatus Network::_status() const
+ZT_VirtualNetworkStatus Network::_status() const
{
// assumes _lock is locked
if (_portError)
- return ZT1_NETWORK_STATUS_PORT_ERROR;
+ return ZT_NETWORK_STATUS_PORT_ERROR;
switch(_netconfFailure) {
case NETCONF_FAILURE_ACCESS_DENIED:
- return ZT1_NETWORK_STATUS_ACCESS_DENIED;
+ return ZT_NETWORK_STATUS_ACCESS_DENIED;
case NETCONF_FAILURE_NOT_FOUND:
- return ZT1_NETWORK_STATUS_NOT_FOUND;
+ return ZT_NETWORK_STATUS_NOT_FOUND;
case NETCONF_FAILURE_NONE:
- return ((_config) ? ZT1_NETWORK_STATUS_OK : ZT1_NETWORK_STATUS_REQUESTING_CONFIGURATION);
+ return ((_config) ? ZT_NETWORK_STATUS_OK : ZT_NETWORK_STATUS_REQUESTING_CONFIGURATION);
default:
- return ZT1_NETWORK_STATUS_PORT_ERROR;
+ return ZT_NETWORK_STATUS_PORT_ERROR;
}
}
-void Network::_externalConfig(ZT1_VirtualNetworkConfig *ec) const
+void Network::_externalConfig(ZT_VirtualNetworkConfig *ec) const
{
// assumes _lock is locked
ec->nwid = _id;
@@ -458,7 +470,7 @@ void Network::_externalConfig(ZT1_VirtualNetworkConfig *ec) const
Utils::scopy(ec->name,sizeof(ec->name),_config->name().c_str());
else ec->name[0] = (char)0;
ec->status = _status();
- ec->type = (_config) ? (_config->isPrivate() ? ZT1_NETWORK_TYPE_PRIVATE : ZT1_NETWORK_TYPE_PUBLIC) : ZT1_NETWORK_TYPE_PRIVATE;
+ ec->type = (_config) ? (_config->isPrivate() ? ZT_NETWORK_TYPE_PRIVATE : ZT_NETWORK_TYPE_PUBLIC) : ZT_NETWORK_TYPE_PRIVATE;
ec->mtu = ZT_IF_MTU;
ec->dhcp = 0;
ec->bridge = (_config) ? ((_config->allowPassiveBridging() || (std::find(_config->activeBridges().begin(),_config->activeBridges().end(),RR->identity.address()) != _config->activeBridges().end())) ? 1 : 0) : 0;
@@ -467,7 +479,7 @@ void Network::_externalConfig(ZT1_VirtualNetworkConfig *ec) const
ec->enabled = (_enabled) ? 1 : 0;
ec->netconfRevision = (_config) ? (unsigned long)_config->revision() : 0;
- ec->multicastSubscriptionCount = std::min((unsigned int)_myMulticastGroups.size(),(unsigned int)ZT1_MAX_NETWORK_MULTICAST_SUBSCRIPTIONS);
+ ec->multicastSubscriptionCount = std::min((unsigned int)_myMulticastGroups.size(),(unsigned int)ZT_MAX_NETWORK_MULTICAST_SUBSCRIPTIONS);
for(unsigned int i=0;i<ec->multicastSubscriptionCount;++i) {
ec->multicastSubscriptions[i].mac = _myMulticastGroups[i].mac().toInt();
ec->multicastSubscriptions[i].adi = _myMulticastGroups[i].adi();
@@ -475,7 +487,7 @@ void Network::_externalConfig(ZT1_VirtualNetworkConfig *ec) const
if (_config) {
ec->assignedAddressCount = (unsigned int)_config->staticIps().size();
- for(unsigned long i=0;i<ZT1_MAX_ZT_ASSIGNED_ADDRESSES;++i) {
+ for(unsigned long i=0;i<ZT_MAX_ZT_ASSIGNED_ADDRESSES;++i) {
if (i < _config->staticIps().size())
memcpy(&(ec->assignedAddresses[i]),&(_config->staticIps()[i]),sizeof(struct sockaddr_storage));
}
@@ -490,12 +502,10 @@ bool Network::_isAllowed(const Address &peer) const
return false;
if (_config->isPublic())
return true;
-
- std::map<Address,CertificateOfMembership>::const_iterator pc(_membershipCertificates.find(peer));
- if (pc == _membershipCertificates.end())
- return false; // no certificate on file
-
- return _config->com().agreesWith(pc->second); // is other cert valid against ours?
+ const _RemoteMemberCertificateInfo *ci = _certInfo.get(peer);
+ if (!ci)
+ return false;
+ return _config->com().agreesWith(ci->com);
} catch (std::exception &exc) {
TRACE("isAllowed() check failed for peer %s: unexpected exception: %s",peer.toString().c_str(),exc.what());
} catch ( ... ) {
@@ -510,8 +520,7 @@ std::vector<MulticastGroup> Network::_allMulticastGroups() const
std::vector<MulticastGroup> mgs;
mgs.reserve(_myMulticastGroups.size() + _multicastGroupsBehindMe.size() + 1);
mgs.insert(mgs.end(),_myMulticastGroups.begin(),_myMulticastGroups.end());
- for(std::map< MulticastGroup,uint64_t >::const_iterator i(_multicastGroupsBehindMe.begin());i!=_multicastGroupsBehindMe.end();++i)
- mgs.push_back(i->first);
+ _multicastGroupsBehindMe.appendKeys(mgs);
if ((_config)&&(_config->enableBroadcast()))
mgs.push_back(Network::BROADCAST);
std::sort(mgs.begin(),mgs.end());
diff --git a/node/Network.hpp b/node/Network.hpp
index d7320d46..ad9f18de 100644
--- a/node/Network.hpp
+++ b/node/Network.hpp
@@ -40,6 +40,7 @@
#include "Constants.hpp"
#include "NonCopyable.hpp"
+#include "Hashtable.hpp"
#include "Address.hpp"
#include "Mutex.hpp"
#include "SharedPtr.hpp"
@@ -221,7 +222,7 @@ public:
/**
* @return Status of this network
*/
- inline ZT1_VirtualNetworkStatus status() const
+ inline ZT_VirtualNetworkStatus status() const
{
Mutex::Lock _l(_lock);
return _status();
@@ -230,7 +231,7 @@ public:
/**
* @param ec Buffer to fill with externally-visible network configuration
*/
- inline void externalConfig(ZT1_VirtualNetworkConfig *ec) const
+ inline void externalConfig(ZT_VirtualNetworkConfig *ec) const
{
Mutex::Lock _l(_lock);
_externalConfig(ec);
@@ -297,10 +298,10 @@ public:
inline Address findBridgeTo(const MAC &mac) const
{
Mutex::Lock _l(_lock);
- std::map<MAC,Address>::const_iterator br(_remoteBridgeRoutes.find(mac));
- if (br == _remoteBridgeRoutes.end())
- return Address();
- return br->second;
+ const Address *const br = _remoteBridgeRoutes.get(mac);
+ if (br)
+ return *br;
+ return Address();
}
/**
@@ -346,8 +347,15 @@ public:
inline bool operator>=(const Network &n) const throw() { return (_id >= n._id); }
private:
- ZT1_VirtualNetworkStatus _status() const;
- void _externalConfig(ZT1_VirtualNetworkConfig *ec) const; // assumes _lock is locked
+ struct _RemoteMemberCertificateInfo
+ {
+ _RemoteMemberCertificateInfo() : com(),lastPushed(0) {}
+ CertificateOfMembership com; // remote member's COM
+ uint64_t lastPushed; // when did we last push ours to them?
+ };
+
+ ZT_VirtualNetworkStatus _status() const;
+ void _externalConfig(ZT_VirtualNetworkConfig *ec) const; // assumes _lock is locked
bool _isAllowed(const Address &peer) const;
void _announceMulticastGroups();
std::vector<MulticastGroup> _allMulticastGroups() const;
@@ -358,13 +366,11 @@ private:
volatile bool _enabled;
volatile bool _portInitialized;
- std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to including those behind us (updated periodically)
- std::map< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups bridged to us and when we last saw activity on each
-
- std::map<MAC,Address> _remoteBridgeRoutes; // remote addresses where given MACs are reachable
+ std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to (according to tap)
+ Hashtable< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups that seem to be behind us and when we last saw them (if we are a bridge)
+ Hashtable< MAC,Address > _remoteBridgeRoutes; // remote addresses where given MACs are reachable (for tracking devices behind remote bridges)
- std::map<Address,CertificateOfMembership> _membershipCertificates; // Other members' certificates of membership
- std::map<Address,uint64_t> _lastPushedMembershipCertificate; // When did we last push our certificate to each remote member?
+ Hashtable< Address,_RemoteMemberCertificateInfo > _certInfo;
SharedPtr<NetworkConfig> _config; // Most recent network configuration, which is an immutable value-object
volatile uint64_t _lastConfigUpdate;
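
The _isAllowed() change above swaps the old std::map find/iterator check for a pointer-returning Hashtable::get(), where a null result means "no certificate on file". A minimal standalone sketch of that lookup pattern, using std::unordered_map and hypothetical Com/CertInfo stand-ins rather than the project's Hashtable and CertificateOfMembership:

#include <cstdint>
#include <cstdio>
#include <unordered_map>

// Hypothetical stand-ins for CertificateOfMembership and the per-member record.
struct Com
{
	uint64_t revision;
	explicit Com(uint64_t r = 0) : revision(r) {}
	bool agreesWith(const Com &other) const { return (revision == other.revision); } // toy agreement rule
};
struct CertInfo
{
	Com com;             // remote member's certificate
	uint64_t lastPushed; // when we last pushed ours to them
	CertInfo() : com(),lastPushed(0) {}
	explicit CertInfo(const Com &c) : com(c),lastPushed(0) {}
};

// Pointer-returning lookup: null means "no certificate on file".
static const CertInfo *getCert(const std::unordered_map<uint64_t,CertInfo> &certInfo,uint64_t peer)
{
	std::unordered_map<uint64_t,CertInfo>::const_iterator i(certInfo.find(peer));
	return (i == certInfo.end()) ? (const CertInfo *)0 : &(i->second);
}

int main()
{
	std::unordered_map<uint64_t,CertInfo> certInfo;
	certInfo[0x1122334455ULL] = CertInfo(Com(7));

	const Com myCom(7);
	const CertInfo *ci = getCert(certInfo,0x1122334455ULL);
	std::printf("known peer: %s\n",((ci)&&(myCom.agreesWith(ci->com))) ? "allowed" : "denied");
	std::printf("unknown peer: %s\n",(getCert(certInfo,0x99ULL)) ? "allowed" : "denied");
	return 0;
}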
diff --git a/node/NetworkConfig.cpp b/node/NetworkConfig.cpp
index 7898646c..e46da4a4 100644
--- a/node/NetworkConfig.cpp
+++ b/node/NetworkConfig.cpp
@@ -108,7 +108,7 @@ void NetworkConfig::_fromDictionary(const Dictionary &d)
_private = (Utils::hexStrToUInt(d.get(ZT_NETWORKCONFIG_DICT_KEY_PRIVATE,one).c_str()) != 0);
_enableBroadcast = (Utils::hexStrToUInt(d.get(ZT_NETWORKCONFIG_DICT_KEY_ENABLE_BROADCAST,one).c_str()) != 0);
_name = d.get(ZT_NETWORKCONFIG_DICT_KEY_NAME);
- if (_name.length() > ZT1_MAX_NETWORK_SHORT_NAME_LENGTH)
+ if (_name.length() > ZT_MAX_NETWORK_SHORT_NAME_LENGTH)
throw std::invalid_argument("network short name too long (max: 255 characters)");
// In dictionary IPs are split into V4 and V6 addresses, but we don't really
@@ -142,8 +142,8 @@ void NetworkConfig::_fromDictionary(const Dictionary &d)
_localRoutes.push_back(addr);
else _staticIps.push_back(addr);
}
- if (_localRoutes.size() > ZT1_MAX_ZT_ASSIGNED_ADDRESSES) throw std::invalid_argument("too many ZT-assigned routes");
- if (_staticIps.size() > ZT1_MAX_ZT_ASSIGNED_ADDRESSES) throw std::invalid_argument("too many ZT-assigned IP addresses");
+ if (_localRoutes.size() > ZT_MAX_ZT_ASSIGNED_ADDRESSES) throw std::invalid_argument("too many ZT-assigned routes");
+ if (_staticIps.size() > ZT_MAX_ZT_ASSIGNED_ADDRESSES) throw std::invalid_argument("too many ZT-assigned IP addresses");
std::sort(_localRoutes.begin(),_localRoutes.end());
_localRoutes.erase(std::unique(_localRoutes.begin(),_localRoutes.end()),_localRoutes.end());
std::sort(_staticIps.begin(),_staticIps.end());
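
The route/IP deduplication above is the standard sort/unique/erase idiom; a minimal standalone sketch with plain ints standing in for the InetAddress values:

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
	std::vector<int> localRoutes = { 30, 10, 20, 10, 30 };
	std::sort(localRoutes.begin(),localRoutes.end());
	// std::unique moves duplicates to the tail and returns the new logical end; erase drops them.
	localRoutes.erase(std::unique(localRoutes.begin(),localRoutes.end()),localRoutes.end());
	for (int r : localRoutes)
		std::printf("%d ",r); // prints: 10 20 30
	std::printf("\n");
	return 0;
}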
diff --git a/node/NetworkController.hpp b/node/NetworkController.hpp
index ee481a62..cf327d7e 100644
--- a/node/NetworkController.hpp
+++ b/node/NetworkController.hpp
@@ -75,7 +75,6 @@ public:
* @param identity Originating peer ZeroTier identity
* @param nwid 64-bit network ID
* @param metaData Meta-data bundled with request (empty if none)
- * @param haveRevision Network revision ID sent by requesting peer or 0 if none
* @param result Dictionary to receive resulting signed netconf on success
* @return Returns NETCONF_QUERY_OK if result dictionary is valid, or an error code on error
*/
@@ -85,7 +84,6 @@ public:
const Identity &identity,
uint64_t nwid,
const Dictionary &metaData,
- uint64_t haveRevision,
Dictionary &result) = 0;
};
diff --git a/node/Node.cpp b/node/Node.cpp
index 534c085d..6dc83d4e 100644
--- a/node/Node.cpp
+++ b/node/Node.cpp
@@ -48,6 +48,8 @@
#include "SelfAwareness.hpp"
#include "Defaults.hpp"
+const struct sockaddr_storage ZT_SOCKADDR_NULL = {0};
+
namespace ZeroTier {
/****************************************************************************/
@@ -57,12 +59,12 @@ namespace ZeroTier {
Node::Node(
uint64_t now,
void *uptr,
- ZT1_DataStoreGetFunction dataStoreGetFunction,
- ZT1_DataStorePutFunction dataStorePutFunction,
- ZT1_WirePacketSendFunction wirePacketSendFunction,
- ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
- ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
- ZT1_EventCallback eventCallback,
+ ZT_DataStoreGetFunction dataStoreGetFunction,
+ ZT_DataStorePutFunction dataStorePutFunction,
+ ZT_WirePacketSendFunction wirePacketSendFunction,
+ ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
+ ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
+ ZT_EventCallback eventCallback,
const char *overrideRootTopology) :
_RR(this),
RR(&_RR),
@@ -141,7 +143,7 @@ Node::Node(
}
RR->topology->setRootServers(Dictionary(rt.get("rootservers","")));
- postEvent(ZT1_EVENT_UP);
+ postEvent(ZT_EVENT_UP);
}
Node::~Node()
@@ -155,19 +157,20 @@ Node::~Node()
delete RR->sw;
}
-ZT1_ResultCode Node::processWirePacket(
+ZT_ResultCode Node::processWirePacket(
uint64_t now,
+ const struct sockaddr_storage *localAddress,
const struct sockaddr_storage *remoteAddress,
const void *packetData,
unsigned int packetLength,
volatile uint64_t *nextBackgroundTaskDeadline)
{
_now = now;
- RR->sw->onRemotePacket(*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
- return ZT1_RESULT_OK;
+ RR->sw->onRemotePacket(*(reinterpret_cast<const InetAddress *>(localAddress)),*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
+ return ZT_RESULT_OK;
}
-ZT1_ResultCode Node::processVirtualNetworkFrame(
+ZT_ResultCode Node::processVirtualNetworkFrame(
uint64_t now,
uint64_t nwid,
uint64_t sourceMac,
@@ -182,8 +185,8 @@ ZT1_ResultCode Node::processVirtualNetworkFrame(
SharedPtr<Network> nw(this->network(nwid));
if (nw) {
RR->sw->onLocalEthernet(nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
- return ZT1_RESULT_OK;
- } else return ZT1_RESULT_ERROR_NETWORK_NOT_FOUND;
+ return ZT_RESULT_OK;
+ } else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
class _PingPeersThatNeedPing
@@ -227,12 +230,14 @@ private:
std::vector<Address> _rootAddresses;
};
-ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
+ZT_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
{
_now = now;
Mutex::Lock bl(_backgroundTasksLock);
- if ((now - _lastPingCheck) >= ZT_PING_CHECK_INVERVAL) {
+ unsigned long timeUntilNextPingCheck = ZT_PING_CHECK_INVERVAL;
+ const uint64_t timeSinceLastPingCheck = now - _lastPingCheck;
+ if (timeSinceLastPingCheck >= ZT_PING_CHECK_INVERVAL) {
try {
_lastPingCheck = now;
@@ -261,7 +266,7 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
if (nr->second) {
SharedPtr<Peer> rp(RR->topology->getPeer(nr->first));
if ((rp)&&(!rp->hasActiveDirectPath(now)))
- rp->attemptToContactAt(RR,nr->second,now);
+ rp->attemptToContactAt(RR,InetAddress(),nr->second,now);
}
}
@@ -273,10 +278,12 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
bool oldOnline = _online;
_online = ((now - pfunc.lastReceiveFromUpstream) < ZT_PEER_ACTIVITY_TIMEOUT);
if (oldOnline != _online)
- postEvent(_online ? ZT1_EVENT_ONLINE : ZT1_EVENT_OFFLINE);
+ postEvent(_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
+ } else {
+ timeUntilNextPingCheck -= (unsigned long)timeSinceLastPingCheck;
}
if ((now - _lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
@@ -286,30 +293,30 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
RR->sa->clean(now);
RR->mc->clean(now);
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
try {
- *nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min((unsigned long)ZT_PING_CHECK_INVERVAL,RR->sw->doTimerTasks(now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
+ *nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min(timeUntilNextPingCheck,RR->sw->doTimerTasks(now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
- return ZT1_RESULT_OK;
+ return ZT_RESULT_OK;
}
-ZT1_ResultCode Node::join(uint64_t nwid)
+ZT_ResultCode Node::join(uint64_t nwid)
{
Mutex::Lock _l(_networks_m);
SharedPtr<Network> nw = _network(nwid);
if(!nw)
_networks.push_back(std::pair< uint64_t,SharedPtr<Network> >(nwid,SharedPtr<Network>(new Network(RR,nwid))));
std::sort(_networks.begin(),_networks.end()); // will sort by nwid since it's the first in a pair<>
- return ZT1_RESULT_OK;
+ return ZT_RESULT_OK;
}
-ZT1_ResultCode Node::leave(uint64_t nwid)
+ZT_ResultCode Node::leave(uint64_t nwid)
{
std::vector< std::pair< uint64_t,SharedPtr<Network> > > newn;
Mutex::Lock _l(_networks_m);
@@ -319,25 +326,25 @@ ZT1_ResultCode Node::leave(uint64_t nwid)
else n->second->destroy();
}
_networks.swap(newn);
- return ZT1_RESULT_OK;
+ return ZT_RESULT_OK;
}
-ZT1_ResultCode Node::multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
+ZT_ResultCode Node::multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
SharedPtr<Network> nw(this->network(nwid));
if (nw) {
nw->multicastSubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
- return ZT1_RESULT_OK;
- } else return ZT1_RESULT_ERROR_NETWORK_NOT_FOUND;
+ return ZT_RESULT_OK;
+ } else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
-ZT1_ResultCode Node::multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
+ZT_ResultCode Node::multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
SharedPtr<Network> nw(this->network(nwid));
if (nw) {
nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
- return ZT1_RESULT_OK;
- } else return ZT1_RESULT_ERROR_NETWORK_NOT_FOUND;
+ return ZT_RESULT_OK;
+ } else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
uint64_t Node::address() const
@@ -345,7 +352,7 @@ uint64_t Node::address() const
return RR->identity.address().toInt();
}
-void Node::status(ZT1_NodeStatus *status) const
+void Node::status(ZT_NodeStatus *status) const
{
status->address = RR->identity.address().toInt();
status->publicIdentity = RR->publicIdentityStr.c_str();
@@ -353,19 +360,20 @@ void Node::status(ZT1_NodeStatus *status) const
status->online = _online ? 1 : 0;
}
-ZT1_PeerList *Node::peers() const
+ZT_PeerList *Node::peers() const
{
- std::map< Address,SharedPtr<Peer> > peers(RR->topology->allPeers());
+ std::vector< std::pair< Address,SharedPtr<Peer> > > peers(RR->topology->allPeers());
+ std::sort(peers.begin(),peers.end());
- char *buf = (char *)::malloc(sizeof(ZT1_PeerList) + (sizeof(ZT1_Peer) * peers.size()));
+ char *buf = (char *)::malloc(sizeof(ZT_PeerList) + (sizeof(ZT_Peer) * peers.size()));
if (!buf)
- return (ZT1_PeerList *)0;
- ZT1_PeerList *pl = (ZT1_PeerList *)buf;
- pl->peers = (ZT1_Peer *)(buf + sizeof(ZT1_PeerList));
+ return (ZT_PeerList *)0;
+ ZT_PeerList *pl = (ZT_PeerList *)buf;
+ pl->peers = (ZT_Peer *)(buf + sizeof(ZT_PeerList));
pl->peerCount = 0;
- for(std::map< Address,SharedPtr<Peer> >::iterator pi(peers.begin());pi!=peers.end();++pi) {
- ZT1_Peer *p = &(pl->peers[pl->peerCount++]);
+ for(std::vector< std::pair< Address,SharedPtr<Peer> > >::iterator pi(peers.begin());pi!=peers.end();++pi) {
+ ZT_Peer *p = &(pl->peers[pl->peerCount++]);
p->address = pi->second->address().toInt();
p->lastUnicastFrame = pi->second->lastUnicastFrame();
p->lastMulticastFrame = pi->second->lastMulticastFrame();
@@ -379,7 +387,7 @@ ZT1_PeerList *Node::peers() const
p->versionRev = -1;
}
p->latency = pi->second->latency();
- p->role = RR->topology->isRoot(pi->second->identity()) ? ZT1_PEER_ROLE_ROOT : ZT1_PEER_ROLE_LEAF;
+ p->role = RR->topology->isRoot(pi->second->identity()) ? ZT_PEER_ROLE_ROOT : ZT_PEER_ROLE_LEAF;
std::vector<RemotePath> paths(pi->second->paths());
RemotePath *bestPath = pi->second->getBestPath(_now);
@@ -398,27 +406,27 @@ ZT1_PeerList *Node::peers() const
return pl;
}
-ZT1_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
+ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
Mutex::Lock _l(_networks_m);
SharedPtr<Network> nw = _network(nwid);
if(nw) {
- ZT1_VirtualNetworkConfig *nc = (ZT1_VirtualNetworkConfig *)::malloc(sizeof(ZT1_VirtualNetworkConfig));
+ ZT_VirtualNetworkConfig *nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
nw->externalConfig(nc);
return nc;
}
- return (ZT1_VirtualNetworkConfig *)0;
+ return (ZT_VirtualNetworkConfig *)0;
}
-ZT1_VirtualNetworkList *Node::networks() const
+ZT_VirtualNetworkList *Node::networks() const
{
Mutex::Lock _l(_networks_m);
- char *buf = (char *)::malloc(sizeof(ZT1_VirtualNetworkList) + (sizeof(ZT1_VirtualNetworkConfig) * _networks.size()));
+ char *buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * _networks.size()));
if (!buf)
- return (ZT1_VirtualNetworkList *)0;
- ZT1_VirtualNetworkList *nl = (ZT1_VirtualNetworkList *)buf;
- nl->networks = (ZT1_VirtualNetworkConfig *)(buf + sizeof(ZT1_VirtualNetworkList));
+ return (ZT_VirtualNetworkList *)0;
+ ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *)buf;
+ nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));
nl->networkCount = 0;
for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n)
@@ -433,7 +441,7 @@ void Node::freeQueryResult(void *qr)
::free(qr);
}
-int Node::addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT1_LocalInterfaceAddressTrust trust)
+int Node::addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust)
{
if (Path::isAddressValidForPath(*(reinterpret_cast<const InetAddress *>(addr)))) {
Mutex::Lock _l(_directPaths_m);
@@ -466,7 +474,7 @@ std::string Node::dataStoreGet(const char *name)
std::string r;
unsigned long olen = 0;
do {
- long n = _dataStoreGetFunction(reinterpret_cast<ZT1_Node *>(this),_uPtr,name,buf,sizeof(buf),(unsigned long)r.length(),&olen);
+ long n = _dataStoreGetFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,name,buf,sizeof(buf),(unsigned long)r.length(),&olen);
if (n <= 0)
return std::string();
r.append(buf,n);
@@ -480,7 +488,7 @@ void Node::postNewerVersionIfNewer(unsigned int major,unsigned int minor,unsigne
_newestVersionSeen[0] = major;
_newestVersionSeen[1] = minor;
_newestVersionSeen[2] = rev;
- this->postEvent(ZT1_EVENT_SAW_MORE_RECENT_VERSION,(const void *)_newestVersionSeen);
+ this->postEvent(ZT_EVENT_SAW_MORE_RECENT_VERSION,(const void *)_newestVersionSeen);
}
}
@@ -513,7 +521,7 @@ void Node::postTrace(const char *module,unsigned int line,const char *fmt,...)
tmp2[sizeof(tmp2)-1] = (char)0;
Utils::snprintf(tmp1,sizeof(tmp1),"[%s] %s:%u %s",nowstr,module,line,tmp2);
- postEvent(ZT1_EVENT_TRACE,tmp1);
+ postEvent(ZT_EVENT_TRACE,tmp1);
}
#endif // ZT_TRACE
@@ -533,58 +541,59 @@ uint64_t Node::prng()
extern "C" {
-enum ZT1_ResultCode ZT1_Node_new(
- ZT1_Node **node,
+enum ZT_ResultCode ZT_Node_new(
+ ZT_Node **node,
void *uptr,
uint64_t now,
- ZT1_DataStoreGetFunction dataStoreGetFunction,
- ZT1_DataStorePutFunction dataStorePutFunction,
- ZT1_WirePacketSendFunction wirePacketSendFunction,
- ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
- ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
- ZT1_EventCallback eventCallback,
+ ZT_DataStoreGetFunction dataStoreGetFunction,
+ ZT_DataStorePutFunction dataStorePutFunction,
+ ZT_WirePacketSendFunction wirePacketSendFunction,
+ ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
+ ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
+ ZT_EventCallback eventCallback,
const char *overrideRootTopology)
{
- *node = (ZT1_Node *)0;
+ *node = (ZT_Node *)0;
try {
- *node = reinterpret_cast<ZT1_Node *>(new ZeroTier::Node(now,uptr,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,eventCallback,overrideRootTopology));
- return ZT1_RESULT_OK;
+ *node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(now,uptr,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,eventCallback,overrideRootTopology));
+ return ZT_RESULT_OK;
} catch (std::bad_alloc &exc) {
- return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
+ return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch (std::runtime_error &exc) {
- return ZT1_RESULT_FATAL_ERROR_DATA_STORE_FAILED;
+ return ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED;
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
-void ZT1_Node_delete(ZT1_Node *node)
+void ZT_Node_delete(ZT_Node *node)
{
try {
delete (reinterpret_cast<ZeroTier::Node *>(node));
} catch ( ... ) {}
}
-enum ZT1_ResultCode ZT1_Node_processWirePacket(
- ZT1_Node *node,
+enum ZT_ResultCode ZT_Node_processWirePacket(
+ ZT_Node *node,
uint64_t now,
+ const struct sockaddr_storage *localAddress,
const struct sockaddr_storage *remoteAddress,
const void *packetData,
unsigned int packetLength,
volatile uint64_t *nextBackgroundTaskDeadline)
{
try {
- return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(now,remoteAddress,packetData,packetLength,nextBackgroundTaskDeadline);
+ return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(now,localAddress,remoteAddress,packetData,packetLength,nextBackgroundTaskDeadline);
} catch (std::bad_alloc &exc) {
- return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
+ return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
- reinterpret_cast<ZeroTier::Node *>(node)->postEvent(ZT1_EVENT_INVALID_PACKET,(const void *)remoteAddress);
- return ZT1_RESULT_OK;
+ reinterpret_cast<ZeroTier::Node *>(node)->postEvent(ZT_EVENT_INVALID_PACKET,(const void *)remoteAddress);
+ return ZT_RESULT_OK;
}
}
-enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
- ZT1_Node *node,
+enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
+ ZT_Node *node,
uint64_t now,
uint64_t nwid,
uint64_t sourceMac,
@@ -598,121 +607,121 @@ enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
try {
return reinterpret_cast<ZeroTier::Node *>(node)->processVirtualNetworkFrame(now,nwid,sourceMac,destMac,etherType,vlanId,frameData,frameLength,nextBackgroundTaskDeadline);
} catch (std::bad_alloc &exc) {
- return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
+ return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
-enum ZT1_ResultCode ZT1_Node_processBackgroundTasks(ZT1_Node *node,uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
+enum ZT_ResultCode ZT_Node_processBackgroundTasks(ZT_Node *node,uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->processBackgroundTasks(now,nextBackgroundTaskDeadline);
} catch (std::bad_alloc &exc) {
- return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
+ return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
-enum ZT1_ResultCode ZT1_Node_join(ZT1_Node *node,uint64_t nwid)
+enum ZT_ResultCode ZT_Node_join(ZT_Node *node,uint64_t nwid)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->join(nwid);
} catch (std::bad_alloc &exc) {
- return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
+ return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
-enum ZT1_ResultCode ZT1_Node_leave(ZT1_Node *node,uint64_t nwid)
+enum ZT_ResultCode ZT_Node_leave(ZT_Node *node,uint64_t nwid)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->leave(nwid);
} catch (std::bad_alloc &exc) {
- return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
+ return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
-enum ZT1_ResultCode ZT1_Node_multicastSubscribe(ZT1_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
+enum ZT_ResultCode ZT_Node_multicastSubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->multicastSubscribe(nwid,multicastGroup,multicastAdi);
} catch (std::bad_alloc &exc) {
- return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
+ return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
-enum ZT1_ResultCode ZT1_Node_multicastUnsubscribe(ZT1_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
+enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->multicastUnsubscribe(nwid,multicastGroup,multicastAdi);
} catch (std::bad_alloc &exc) {
- return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
+ return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
- return ZT1_RESULT_FATAL_ERROR_INTERNAL;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
-uint64_t ZT1_Node_address(ZT1_Node *node)
+uint64_t ZT_Node_address(ZT_Node *node)
{
return reinterpret_cast<ZeroTier::Node *>(node)->address();
}
-void ZT1_Node_status(ZT1_Node *node,ZT1_NodeStatus *status)
+void ZT_Node_status(ZT_Node *node,ZT_NodeStatus *status)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->status(status);
} catch ( ... ) {}
}
-ZT1_PeerList *ZT1_Node_peers(ZT1_Node *node)
+ZT_PeerList *ZT_Node_peers(ZT_Node *node)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->peers();
} catch ( ... ) {
- return (ZT1_PeerList *)0;
+ return (ZT_PeerList *)0;
}
}
-ZT1_VirtualNetworkConfig *ZT1_Node_networkConfig(ZT1_Node *node,uint64_t nwid)
+ZT_VirtualNetworkConfig *ZT_Node_networkConfig(ZT_Node *node,uint64_t nwid)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->networkConfig(nwid);
} catch ( ... ) {
- return (ZT1_VirtualNetworkConfig *)0;
+ return (ZT_VirtualNetworkConfig *)0;
}
}
-ZT1_VirtualNetworkList *ZT1_Node_networks(ZT1_Node *node)
+ZT_VirtualNetworkList *ZT_Node_networks(ZT_Node *node)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->networks();
} catch ( ... ) {
- return (ZT1_VirtualNetworkList *)0;
+ return (ZT_VirtualNetworkList *)0;
}
}
-void ZT1_Node_freeQueryResult(ZT1_Node *node,void *qr)
+void ZT_Node_freeQueryResult(ZT_Node *node,void *qr)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->freeQueryResult(qr);
} catch ( ... ) {}
}
-void ZT1_Node_setNetconfMaster(ZT1_Node *node,void *networkControllerInstance)
+void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkControllerInstance)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->setNetconfMaster(networkControllerInstance);
} catch ( ... ) {}
}
-int ZT1_Node_addLocalInterfaceAddress(ZT1_Node *node,const struct sockaddr_storage *addr,int metric,ZT1_LocalInterfaceAddressTrust trust)
+int ZT_Node_addLocalInterfaceAddress(ZT_Node *node,const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->addLocalInterfaceAddress(addr,metric,trust);
@@ -721,21 +730,21 @@ int ZT1_Node_addLocalInterfaceAddress(ZT1_Node *node,const struct sockaddr_stora
}
}
-void ZT1_Node_clearLocalInterfaceAddresses(ZT1_Node *node)
+void ZT_Node_clearLocalInterfaceAddresses(ZT_Node *node)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->clearLocalInterfaceAddresses();
} catch ( ... ) {}
}
-void ZT1_version(int *major,int *minor,int *revision,unsigned long *featureFlags)
+void ZT_version(int *major,int *minor,int *revision,unsigned long *featureFlags)
{
if (major) *major = ZEROTIER_ONE_VERSION_MAJOR;
if (minor) *minor = ZEROTIER_ONE_VERSION_MINOR;
if (revision) *revision = ZEROTIER_ONE_VERSION_REVISION;
if (featureFlags) {
*featureFlags = (
- ZT1_FEATURE_FLAG_THREAD_SAFE
+ ZT_FEATURE_FLAG_THREAD_SAFE
);
}
}
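
The reworked processBackgroundTasks() above carries the time already elapsed since the last ping check into the deadline calculation instead of always assuming a full interval. A standalone sketch of that clamping math with made-up numbers; the constants and the Switch delay are illustrative stand-ins, only the min/max shape follows the diff:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main()
{
	const unsigned long pingCheckInterval = 62000;  // stand-in for ZT_PING_CHECK_INVERVAL
	const unsigned long timerTaskGranularity = 500; // stand-in for ZT_CORE_TIMER_TASK_GRANULARITY
	const uint64_t now = 100000;
	const uint64_t lastPingCheck = 90000;           // ping check ran 10s ago, so this pass skips it

	unsigned long timeUntilNextPingCheck = pingCheckInterval;
	const uint64_t timeSinceLastPingCheck = now - lastPingCheck;
	if (timeSinceLastPingCheck < pingCheckInterval)
		timeUntilNextPingCheck -= (unsigned long)timeSinceLastPingCheck; // 52000 remaining

	const unsigned long switchDelay = 30000; // pretend Switch::doTimerTasks() asked to run again in 30s
	const uint64_t deadline = now + (uint64_t)std::max(std::min(timeUntilNextPingCheck,switchDelay),timerTaskGranularity);
	std::printf("next background task deadline: %llu\n",(unsigned long long)deadline); // 130000
	return 0;
}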
diff --git a/node/Node.hpp b/node/Node.hpp
index 2a283eab..b81c1943 100644
--- a/node/Node.hpp
+++ b/node/Node.hpp
@@ -58,7 +58,7 @@ namespace ZeroTier {
/**
* Implementation of Node object as defined in CAPI
*
- * The pointer returned by ZT1_Node_new() is an instance of this class.
+ * The pointer returned by ZT_Node_new() is an instance of this class.
*/
class Node
{
@@ -66,25 +66,26 @@ public:
Node(
uint64_t now,
void *uptr,
- ZT1_DataStoreGetFunction dataStoreGetFunction,
- ZT1_DataStorePutFunction dataStorePutFunction,
- ZT1_WirePacketSendFunction wirePacketSendFunction,
- ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
- ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
- ZT1_EventCallback eventCallback,
+ ZT_DataStoreGetFunction dataStoreGetFunction,
+ ZT_DataStorePutFunction dataStorePutFunction,
+ ZT_WirePacketSendFunction wirePacketSendFunction,
+ ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
+ ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
+ ZT_EventCallback eventCallback,
const char *overrideRootTopology);
~Node();
// Public API Functions ----------------------------------------------------
- ZT1_ResultCode processWirePacket(
+ ZT_ResultCode processWirePacket(
uint64_t now,
+ const struct sockaddr_storage *localAddress,
const struct sockaddr_storage *remoteAddress,
const void *packetData,
unsigned int packetLength,
volatile uint64_t *nextBackgroundTaskDeadline);
- ZT1_ResultCode processVirtualNetworkFrame(
+ ZT_ResultCode processVirtualNetworkFrame(
uint64_t now,
uint64_t nwid,
uint64_t sourceMac,
@@ -94,18 +95,18 @@ public:
const void *frameData,
unsigned int frameLength,
volatile uint64_t *nextBackgroundTaskDeadline);
- ZT1_ResultCode processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline);
- ZT1_ResultCode join(uint64_t nwid);
- ZT1_ResultCode leave(uint64_t nwid);
- ZT1_ResultCode multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
- ZT1_ResultCode multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
+ ZT_ResultCode processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline);
+ ZT_ResultCode join(uint64_t nwid);
+ ZT_ResultCode leave(uint64_t nwid);
+ ZT_ResultCode multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
+ ZT_ResultCode multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
uint64_t address() const;
- void status(ZT1_NodeStatus *status) const;
- ZT1_PeerList *peers() const;
- ZT1_VirtualNetworkConfig *networkConfig(uint64_t nwid) const;
- ZT1_VirtualNetworkList *networks() const;
+ void status(ZT_NodeStatus *status) const;
+ ZT_PeerList *peers() const;
+ ZT_VirtualNetworkConfig *networkConfig(uint64_t nwid) const;
+ ZT_VirtualNetworkList *networks() const;
void freeQueryResult(void *qr);
- int addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT1_LocalInterfaceAddressTrust trust);
+ int addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust);
void clearLocalInterfaceAddresses();
void setNetconfMaster(void *networkControllerInstance);
@@ -119,16 +120,18 @@ public:
/**
* Enqueue a ZeroTier message to be sent
*
+ * @param localAddress Local address
* @param addr Destination address
* @param data Packet data
* @param len Packet length
* @return True if packet appears to have been sent
*/
- inline bool putPacket(const InetAddress &addr,const void *data,unsigned int len)
+ inline bool putPacket(const InetAddress &localAddress,const InetAddress &addr,const void *data,unsigned int len)
{
return (_wirePacketSendFunction(
- reinterpret_cast<ZT1_Node *>(this),
+ reinterpret_cast<ZT_Node *>(this),
_uPtr,
+ reinterpret_cast<const struct sockaddr_storage *>(&localAddress),
reinterpret_cast<const struct sockaddr_storage *>(&addr),
data,
len) == 0);
@@ -148,7 +151,7 @@ public:
inline void putFrame(uint64_t nwid,const MAC &source,const MAC &dest,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
{
_virtualNetworkFrameFunction(
- reinterpret_cast<ZT1_Node *>(this),
+ reinterpret_cast<ZT_Node *>(this),
_uPtr,
nwid,
source.toInt(),
@@ -184,9 +187,9 @@ public:
return _directPaths;
}
- inline bool dataStorePut(const char *name,const void *data,unsigned int len,bool secure) { return (_dataStorePutFunction(reinterpret_cast<ZT1_Node *>(this),_uPtr,name,data,len,(int)secure) == 0); }
+ inline bool dataStorePut(const char *name,const void *data,unsigned int len,bool secure) { return (_dataStorePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,name,data,len,(int)secure) == 0); }
inline bool dataStorePut(const char *name,const std::string &data,bool secure) { return dataStorePut(name,(const void *)data.data(),(unsigned int)data.length(),secure); }
- inline void dataStoreDelete(const char *name) { _dataStorePutFunction(reinterpret_cast<ZT1_Node *>(this),_uPtr,name,(const void *)0,0,0); }
+ inline void dataStoreDelete(const char *name) { _dataStorePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,name,(const void *)0,0,0); }
std::string dataStoreGet(const char *name);
/**
@@ -195,7 +198,7 @@ public:
* @param ev Event type
* @param md Meta-data (default: NULL/none)
*/
- inline void postEvent(ZT1_Event ev,const void *md = (const void *)0) { _eventCallback(reinterpret_cast<ZT1_Node *>(this),_uPtr,ev,md); }
+ inline void postEvent(ZT_Event ev,const void *md = (const void *)0) { _eventCallback(reinterpret_cast<ZT_Node *>(this),_uPtr,ev,md); }
/**
* Update virtual network port configuration
@@ -204,7 +207,7 @@ public:
* @param op Configuration operation
* @param nc Network configuration
*/
- inline int configureVirtualNetworkPort(uint64_t nwid,ZT1_VirtualNetworkConfigOperation op,const ZT1_VirtualNetworkConfig *nc) { return _virtualNetworkConfigFunction(reinterpret_cast<ZT1_Node *>(this),_uPtr,nwid,op,nc); }
+ inline int configureVirtualNetworkPort(uint64_t nwid,ZT_VirtualNetworkConfigOperation op,const ZT_VirtualNetworkConfig *nc) { return _virtualNetworkConfigFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,nwid,op,nc); }
/**
* @return True if we appear to be online
@@ -241,12 +244,12 @@ private:
void *_uPtr; // _uptr (lower case) is reserved in Visual Studio :P
- ZT1_DataStoreGetFunction _dataStoreGetFunction;
- ZT1_DataStorePutFunction _dataStorePutFunction;
- ZT1_WirePacketSendFunction _wirePacketSendFunction;
- ZT1_VirtualNetworkFrameFunction _virtualNetworkFrameFunction;
- ZT1_VirtualNetworkConfigFunction _virtualNetworkConfigFunction;
- ZT1_EventCallback _eventCallback;
+ ZT_DataStoreGetFunction _dataStoreGetFunction;
+ ZT_DataStorePutFunction _dataStorePutFunction;
+ ZT_WirePacketSendFunction _wirePacketSendFunction;
+ ZT_VirtualNetworkFrameFunction _virtualNetworkFrameFunction;
+ ZT_VirtualNetworkConfigFunction _virtualNetworkConfigFunction;
+ ZT_EventCallback _eventCallback;
std::vector< std::pair< uint64_t, SharedPtr<Network> > > _networks;
Mutex _networks_m;
diff --git a/node/Path.hpp b/node/Path.hpp
index 0e53772d..3fa06b58 100644
--- a/node/Path.hpp
+++ b/node/Path.hpp
@@ -57,7 +57,7 @@ public:
* Nearly all paths will be normal trust. The other levels are for high
* performance local SDN use only.
*
- * These values MUST match ZT1_LocalInterfaceAddressTrust in ZeroTierOne.h
+ * These values MUST match ZT_LocalInterfaceAddressTrust in ZeroTierOne.h
*/
enum Trust
{
@@ -93,7 +93,16 @@ public:
/**
* @return Preference rank, higher == better
*/
- inline int preferenceRank() const throw() { return (int)_ipScope; } // IP scopes are in ascending rank order in InetAddress.hpp
+ inline int preferenceRank() const throw()
+ {
+ // First, since the scope enum values in InetAddress.hpp are in order of
+	// use preference rank, we take that. Then we multiply by two, yielding
+ // a sequence like 0, 2, 4, 6, etc. Then if it's IPv6 we add one. This
+ // makes IPv6 addresses of a given scope outrank IPv4 addresses of the
+ // same scope -- e.g. 1 outranks 0. This makes us prefer IPv6, but not
+ // if the address scope/class is of a fundamentally lower rank.
+ return ( ((int)_ipScope * 2) + ((_addr.ss_family == AF_INET6) ? 1 : 0) );
+ }
/**
* @return Path trust level
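
A worked example of the new preferenceRank() formula above, (scope * 2) plus an IPv6 bit; the scope numbers here are illustrative rather than the actual InetAddress::IpScope values:

#include <cstdio>

// Free-standing copy of the ranking formula for illustration only.
static int preferenceRank(int ipScope,bool isV6)
{
	return ((ipScope * 2) + (isV6 ? 1 : 0));
}

int main()
{
	// An IPv6 path of a given scope now outranks an IPv4 path of the same scope,
	// but never outranks any path of a higher scope.
	std::printf("v4, scope 3 -> %d\n",preferenceRank(3,false)); // 6
	std::printf("v6, scope 3 -> %d\n",preferenceRank(3,true));  // 7
	std::printf("v4, scope 4 -> %d\n",preferenceRank(4,false)); // 8, still beats both
	return 0;
}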
diff --git a/node/Peer.cpp b/node/Peer.cpp
index c27afa8f..48b77c85 100644
--- a/node/Peer.cpp
+++ b/node/Peer.cpp
@@ -39,6 +39,9 @@
namespace ZeroTier {
+// Used to send varying values for NAT keepalive
+static uint32_t _natKeepaliveBuf = 0;
+
Peer::Peer(const Identity &myIdentity,const Identity &peerIdentity)
throw(std::runtime_error) :
_lastUsed(0),
@@ -61,6 +64,7 @@ Peer::Peer(const Identity &myIdentity,const Identity &peerIdentity)
void Peer::received(
const RuntimeEnvironment *RR,
+ const InetAddress &localAddr,
const InetAddress &remoteAddr,
unsigned int hops,
uint64_t packetId,
@@ -78,7 +82,7 @@ void Peer::received(
{
unsigned int np = _numPaths;
for(unsigned int p=0;p<np;++p) {
- if (_paths[p].address() == remoteAddr) {
+ if ((_paths[p].address() == remoteAddr)&&(_paths[p].localAddress() == localAddr)) {
_paths[p].received(now);
pathIsConfirmed = true;
break;
@@ -89,13 +93,13 @@ void Peer::received(
if ((verb == Packet::VERB_OK)&&(inReVerb == Packet::VERB_HELLO)) {
// Learn paths if they've been confirmed via a HELLO
RemotePath *slot = (RemotePath *)0;
- if (np < ZT1_MAX_PEER_NETWORK_PATHS) {
+ if (np < ZT_MAX_PEER_NETWORK_PATHS) {
// Add new path
slot = &(_paths[np++]);
} else {
// Replace oldest non-fixed path
uint64_t slotLRmin = 0xffffffffffffffffULL;
- for(unsigned int p=0;p<ZT1_MAX_PEER_NETWORK_PATHS;++p) {
+ for(unsigned int p=0;p<ZT_MAX_PEER_NETWORK_PATHS;++p) {
if ((!_paths[p].fixed())&&(_paths[p].lastReceived() <= slotLRmin)) {
slotLRmin = _paths[p].lastReceived();
slot = &(_paths[p]);
@@ -103,7 +107,7 @@ void Peer::received(
}
}
if (slot) {
- *slot = RemotePath(remoteAddr,false);
+ *slot = RemotePath(localAddr,remoteAddr,false);
slot->received(now);
_numPaths = np;
pathIsConfirmed = true;
@@ -116,7 +120,7 @@ void Peer::received(
if ((now - _lastPathConfirmationSent) >= ZT_MIN_PATH_CONFIRMATION_INTERVAL) {
_lastPathConfirmationSent = now;
TRACE("got %s via unknown path %s(%s), confirming...",Packet::verbString(verb),_id.address().toString().c_str(),remoteAddr.toString().c_str());
- attemptToContactAt(RR,remoteAddr,now);
+ attemptToContactAt(RR,localAddr,remoteAddr,now);
}
}
}
@@ -138,7 +142,7 @@ void Peer::received(
for(std::vector<MulticastGroup>::const_iterator mg(mgs.begin());mg!=mgs.end();++mg) {
if ((outp.size() + 18) > ZT_UDP_DEFAULT_PAYLOAD_MTU) {
outp.armor(_key,true);
- RR->node->putPacket(remoteAddr,outp.data(),outp.size());
+ RR->node->putPacket(localAddr,remoteAddr,outp.data(),outp.size());
outp.reset(_id.address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
}
@@ -151,7 +155,7 @@ void Peer::received(
}
if (outp.size() > ZT_PROTO_MIN_PACKET_LENGTH) {
outp.armor(_key,true);
- RR->node->putPacket(remoteAddr,outp.data(),outp.size());
+ RR->node->putPacket(localAddr,remoteAddr,outp.data(),outp.size());
}
}
}
@@ -177,7 +181,7 @@ RemotePath *Peer::getBestPath(uint64_t now)
return bestPath;
}
-void Peer::attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &atAddress,uint64_t now)
+void Peer::attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now)
{
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);
outp.append((unsigned char)ZT_PROTO_VERSION);
@@ -205,7 +209,7 @@ void Peer::attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &at
}
outp.armor(_key,false); // HELLO is sent in the clear
- RR->node->putPacket(atAddress,outp.data(),outp.size());
+ RR->node->putPacket(localAddr,atAddress,outp.data(),outp.size());
}
void Peer::doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now)
@@ -214,11 +218,12 @@ void Peer::doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now)
if (bestPath) {
if ((now - bestPath->lastReceived()) >= ZT_PEER_DIRECT_PING_DELAY) {
TRACE("PING %s(%s)",_id.address().toString().c_str(),bestPath->address().toString().c_str());
- attemptToContactAt(RR,bestPath->address(),now);
+ attemptToContactAt(RR,bestPath->localAddress(),bestPath->address(),now);
bestPath->sent(now);
} else if (((now - bestPath->lastSend()) >= ZT_NAT_KEEPALIVE_DELAY)&&(!bestPath->reliable())) {
+ _natKeepaliveBuf += (uint32_t)((now * 0x9e3779b1) >> 1); // tumble this around to send constantly varying (meaningless) payloads
TRACE("NAT keepalive %s(%s)",_id.address().toString().c_str(),bestPath->address().toString().c_str());
- RR->node->putPacket(bestPath->address(),"",0);
+ RR->node->putPacket(bestPath->localAddress(),bestPath->address(),&_natKeepaliveBuf,sizeof(_natKeepaliveBuf));
bestPath->sent(now);
}
}
@@ -306,13 +311,13 @@ void Peer::addPath(const RemotePath &newp)
}
RemotePath *slot = (RemotePath *)0;
- if (np < ZT1_MAX_PEER_NETWORK_PATHS) {
+ if (np < ZT_MAX_PEER_NETWORK_PATHS) {
// Add new path
slot = &(_paths[np++]);
} else {
// Replace oldest non-fixed path
uint64_t slotLRmin = 0xffffffffffffffffULL;
- for(unsigned int p=0;p<ZT1_MAX_PEER_NETWORK_PATHS;++p) {
+ for(unsigned int p=0;p<ZT_MAX_PEER_NETWORK_PATHS;++p) {
if ((!_paths[p].fixed())&&(_paths[p].lastReceived() <= slotLRmin)) {
slotLRmin = _paths[p].lastReceived();
slot = &(_paths[p]);
@@ -350,7 +355,7 @@ bool Peer::resetWithinScope(const RuntimeEnvironment *RR,InetAddress::IpScope sc
while (x < np) {
if (_paths[x].address().ipScope() == scope) {
if (_paths[x].fixed()) {
- attemptToContactAt(RR,_paths[x].address(),now);
+ attemptToContactAt(RR,_paths[x].localAddress(),_paths[x].address(),now);
_paths[y++] = _paths[x]; // keep fixed paths
}
} else {
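
The keepalive change above replaces the old zero-length datagram with a 4-byte payload that is re-tumbled from the clock on every send, so NAT devices see a non-identical payload each time. A standalone sketch of how that buffer evolves (free-standing code, not the Peer class itself):

#include <cstdint>
#include <cstdio>

int main()
{
	static uint32_t natKeepaliveBuf = 0;
	for (uint64_t now = 1000; now <= 3000; now += 1000) {
		// Same tumbling step as the diff: scale the clock by a large odd constant and shift.
		natKeepaliveBuf += (uint32_t)((now * 0x9e3779b1ULL) >> 1);
		std::printf("keepalive payload at t=%llu: 0x%08x\n",(unsigned long long)now,natKeepaliveBuf);
	}
	return 0;
}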
diff --git a/node/Peer.hpp b/node/Peer.hpp
index 283e3f33..4d1031b8 100644
--- a/node/Peer.hpp
+++ b/node/Peer.hpp
@@ -40,6 +40,7 @@
#include "../include/ZeroTierOne.h"
#include "RuntimeEnvironment.hpp"
+#include "CertificateOfMembership.hpp"
#include "RemotePath.hpp"
#include "Address.hpp"
#include "Utils.hpp"
@@ -104,6 +105,7 @@ public:
* and appears to be valid.
*
* @param RR Runtime environment
+ * @param localAddr Local address
* @param remoteAddr Internet address of sender
* @param hops ZeroTier (not IP) hops
* @param packetId Packet ID
@@ -113,6 +115,7 @@ public:
*/
void received(
const RuntimeEnvironment *RR,
+ const InetAddress &localAddr,
const InetAddress &remoteAddr,
unsigned int hops,
uint64_t packetId,
@@ -154,10 +157,11 @@ public:
* for NAT traversal and path verification.
*
* @param RR Runtime environment
+ * @param localAddr Local address
* @param atAddress Destination address
* @param now Current time
*/
- void attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &atAddress,uint64_t now);
+ void attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now);
/**
* Send pings or keepalives depending on configured timeouts
@@ -413,7 +417,7 @@ private:
uint16_t _vMinor;
uint16_t _vRevision;
Identity _id;
- RemotePath _paths[ZT1_MAX_PEER_NETWORK_PATHS];
+ RemotePath _paths[ZT_MAX_PEER_NETWORK_PATHS];
unsigned int _numPaths;
unsigned int _latency;
diff --git a/node/RemotePath.hpp b/node/RemotePath.hpp
index 291943c9..0034242e 100644
--- a/node/RemotePath.hpp
+++ b/node/RemotePath.hpp
@@ -53,14 +53,18 @@ public:
Path(),
_lastSend(0),
_lastReceived(0),
+ _localAddress(),
_fixed(false) {}
- RemotePath(const InetAddress &addr,bool fixed) :
+ RemotePath(const InetAddress &localAddress,const InetAddress &addr,bool fixed) :
Path(addr,0,TRUST_NORMAL),
_lastSend(0),
_lastReceived(0),
+ _localAddress(localAddress),
_fixed(fixed) {}
+ inline const InetAddress &localAddress() const throw() { return _localAddress; }
+
inline uint64_t lastSend() const throw() { return _lastSend; }
inline uint64_t lastReceived() const throw() { return _lastReceived; }
@@ -123,7 +127,7 @@ public:
*/
inline bool send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now)
{
- if (RR->node->putPacket(address(),data,len)) {
+ if (RR->node->putPacket(_localAddress,address(),data,len)) {
sent(now);
RR->antiRec->logOutgoingZT(data,len);
return true;
@@ -134,6 +138,7 @@ public:
private:
uint64_t _lastSend;
uint64_t _lastReceived;
+ InetAddress _localAddress;
bool _fixed;
};
diff --git a/node/SHA512.cpp b/node/SHA512.cpp
index 197d6323..ddff4839 100644
--- a/node/SHA512.cpp
+++ b/node/SHA512.cpp
@@ -48,10 +48,8 @@ Public domain.
#define uint64 uint64_t
-#define load_bigendian(x) Utils::ntoh(*((const uint64_t *)(x)))
-#define store_bigendian(x,u) (*((uint64_t *)(x)) = Utils::hton((u)))
+#ifdef ZT_NO_TYPE_PUNNING
-#if 0
static uint64 load_bigendian(const unsigned char *x)
{
return
@@ -77,7 +75,13 @@ static void store_bigendian(unsigned char *x,uint64 u)
x[1] = u; u >>= 8;
x[0] = u;
}
-#endif
+
+#else // !ZT_NO_TYPE_PUNNING
+
+#define load_bigendian(x) Utils::ntoh(*((const uint64_t *)(x)))
+#define store_bigendian(x,u) (*((uint64_t *)(x)) = Utils::hton((u)))
+
+#endif // ZT_NO_TYPE_PUNNING
#define SHR(x,c) ((x) >> (c))
#define ROTR(x,c) (((x) >> (c)) | ((x) << (64 - (c))))
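
The SHA512 change above keeps the byte-wise big-endian load/store only behind ZT_NO_TYPE_PUNNING and otherwise type-puns through uint64_t with Utils::hton/ntoh. A self-contained round-trip check of the byte-wise variant, written as loops rather than the unrolled form in the diff:

#include <cstdint>
#include <cstdio>

static uint64_t load_bigendian(const unsigned char *x)
{
	uint64_t u = 0;
	for (int i = 0; i < 8; ++i)
		u = (u << 8) | (uint64_t)x[i]; // x[0] is the most significant byte
	return u;
}

static void store_bigendian(unsigned char *x,uint64_t u)
{
	for (int i = 7; i >= 0; --i) { x[i] = (unsigned char)u; u >>= 8; } // x[7] gets the least significant byte
}

int main()
{
	unsigned char buf[8];
	store_bigendian(buf,0x0102030405060708ULL);
	std::printf("round trip ok: %s\n",(load_bigendian(buf) == 0x0102030405060708ULL) ? "yes" : "no");
	std::printf("first byte: 0x%02x (most significant, i.e. big-endian)\n",buf[0]);
	return 0;
}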
diff --git a/node/SelfAwareness.cpp b/node/SelfAwareness.cpp
index 716cf7f3..7329322a 100644
--- a/node/SelfAwareness.cpp
+++ b/node/SelfAwareness.cpp
@@ -107,10 +107,14 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi
// Erase all entries (other than this one) for this scope to prevent thrashing
// Note: we should probably not use 'entry' after this
- for(std::map< PhySurfaceKey,PhySurfaceEntry >::iterator p(_phy.begin());p!=_phy.end();) {
- if ((p->first.reporter != reporter)&&(p->first.scope == scope))
- _phy.erase(p++);
- else ++p;
+ {
+ Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
+ PhySurfaceKey *k = (PhySurfaceKey *)0;
+ PhySurfaceEntry *e = (PhySurfaceEntry *)0;
+ while (i.next(k,e)) {
+ if ((k->reporter != reporter)&&(k->scope == scope))
+ _phy.erase(*k);
+ }
}
_ResetWithinScope rset(RR,now,(InetAddress::IpScope)scope);
@@ -140,26 +144,13 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi
void SelfAwareness::clean(uint64_t now)
{
Mutex::Lock _l(_phy_m);
- for(std::map< PhySurfaceKey,PhySurfaceEntry >::iterator p(_phy.begin());p!=_phy.end();) {
- if ((now - p->second.ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
- _phy.erase(p++);
- else ++p;
- }
-}
-
-bool SelfAwareness::areGlobalIPv4PortsRandomized() const
-{
- int port = 0;
- Mutex::Lock _l(_phy_m);
- for(std::map< PhySurfaceKey,PhySurfaceEntry >::const_iterator p(_phy.begin());p!=_phy.end();++p) {
- if ((p->first.scope == InetAddress::IP_SCOPE_GLOBAL)&&(p->second.mySurface.ss_family == AF_INET)) {
- const int tmp = (int)p->second.mySurface.port();
- if ((port)&&(tmp != port))
- return true;
- else port = tmp;
- }
+ Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
+ PhySurfaceKey *k = (PhySurfaceKey *)0;
+ PhySurfaceEntry *e = (PhySurfaceEntry *)0;
+ while (i.next(k,e)) {
+ if ((now - e->ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
+ _phy.erase(*k);
}
- return false;
}
} // namespace ZeroTier
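
The clean() rewrite above erases entries through the project's Hashtable::Iterator while walking the table, which that iterator evidently tolerates. For comparison, a minimal standard-library analogue of the same timeout sweep, where the iterator must instead be advanced via the return value of erase():

#include <cstdint>
#include <cstdio>
#include <unordered_map>

int main()
{
	const uint64_t now = 100000, timeout = 60000; // stand-in for ZT_SELFAWARENESS_ENTRY_TIMEOUT
	std::unordered_map<uint64_t,uint64_t> phy = { {1,95000}, {2,10000}, {3,99000} }; // key -> last-seen timestamp
	for (std::unordered_map<uint64_t,uint64_t>::iterator i(phy.begin());i!=phy.end();) {
		if ((now - i->second) >= timeout)
			i = phy.erase(i); // entry 2 is dropped
		else ++i;
	}
	std::printf("entries left: %zu\n",phy.size()); // 2
	return 0;
}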
diff --git a/node/SelfAwareness.hpp b/node/SelfAwareness.hpp
index d3b79d18..3133553e 100644
--- a/node/SelfAwareness.hpp
+++ b/node/SelfAwareness.hpp
@@ -28,10 +28,9 @@
#ifndef ZT_SELFAWARENESS_HPP
#define ZT_SELFAWARENESS_HPP
-#include <map>
-#include <vector>
-
+#include "Constants.hpp"
#include "InetAddress.hpp"
+#include "Hashtable.hpp"
#include "Address.hpp"
#include "Mutex.hpp"
@@ -66,17 +65,14 @@ public:
*/
void clean(uint64_t now);
- /**
- * @return True if our external (global scope) IPv4 ports appear to be randomized by a NAT device
- */
- bool areGlobalIPv4PortsRandomized() const;
-
private:
struct PhySurfaceKey
{
Address reporter;
InetAddress::IpScope scope;
+ inline unsigned long hashCode() const throw() { return ((unsigned long)reporter.toInt() + (unsigned long)scope); }
+
PhySurfaceKey() : reporter(),scope(InetAddress::IP_SCOPE_NONE) {}
PhySurfaceKey(const Address &r,InetAddress::IpScope s) : reporter(r),scope(s) {}
inline bool operator<(const PhySurfaceKey &k) const throw() { return ((reporter < k.reporter) ? true : ((reporter == k.reporter) ? ((int)scope < (int)k.scope) : false)); }
@@ -93,7 +89,7 @@ private:
const RuntimeEnvironment *RR;
- std::map< PhySurfaceKey,PhySurfaceEntry > _phy;
+ Hashtable< PhySurfaceKey,PhySurfaceEntry > _phy;
Mutex _phy_m;
};
diff --git a/node/Switch.cpp b/node/Switch.cpp
index 989f497a..ecae9b76 100644
--- a/node/Switch.cpp
+++ b/node/Switch.cpp
@@ -67,7 +67,10 @@ static const char *etherTypeName(const unsigned int etherType)
Switch::Switch(const RuntimeEnvironment *renv) :
RR(renv),
- _lastBeaconResponse(0)
+ _lastBeaconResponse(0),
+ _outstandingWhoisRequests(32),
+ _defragQueue(32),
+ _lastUniteAttempt(8) // only really used on root servers and upstreams, and it'll grow there just fine
{
}
@@ -75,7 +78,7 @@ Switch::~Switch()
{
}
-void Switch::onRemotePacket(const InetAddress &fromAddr,const void *data,unsigned int len)
+void Switch::onRemotePacket(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len)
{
try {
if (len == 13) {
@@ -93,14 +96,14 @@ void Switch::onRemotePacket(const InetAddress &fromAddr,const void *data,unsigne
_lastBeaconResponse = now;
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NOP);
outp.armor(peer->key(),false);
- RR->node->putPacket(fromAddr,outp.data(),outp.size());
+ RR->node->putPacket(localAddr,fromAddr,outp.data(),outp.size());
}
}
} else if (len > ZT_PROTO_MIN_FRAGMENT_LENGTH) {
if (((const unsigned char *)data)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR) {
- _handleRemotePacketFragment(fromAddr,data,len);
+ _handleRemotePacketFragment(localAddr,fromAddr,data,len);
} else if (len >= ZT_PROTO_MIN_PACKET_LENGTH) {
- _handleRemotePacketHead(fromAddr,data,len);
+ _handleRemotePacketHead(localAddr,fromAddr,data,len);
}
}
} catch (std::exception &ex) {
@@ -291,7 +294,7 @@ void Switch::send(const Packet &packet,bool encrypt,uint64_t nwid)
if (!_trySend(packet,encrypt,nwid)) {
Mutex::Lock _l(_txQueue_m);
- _txQueue.insert(std::pair< Address,TXQueueEntry >(packet.destination(),TXQueueEntry(RR->node->now(),packet,encrypt,nwid)));
+ _txQueue.push_back(TXQueueEntry(packet.destination(),RR->node->now(),packet,encrypt,nwid));
}
}
@@ -309,31 +312,18 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force)
const uint64_t now = RR->node->now();
- std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
- if (!(cg.first))
- return false;
-
- if (cg.first.ipScope() != cg.second.ipScope())
- return false;
-
- // Addresses are sorted in key for last unite attempt map for order
- // invariant lookup: (p1,p2) == (p2,p1)
- Array<Address,2> uniteKey;
- if (p1 >= p2) {
- uniteKey[0] = p2;
- uniteKey[1] = p1;
- } else {
- uniteKey[0] = p1;
- uniteKey[1] = p2;
- }
{
Mutex::Lock _l(_lastUniteAttempt_m);
- std::map< Array< Address,2 >,uint64_t >::const_iterator e(_lastUniteAttempt.find(uniteKey));
- if ((!force)&&(e != _lastUniteAttempt.end())&&((now - e->second) < ZT_MIN_UNITE_INTERVAL))
+ uint64_t &luts = _lastUniteAttempt[_LastUniteKey(p1,p2)];
+ if (((now - luts) < ZT_MIN_UNITE_INTERVAL)&&(!force))
return false;
- else _lastUniteAttempt[uniteKey] = now;
+ luts = now;
}
+ std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
+ if ((!(cg.first))||(cg.first.ipScope() != cg.second.ipScope()))
+ return false;
+
TRACE("unite: %s(%s) <> %s(%s)",p1.toString().c_str(),cg.second.toString().c_str(),p2.toString().c_str(),cg.first.toString().c_str());
/* Tell P1 where to find P2 and vice versa, sending the packets to P1 and
@@ -386,14 +376,14 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force)
return true;
}
-void Switch::rendezvous(const SharedPtr<Peer> &peer,const InetAddress &atAddr)
+void Switch::rendezvous(const SharedPtr<Peer> &peer,const InetAddress &localAddr,const InetAddress &atAddr)
{
TRACE("sending NAT-t message to %s(%s)",peer->address().toString().c_str(),atAddr.toString().c_str());
const uint64_t now = RR->node->now();
- peer->attemptToContactAt(RR,atAddr,now);
+ peer->attemptToContactAt(RR,localAddr,atAddr,now);
{
Mutex::Lock _l(_contactQueue_m);
- _contactQueue.push_back(ContactQueueEntry(peer,now + ZT_NAT_T_TACTICAL_ESCALATION_DELAY,atAddr));
+ _contactQueue.push_back(ContactQueueEntry(peer,now + ZT_NAT_T_TACTICAL_ESCALATION_DELAY,localAddr,atAddr));
}
}
@@ -402,10 +392,13 @@ void Switch::requestWhois(const Address &addr)
bool inserted = false;
{
Mutex::Lock _l(_outstandingWhoisRequests_m);
- std::pair< std::map< Address,WhoisRequest >::iterator,bool > entry(_outstandingWhoisRequests.insert(std::pair<Address,WhoisRequest>(addr,WhoisRequest())));
- if ((inserted = entry.second))
- entry.first->second.lastSent = RR->node->now();
- entry.first->second.retries = 0; // reset retry count if entry already existed
+ WhoisRequest &r = _outstandingWhoisRequests[addr];
+ if (r.lastSent) {
+ r.retries = 0; // reset retry count if entry already existed, but keep waiting and retry again after normal timeout
+ } else {
+ r.lastSent = RR->node->now();
+ inserted = true;
+ }
}
if (inserted)
_sendWhoisRequest(addr,(const Address *)0,0);
@@ -435,11 +428,12 @@ void Switch::doAnythingWaitingForPeer(const SharedPtr<Peer> &peer)
{ // finish sending any packets waiting on peer's public key / identity
Mutex::Lock _l(_txQueue_m);
- std::pair< std::multimap< Address,TXQueueEntry >::iterator,std::multimap< Address,TXQueueEntry >::iterator > waitingTxQueueItems(_txQueue.equal_range(peer->address()));
- for(std::multimap< Address,TXQueueEntry >::iterator txi(waitingTxQueueItems.first);txi!=waitingTxQueueItems.second;) {
- if (_trySend(txi->second.packet,txi->second.encrypt,txi->second.nwid))
- _txQueue.erase(txi++);
- else ++txi;
+ for(std::list< TXQueueEntry >::iterator txi(_txQueue.begin());txi!=_txQueue.end();) {
+ if (txi->dest == peer->address()) {
+ if (_trySend(txi->packet,txi->encrypt,txi->nwid))
+ _txQueue.erase(txi++);
+ else ++txi;
+ } else ++txi;
}
}
}
@@ -459,14 +453,14 @@ unsigned long Switch::doTimerTasks(uint64_t now)
} else {
if (qi->strategyIteration == 0) {
// First strategy: send packet directly to destination
- qi->peer->attemptToContactAt(RR,qi->inaddr,now);
+ qi->peer->attemptToContactAt(RR,qi->localAddr,qi->inaddr,now);
} else if (qi->strategyIteration <= 4) {
// Strategies 1-4: try escalating ports for symmetric NATs that remap sequentially
InetAddress tmpaddr(qi->inaddr);
int p = (int)qi->inaddr.port() + qi->strategyIteration;
if (p < 0xffff) {
tmpaddr.setPort((unsigned int)p);
- qi->peer->attemptToContactAt(RR,tmpaddr,now);
+ qi->peer->attemptToContactAt(RR,qi->localAddr,tmpaddr,now);
} else qi->strategyIteration = 5;
} else {
// All strategies tried, expire entry
@@ -486,36 +480,37 @@ unsigned long Switch::doTimerTasks(uint64_t now)
{ // Retry outstanding WHOIS requests
Mutex::Lock _l(_outstandingWhoisRequests_m);
- for(std::map< Address,WhoisRequest >::iterator i(_outstandingWhoisRequests.begin());i!=_outstandingWhoisRequests.end();) {
- unsigned long since = (unsigned long)(now - i->second.lastSent);
+ Hashtable< Address,WhoisRequest >::Iterator i(_outstandingWhoisRequests);
+ Address *a = (Address *)0;
+ WhoisRequest *r = (WhoisRequest *)0;
+ while (i.next(a,r)) {
+ const unsigned long since = (unsigned long)(now - r->lastSent);
if (since >= ZT_WHOIS_RETRY_DELAY) {
- if (i->second.retries >= ZT_MAX_WHOIS_RETRIES) {
- TRACE("WHOIS %s timed out",i->first.toString().c_str());
- _outstandingWhoisRequests.erase(i++);
- continue;
+ if (r->retries >= ZT_MAX_WHOIS_RETRIES) {
+ TRACE("WHOIS %s timed out",a->toString().c_str());
+ _outstandingWhoisRequests.erase(*a);
} else {
- i->second.lastSent = now;
- i->second.peersConsulted[i->second.retries] = _sendWhoisRequest(i->first,i->second.peersConsulted,i->second.retries);
- ++i->second.retries;
- TRACE("WHOIS %s (retry %u)",i->first.toString().c_str(),i->second.retries);
+ r->lastSent = now;
+ r->peersConsulted[r->retries] = _sendWhoisRequest(*a,r->peersConsulted,r->retries);
+ ++r->retries;
+ TRACE("WHOIS %s (retry %u)",a->toString().c_str(),r->retries);
nextDelay = std::min(nextDelay,(unsigned long)ZT_WHOIS_RETRY_DELAY);
}
} else {
nextDelay = std::min(nextDelay,ZT_WHOIS_RETRY_DELAY - since);
}
- ++i;
}
}
{ // Time out TX queue packets that never got WHOIS lookups or other info.
Mutex::Lock _l(_txQueue_m);
- for(std::multimap< Address,TXQueueEntry >::iterator i(_txQueue.begin());i!=_txQueue.end();) {
- if (_trySend(i->second.packet,i->second.encrypt,i->second.nwid))
- _txQueue.erase(i++);
- else if ((now - i->second.creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
- TRACE("TX %s -> %s timed out",i->second.packet.source().toString().c_str(),i->second.packet.destination().toString().c_str());
- _txQueue.erase(i++);
- } else ++i;
+ for(std::list< TXQueueEntry >::iterator txi(_txQueue.begin());txi!=_txQueue.end();) {
+ if (_trySend(txi->packet,txi->encrypt,txi->nwid))
+ _txQueue.erase(txi++);
+ else if ((now - txi->creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
+ TRACE("TX %s -> %s timed out",txi->packet.source().toString().c_str(),txi->packet.destination().toString().c_str());
+ _txQueue.erase(txi++);
+ } else ++txi;
}
}
@@ -531,18 +526,32 @@ unsigned long Switch::doTimerTasks(uint64_t now)
{ // Time out packets that didn't get all their fragments.
Mutex::Lock _l(_defragQueue_m);
- for(std::map< uint64_t,DefragQueueEntry >::iterator i(_defragQueue.begin());i!=_defragQueue.end();) {
- if ((now - i->second.creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) {
- TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",i->first);
- _defragQueue.erase(i++);
- } else ++i;
+ Hashtable< uint64_t,DefragQueueEntry >::Iterator i(_defragQueue);
+ uint64_t *packetId = (uint64_t *)0;
+ DefragQueueEntry *qe = (DefragQueueEntry *)0;
+ while (i.next(packetId,qe)) {
+ if ((now - qe->creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) {
+ TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",*packetId);
+ _defragQueue.erase(*packetId);
+ }
+ }
+ }
+
+ { // Remove really old last unite attempt entries to keep table size controlled
+ Mutex::Lock _l(_lastUniteAttempt_m);
+ Hashtable< _LastUniteKey,uint64_t >::Iterator i(_lastUniteAttempt);
+ _LastUniteKey *k = (_LastUniteKey *)0;
+ uint64_t *v = (uint64_t *)0;
+ while (i.next(k,v)) {
+ if ((now - *v) >= (ZT_MIN_UNITE_INTERVAL * 16))
+ _lastUniteAttempt.erase(*k);
}
}
return nextDelay;
}
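
All three cleanup passes in doTimerTasks() now share one pattern against the new Hashtable: bind an Iterator to the table, pull entries with next(keyPtr,valuePtr), and erase(key) anything that has expired. A small sketch of that pattern, assuming (as the loops above do) that the Hashtable tolerates erasing the entry the iterator just returned:

#include <cstdint>
#include "Hashtable.hpp"

using namespace ZeroTier;

struct TimedThing
{
	TimedThing() : creationTime(0) {}
	uint64_t creationTime;
};

// Assumes, as the cleanup loops above do, that erasing the entry the iterator
// just returned is safe for this Hashtable.
static void expireOld(Hashtable< uint64_t,TimedThing > &table,uint64_t now,uint64_t maxAge)
{
	Hashtable< uint64_t,TimedThing >::Iterator i(table);
	uint64_t *k = (uint64_t *)0;
	TimedThing *v = (TimedThing *)0;
	while (i.next(k,v)) {
		if ((now - v->creationTime) > maxAge)
			table.erase(*k);
	}
}
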
-void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void *data,unsigned int len)
+void Switch::_handleRemotePacketFragment(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len)
{
Packet::Fragment fragment(data,len);
Address destination(fragment.destination());
@@ -577,32 +586,31 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void
// seeing a Packet::Fragment?
Mutex::Lock _l(_defragQueue_m);
- std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));
- if (dqe == _defragQueue.end()) {
+ DefragQueueEntry &dq = _defragQueue[pid];
+ if (!dq.creationTime) {
// We received a Packet::Fragment without its head, so queue it and wait
- DefragQueueEntry &dq = _defragQueue[pid];
dq.creationTime = RR->node->now();
dq.frags[fno - 1] = fragment;
dq.totalFragments = tf; // total fragment count is known
dq.haveFragments = 1 << fno; // we have only this fragment
//TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());
- } else if (!(dqe->second.haveFragments & (1 << fno))) {
+ } else if (!(dq.haveFragments & (1 << fno))) {
// We have other fragments and maybe the head, so add this one and check
- dqe->second.frags[fno - 1] = fragment;
- dqe->second.totalFragments = tf;
+ dq.frags[fno - 1] = fragment;
+ dq.totalFragments = tf;
//TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());
- if (Utils::countBits(dqe->second.haveFragments |= (1 << fno)) == tf) {
+ if (Utils::countBits(dq.haveFragments |= (1 << fno)) == tf) {
// We have all fragments -- assemble and process full Packet
//TRACE("packet %.16llx is complete, assembling and processing...",pid);
- SharedPtr<IncomingPacket> packet(dqe->second.frag0);
+ SharedPtr<IncomingPacket> packet(dq.frag0);
for(unsigned int f=1;f<tf;++f)
- packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
- _defragQueue.erase(dqe);
+ packet->append(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength());
+ _defragQueue.erase(pid); // dq no longer valid after this
if (!packet->tryDecode(RR)) {
Mutex::Lock _l(_rxQueue_m);
@@ -614,9 +622,9 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void
}
}
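
Both defragmentation paths now index the queue with _defragQueue[pid], which default-constructs a DefragQueueEntry (creationTime zeroed) when the packet ID is new, so !dq.creationTime replaces the old find()/end() test. The same sentinel idiom, shown with std::unordered_map purely so it compiles on its own; the Hashtable introduced in this changeset is used the same way above:

#include <cstdint>
#include <unordered_map>

struct DefragEntry
{
	DefragEntry() : creationTime(0),haveFragments(0) {}
	uint64_t creationTime; // 0 means "slot was just created by operator[]"
	uint32_t haveFragments;
};

static void noteFragment(std::unordered_map<uint64_t,DefragEntry> &q,uint64_t pid,uint64_t now,unsigned int fno)
{
	DefragEntry &e = q[pid];         // inserts a default-constructed entry if the packet ID is new
	if (!e.creationTime)             // sentinel test, same role as !dq.creationTime above
		e.creationTime = now;
	e.haveFragments |= (1u << fno);  // record this fragment either way
}

The trade-off versus find() is that a lookup for a missing key always inserts, so abandoned entries must be cleaned up elsewhere, which is what the doTimerTasks() sweep above does.
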
-void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *data,unsigned int len)
+void Switch::_handleRemotePacketHead(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len)
{
- SharedPtr<IncomingPacket> packet(new IncomingPacket(data,len,fromAddr,RR->node->now()));
+ SharedPtr<IncomingPacket> packet(new IncomingPacket(data,len,localAddr,fromAddr,RR->node->now()));
Address source(packet->source());
Address destination(packet->destination());
@@ -645,26 +653,27 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat
uint64_t pid = packet->packetId();
Mutex::Lock _l(_defragQueue_m);
- std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));
- if (dqe == _defragQueue.end()) {
+ DefragQueueEntry &dq = _defragQueue[pid];
+ if (!dq.creationTime) {
// If we have no other fragments yet, create an entry and save the head
- DefragQueueEntry &dq = _defragQueue[pid];
+
dq.creationTime = RR->node->now();
dq.frag0 = packet;
dq.totalFragments = 0; // 0 == unknown, waiting for Packet::Fragment
dq.haveFragments = 1; // head is first bit (left to right)
//TRACE("fragment (0/?) of %.16llx from %s",pid,fromAddr.toString().c_str());
- } else if (!(dqe->second.haveFragments & 1)) {
+ } else if (!(dq.haveFragments & 1)) {
// If we have other fragments but no head, see if we are complete with the head
- if ((dqe->second.totalFragments)&&(Utils::countBits(dqe->second.haveFragments |= 1) == dqe->second.totalFragments)) {
+
+ if ((dq.totalFragments)&&(Utils::countBits(dq.haveFragments |= 1) == dq.totalFragments)) {
// We have all fragments -- assemble and process full Packet
//TRACE("packet %.16llx is complete, assembling and processing...",pid);
// packet already contains head, so append fragments
- for(unsigned int f=1;f<dqe->second.totalFragments;++f)
- packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
- _defragQueue.erase(dqe);
+ for(unsigned int f=1;f<dq.totalFragments;++f)
+ packet->append(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength());
+ _defragQueue.erase(pid); // dq no longer valid after this
if (!packet->tryDecode(RR)) {
Mutex::Lock _l(_rxQueue_m);
@@ -672,7 +681,7 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat
}
} else {
// Still waiting on more fragments, so queue the head
- dqe->second.frag0 = packet;
+ dq.frag0 = packet;
}
} // else this is a duplicate head, ignore
} else {
diff --git a/node/Switch.hpp b/node/Switch.hpp
index ac85606e..3bdc0c47 100644
--- a/node/Switch.hpp
+++ b/node/Switch.hpp
@@ -45,6 +45,7 @@
#include "Network.hpp"
#include "SharedPtr.hpp"
#include "IncomingPacket.hpp"
+#include "Hashtable.hpp"
/* Ethernet frame types that might be relevant to us */
#define ZT_ETHERTYPE_IPV4 0x0800
@@ -78,11 +79,12 @@ public:
/**
* Called when a packet is received from the real network
*
+ * @param localAddr Local interface address
* @param fromAddr Internet IP address of origin
* @param data Packet data
* @param len Packet length
*/
- void onRemotePacket(const InetAddress &fromAddr,const void *data,unsigned int len);
+ void onRemotePacket(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len);
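
The public entry point now receives the local interface address alongside the sender's address and passes both down to the fragment and head handlers. A hypothetical caller sketch; recvOnePacket() and the loop are placeholders, only the onRemotePacket(localAddr,fromAddr,data,len) signature is taken from this diff:

#include "InetAddress.hpp"
#include "Switch.hpp"

using namespace ZeroTier;

// recvOnePacket() is a placeholder assumed to be provided by the embedding
// I/O layer; it is not a ZeroTier API.
unsigned int recvOnePacket(void *buf,unsigned int capacity,InetAddress &from);

static void pumpSocket(Switch &sw,const InetAddress &boundLocalAddr)
{
	char buf[4096];
	InetAddress from;
	for(;;) {
		const unsigned int len = recvOnePacket(buf,sizeof(buf),from); // placeholder I/O
		if (!len)
			break;
		sw.onRemotePacket(boundLocalAddr,from,buf,len); // receiving interface address now passed through
	}
}
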
/**
* Called when a packet comes from a local Ethernet tap
@@ -139,9 +141,10 @@ public:
* Attempt NAT traversal to peer at a given physical address
*
* @param peer Peer to contact
+ * @param localAddr Local interface address
* @param atAddr Address of peer
*/
- void rendezvous(const SharedPtr<Peer> &peer,const InetAddress &atAddr);
+ void rendezvous(const SharedPtr<Peer> &peer,const InetAddress &localAddr,const InetAddress &atAddr);
/**
* Request WHOIS on a given address
@@ -178,8 +181,8 @@ public:
unsigned long doTimerTasks(uint64_t now);
private:
- void _handleRemotePacketFragment(const InetAddress &fromAddr,const void *data,unsigned int len);
- void _handleRemotePacketHead(const InetAddress &fromAddr,const void *data,unsigned int len);
+ void _handleRemotePacketFragment(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len);
+ void _handleRemotePacketHead(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len);
Address _sendWhoisRequest(const Address &addr,const Address *peersAlreadyConsulted,unsigned int numPeersAlreadyConsulted);
bool _trySend(const Packet &packet,bool encrypt,uint64_t nwid);
@@ -189,64 +192,87 @@ private:
// Outstanding WHOIS requests and how many retries they've undergone
struct WhoisRequest
{
+ WhoisRequest() : lastSent(0),retries(0) {}
uint64_t lastSent;
Address peersConsulted[ZT_MAX_WHOIS_RETRIES]; // by retry
unsigned int retries; // 0..ZT_MAX_WHOIS_RETRIES
};
- std::map< Address,WhoisRequest > _outstandingWhoisRequests;
+ Hashtable< Address,WhoisRequest > _outstandingWhoisRequests;
Mutex _outstandingWhoisRequests_m;
// Packet defragmentation queue -- comes before RX queue in path
struct DefragQueueEntry
{
+ DefragQueueEntry() : creationTime(0),totalFragments(0),haveFragments(0) {}
uint64_t creationTime;
SharedPtr<IncomingPacket> frag0;
Packet::Fragment frags[ZT_MAX_PACKET_FRAGMENTS - 1];
unsigned int totalFragments; // 0 if only frag0 received, waiting for frags
uint32_t haveFragments; // bit mask, LSB to MSB
};
- std::map< uint64_t,DefragQueueEntry > _defragQueue;
+ Hashtable< uint64_t,DefragQueueEntry > _defragQueue;
Mutex _defragQueue_m;
// ZeroTier-layer RX queue of incoming packets in the process of being decoded
std::list< SharedPtr<IncomingPacket> > _rxQueue;
Mutex _rxQueue_m;
- // ZeroTier-layer TX queue by destination ZeroTier address
+ // ZeroTier-layer TX queue entry
struct TXQueueEntry
{
TXQueueEntry() {}
- TXQueueEntry(uint64_t ct,const Packet &p,bool enc,uint64_t nw) :
+ TXQueueEntry(Address d,uint64_t ct,const Packet &p,bool enc,uint64_t nw) :
+ dest(d),
creationTime(ct),
nwid(nw),
packet(p),
encrypt(enc) {}
+ Address dest;
uint64_t creationTime;
uint64_t nwid;
Packet packet; // unencrypted/unMAC'd packet -- this is done at send time
bool encrypt;
};
- std::multimap< Address,TXQueueEntry > _txQueue;
+ std::list< TXQueueEntry > _txQueue;
Mutex _txQueue_m;
// Tracks sending of VERB_RENDEZVOUS to relaying peers
- std::map< Array< Address,2 >,uint64_t > _lastUniteAttempt; // key is always sorted in ascending order, for set-like behavior
+ struct _LastUniteKey
+ {
+ _LastUniteKey() : x(0),y(0) {}
+ _LastUniteKey(const Address &a1,const Address &a2)
+ {
+ if (a1 > a2) {
+ x = a2.toInt();
+ y = a1.toInt();
+ } else {
+ x = a1.toInt();
+ y = a2.toInt();
+ }
+ }
+ inline unsigned long hashCode() const throw() { return ((unsigned long)x ^ (unsigned long)y); }
+ inline bool operator==(const _LastUniteKey &k) const throw() { return ((x == k.x)&&(y == k.y)); }
+ uint64_t x,y;
+ };
+ Hashtable< _LastUniteKey,uint64_t > _lastUniteAttempt; // _LastUniteKey sorts its two addresses ascending, for set-like behavior
Mutex _lastUniteAttempt_m;
// Active attempts to contact remote peers, including state of multi-phase NAT traversal
struct ContactQueueEntry
{
ContactQueueEntry() {}
- ContactQueueEntry(const SharedPtr<Peer> &p,uint64_t ft,const InetAddress &a) :
+ ContactQueueEntry(const SharedPtr<Peer> &p,uint64_t ft,const InetAddress &laddr,const InetAddress &a) :
peer(p),
fireAtTime(ft),
inaddr(a),
+ localAddr(laddr),
strategyIteration(0) {}
SharedPtr<Peer> peer;
uint64_t fireAtTime;
InetAddress inaddr;
+ InetAddress localAddr;
unsigned int strategyIteration;
};
std::list<ContactQueueEntry> _contactQueue;
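
_LastUniteKey illustrates what the new Hashtable expects from a user-defined key: a default constructor, a hashCode() method, and operator==, with the address pair normalized so (A,B) and (B,A) map to the same entry. A purely illustrative key written to the same contract (not a type from this changeset):

#include <cstdint>

// Illustrative only: a key type following the same contract _LastUniteKey
// satisfies for Hashtable (default constructor, hashCode(), operator==).
// PortPairKey is not a real ZeroTier type.
struct PortPairKey
{
	PortPairKey() : lo(0),hi(0) {}
	PortPairKey(uint16_t a,uint16_t b)
	{
		if (a > b) { lo = b; hi = a; } else { lo = a; hi = b; } // order-independent pair
	}
	inline unsigned long hashCode() const throw() { return ((unsigned long)lo ^ ((unsigned long)hi << 16)); }
	inline bool operator==(const PortPairKey &k) const throw() { return ((lo == k.lo)&&(hi == k.hi)); }
	uint16_t lo,hi;
};

Because the constructor orders its two values, both orderings of a pair land on the same table entry, which is the set-like behavior the _lastUniteAttempt comment refers to.
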
diff --git a/node/Topology.cpp b/node/Topology.cpp
index b255080e..e931df1e 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -62,7 +62,7 @@ void Topology::setRootServers(const std::map< Identity,std::vector<InetAddress>
if (!p)
p = SharedPtr<Peer>(new Peer(RR->identity,i->first));
for(std::vector<InetAddress>::const_iterator j(i->second.begin());j!=i->second.end();++j)
- p->addPath(RemotePath(*j,true));
+ p->addPath(RemotePath(InetAddress(),*j,true));
p->use(now);
_rootPeers.push_back(p);
}
@@ -103,7 +103,7 @@ SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
const uint64_t now = RR->node->now();
Mutex::Lock _l(_lock);
- SharedPtr<Peer> p(_activePeers.insert(std::pair< Address,SharedPtr<Peer> >(peer->address(),peer)).first->second);
+ SharedPtr<Peer> &p = _activePeers.set(peer->address(),peer);
p->use(now);
_saveIdentity(p->identity());
@@ -160,9 +160,9 @@ SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCou
if (++sna == _rootAddresses.end())
sna = _rootAddresses.begin(); // wrap around at end
if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
- std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.find(*sna));
- if ((p != _activePeers.end())&&(p->second->hasActiveDirectPath(now))) {
- bestRoot = p->second;
+ SharedPtr<Peer> *p = _activePeers.get(*sna);
+ if ((p)&&((*p)->hasActiveDirectPath(now))) {
+ bestRoot = *p;
break;
}
}
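
The root lookup now uses get(), which, as its use above implies, returns a pointer to the stored value or a null pointer when the address is absent, replacing the find()/end() iterator check. A small hedged sketch of the same lookup:

#include "Address.hpp"
#include "Hashtable.hpp"
#include "Peer.hpp"
#include "SharedPtr.hpp"

using namespace ZeroTier;

// Assumes Hashtable::get(key) returns a pointer to the stored value, or a
// null pointer if the key is absent, as its use in getBestRoot() implies.
static SharedPtr<Peer> lookupPeer(Hashtable< Address,SharedPtr<Peer> > &peers,const Address &addr)
{
	SharedPtr<Peer> *p = peers.get(addr);
	if (p)
		return *p;              // found: hand back a copy of the SharedPtr
	return SharedPtr<Peer>();   // not found: null SharedPtr
}
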
@@ -249,10 +249,12 @@ bool Topology::isRoot(const Identity &id) const
void Topology::clean(uint64_t now)
{
Mutex::Lock _l(_lock);
- for(std::map< Address,SharedPtr<Peer> >::iterator p(_activePeers.begin());p!=_activePeers.end();) {
- if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->first) == _rootAddresses.end())) {
- _activePeers.erase(p++);
- } else ++p;
+ Hashtable< Address,SharedPtr<Peer> >::Iterator i(_activePeers);
+ Address *a = (Address *)0;
+ SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+ while (i.next(a,p))
+ if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) {
+ _activePeers.erase(*a);
}
}
diff --git a/node/Topology.hpp b/node/Topology.hpp
index 1c5cca00..3066b50c 100644
--- a/node/Topology.hpp
+++ b/node/Topology.hpp
@@ -44,6 +44,7 @@
#include "Mutex.hpp"
#include "InetAddress.hpp"
#include "Dictionary.hpp"
+#include "Hashtable.hpp"
namespace ZeroTier {
@@ -163,17 +164,20 @@ public:
inline void eachPeer(F f)
{
Mutex::Lock _l(_lock);
- for(std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.begin());p!=_activePeers.end();++p)
- f(*this,p->second);
+ Hashtable< Address,SharedPtr<Peer> >::Iterator i(_activePeers);
+ Address *a = (Address *)0;
+ SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+ while (i.next(a,p))
+ f(*this,*p);
}
/**
* @return All currently active peers by address
*/
- inline std::map< Address,SharedPtr<Peer> > allPeers() const
+ inline std::vector< std::pair< Address,SharedPtr<Peer> > > allPeers() const
{
Mutex::Lock _l(_lock);
- return _activePeers;
+ return _activePeers.entries();
}
/**
@@ -190,7 +194,7 @@ private:
const RuntimeEnvironment *RR;
- std::map< Address,SharedPtr<Peer> > _activePeers;
+ Hashtable< Address,SharedPtr<Peer> > _activePeers;
std::map< Identity,std::vector<InetAddress> > _roots;
std::vector< Address > _rootAddresses;
std::vector< SharedPtr<Peer> > _rootPeers;
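
With _activePeers now a Hashtable, eachPeer() hands every stored SharedPtr<Peer> to a caller-supplied functor and allPeers() returns a flat vector of (Address, SharedPtr<Peer>) pairs via entries(). A usage sketch; CountAlive and inspectTopology() are made up for illustration, only the eachPeer()/allPeers() signatures come from this diff:

#include <utility>
#include <vector>
#include "Topology.hpp"

using namespace ZeroTier;

// Made-up functor: counts peers with an active direct path.
struct CountAlive
{
	CountAlive(uint64_t n) : now(n),alive(0) {}
	inline void operator()(Topology &,const SharedPtr<Peer> &p)
	{
		if (p->hasActiveDirectPath(now))
			++alive;
	}
	uint64_t now;
	unsigned long alive;
};

static void inspectTopology(Topology &topo,uint64_t now)
{
	CountAlive counter(now);
	topo.eachPeer<CountAlive &>(counter); // pass by reference so the tally survives the call

	// Snapshot all peers as (Address,SharedPtr<Peer>) pairs; the copy can be
	// examined without holding Topology's internal lock.
	const std::vector< std::pair< Address,SharedPtr<Peer> > > all(topo.allPeers());
}
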