Diffstat (limited to 'node')
-rw-r--r--  node/AntiRecursion.hpp | 73
-rw-r--r--  node/BinarySemaphore.hpp | 106
-rw-r--r--  node/Cluster.cpp | 913
-rw-r--r--  node/Cluster.hpp | 418
-rw-r--r--  node/Constants.hpp | 72
-rw-r--r--  node/Defaults.cpp | 82
-rw-r--r--  node/DeferredPackets.cpp | 95
-rw-r--r--  node/DeferredPackets.hpp | 98
-rw-r--r--  node/Hashtable.hpp | 9
-rw-r--r--  node/Identity.cpp | 11
-rw-r--r--  node/Identity.hpp | 21
-rw-r--r--  node/IncomingPacket.cpp | 677
-rw-r--r--  node/IncomingPacket.hpp | 40
-rw-r--r--  node/InetAddress.cpp | 53
-rw-r--r--  node/InetAddress.hpp | 143
-rw-r--r--  node/Multicaster.cpp | 228
-rw-r--r--  node/Multicaster.hpp | 2
-rw-r--r--  node/Network.cpp | 123
-rw-r--r--  node/Network.hpp | 5
-rw-r--r--  node/NetworkConfig.cpp | 3
-rw-r--r--  node/Node.cpp | 378
-rw-r--r--  node/Node.hpp | 45
-rw-r--r--  node/Packet.cpp | 15
-rw-r--r--  node/Packet.hpp | 207
-rw-r--r--  node/Path.cpp (renamed from node/Defaults.hpp) | 53
-rw-r--r--  node/Path.hpp | 170
-rw-r--r--  node/Peer.cpp | 340
-rw-r--r--  node/Peer.hpp | 225
-rw-r--r--  node/Poly1305.cpp | 314
-rw-r--r--  node/RemotePath.hpp | 179
-rw-r--r--  node/RuntimeEnvironment.hpp | 32
-rw-r--r--  node/Salsa20.cpp | 1247
-rw-r--r--  node/Salsa20.hpp | 43
-rw-r--r--  node/SelfAwareness.cpp | 56
-rw-r--r--  node/SelfAwareness.hpp | 10
-rw-r--r--  node/SharedPtr.hpp | 33
-rw-r--r--  node/Switch.cpp | 180
-rw-r--r--  node/Switch.hpp | 7
-rw-r--r--  node/Topology.cpp | 419
-rw-r--r--  node/Topology.hpp | 152
-rw-r--r--  node/Utils.cpp | 22
-rw-r--r--  node/World.hpp | 241
42 files changed, 5625 insertions(+), 1915 deletions(-)
diff --git a/node/AntiRecursion.hpp b/node/AntiRecursion.hpp
index c5aa92d8..4d9df465 100644
--- a/node/AntiRecursion.hpp
+++ b/node/AntiRecursion.hpp
@@ -35,30 +35,35 @@
namespace ZeroTier {
-#define ZT_ANTIRECURSION_TAIL_LEN 256
+/**
+ * Size of anti-recursion history
+ */
+#define ZT_ANTIRECURSION_HISTORY_SIZE 16
/**
* Filter to prevent recursion (ZeroTier-over-ZeroTier)
*
* This works by logging ZeroTier packets that we send. It's then invoked
- * again against packets read from local Ethernet taps. If the last N
+ * again against packets read from local Ethernet taps. If the last 32
* bytes representing the ZeroTier packet match in the tap frame, then
* the frame is a re-injection of a frame that we sent and is rejected.
*
* This means that ZeroTier packets simply will not traverse ZeroTier
* networks, which would cause all sorts of weird problems.
*
- * NOTE: this is applied to low-level packets before they are sent to
- * SocketManager and/or sockets, not to fully assembled packets before
- * (possible) fragmentation.
+ * This is highly optimized code since it's checked for every packet.
*/
class AntiRecursion
{
public:
AntiRecursion()
- throw()
{
- memset(_history,0,sizeof(_history));
+ for(int i=0;i<ZT_ANTIRECURSION_HISTORY_SIZE;++i) {
+ _history[i].tail[0] = 0;
+ _history[i].tail[1] = 0;
+ _history[i].tail[2] = 0;
+ _history[i].tail[3] = 0;
+ }
_ptr = 0;
}
@@ -68,13 +73,20 @@ public:
* @param data ZT packet data
* @param len Length of packet
*/
- inline void logOutgoingZT(const void *data,unsigned int len)
- throw()
+ inline void logOutgoingZT(const void *const data,const unsigned int len)
{
- ArItem *i = &(_history[_ptr++ % ZT_ANTIRECURSION_HISTORY_SIZE]);
- const unsigned int tl = (len > ZT_ANTIRECURSION_TAIL_LEN) ? ZT_ANTIRECURSION_TAIL_LEN : len;
- memcpy(i->tail,((const unsigned char *)data) + (len - tl),tl);
- i->len = tl;
+ if (len < 32)
+ return;
+#ifdef ZT_NO_TYPE_PUNNING
+ memcpy(_history[++_ptr % ZT_ANTIRECURSION_HISTORY_SIZE].tail,reinterpret_cast<const uint8_t *>(data) + (len - 32),32);
+#else
+ uint64_t *t = _history[++_ptr % ZT_ANTIRECURSION_HISTORY_SIZE].tail;
+ const uint64_t *p = reinterpret_cast<const uint64_t *>(reinterpret_cast<const uint8_t *>(data) + (len - 32));
+ *(t++) = *(p++);
+ *(t++) = *(p++);
+ *(t++) = *(p++);
+ *t = *p;
+#endif
}
/**
@@ -84,25 +96,36 @@ public:
* @param len Length of frame
* @return True if frame is OK to be passed, false if it's a ZT frame that we sent
*/
- inline bool checkEthernetFrame(const void *data,unsigned int len)
- throw()
+ inline bool checkEthernetFrame(const void *const data,const unsigned int len) const
{
- for(unsigned int h=0;h<ZT_ANTIRECURSION_HISTORY_SIZE;++h) {
- ArItem *i = &(_history[h]);
- if ((i->len > 0)&&(len >= i->len)&&(!memcmp(((const unsigned char *)data) + (len - i->len),i->tail,i->len)))
+ if (len < 32)
+ return true;
+ const uint8_t *const pp = reinterpret_cast<const uint8_t *>(data) + (len - 32);
+ const _ArItem *i = _history;
+ const _ArItem *const end = i + ZT_ANTIRECURSION_HISTORY_SIZE;
+ while (i != end) {
+#ifdef ZT_NO_TYPE_PUNNING
+ if (!memcmp(pp,i->tail,32))
return false;
+#else
+ const uint64_t *t = i->tail;
+ const uint64_t *p = reinterpret_cast<const uint64_t *>(pp);
+ uint64_t bits = *(t++) ^ *(p++);
+ bits |= *(t++) ^ *(p++);
+ bits |= *(t++) ^ *(p++);
+ bits |= *t ^ *p;
+ if (!bits)
+ return false;
+#endif
+ ++i;
}
return true;
}
private:
- struct ArItem
- {
- unsigned char tail[ZT_ANTIRECURSION_TAIL_LEN];
- unsigned int len;
- };
- ArItem _history[ZT_ANTIRECURSION_HISTORY_SIZE];
- volatile unsigned int _ptr;
+ struct _ArItem { uint64_t tail[4]; };
+ _ArItem _history[ZT_ANTIRECURSION_HISTORY_SIZE];
+ volatile unsigned long _ptr;
};
} // namespace ZeroTier
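The rewrite above replaces the variable-length tail buffer and per-entry length with a fixed 32-byte tail stored as four 64-bit words, so the per-frame check becomes a handful of XOR/OR operations (or a single 32-byte memcmp when type punning is disabled). A minimal standalone sketch of the same tail-matching idea follows; TailFilter and its method names are illustrative stand-ins, not part of the ZeroTier source.

// Sketch of the fixed-tail anti-recursion check described above.
// TailFilter is a hypothetical stand-in for ZeroTier::AntiRecursion.
#include <cstdint>
#include <cstring>

class TailFilter
{
public:
	static const unsigned int HISTORY = 16; // ring of recent packet tails

	TailFilter() : _ptr(0) { std::memset(_tails,0,sizeof(_tails)); }

	// Remember the last 32 bytes of an outgoing ZeroTier packet
	void logOutgoing(const void *data,unsigned int len)
	{
		if (len < 32) return;
		std::memcpy(_tails[_ptr++ % HISTORY],reinterpret_cast<const uint8_t *>(data) + (len - 32),32);
	}

	// False if this tap frame ends with a tail we recently sent -- i.e. it is
	// one of our own packets re-entering through a local Ethernet tap
	bool checkFrame(const void *data,unsigned int len) const
	{
		if (len < 32) return true;
		const uint8_t *tail = reinterpret_cast<const uint8_t *>(data) + (len - 32);
		for(unsigned int i=0;i<HISTORY;++i) {
			if (!std::memcmp(tail,_tails[i],32))
				return false;
		}
		return true;
	}

private:
	uint8_t _tails[HISTORY][32];
	unsigned int _ptr;
};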
diff --git a/node/BinarySemaphore.hpp b/node/BinarySemaphore.hpp
new file mode 100644
index 00000000..97d0d1c4
--- /dev/null
+++ b/node/BinarySemaphore.hpp
@@ -0,0 +1,106 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ *
+ * If you would like to embed ZeroTier into a commercial application or
+ * redistribute it in a modified binary form, please contact ZeroTier Networks
+ * LLC. Start here: http://www.zerotier.com/
+ */
+
+#ifndef ZT_BINARYSEMAPHORE_HPP
+#define ZT_BINARYSEMAPHORE_HPP
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "Constants.hpp"
+#include "NonCopyable.hpp"
+
+#ifdef __WINDOWS__
+
+#include <Windows.h>
+
+namespace ZeroTier {
+
+class BinarySemaphore : NonCopyable
+{
+public:
+ BinarySemaphore() throw() { _sem = CreateSemaphore(NULL,0,1,NULL); }
+ ~BinarySemaphore() { CloseHandle(_sem); }
+ inline void wait() { WaitForSingleObject(_sem,INFINITE); }
+ inline void post() { ReleaseSemaphore(_sem,1,NULL); }
+private:
+ HANDLE _sem;
+};
+
+} // namespace ZeroTier
+
+#else // !__WINDOWS__
+
+#include <pthread.h>
+
+namespace ZeroTier {
+
+class BinarySemaphore : NonCopyable
+{
+public:
+ BinarySemaphore()
+ {
+ pthread_mutex_init(&_mh,(const pthread_mutexattr_t *)0);
+ pthread_cond_init(&_cond,(const pthread_condattr_t *)0);
+ _f = false;
+ }
+
+ ~BinarySemaphore()
+ {
+ pthread_cond_destroy(&_cond);
+ pthread_mutex_destroy(&_mh);
+ }
+
+ inline void wait()
+ {
+ pthread_mutex_lock(const_cast <pthread_mutex_t *>(&_mh));
+ while (!_f)
+ pthread_cond_wait(const_cast <pthread_cond_t *>(&_cond),const_cast <pthread_mutex_t *>(&_mh));
+ _f = false;
+ pthread_mutex_unlock(const_cast <pthread_mutex_t *>(&_mh));
+ }
+
+ inline void post()
+ {
+ pthread_mutex_lock(const_cast <pthread_mutex_t *>(&_mh));
+ _f = true;
+ pthread_mutex_unlock(const_cast <pthread_mutex_t *>(&_mh));
+ pthread_cond_signal(const_cast <pthread_cond_t *>(&_cond));
+ }
+
+private:
+ pthread_cond_t _cond;
+ pthread_mutex_t _mh;
+ volatile bool _f;
+};
+
+} // namespace ZeroTier
+
+#endif // !__WINDOWS__
+
+#endif
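BinarySemaphore is a one-shot wakeup primitive: post() sets a flag and signals, wait() blocks until the flag is set and then clears it, so any number of posts issued before the next wait collapse into a single wakeup. Below is a hedged usage sketch of the hand-off pattern it supports, assuming the in-tree Mutex helper; the job queue, function names, and thread creation (omitted) are illustrative, not actual ZeroTier call sites.

// Hand-off between a producer and a worker thread using BinarySemaphore.
// The job queue and function names are illustrative; thread creation is omitted.
#include <vector>
#include "node/BinarySemaphore.hpp"
#include "node/Mutex.hpp"

static ZeroTier::BinarySemaphore workReady;
static ZeroTier::Mutex workLock;
static std::vector<int> work;
static volatile bool run = true;

// Runs on a dedicated background thread
static void workerLoop()
{
	while (run) {
		workReady.wait(); // sleeps until post(); several posts collapse into one wakeup
		std::vector<int> batch;
		{
			ZeroTier::Mutex::Lock l(workLock);
			batch.swap(work);
		}
		for(std::vector<int>::iterator i(batch.begin());i!=batch.end();++i) {
			/* process *i */
		}
	}
}

// Called from any other thread
static void submit(int job)
{
	{
		ZeroTier::Mutex::Lock l(workLock);
		work.push_back(job);
	}
	workReady.post(); // wake the worker if it is sleeping
}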
diff --git a/node/Cluster.cpp b/node/Cluster.cpp
new file mode 100644
index 00000000..fa9a1611
--- /dev/null
+++ b/node/Cluster.cpp
@@ -0,0 +1,913 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ *
+ * If you would like to embed ZeroTier into a commercial application or
+ * redistribute it in a modified binary form, please contact ZeroTier Networks
+ * LLC. Start here: http://www.zerotier.com/
+ */
+
+#ifdef ZT_ENABLE_CLUSTER
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#include <map>
+#include <algorithm>
+#include <set>
+#include <utility>
+#include <list>
+#include <stdexcept>
+
+#include "../version.h"
+
+#include "Cluster.hpp"
+#include "RuntimeEnvironment.hpp"
+#include "MulticastGroup.hpp"
+#include "CertificateOfMembership.hpp"
+#include "Salsa20.hpp"
+#include "Poly1305.hpp"
+#include "Identity.hpp"
+#include "Topology.hpp"
+#include "Packet.hpp"
+#include "Switch.hpp"
+#include "Node.hpp"
+#include "Array.hpp"
+
+namespace ZeroTier {
+
+static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)
+ throw()
+{
+ double dx = ((double)x2 - (double)x1);
+ double dy = ((double)y2 - (double)y1);
+ double dz = ((double)z2 - (double)z1);
+ return sqrt((dx * dx) + (dy * dy) + (dz * dz));
+}
+
+// An entry in _ClusterSendQueue
+struct _ClusterSendQueueEntry
+{
+ uint64_t timestamp;
+ Address fromPeerAddress;
+ Address toPeerAddress;
+ // if we ever support larger transport MTUs this must be increased
+ unsigned char data[ZT_CLUSTER_SEND_QUEUE_DATA_MAX];
+ unsigned int len;
+ bool unite;
+};
+
+// A multi-index map with entry memory pooling -- this allows our queue to
+// be O(log(N)) and is complex enough that it makes the code a lot cleaner
+// to break it out from Cluster.
+class _ClusterSendQueue
+{
+public:
+ _ClusterSendQueue() :
+ _poolCount(0) {}
+ ~_ClusterSendQueue() {} // memory is automatically freed when _chunks is destroyed
+
+ inline void enqueue(uint64_t now,const Address &from,const Address &to,const void *data,unsigned int len,bool unite)
+ {
+ if (len > ZT_CLUSTER_SEND_QUEUE_DATA_MAX)
+ return;
+
+ Mutex::Lock _l(_lock);
+
+ // Delete oldest queue entry for this sender if this enqueue() would take them over the per-sender limit
+ {
+ std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_bySrc.lower_bound(std::pair<Address,_ClusterSendQueueEntry *>(from,(_ClusterSendQueueEntry *)0)));
+ std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator oldest(qi);
+ unsigned long countForSender = 0;
+ while ((qi != _bySrc.end())&&(qi->first == from)) {
+ if (qi->second->timestamp < oldest->second->timestamp)
+ oldest = qi;
+ ++countForSender;
+ ++qi;
+ }
+ if (countForSender >= ZT_CLUSTER_MAX_QUEUE_PER_SENDER) {
+ _byDest.erase(std::pair<Address,_ClusterSendQueueEntry *>(oldest->second->toPeerAddress,oldest->second));
+ _pool[_poolCount++] = oldest->second;
+ _bySrc.erase(oldest);
+ }
+ }
+
+ _ClusterSendQueueEntry *e;
+ if (_poolCount > 0) {
+ e = _pool[--_poolCount];
+ } else {
+ if (_chunks.size() >= ZT_CLUSTER_MAX_QUEUE_CHUNKS)
+ return; // queue is totally full!
+ _chunks.push_back(Array<_ClusterSendQueueEntry,ZT_CLUSTER_QUEUE_CHUNK_SIZE>());
+ e = &(_chunks.back().data[0]);
+ for(unsigned int i=1;i<ZT_CLUSTER_QUEUE_CHUNK_SIZE;++i)
+ _pool[_poolCount++] = &(_chunks.back().data[i]);
+ }
+
+ e->timestamp = now;
+ e->fromPeerAddress = from;
+ e->toPeerAddress = to;
+ memcpy(e->data,data,len);
+ e->len = len;
+ e->unite = unite;
+
+ _bySrc.insert(std::pair<Address,_ClusterSendQueueEntry *>(from,e));
+ _byDest.insert(std::pair<Address,_ClusterSendQueueEntry *>(to,e));
+ }
+
+ inline void expire(uint64_t now)
+ {
+ Mutex::Lock _l(_lock);
+ for(std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_bySrc.begin());qi!=_bySrc.end();) {
+ if ((now - qi->second->timestamp) > ZT_CLUSTER_QUEUE_EXPIRATION) {
+ _byDest.erase(std::pair<Address,_ClusterSendQueueEntry *>(qi->second->toPeerAddress,qi->second));
+ _pool[_poolCount++] = qi->second;
+ _bySrc.erase(qi++);
+ } else ++qi;
+ }
+ }
+
+ /**
+ * Get and dequeue entries for a given destination address
+ *
+ * After use these entries must be returned with returnToPool()!
+ *
+ * @param dest Destination address
+ * @param results Array to fill with results
+ * @param maxResults Size of results[] in pointers
+ * @return Number of actual results returned
+ */
+ inline unsigned int getByDest(const Address &dest,_ClusterSendQueueEntry **results,unsigned int maxResults)
+ {
+ unsigned int count = 0;
+ Mutex::Lock _l(_lock);
+ std::set< std::pair<Address,_ClusterSendQueueEntry *> >::iterator qi(_byDest.lower_bound(std::pair<Address,_ClusterSendQueueEntry *>(dest,(_ClusterSendQueueEntry *)0)));
+ while ((qi != _byDest.end())&&(qi->first == dest)) {
+ _bySrc.erase(std::pair<Address,_ClusterSendQueueEntry *>(qi->second->fromPeerAddress,qi->second));
+ results[count++] = qi->second;
+ if (count == maxResults)
+ break;
+ _byDest.erase(qi++);
+ }
+ return count;
+ }
+
+ /**
+ * Return entries to pool after use
+ *
+ * @param entries Array of entries
+ * @param count Number of entries
+ */
+ inline void returnToPool(_ClusterSendQueueEntry **entries,unsigned int count)
+ {
+ Mutex::Lock _l(_lock);
+ for(unsigned int i=0;i<count;++i)
+ _pool[_poolCount++] = entries[i];
+ }
+
+private:
+ std::list< Array<_ClusterSendQueueEntry,ZT_CLUSTER_QUEUE_CHUNK_SIZE> > _chunks;
+ _ClusterSendQueueEntry *_pool[ZT_CLUSTER_QUEUE_CHUNK_SIZE * ZT_CLUSTER_MAX_QUEUE_CHUNKS];
+ unsigned long _poolCount;
+ std::set< std::pair<Address,_ClusterSendQueueEntry *> > _bySrc;
+ std::set< std::pair<Address,_ClusterSendQueueEntry *> > _byDest;
+ Mutex _lock;
+};
+
+Cluster::Cluster(
+ const RuntimeEnvironment *renv,
+ uint16_t id,
+ const std::vector<InetAddress> &zeroTierPhysicalEndpoints,
+ int32_t x,
+ int32_t y,
+ int32_t z,
+ void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
+ void *sendFunctionArg,
+ int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
+ void *addressToLocationFunctionArg) :
+ RR(renv),
+ _sendQueue(new _ClusterSendQueue()),
+ _sendFunction(sendFunction),
+ _sendFunctionArg(sendFunctionArg),
+ _addressToLocationFunction(addressToLocationFunction),
+ _addressToLocationFunctionArg(addressToLocationFunctionArg),
+ _x(x),
+ _y(y),
+ _z(z),
+ _id(id),
+ _zeroTierPhysicalEndpoints(zeroTierPhysicalEndpoints),
+ _members(new _Member[ZT_CLUSTER_MAX_MEMBERS]),
+ _lastFlushed(0),
+ _lastCleanedRemotePeers(0),
+ _lastCleanedQueue(0)
+{
+ uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
+
+ // Generate master secret by hashing the secret from our Identity key pair
+ RR->identity.sha512PrivateKey(_masterSecret);
+
+ // Generate our inbound message key, which is the master secret XORed with our ID and hashed twice
+ memcpy(stmp,_masterSecret,sizeof(stmp));
+ stmp[0] ^= Utils::hton(id);
+ SHA512::hash(stmp,stmp,sizeof(stmp));
+ SHA512::hash(stmp,stmp,sizeof(stmp));
+ memcpy(_key,stmp,sizeof(_key));
+ Utils::burn(stmp,sizeof(stmp));
+}
+
+Cluster::~Cluster()
+{
+ Utils::burn(_masterSecret,sizeof(_masterSecret));
+ Utils::burn(_key,sizeof(_key));
+ delete [] _members;
+ delete _sendQueue;
+}
+
+void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
+{
+ Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> dmsg;
+ {
+ // FORMAT: <[16] iv><[8] MAC><... data>
+ if ((len < 24)||(len > ZT_CLUSTER_MAX_MESSAGE_LENGTH))
+ return;
+
+ // 16-byte IV: first 8 bytes XORed with key, last 8 bytes used as Salsa20 64-bit IV
+ char keytmp[32];
+ memcpy(keytmp,_key,32);
+ for(int i=0;i<8;++i)
+ keytmp[i] ^= reinterpret_cast<const char *>(msg)[i];
+ Salsa20 s20(keytmp,256,reinterpret_cast<const char *>(msg) + 8);
+ Utils::burn(keytmp,sizeof(keytmp));
+
+ // One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
+ char polykey[ZT_POLY1305_KEY_LEN];
+ memset(polykey,0,sizeof(polykey));
+ s20.encrypt12(polykey,polykey,sizeof(polykey));
+
+ // Compute 16-byte MAC
+ char mac[ZT_POLY1305_MAC_LEN];
+ Poly1305::compute(mac,reinterpret_cast<const char *>(msg) + 24,len - 24,polykey);
+
+ // Check first 8 bytes of MAC against 64-bit MAC in stream
+ if (!Utils::secureEq(mac,reinterpret_cast<const char *>(msg) + 16,8))
+ return;
+
+ // Decrypt!
+ dmsg.setSize(len - 24);
+ s20.decrypt12(reinterpret_cast<const char *>(msg) + 24,const_cast<void *>(dmsg.data()),dmsg.size());
+ }
+
+ if (dmsg.size() < 4)
+ return;
+ const uint16_t fromMemberId = dmsg.at<uint16_t>(0);
+ unsigned int ptr = 2;
+ if (fromMemberId == _id) // sanity check: we don't talk to ourselves
+ return;
+ const uint16_t toMemberId = dmsg.at<uint16_t>(ptr);
+ ptr += 2;
+ if (toMemberId != _id) // sanity check: message not for us?
+ return;
+
+ { // make sure sender is actually considered a member
+ Mutex::Lock _l3(_memberIds_m);
+ if (std::find(_memberIds.begin(),_memberIds.end(),fromMemberId) == _memberIds.end())
+ return;
+ }
+
+ try {
+ while (ptr < dmsg.size()) {
+ const unsigned int mlen = dmsg.at<uint16_t>(ptr); ptr += 2;
+ const unsigned int nextPtr = ptr + mlen;
+ if (nextPtr > dmsg.size())
+ break;
+
+ int mtype = -1;
+ try {
+ switch((StateMessageType)(mtype = (int)dmsg[ptr++])) {
+ default:
+ break;
+
+ case CLUSTER_MESSAGE_ALIVE: {
+ _Member &m = _members[fromMemberId];
+ Mutex::Lock mlck(m.lock);
+ ptr += 7; // skip version stuff, not used yet
+ m.x = dmsg.at<int32_t>(ptr); ptr += 4;
+ m.y = dmsg.at<int32_t>(ptr); ptr += 4;
+ m.z = dmsg.at<int32_t>(ptr); ptr += 4;
+ ptr += 8; // skip local clock, not used
+ m.load = dmsg.at<uint64_t>(ptr); ptr += 8;
+ m.peers = dmsg.at<uint64_t>(ptr); ptr += 8;
+ ptr += 8; // skip flags, unused
+#ifdef ZT_TRACE
+ std::string addrs;
+#endif
+ unsigned int physicalAddressCount = dmsg[ptr++];
+ m.zeroTierPhysicalEndpoints.clear();
+ for(unsigned int i=0;i<physicalAddressCount;++i) {
+ m.zeroTierPhysicalEndpoints.push_back(InetAddress());
+ ptr += m.zeroTierPhysicalEndpoints.back().deserialize(dmsg,ptr);
+ if (!(m.zeroTierPhysicalEndpoints.back())) {
+ m.zeroTierPhysicalEndpoints.pop_back();
+ }
+#ifdef ZT_TRACE
+ else {
+ if (addrs.length() > 0)
+ addrs.push_back(',');
+ addrs.append(m.zeroTierPhysicalEndpoints.back().toString());
+ }
+#endif
+ }
+#ifdef ZT_TRACE
+ if ((RR->node->now() - m.lastReceivedAliveAnnouncement) >= ZT_CLUSTER_TIMEOUT) {
+ TRACE("[%u] I'm alive! peers close to %d,%d,%d can be redirected to: %s",(unsigned int)fromMemberId,m.x,m.y,m.z,addrs.c_str());
+ }
+#endif
+ m.lastReceivedAliveAnnouncement = RR->node->now();
+ } break;
+
+ case CLUSTER_MESSAGE_HAVE_PEER: {
+ Identity id;
+ ptr += id.deserialize(dmsg,ptr);
+ if (id) {
+ RR->topology->saveIdentity(id);
+
+ {
+ Mutex::Lock _l(_remotePeers_m);
+ _remotePeers[std::pair<Address,unsigned int>(id.address(),(unsigned int)fromMemberId)] = RR->node->now();
+ }
+
+ _ClusterSendQueueEntry *q[16384]; // 16384 is "tons"
+ unsigned int qc = _sendQueue->getByDest(id.address(),q,16384);
+ for(unsigned int i=0;i<qc;++i)
+ this->sendViaCluster(q[i]->fromPeerAddress,q[i]->toPeerAddress,q[i]->data,q[i]->len,q[i]->unite);
+ _sendQueue->returnToPool(q,qc);
+
+ TRACE("[%u] has %s (retried %u queued sends)",(unsigned int)fromMemberId,id.address().toString().c_str(),qc);
+ }
+ } break;
+
+ case CLUSTER_MESSAGE_WANT_PEER: {
+ const Address zeroTierAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
+ SharedPtr<Peer> peer(RR->topology->getPeerNoCache(zeroTierAddress));
+ if ( (peer) && (peer->hasClusterOptimalPath(RR->node->now())) ) {
+ Buffer<1024> buf;
+ peer->identity().serialize(buf);
+ Mutex::Lock _l2(_members[fromMemberId].lock);
+ _send(fromMemberId,CLUSTER_MESSAGE_HAVE_PEER,buf.data(),buf.size());
+ }
+ } break;
+
+ case CLUSTER_MESSAGE_REMOTE_PACKET: {
+ const unsigned int plen = dmsg.at<uint16_t>(ptr); ptr += 2;
+ if (plen) {
+ Packet remotep(dmsg.field(ptr,plen),plen); ptr += plen;
+ //TRACE("remote %s from %s via %u (%u bytes)",Packet::verbString(remotep.verb()),remotep.source().toString().c_str(),fromMemberId,plen);
+ switch(remotep.verb()) {
+ case Packet::VERB_WHOIS: _doREMOTE_WHOIS(fromMemberId,remotep); break;
+ case Packet::VERB_MULTICAST_GATHER: _doREMOTE_MULTICAST_GATHER(fromMemberId,remotep); break;
+ default: break; // ignore things we don't care about across cluster
+ }
+ }
+ } break;
+
+ case CLUSTER_MESSAGE_PROXY_UNITE: {
+ const Address localPeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
+ const Address remotePeerAddress(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
+ const unsigned int numRemotePeerPaths = dmsg[ptr++];
+ InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max
+ for(unsigned int i=0;i<numRemotePeerPaths;++i)
+ ptr += remotePeerPaths[i].deserialize(dmsg,ptr);
+
+ TRACE("[%u] requested that we unite local %s with remote %s",(unsigned int)fromMemberId,localPeerAddress.toString().c_str(),remotePeerAddress.toString().c_str());
+
+ const uint64_t now = RR->node->now();
+ SharedPtr<Peer> localPeer(RR->topology->getPeerNoCache(localPeerAddress));
+ if ((localPeer)&&(numRemotePeerPaths > 0)) {
+ InetAddress bestLocalV4,bestLocalV6;
+ localPeer->getBestActiveAddresses(now,bestLocalV4,bestLocalV6);
+
+ InetAddress bestRemoteV4,bestRemoteV6;
+ for(unsigned int i=0;i<numRemotePeerPaths;++i) {
+ if ((bestRemoteV4)&&(bestRemoteV6))
+ break;
+ switch(remotePeerPaths[i].ss_family) {
+ case AF_INET:
+ if (!bestRemoteV4)
+ bestRemoteV4 = remotePeerPaths[i];
+ break;
+ case AF_INET6:
+ if (!bestRemoteV6)
+ bestRemoteV6 = remotePeerPaths[i];
+ break;
+ }
+ }
+
+ Packet rendezvousForLocal(localPeerAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS);
+ rendezvousForLocal.append((uint8_t)0);
+ remotePeerAddress.appendTo(rendezvousForLocal);
+
+ Buffer<2048> rendezvousForRemote;
+ remotePeerAddress.appendTo(rendezvousForRemote);
+ rendezvousForRemote.append((uint8_t)Packet::VERB_RENDEZVOUS);
+ rendezvousForRemote.addSize(2); // space for actual packet payload length
+ rendezvousForRemote.append((uint8_t)0); // flags == 0
+ localPeerAddress.appendTo(rendezvousForRemote);
+
+ bool haveMatch = false;
+ if ((bestLocalV6)&&(bestRemoteV6)) {
+ haveMatch = true;
+
+ rendezvousForLocal.append((uint16_t)bestRemoteV6.port());
+ rendezvousForLocal.append((uint8_t)16);
+ rendezvousForLocal.append(bestRemoteV6.rawIpData(),16);
+
+ rendezvousForRemote.append((uint16_t)bestLocalV6.port());
+ rendezvousForRemote.append((uint8_t)16);
+ rendezvousForRemote.append(bestLocalV6.rawIpData(),16);
+ rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 16));
+ } else if ((bestLocalV4)&&(bestRemoteV4)) {
+ haveMatch = true;
+
+ rendezvousForLocal.append((uint16_t)bestRemoteV4.port());
+ rendezvousForLocal.append((uint8_t)4);
+ rendezvousForLocal.append(bestRemoteV4.rawIpData(),4);
+
+ rendezvousForRemote.append((uint16_t)bestLocalV4.port());
+ rendezvousForRemote.append((uint8_t)4);
+ rendezvousForRemote.append(bestLocalV4.rawIpData(),4);
+ rendezvousForRemote.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(9 + 4));
+ }
+
+ if (haveMatch) {
+ {
+ Mutex::Lock _l2(_members[fromMemberId].lock);
+ _send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,rendezvousForRemote.data(),rendezvousForRemote.size());
+ }
+ RR->sw->send(rendezvousForLocal,true,0);
+ }
+ }
+ } break;
+
+ case CLUSTER_MESSAGE_PROXY_SEND: {
+ const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
+ const Packet::Verb verb = (Packet::Verb)dmsg[ptr++];
+ const unsigned int len = dmsg.at<uint16_t>(ptr); ptr += 2;
+ Packet outp(rcpt,RR->identity.address(),verb);
+ outp.append(dmsg.field(ptr,len),len); ptr += len;
+ RR->sw->send(outp,true,0);
+ //TRACE("[%u] proxy send %s to %s length %u",(unsigned int)fromMemberId,Packet::verbString(verb),rcpt.toString().c_str(),len);
+ } break;
+ }
+ } catch ( ... ) {
+ TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype);
+ // drop invalids
+ }
+
+ ptr = nextPtr;
+ }
+ } catch ( ... ) {
+ TRACE("invalid message (outer loop), discarding");
+ // drop invalids
+ }
+}
+
+void Cluster::broadcastHavePeer(const Identity &id)
+{
+ Buffer<1024> buf;
+ id.serialize(buf);
+ Mutex::Lock _l(_memberIds_m);
+ for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+ Mutex::Lock _l2(_members[*mid].lock);
+ _send(*mid,CLUSTER_MESSAGE_HAVE_PEER,buf.data(),buf.size());
+ }
+}
+
+void Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress,const void *data,unsigned int len,bool unite)
+{
+ if (len > ZT_PROTO_MAX_PACKET_LENGTH) // sanity check
+ return;
+
+ const uint64_t now = RR->node->now();
+
+ uint64_t mostRecentTs = 0;
+ unsigned int mostRecentMemberId = 0xffffffff;
+ {
+ Mutex::Lock _l2(_remotePeers_m);
+ std::map< std::pair<Address,unsigned int>,uint64_t >::const_iterator rpe(_remotePeers.lower_bound(std::pair<Address,unsigned int>(toPeerAddress,0)));
+ for(;;) {
+ if ((rpe == _remotePeers.end())||(rpe->first.first != toPeerAddress))
+ break;
+ else if (rpe->second > mostRecentTs) {
+ mostRecentTs = rpe->second;
+ mostRecentMemberId = rpe->first.second;
+ }
+ ++rpe;
+ }
+ }
+
+ const uint64_t age = now - mostRecentTs;
+ if (age >= (ZT_PEER_ACTIVITY_TIMEOUT / 3)) {
+ const bool enqueueAndWait = ((age >= ZT_PEER_ACTIVITY_TIMEOUT)||(mostRecentMemberId > 0xffff));
+
+ // Poll everyone with WANT_PEER if the age of our most recent entry is
+ // approaching expiration (or has expired, or does not exist).
+ char tmp[ZT_ADDRESS_LENGTH];
+ toPeerAddress.copyTo(tmp,ZT_ADDRESS_LENGTH);
+ {
+ Mutex::Lock _l(_memberIds_m);
+ for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+ Mutex::Lock _l2(_members[*mid].lock);
+ _send(*mid,CLUSTER_MESSAGE_WANT_PEER,tmp,ZT_ADDRESS_LENGTH);
+ }
+ }
+
+ // If there isn't a good place to send via, then enqueue this for retrying
+ // later and return after having broadcasted a WANT_PEER.
+ if (enqueueAndWait) {
+ TRACE("sendViaCluster %s -> %s enqueueing to wait for HAVE_PEER",fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str());
+ _sendQueue->enqueue(now,fromPeerAddress,toPeerAddress,data,len,unite);
+ return;
+ }
+ }
+
+ Buffer<1024> buf;
+ if (unite) {
+ InetAddress v4,v6;
+ if (fromPeerAddress) {
+ SharedPtr<Peer> fromPeer(RR->topology->getPeerNoCache(fromPeerAddress));
+ if (fromPeer)
+ fromPeer->getBestActiveAddresses(now,v4,v6);
+ }
+ uint8_t addrCount = 0;
+ if (v4)
+ ++addrCount;
+ if (v6)
+ ++addrCount;
+ if (addrCount) {
+ toPeerAddress.appendTo(buf);
+ fromPeerAddress.appendTo(buf);
+ buf.append(addrCount);
+ if (v4)
+ v4.serialize(buf);
+ if (v6)
+ v6.serialize(buf);
+ }
+ }
+
+ {
+ Mutex::Lock _l2(_members[mostRecentMemberId].lock);
+ if (buf.size() > 0)
+ _send(mostRecentMemberId,CLUSTER_MESSAGE_PROXY_UNITE,buf.data(),buf.size());
+ if (_members[mostRecentMemberId].zeroTierPhysicalEndpoints.size() > 0) {
+ TRACE("sendViaCluster relaying %u bytes from %s to %s by way of %u",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)mostRecentMemberId);
+ RR->node->putPacket(InetAddress(),_members[mostRecentMemberId].zeroTierPhysicalEndpoints.front(),data,len);
+ }
+ }
+}
+
+void Cluster::sendDistributedQuery(const Packet &pkt)
+{
+ Buffer<4096> buf;
+ buf.append((uint16_t)pkt.size());
+ buf.append(pkt.data(),pkt.size());
+ Mutex::Lock _l(_memberIds_m);
+ for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+ Mutex::Lock _l2(_members[*mid].lock);
+ _send(*mid,CLUSTER_MESSAGE_REMOTE_PACKET,buf.data(),buf.size());
+ }
+}
+
+void Cluster::doPeriodicTasks()
+{
+ const uint64_t now = RR->node->now();
+
+ if ((now - _lastFlushed) >= ZT_CLUSTER_FLUSH_PERIOD) {
+ _lastFlushed = now;
+
+ Mutex::Lock _l(_memberIds_m);
+ for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+ Mutex::Lock _l2(_members[*mid].lock);
+
+ if ((now - _members[*mid].lastAnnouncedAliveTo) >= ((ZT_CLUSTER_TIMEOUT / 2) - 1000)) {
+ _members[*mid].lastAnnouncedAliveTo = now;
+
+ Buffer<2048> alive;
+ alive.append((uint16_t)ZEROTIER_ONE_VERSION_MAJOR);
+ alive.append((uint16_t)ZEROTIER_ONE_VERSION_MINOR);
+ alive.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
+ alive.append((uint8_t)ZT_PROTO_VERSION);
+ if (_addressToLocationFunction) {
+ alive.append((int32_t)_x);
+ alive.append((int32_t)_y);
+ alive.append((int32_t)_z);
+ } else {
+ alive.append((int32_t)0);
+ alive.append((int32_t)0);
+ alive.append((int32_t)0);
+ }
+ alive.append((uint64_t)now);
+ alive.append((uint64_t)0); // TODO: compute and send load average
+ alive.append((uint64_t)RR->topology->countActive(now));
+ alive.append((uint64_t)0); // unused/reserved flags
+ alive.append((uint8_t)_zeroTierPhysicalEndpoints.size());
+ for(std::vector<InetAddress>::const_iterator pe(_zeroTierPhysicalEndpoints.begin());pe!=_zeroTierPhysicalEndpoints.end();++pe)
+ pe->serialize(alive);
+ _send(*mid,CLUSTER_MESSAGE_ALIVE,alive.data(),alive.size());
+ }
+
+ _flush(*mid);
+ }
+ }
+
+ if ((now - _lastCleanedRemotePeers) >= (ZT_PEER_ACTIVITY_TIMEOUT * 2)) {
+ _lastCleanedRemotePeers = now;
+
+ Mutex::Lock _l(_remotePeers_m);
+ for(std::map< std::pair<Address,unsigned int>,uint64_t >::iterator rp(_remotePeers.begin());rp!=_remotePeers.end();) {
+ if ((now - rp->second) >= ZT_PEER_ACTIVITY_TIMEOUT)
+ _remotePeers.erase(rp++);
+ else ++rp;
+ }
+ }
+
+ if ((now - _lastCleanedQueue) >= ZT_CLUSTER_QUEUE_EXPIRATION) {
+ _lastCleanedQueue = now;
+ _sendQueue->expire(now);
+ }
+}
+
+void Cluster::addMember(uint16_t memberId)
+{
+ if ((memberId >= ZT_CLUSTER_MAX_MEMBERS)||(memberId == _id))
+ return;
+
+ Mutex::Lock _l2(_members[memberId].lock);
+
+ {
+ Mutex::Lock _l(_memberIds_m);
+ if (std::find(_memberIds.begin(),_memberIds.end(),memberId) != _memberIds.end())
+ return;
+ _memberIds.push_back(memberId);
+ std::sort(_memberIds.begin(),_memberIds.end());
+ }
+
+ _members[memberId].clear();
+
+ // Generate this member's message key from the master and its ID
+ uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
+ memcpy(stmp,_masterSecret,sizeof(stmp));
+ stmp[0] ^= Utils::hton(memberId);
+ SHA512::hash(stmp,stmp,sizeof(stmp));
+ SHA512::hash(stmp,stmp,sizeof(stmp));
+ memcpy(_members[memberId].key,stmp,sizeof(_members[memberId].key));
+ Utils::burn(stmp,sizeof(stmp));
+
+ // Prepare q
+ _members[memberId].q.clear();
+ char iv[16];
+ Utils::getSecureRandom(iv,16);
+ _members[memberId].q.append(iv,16);
+ _members[memberId].q.addSize(8); // room for MAC
+ _members[memberId].q.append((uint16_t)_id);
+ _members[memberId].q.append((uint16_t)memberId);
+}
+
+void Cluster::removeMember(uint16_t memberId)
+{
+ Mutex::Lock _l(_memberIds_m);
+ std::vector<uint16_t> newMemberIds;
+ for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+ if (*mid != memberId)
+ newMemberIds.push_back(*mid);
+ }
+ _memberIds = newMemberIds;
+}
+
+bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddress,const InetAddress &peerPhysicalAddress,bool offload)
+{
+ if (_addressToLocationFunction) {
+ // Pick based on location if it can be determined
+ int px = 0,py = 0,pz = 0;
+ if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast<const struct sockaddr_storage *>(&peerPhysicalAddress),&px,&py,&pz) == 0) {
+ TRACE("no geolocation data for %s (geo-lookup is lazy/async so it may work next time)",peerPhysicalAddress.toIpString().c_str());
+ return false;
+ }
+
+ // Find member closest to this peer
+ const uint64_t now = RR->node->now();
+ std::vector<InetAddress> best;
+ const double currentDistance = _dist3d(_x,_y,_z,px,py,pz);
+ double bestDistance = (offload ? 2147483648.0 : currentDistance);
+ unsigned int bestMember = _id;
+ {
+ Mutex::Lock _l(_memberIds_m);
+ for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+ _Member &m = _members[*mid];
+ Mutex::Lock _ml(m.lock);
+
+ // Consider member if it's alive and has sent us a location and one or more physical endpoints to send peers to
+ if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) {
+ const double mdist = _dist3d(m.x,m.y,m.z,px,py,pz);
+ if (mdist < bestDistance) {
+ bestDistance = mdist;
+ bestMember = *mid;
+ best = m.zeroTierPhysicalEndpoints;
+ }
+ }
+ }
+ }
+
+ // Redirect to a closer member if it has a ZeroTier endpoint address in the same ss_family
+ for(std::vector<InetAddress>::const_iterator a(best.begin());a!=best.end();++a) {
+ if (a->ss_family == peerPhysicalAddress.ss_family) {
+ TRACE("%s at [%d,%d,%d] is %f from us but %f from %u, can redirect to %s",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestDistance,bestMember,a->toString().c_str());
+ redirectTo = *a;
+ return true;
+ }
+ }
+ TRACE("%s at [%d,%d,%d] is %f from us, no better endpoints found",peerAddress.toString().c_str(),px,py,pz,currentDistance);
+ return false;
+ } else {
+ // TODO: pick based on load if no location info?
+ return false;
+ }
+}
+
+void Cluster::status(ZT_ClusterStatus &status) const
+{
+ const uint64_t now = RR->node->now();
+ memset(&status,0,sizeof(ZT_ClusterStatus));
+
+ status.myId = _id;
+
+ {
+ ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
+ s->id = _id;
+ s->alive = 1;
+ s->x = _x;
+ s->y = _y;
+ s->z = _z;
+ s->load = 0; // TODO
+ s->peers = RR->topology->countActive(now);
+ for(std::vector<InetAddress>::const_iterator ep(_zeroTierPhysicalEndpoints.begin());ep!=_zeroTierPhysicalEndpoints.end();++ep) {
+ if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
+ break;
+ memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
+ }
+ }
+
+ {
+ Mutex::Lock _l1(_memberIds_m);
+ for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+ if (status.clusterSize >= ZT_CLUSTER_MAX_MEMBERS) // sanity check
+ break;
+
+ _Member &m = _members[*mid];
+ Mutex::Lock ml(m.lock);
+
+ ZT_ClusterMemberStatus *const s = &(status.members[status.clusterSize++]);
+ s->id = *mid;
+ s->msSinceLastHeartbeat = (unsigned int)std::min((uint64_t)(~((unsigned int)0)),(now - m.lastReceivedAliveAnnouncement));
+ s->alive = (s->msSinceLastHeartbeat < ZT_CLUSTER_TIMEOUT) ? 1 : 0;
+ s->x = m.x;
+ s->y = m.y;
+ s->z = m.z;
+ s->load = m.load;
+ s->peers = m.peers;
+ for(std::vector<InetAddress>::const_iterator ep(m.zeroTierPhysicalEndpoints.begin());ep!=m.zeroTierPhysicalEndpoints.end();++ep) {
+ if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check
+ break;
+ memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage));
+ }
+ }
+ }
+}
+
+void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len)
+{
+ if ((len + 3) > (ZT_CLUSTER_MAX_MESSAGE_LENGTH - (24 + 2 + 2))) // sanity check
+ return;
+ _Member &m = _members[memberId];
+ // assumes m.lock is locked!
+ if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
+ _flush(memberId);
+ m.q.append((uint16_t)(len + 1));
+ m.q.append((uint8_t)type);
+ m.q.append(msg,len);
+}
+
+void Cluster::_flush(uint16_t memberId)
+{
+ _Member &m = _members[memberId];
+ // assumes m.lock is locked!
+ if (m.q.size() > (24 + 2 + 2)) { // 16-byte IV + 8-byte MAC + 2 byte from-member-ID + 2 byte to-member-ID
+ // Create key from member's key and IV
+ char keytmp[32];
+ memcpy(keytmp,m.key,32);
+ for(int i=0;i<8;++i)
+ keytmp[i] ^= m.q[i];
+ Salsa20 s20(keytmp,256,m.q.field(8,8));
+ Utils::burn(keytmp,sizeof(keytmp));
+
+ // One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
+ char polykey[ZT_POLY1305_KEY_LEN];
+ memset(polykey,0,sizeof(polykey));
+ s20.encrypt12(polykey,polykey,sizeof(polykey));
+
+ // Encrypt m.q in place
+ s20.encrypt12(reinterpret_cast<const char *>(m.q.data()) + 24,const_cast<char *>(reinterpret_cast<const char *>(m.q.data())) + 24,m.q.size() - 24);
+
+ // Add MAC for authentication (encrypt-then-MAC)
+ char mac[ZT_POLY1305_MAC_LEN];
+ Poly1305::compute(mac,reinterpret_cast<const char *>(m.q.data()) + 24,m.q.size() - 24,polykey);
+ memcpy(m.q.field(16,8),mac,8);
+
+ // Send!
+ _sendFunction(_sendFunctionArg,memberId,m.q.data(),m.q.size());
+
+ // Prepare for more
+ m.q.clear();
+ char iv[16];
+ Utils::getSecureRandom(iv,16);
+ m.q.append(iv,16);
+ m.q.addSize(8); // room for MAC
+ m.q.append((uint16_t)_id); // from member ID
+ m.q.append((uint16_t)memberId); // to member ID
+ }
+}
+
+void Cluster::_doREMOTE_WHOIS(uint64_t fromMemberId,const Packet &remotep)
+{
+ if (remotep.payloadLength() >= ZT_ADDRESS_LENGTH) {
+ Identity queried(RR->topology->getIdentity(Address(remotep.payload(),ZT_ADDRESS_LENGTH)));
+ if (queried) {
+ Buffer<1024> routp;
+ remotep.source().appendTo(routp);
+ routp.append((uint8_t)Packet::VERB_OK);
+ routp.addSize(2); // space for length
+ routp.append((uint8_t)Packet::VERB_WHOIS);
+ routp.append(remotep.packetId());
+ queried.serialize(routp);
+ routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));
+
+ TRACE("responding to remote WHOIS from %s @ %u with identity of %s",remotep.source().toString().c_str(),(unsigned int)fromMemberId,queried.address().toString().c_str());
+ Mutex::Lock _l2(_members[fromMemberId].lock);
+ _send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
+ }
+ }
+}
+
+void Cluster::_doREMOTE_MULTICAST_GATHER(uint64_t fromMemberId,const Packet &remotep)
+{
+ const uint64_t nwid = remotep.at<uint64_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_NETWORK_ID);
+ const MulticastGroup mg(MAC(remotep.field(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_MAC,6),6),remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_ADI));
+ unsigned int gatherLimit = remotep.at<uint32_t>(ZT_PROTO_VERB_MULTICAST_GATHER_IDX_GATHER_LIMIT);
+ const Address remotePeerAddress(remotep.source());
+
+ if (gatherLimit) {
+ Buffer<ZT_PROTO_MAX_PACKET_LENGTH> routp;
+ remotePeerAddress.appendTo(routp);
+ routp.append((uint8_t)Packet::VERB_OK);
+ routp.addSize(2); // space for length
+ routp.append((uint8_t)Packet::VERB_MULTICAST_GATHER);
+ routp.append(remotep.packetId());
+ routp.append(nwid);
+ mg.mac().appendTo(routp);
+ routp.append((uint32_t)mg.adi());
+
+ if (gatherLimit > ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5))
+ gatherLimit = ((ZT_CLUSTER_MAX_MESSAGE_LENGTH - 80) / 5);
+ if (RR->mc->gather(remotePeerAddress,nwid,mg,routp,gatherLimit)) {
+ routp.setAt<uint16_t>(ZT_ADDRESS_LENGTH + 1,(uint16_t)(routp.size() - ZT_ADDRESS_LENGTH - 3));
+
+ TRACE("responding to remote MULTICAST_GATHER from %s @ %u with %u bytes",remotePeerAddress.toString().c_str(),(unsigned int)fromMemberId,routp.size());
+ Mutex::Lock _l2(_members[fromMemberId].lock);
+ _send(fromMemberId,CLUSTER_MESSAGE_PROXY_SEND,routp.data(),routp.size());
+ }
+ }
+}
+
+} // namespace ZeroTier
+
+#endif // ZT_ENABLE_CLUSTER
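Cluster state messages are framed as <[16] random IV><[8] truncated Poly1305 MAC><Salsa20/12 ciphertext>: the first 8 IV bytes are XORed into the per-member key, the last 8 serve as the Salsa20 nonce, the one-time Poly1305 key is taken from the first 32 bytes of keystream, and the MAC is computed over the ciphertext (encrypt-then-MAC) with only its first 8 bytes kept. The sketch below mirrors the sealing side of _flush() using the same in-tree Salsa20, Poly1305 and Utils helpers shown above; sealClusterMessage() itself is illustrative, not an exported ZeroTier function. Verification is the mirror image performed in handleIncomingStateMessage(): recompute the MAC over the ciphertext, compare its first 8 bytes with Utils::secureEq(), then decrypt.

// Sketch of the intra-cluster message framing used by Cluster::_flush():
//   <[16] random IV><[8] truncated Poly1305 MAC><... Salsa20/12 ciphertext>
#include <cstring>
#include "node/Salsa20.hpp"
#include "node/Poly1305.hpp"
#include "node/Utils.hpp"

using namespace ZeroTier;

// key: 32-byte per-member key; buf: <16 IV><8 MAC placeholder><plaintext>; len: total bytes
static void sealClusterMessage(const unsigned char key[32],char *buf,unsigned int len)
{
	// Per-message key: first 8 key bytes XORed with the first 8 IV bytes
	char keytmp[32];
	std::memcpy(keytmp,key,32);
	for(int i=0;i<8;++i)
		keytmp[i] ^= buf[i];

	// Salsa20/12 keyed with the derived key, last 8 IV bytes as the 64-bit nonce
	Salsa20 s20(keytmp,256,buf + 8);
	Utils::burn(keytmp,sizeof(keytmp));

	// One-time Poly1305 key = first 32 bytes of keystream (DJB/NaCl convention)
	char polykey[ZT_POLY1305_KEY_LEN];
	std::memset(polykey,0,sizeof(polykey));
	s20.encrypt12(polykey,polykey,sizeof(polykey));

	// Encrypt the payload in place, then MAC the ciphertext (encrypt-then-MAC)
	// and keep only the first 8 bytes of the 16-byte Poly1305 tag
	s20.encrypt12(buf + 24,buf + 24,len - 24);
	char mac[ZT_POLY1305_MAC_LEN];
	Poly1305::compute(mac,buf + 24,len - 24,polykey);
	std::memcpy(buf + 16,mac,8);
}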
diff --git a/node/Cluster.hpp b/node/Cluster.hpp
new file mode 100644
index 00000000..ccf0c12a
--- /dev/null
+++ b/node/Cluster.hpp
@@ -0,0 +1,418 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ *
+ * If you would like to embed ZeroTier into a commercial application or
+ * redistribute it in a modified binary form, please contact ZeroTier Networks
+ * LLC. Start here: http://www.zerotier.com/
+ */
+
+#ifndef ZT_CLUSTER_HPP
+#define ZT_CLUSTER_HPP
+
+#ifdef ZT_ENABLE_CLUSTER
+
+#include <map>
+
+#include "Constants.hpp"
+#include "../include/ZeroTierOne.h"
+#include "Address.hpp"
+#include "InetAddress.hpp"
+#include "SHA512.hpp"
+#include "Utils.hpp"
+#include "Buffer.hpp"
+#include "Mutex.hpp"
+#include "SharedPtr.hpp"
+#include "Hashtable.hpp"
+#include "Packet.hpp"
+#include "SharedPtr.hpp"
+
+/**
+ * Timeout for cluster members being considered "alive"
+ *
+ * A cluster member is considered dead and will no longer have peers
+ * redirected to it if we have not heard a heartbeat in this long.
+ */
+#define ZT_CLUSTER_TIMEOUT 5000
+
+/**
+ * Desired period between doPeriodicTasks() in milliseconds
+ */
+#define ZT_CLUSTER_PERIODIC_TASK_PERIOD 50
+
+/**
+ * How often to flush outgoing message queues (maximum interval)
+ */
+#define ZT_CLUSTER_FLUSH_PERIOD 100
+
+/**
+ * Maximum number of queued outgoing packets per sender address
+ */
+#define ZT_CLUSTER_MAX_QUEUE_PER_SENDER 8
+
+/**
+ * Expiration time for send queue entries
+ */
+#define ZT_CLUSTER_QUEUE_EXPIRATION 5000
+
+/**
+ * Chunk size for allocating queue entries
+ *
+ * Queue entries are allocated in chunks of this many and are added to a pool.
+ * ZT_CLUSTER_MAX_QUEUE_GLOBAL must be evenly divisible by this.
+ */
+#define ZT_CLUSTER_QUEUE_CHUNK_SIZE 32
+
+/**
+ * Maximum number of chunks to ever allocate
+ *
+ * This is a global sanity limit to prevent resource exhaustion attacks. It
+ * works out to about 600mb of RAM. You'll never see this on a normal edge
+ * node. We're unlikely to see this on a root server unless someone is DOSing
+ * us. In that case cluster relaying will be affected but other functions
+ * should continue to operate normally.
+ */
+#define ZT_CLUSTER_MAX_QUEUE_CHUNKS 8194
+
+/**
+ * Max data per queue entry
+ *
+ * If we ever support larger transport MTUs this must be increased. The plus
+ * 16 is just a small margin and has no special meaning.
+ */
+#define ZT_CLUSTER_SEND_QUEUE_DATA_MAX (ZT_UDP_DEFAULT_PAYLOAD_MTU + 16)
+
+namespace ZeroTier {
+
+class RuntimeEnvironment;
+class MulticastGroup;
+class Peer;
+class Identity;
+
+// Internal class implemented inside Cluster.cpp
+class _ClusterSendQueue;
+
+/**
+ * Multi-homing cluster state replication and packet relaying
+ *
+ * Multi-homing means more than one node sharing the same ZeroTier identity.
+ * There is nothing in the protocol to prevent this, but to make it work well
+ * requires the devices sharing an identity to cooperate and share some
+ * information.
+ *
+ * There are three use cases we want to fulfill:
+ *
+ * (1) Multi-homing of root servers with handoff for efficient routing,
+ * HA, and load balancing across many commodity nodes.
+ * (2) Multi-homing of network controllers for the same reason.
+ * (3) Multi-homing of nodes on virtual networks, such as domain servers
+ * and other important endpoints.
+ *
+ * These use cases are in order of escalating difficulty. The initial
+ * version of Cluster is aimed at satisfying the first, though you are
+ * free to try #2 and #3.
+ */
+class Cluster
+{
+public:
+ /**
+ * State message types
+ */
+ enum StateMessageType
+ {
+ CLUSTER_MESSAGE_NOP = 0,
+
+ /**
+ * This cluster member is alive:
+ * <[2] version minor>
+ * <[2] version major>
+ * <[2] version revision>
+ * <[1] protocol version>
+ * <[4] X location (signed 32-bit)>
+ * <[4] Y location (signed 32-bit)>
+ * <[4] Z location (signed 32-bit)>
+ * <[8] local clock at this member>
+ * <[8] load average>
+ * <[8] number of peers>
+ * <[8] flags (currently unused, must be zero)>
+ * <[1] number of preferred ZeroTier endpoints>
+ * <[...] InetAddress(es) of preferred ZeroTier endpoint(s)>
+ *
+ * Cluster members constantly broadcast an alive heartbeat and will only
+ * receive peer redirects if they've done so within the timeout.
+ */
+ CLUSTER_MESSAGE_ALIVE = 1,
+
+ /**
+ * Cluster member has this peer:
+ * <[...] serialized identity of peer>
+ *
+ * This is typically sent in response to WANT_PEER but can also be pushed
+ * to prepopulate if this makes sense.
+ */
+ CLUSTER_MESSAGE_HAVE_PEER = 2,
+
+ /**
+ * Cluster member wants this peer:
+ * <[5] ZeroTier address of peer>
+ *
+ * Members that have a direct link to this peer will respond with
+ * HAVE_PEER.
+ */
+ CLUSTER_MESSAGE_WANT_PEER = 3,
+
+ /**
+ * A remote packet that we should also possibly respond to:
+ * <[2] 16-bit length of remote packet>
+ * <[...] remote packet payload>
+ *
+ * Cluster members may relay requests by relaying the request packet.
+ * These may include requests such as WHOIS and MULTICAST_GATHER. The
+ * packet must be already decrypted, decompressed, and authenticated.
+ *
+ * This can only be used for small request packets as per the cluster
+ * message size limit, but since these are the only ones in question
+ * this is fine.
+ *
+ * If a response is generated it is sent via PROXY_SEND.
+ */
+ CLUSTER_MESSAGE_REMOTE_PACKET = 4,
+
+ /**
+ * Request that VERB_RENDEZVOUS be sent to a peer that we have:
+ * <[5] ZeroTier address of peer on recipient's side>
+ * <[5] ZeroTier address of peer on sender's side>
+ * <[1] 8-bit number of sender's peer's active path addresses>
+ * <[...] series of serialized InetAddresses of sender's peer's paths>
+ *
+ * This requests that we perform NAT-t introduction between a peer that
+ * we have and one on the sender's side. The sender furnishes contact
+ * info for its peer, and we send VERB_RENDEZVOUS to both sides: to ours
+ * directly and with PROXY_SEND to theirs.
+ */
+ CLUSTER_MESSAGE_PROXY_UNITE = 5,
+
+ /**
+ * Request that a cluster member send a packet to a locally-known peer:
+ * <[5] ZeroTier address of recipient>
+ * <[1] packet verb>
+ * <[2] length of packet payload>
+ * <[...] packet payload>
+ *
+ * This differs from RELAY in that it requests the receiving cluster
+ * member to actually compose a ZeroTier Packet from itself to the
+ * provided recipient. RELAY simply says "please forward this blob."
+ * RELAY is used to implement peer-to-peer relaying with RENDEZVOUS,
+ * while PROXY_SEND is used to implement proxy sending (which right
+ * now is only used to send RENDEZVOUS).
+ */
+ CLUSTER_MESSAGE_PROXY_SEND = 6,
+
+ /**
+ * Replicate a network config for a network we belong to:
+ * <[8] 64-bit network ID>
+ * <[2] 16-bit length of network config>
+ * <[...] serialized network config>
+ *
+ * This is used by clusters to avoid every member having to query
+ * for the same netconf for networks all members belong to.
+ *
+ * TODO: not implemented yet!
+ */
+ CLUSTER_MESSAGE_NETWORK_CONFIG = 7
+ };
+
+ /**
+ * Construct a new cluster
+ */
+ Cluster(
+ const RuntimeEnvironment *renv,
+ uint16_t id,
+ const std::vector<InetAddress> &zeroTierPhysicalEndpoints,
+ int32_t x,
+ int32_t y,
+ int32_t z,
+ void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
+ void *sendFunctionArg,
+ int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
+ void *addressToLocationFunctionArg);
+
+ ~Cluster();
+
+ /**
+ * @return This cluster member's ID
+ */
+ inline uint16_t id() const throw() { return _id; }
+
+ /**
+ * Handle an incoming intra-cluster message
+ *
+ * @param data Message data
+ * @param len Message length (max: ZT_CLUSTER_MAX_MESSAGE_LENGTH)
+ */
+ void handleIncomingStateMessage(const void *msg,unsigned int len);
+
+ /**
+ * Broadcast that we have a given peer
+ *
+ * This should be done when new peers are first contacted.
+ *
+ * @param id Identity of peer
+ */
+ void broadcastHavePeer(const Identity &id);
+
+ /**
+ * Send this packet via another node in this cluster if another node has this peer
+ *
+ * This is used in the outgoing packet and relaying logic in Switch to
+ * relay packets to other cluster members. It isn't PROXY_SEND-- that is
+ * used internally in Cluster to send responses to peer queries.
+ *
+ * @param fromPeerAddress Source peer address (if known, should be NULL for fragments)
+ * @param toPeerAddress Destination peer address
+ * @param data Packet or packet fragment data
+ * @param len Length of packet or fragment
+ * @param unite If true, also request proxy unite across cluster
+ */
+ void sendViaCluster(const Address &fromPeerAddress,const Address &toPeerAddress,const void *data,unsigned int len,bool unite);
+
+ /**
+ * Send a distributed query to other cluster members
+ *
+ * Some queries such as WHOIS or MULTICAST_GATHER need a response from other
+ * cluster members. Replies (if any) will be sent back to the peer via
+ * PROXY_SEND across the cluster.
+ *
+ * @param pkt Packet to distribute
+ */
+ void sendDistributedQuery(const Packet &pkt);
+
+ /**
+ * Call every ~ZT_CLUSTER_PERIODIC_TASK_PERIOD milliseconds.
+ */
+ void doPeriodicTasks();
+
+ /**
+ * Add a member ID to this cluster
+ *
+ * @param memberId Member ID
+ */
+ void addMember(uint16_t memberId);
+
+ /**
+ * Remove a member ID from this cluster
+ *
+ * @param memberId Member ID to remove
+ */
+ void removeMember(uint16_t memberId);
+
+ /**
+ * Find a better cluster endpoint for this peer (if any)
+ *
+ * @param redirectTo InetAddress to be set to a better endpoint (if there is one)
+ * @param peerAddress Address of peer to (possibly) redirect
+ * @param peerPhysicalAddress Physical address of peer's current best path (where packet was most recently received or getBestPath()->address())
+ * @param offload Always redirect if possible -- can be used to offload peers during shutdown
+ * @return True if redirectTo was set to a new address, false if redirectTo was not modified
+ */
+ bool findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddress,const InetAddress &peerPhysicalAddress,bool offload);
+
+ /**
+ * Fill out ZT_ClusterStatus structure (from core API)
+ *
+ * @param status Reference to structure to hold result (anything there is replaced)
+ */
+ void status(ZT_ClusterStatus &status) const;
+
+private:
+ void _send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len);
+ void _flush(uint16_t memberId);
+
+ void _doREMOTE_WHOIS(uint64_t fromMemberId,const Packet &remotep);
+ void _doREMOTE_MULTICAST_GATHER(uint64_t fromMemberId,const Packet &remotep);
+
+ // These are initialized in the constructor and remain immutable ------------
+ uint16_t _masterSecret[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
+ unsigned char _key[ZT_PEER_SECRET_KEY_LENGTH];
+ const RuntimeEnvironment *RR;
+ _ClusterSendQueue *const _sendQueue;
+ void (*_sendFunction)(void *,unsigned int,const void *,unsigned int);
+ void *_sendFunctionArg;
+ int (*_addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *);
+ void *_addressToLocationFunctionArg;
+ const int32_t _x;
+ const int32_t _y;
+ const int32_t _z;
+ const uint16_t _id;
+ const std::vector<InetAddress> _zeroTierPhysicalEndpoints;
+ // end immutable fields -----------------------------------------------------
+
+ struct _Member
+ {
+ unsigned char key[ZT_PEER_SECRET_KEY_LENGTH];
+
+ uint64_t lastReceivedAliveAnnouncement;
+ uint64_t lastAnnouncedAliveTo;
+
+ uint64_t load;
+ uint64_t peers;
+ int32_t x,y,z;
+
+ std::vector<InetAddress> zeroTierPhysicalEndpoints;
+
+ Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> q;
+
+ Mutex lock;
+
+ inline void clear()
+ {
+ lastReceivedAliveAnnouncement = 0;
+ lastAnnouncedAliveTo = 0;
+ load = 0;
+ peers = 0;
+ x = 0;
+ y = 0;
+ z = 0;
+ zeroTierPhysicalEndpoints.clear();
+ q.clear();
+ }
+
+ _Member() { this->clear(); }
+ ~_Member() { Utils::burn(key,sizeof(key)); }
+ };
+ _Member *const _members;
+
+ std::vector<uint16_t> _memberIds;
+ Mutex _memberIds_m;
+
+ std::map< std::pair<Address,unsigned int>,uint64_t > _remotePeers; // we need ordered behavior and lower_bound here
+ Mutex _remotePeers_m;
+
+ uint64_t _lastFlushed;
+ uint64_t _lastCleanedRemotePeers;
+ uint64_t _lastCleanedQueue;
+};
+
+} // namespace ZeroTier
+
+#endif // ZT_ENABLE_CLUSTER
+
+#endif
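Cluster itself never opens a socket or performs geo-IP lookups: the constructor takes a sendFunction(arg, toMemberId, data, len) callback that must deliver an opaque blob to the named member over whatever backplane the host provides, and an addressToLocationFunction(arg, sockaddr, &x, &y, &z) callback that maps a physical address to coordinates for findBetterEndpoint(), returning zero when no location is known yet (Cluster.cpp treats a zero return as "no geolocation data"). A hedged sketch of that host-side glue follows; the transport and lookup bodies are placeholders, not ZeroTier APIs.

// Sketch of the host-side glue a ZT_ENABLE_CLUSTER build must supply.
// clusterSend()/clusterGeo() and the backplane are placeholders, not ZeroTier APIs.
#include <sys/socket.h> // struct sockaddr_storage (WinSock2.h on Windows)

// Deliver an opaque state-message blob to cluster member toMemberId over
// whatever backplane the host provides (TCP mesh, UDP, message bus, ...)
static void clusterSend(void *arg,unsigned int toMemberId,const void *data,unsigned int len)
{
	// myBackplane->send(toMemberId,data,len); // host-specific
	(void)arg; (void)toMemberId; (void)data; (void)len;
}

// Map a physical address to x,y,z coordinates for findBetterEndpoint().
// Return nonzero on success, zero if no geolocation data is available yet.
static int clusterGeo(void *arg,const struct sockaddr_storage *addr,int *x,int *y,int *z)
{
	(void)arg; (void)addr;
	*x = 0; *y = 0; *z = 0;
	return 0; // "unknown" until an asynchronous lookup completes
}

// Construction (renv, member ID, endpoints and coordinates come from the host):
//   ZeroTier::Cluster cluster(renv,myMemberId,myPhysicalEndpoints,
//                             myX,myY,myZ,
//                             &clusterSend,(void *)0,
//                             &clusterGeo,(void *)0);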
diff --git a/node/Constants.hpp b/node/Constants.hpp
index 6e9f5b15..220d78ed 100644
--- a/node/Constants.hpp
+++ b/node/Constants.hpp
@@ -173,16 +173,11 @@
/**
* Timeout for receipt of fragmented packets in ms
- *
- * Since there's no retransmits, this is just a really bad case scenario for
- * transit time. It's short enough that a DOS attack from exhausing buffers is
- * very unlikely, as the transfer rate would have to be fast enough to fill
- * system memory in this time.
*/
-#define ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT 1000
+#define ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT 500
/**
- * Length of secret key in bytes -- 256-bit for Salsa20
+ * Length of secret key in bytes -- 256-bit -- do not change
*/
#define ZT_PEER_SECRET_KEY_LENGTH 32
@@ -194,7 +189,7 @@
/**
* Overriding granularity for timer tasks to prevent CPU-intensive thrashing on every packet
*/
-#define ZT_CORE_TIMER_TASK_GRANULARITY 1000
+#define ZT_CORE_TIMER_TASK_GRANULARITY 500
/**
* How long to remember peer records in RAM if they haven't been used
@@ -204,7 +199,7 @@
/**
* Delay between WHOIS retries in ms
*/
-#define ZT_WHOIS_RETRY_DELAY 500
+#define ZT_WHOIS_RETRY_DELAY 1000
/**
* Maximum identity WHOIS retries (each attempt tries consulting a different peer)
@@ -264,33 +259,22 @@
* This is also how often pings will be retried to upstream peers (relays, roots)
* constantly until something is heard.
*/
-#define ZT_PING_CHECK_INVERVAL 6250
+#define ZT_PING_CHECK_INVERVAL 9500
/**
* Delay between ordinary case pings of direct links
*/
-#define ZT_PEER_DIRECT_PING_DELAY 120000
-
-/**
- * Delay between requests for updated network autoconf information
- */
-#define ZT_NETWORK_AUTOCONF_DELAY 60000
+#define ZT_PEER_DIRECT_PING_DELAY 60000
/**
* Timeout for overall peer activity (measured from last receive)
*/
-#define ZT_PEER_ACTIVITY_TIMEOUT (ZT_PEER_DIRECT_PING_DELAY + (ZT_PING_CHECK_INVERVAL * 3))
+#define ZT_PEER_ACTIVITY_TIMEOUT ((ZT_PEER_DIRECT_PING_DELAY * 4) + ZT_PING_CHECK_INVERVAL)
/**
- * Stop relaying via peers that have not responded to direct sends
- *
- * When we send something (including frames), we generally expect a response.
- * Switching relays if no response in a short period of time causes more
- * rapid failover if a root server goes down or becomes unreachable. In the
- * mistaken case, little harm is done as it'll pick the next-fastest
- * root server and will switch back eventually.
+ * Delay between requests for updated network autoconf information
*/
-#define ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD 10000
+#define ZT_NETWORK_AUTOCONF_DELAY 60000
/**
* Minimum interval between attempts by relays to unite peers
@@ -299,7 +283,7 @@
* a RENDEZVOUS message no more than this often. This instructs the peers
* to attempt NAT-t and gives each the other's corresponding IP:port pair.
*/
-#define ZT_MIN_UNITE_INTERVAL 60000
+#define ZT_MIN_UNITE_INTERVAL 30000
/**
* Delay between initial direct NAT-t packet and more aggressive techniques
@@ -310,19 +294,9 @@
#define ZT_NAT_T_TACTICAL_ESCALATION_DELAY 1000
/**
- * Size of anti-recursion history (see AntiRecursion.hpp)
- */
-#define ZT_ANTIRECURSION_HISTORY_SIZE 16
-
-/**
* Minimum delay between attempts to confirm new paths to peers (to avoid HELLO flooding)
*/
-#define ZT_MIN_PATH_CONFIRMATION_INTERVAL 5000
-
-/**
- * Interval between direct path pushes in milliseconds
- */
-#define ZT_DIRECT_PATH_PUSH_INTERVAL 300000
+#define ZT_MIN_PATH_CONFIRMATION_INTERVAL 1000
/**
* How long (max) to remember network certificates of membership?
@@ -348,6 +322,30 @@
#define ZT_MAX_BRIDGE_SPAM 16
/**
+ * Interval between direct path pushes in milliseconds
+ */
+#define ZT_DIRECT_PATH_PUSH_INTERVAL 120000
+
+/**
+ * Time horizon for push direct paths cutoff
+ */
+#define ZT_PUSH_DIRECT_PATHS_CUTOFF_TIME 60000
+
+/**
+ * Maximum number of direct path pushes within cutoff time
+ *
+ * This limits responses to PUSH_DIRECT_PATHS to at most CUTOFF_LIMIT
+ * per CUTOFF_TIME milliseconds per peer, so the verb cannot be used for
+ * DOS amplification attacks.
+ */
+#define ZT_PUSH_DIRECT_PATHS_CUTOFF_LIMIT 5
+
+/**
+ * Maximum number of paths per IP scope (e.g. global, link-local) and family (e.g. v4/v6)
+ */
+#define ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY 1
+
+/**
* A test pseudo-network-ID that can be joined
*
* Joining this network ID will result in a network with no IP addressing
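
The ZT_PUSH_DIRECT_PATHS_CUTOFF_* constants above describe a per-peer rate limit: at most ZT_PUSH_DIRECT_PATHS_CUTOFF_LIMIT responses in any ZT_PUSH_DIRECT_PATHS_CUTOFF_TIME window. The diff enforces this later in _doPUSH_DIRECT_PATHS() via peer->shouldRespondToDirectPathPush(now); what follows is only a minimal sketch of such a windowed counter, with illustrative member names that are not taken from Peer.hpp.

    #include <cstdint>
    #include "Constants.hpp" // for the two ZT_PUSH_DIRECT_PATHS_CUTOFF_* limits

    // Minimal sketch of the cutoff described above: allow at most
    // ZT_PUSH_DIRECT_PATHS_CUTOFF_LIMIT responses per ZT_PUSH_DIRECT_PATHS_CUTOFF_TIME
    // milliseconds. Member and type names are illustrative, not from Peer.hpp.
    struct PushDirectPathsCutoff
    {
        uint64_t windowStart;
        unsigned int count;

        PushDirectPathsCutoff() : windowStart(0),count(0) {}

        bool shouldRespond(const uint64_t now)
        {
            if ((now - windowStart) >= ZT_PUSH_DIRECT_PATHS_CUTOFF_TIME) {
                windowStart = now; // open a new window
                count = 0;
            }
            return (++count <= ZT_PUSH_DIRECT_PATHS_CUTOFF_LIMIT);
        }
    };
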
diff --git a/node/Defaults.cpp b/node/Defaults.cpp
deleted file mode 100644
index b311fb6a..00000000
--- a/node/Defaults.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * ZeroTier One - Network Virtualization Everywhere
- * Copyright (C) 2011-2015 ZeroTier, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * --
- *
- * ZeroTier may be used and distributed under the terms of the GPLv3, which
- * are available at: http://www.gnu.org/licenses/gpl-3.0.html
- *
- * If you would like to embed ZeroTier into a commercial application or
- * redistribute it in a modified binary form, please contact ZeroTier Networks
- * LLC. Start here: http://www.zerotier.com/
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "../include/ZeroTierOne.h"
-
-#include "Constants.hpp"
-#include "Defaults.hpp"
-#include "Utils.hpp"
-
-// bin2c'd signed default root topology dictionary
-#include "../root-topology/ZT_DEFAULT_ROOT_TOPOLOGY.c"
-
-#ifdef __WINDOWS__
-#include <WinSock2.h>
-#include <Windows.h>
-#include <ShlObj.h>
-#endif
-
-namespace ZeroTier {
-
-const Defaults ZT_DEFAULTS;
-
-static inline std::map< Address,Identity > _mkRootTopologyAuth()
-{
- std::map< Address,Identity > ua;
-
- { // 0001
- Identity id("77792b1c02:0:b5c361e8e9c2154e82c3e902fdfc337468b092a7c4d8dc685c37eb10ee4f3c17cc0bb1d024167e8cb0824d12263428373582da3d0a9a14b36e4546c317e811e6");
- ua[id.address()] = id;
- }
- { // 0002
- Identity id("86921e6de1:0:9ba04f9f12ed54ef567f548cb69d31e404537d7b0ee000c63f3d7c8d490a1a47a5a5b2af0cbe12d23f9194270593f298d936d7c872612ea509ef1c67ce2c7fc1");
- ua[id.address()] = id;
- }
- { // 0003
- Identity id("90302b7025:0:358154a57af1b7afa07d0d91b69b92eaad2f11ade7f02343861f0c1b757d15626e8cb7f08fc52993d2202a39cbf5128c5647ee8c63d27d92db5a1d0fbe1eba19");
- ua[id.address()] = id;
- }
- { // 0004
- Identity id("e5174078ee:0:c3f90daa834a74ee47105f5726ae2e29fc8ae0e939c9326788b52b16d847354de8de3b13a81896bbb509b91e1da21763073a30bbfb2b8e994550798d30a2d709");
- ua[id.address()] = id;
- }
-
- return ua;
-}
-
-Defaults::Defaults() :
- defaultRootTopology((const char *)ZT_DEFAULT_ROOT_TOPOLOGY,ZT_DEFAULT_ROOT_TOPOLOGY_LEN),
- rootTopologyAuthorities(_mkRootTopologyAuth()),
- v4Broadcast(((uint32_t)0xffffffff),ZT_DEFAULT_PORT)
-{
-}
-
-} // namespace ZeroTier
diff --git a/node/DeferredPackets.cpp b/node/DeferredPackets.cpp
new file mode 100644
index 00000000..923e1339
--- /dev/null
+++ b/node/DeferredPackets.cpp
@@ -0,0 +1,95 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ *
+ * If you would like to embed ZeroTier into a commercial application or
+ * redistribute it in a modified binary form, please contact ZeroTier Networks
+ * LLC. Start here: http://www.zerotier.com/
+ */
+
+#include "Constants.hpp"
+#include "DeferredPackets.hpp"
+#include "IncomingPacket.hpp"
+#include "RuntimeEnvironment.hpp"
+#include "Node.hpp"
+
+namespace ZeroTier {
+
+DeferredPackets::DeferredPackets(const RuntimeEnvironment *renv) :
+ RR(renv),
+ _readPtr(0),
+ _writePtr(0),
+ _die(false)
+{
+}
+
+DeferredPackets::~DeferredPackets()
+{
+ _q_m.lock();
+ _die = true;
+ _q_m.unlock();
+ _q_s.post();
+}
+
+bool DeferredPackets::enqueue(IncomingPacket *pkt)
+{
+ _q_m.lock();
+ const unsigned long p = _writePtr % ZT_DEFFEREDPACKETS_MAX;
+ if (_q[p]) {
+ _q_m.unlock();
+ return false;
+ } else {
+ _q[p].setToUnsafe(pkt);
+ ++_writePtr;
+ _q_m.unlock();
+ _q_s.post();
+ return true;
+ }
+}
+
+int DeferredPackets::process()
+{
+ SharedPtr<IncomingPacket> pkt;
+
+ _q_m.lock();
+ if (_die) {
+ _q_m.unlock();
+ _q_s.post();
+ return -1;
+ }
+ while (_readPtr == _writePtr) {
+ _q_m.unlock();
+ _q_s.wait();
+ _q_m.lock();
+ if (_die) {
+ _q_m.unlock();
+ _q_s.post();
+ return -1;
+ }
+ }
+ pkt.swap(_q[_readPtr++ % ZT_DEFFEREDPACKETS_MAX]);
+ _q_m.unlock();
+
+ pkt->tryDecode(RR,true);
+ return 1;
+}
+
+} // namespace ZeroTier
diff --git a/node/DeferredPackets.hpp b/node/DeferredPackets.hpp
new file mode 100644
index 00000000..1ea65f3c
--- /dev/null
+++ b/node/DeferredPackets.hpp
@@ -0,0 +1,98 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ *
+ * If you would like to embed ZeroTier into a commercial application or
+ * redistribute it in a modified binary form, please contact ZeroTier Networks
+ * LLC. Start here: http://www.zerotier.com/
+ */
+
+#ifndef ZT_DEFERREDPACKETS_HPP
+#define ZT_DEFERREDPACKETS_HPP
+
+#include "Constants.hpp"
+#include "SharedPtr.hpp"
+#include "Mutex.hpp"
+#include "DeferredPackets.hpp"
+#include "BinarySemaphore.hpp"
+
+/**
+ * Maximum number of deferred packets
+ */
+#define ZT_DEFFEREDPACKETS_MAX 1024
+
+namespace ZeroTier {
+
+class IncomingPacket;
+class RuntimeEnvironment;
+
+/**
+ * Deferred packets
+ *
+ * IncomingPacket can defer its decoding this way by enqueueing itself here.
+ * When this is done, tryDecode() is re-run later in deferred mode. This is done for
+ * operations that may be expensive to allow them to potentially be handled
+ * in the background or rate limited to maintain quality of service for more
+ * routine operations.
+ */
+class DeferredPackets
+{
+public:
+ DeferredPackets(const RuntimeEnvironment *renv);
+ ~DeferredPackets();
+
+ /**
+ * Enqueue a packet
+ *
+ * Since packets enqueue themselves, they call it with 'this' and we wrap
+ * them in a SharedPtr<>. This is safe as SharedPtr<> is introspective and
+ * supports this. This should not be called from any other code outside
+ * IncomingPacket.
+ *
+ * @param pkt Packet to process later (possibly in the background)
+ * @return False if queue is full
+ */
+ bool enqueue(IncomingPacket *pkt);
+
+ /**
+ * Wait for and then process a deferred packet
+ *
+ * If we are shutting down (in destructor), this returns -1 and should
+ * not be called again. Otherwise it returns the number of packets
+ * processed.
+ *
+ * @return Number processed or -1 if shutting down
+ */
+ int process();
+
+private:
+ SharedPtr<IncomingPacket> _q[ZT_DEFFEREDPACKETS_MAX];
+ const RuntimeEnvironment *const RR;
+ unsigned long _readPtr;
+ unsigned long _writePtr;
+ bool _die;
+ Mutex _q_m;
+ BinarySemaphore _q_s;
+};
+
+} // namespace ZeroTier
+
+#endif
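
DeferredPackets::process() blocks on the semaphore until a packet is queued, handles exactly one, and returns -1 once the destructor has signalled shutdown. Below is a minimal sketch of a worker loop built on that contract; the thread wiring is illustrative and not part of this diff, which only supplies the queue itself.

    #include <thread>
    #include "DeferredPackets.hpp"

    // Drain the deferred-packet queue until process() reports shutdown (-1).
    static void deferredPacketWorker(ZeroTier::DeferredPackets *dp)
    {
        while (dp->process() >= 0) {
            // each successful call decoded one deferred packet in the background
        }
    }

    // Illustrative usage: std::thread t(&deferredPacketWorker,RR->dp); ... t.join();
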
diff --git a/node/Hashtable.hpp b/node/Hashtable.hpp
index beef1468..aee24989 100644
--- a/node/Hashtable.hpp
+++ b/node/Hashtable.hpp
@@ -103,7 +103,7 @@ public:
private:
unsigned long _idx;
Hashtable *_ht;
- Hashtable::_Bucket *_b;
+ _Bucket *_b;
};
friend class Hashtable::Iterator;
@@ -322,7 +322,6 @@ public:
b->next = _t[bidx];
_t[bidx] = b;
++_s;
-
return b->v;
}
@@ -351,7 +350,6 @@ public:
b->next = _t[bidx];
_t[bidx] = b;
++_s;
-
return b->v;
}
@@ -382,7 +380,10 @@ private:
}
static inline unsigned long _hc(const uint32_t i)
{
- // In the uint32_t case we use a simple multiplier for hashing to ensure coverage
+ return ((unsigned long)i * (unsigned long)0x9e3779b1);
+ }
+ static inline unsigned long _hc(const uint16_t i)
+ {
return ((unsigned long)i * (unsigned long)0x9e3779b1);
}
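
Both integer overloads of _hc() above are multiplicative hashes: the key is multiplied by 0x9e3779b1, the 32-bit golden-ratio constant, so consecutive keys scatter across the hash range. A standalone illustration of the same idea reduced to a bucket index follows; the modulo step only mirrors how such a hash is normally folded down and is not copied from Hashtable.hpp.

    #include <cstdint>
    #include <cstdio>

    // Golden-ratio multiplicative hash, as in the _hc() overloads above.
    static inline unsigned long hc32(const uint32_t i)
    {
        return ((unsigned long)i * (unsigned long)0x9e3779b1);
    }

    int main()
    {
        const unsigned long buckets = 64;
        for(uint32_t k=1;k<=8;++k)
            printf("key %u -> bucket %lu\n",(unsigned int)k,hc32(k) % buckets);
        return 0;
    }
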
diff --git a/node/Identity.cpp b/node/Identity.cpp
index 8765da51..4611f6a5 100644
--- a/node/Identity.cpp
+++ b/node/Identity.cpp
@@ -41,7 +41,6 @@
#define ZT_IDENTITY_GEN_HASHCASH_FIRST_BYTE_LESS_THAN 17
#define ZT_IDENTITY_GEN_MEMORY 2097152
-#define ZT_IDENTITY_GEN_SALSA20_ROUNDS 20
namespace ZeroTier {
@@ -55,8 +54,8 @@ static inline void _computeMemoryHardHash(const void *publicKey,unsigned int pub
// ordinary Salsa20 is randomly seekable. This is good for a cipher
 // but is not what we want for sequential memory-hardness.
memset(genmem,0,ZT_IDENTITY_GEN_MEMORY);
- Salsa20 s20(digest,256,(char *)digest + 32,ZT_IDENTITY_GEN_SALSA20_ROUNDS);
- s20.encrypt((char *)genmem,(char *)genmem,64);
+ Salsa20 s20(digest,256,(char *)digest + 32);
+ s20.encrypt20((char *)genmem,(char *)genmem,64);
for(unsigned long i=64;i<ZT_IDENTITY_GEN_MEMORY;i+=64) {
unsigned long k = i - 64;
*((uint64_t *)((char *)genmem + i)) = *((uint64_t *)((char *)genmem + k));
@@ -67,7 +66,7 @@ static inline void _computeMemoryHardHash(const void *publicKey,unsigned int pub
*((uint64_t *)((char *)genmem + i + 40)) = *((uint64_t *)((char *)genmem + k + 40));
*((uint64_t *)((char *)genmem + i + 48)) = *((uint64_t *)((char *)genmem + k + 48));
*((uint64_t *)((char *)genmem + i + 56)) = *((uint64_t *)((char *)genmem + k + 56));
- s20.encrypt((char *)genmem + i,(char *)genmem + i,64);
+ s20.encrypt20((char *)genmem + i,(char *)genmem + i,64);
}
// Render final digest using genmem as a lookup table
@@ -77,7 +76,7 @@ static inline void _computeMemoryHardHash(const void *publicKey,unsigned int pub
uint64_t tmp = ((uint64_t *)genmem)[idx2];
((uint64_t *)genmem)[idx2] = ((uint64_t *)digest)[idx1];
((uint64_t *)digest)[idx1] = tmp;
- s20.encrypt(digest,digest,64);
+ s20.encrypt20(digest,digest,64);
}
}
@@ -159,7 +158,7 @@ bool Identity::fromString(const char *str)
return false;
char *saveptr = (char *)0;
- char tmp[4096];
+ char tmp[1024];
if (!Utils::scopy(tmp,sizeof(tmp),str))
return false;
diff --git a/node/Identity.hpp b/node/Identity.hpp
index 18e67eb6..6c33e74f 100644
--- a/node/Identity.hpp
+++ b/node/Identity.hpp
@@ -38,8 +38,7 @@
#include "Address.hpp"
#include "C25519.hpp"
#include "Buffer.hpp"
-
-#define ZT_IDENTITY_MAX_BINARY_SERIALIZED_LENGTH (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_PUBLIC_KEY_LEN + 1 + ZT_C25519_PRIVATE_KEY_LEN)
+#include "SHA512.hpp"
namespace ZeroTier {
@@ -93,8 +92,7 @@ public:
}
template<unsigned int C>
- Identity(const Buffer<C> &b,unsigned int startAt = 0)
- throw(std::out_of_range,std::invalid_argument) :
+ Identity(const Buffer<C> &b,unsigned int startAt = 0) :
_privateKey((C25519::Private *)0)
{
deserialize(b,startAt);
@@ -140,6 +138,21 @@ public:
inline bool hasPrivate() const throw() { return (_privateKey != (C25519::Private *)0); }
/**
+ * Compute the SHA512 hash of our private key (if we have one)
+ *
+ * @param sha Buffer to receive SHA512 (MUST be ZT_SHA512_DIGEST_LEN (64) bytes in length)
+ * @return True on success, false if no private key
+ */
+ inline bool sha512PrivateKey(void *sha) const
+ {
+ if (_privateKey) {
+ SHA512::hash(sha,_privateKey->data,ZT_C25519_PRIVATE_KEY_LEN);
+ return true;
+ }
+ return false;
+ }
+
+ /**
* Sign a message with this identity (private key required)
*
* @param data Data to sign
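
The new sha512PrivateKey() helper above writes a full 64-byte digest and reports via its return value whether a private key was present. A short usage sketch follows; 'id' and the surrounding control flow are illustrative only, and the caller must supply a ZT_SHA512_DIGEST_LEN buffer as the comment requires.

    // 'id' is an Identity that may or may not carry a private key (illustrative).
    uint8_t keyHash[ZT_SHA512_DIGEST_LEN]; // 64 bytes, as required by sha512PrivateKey()
    if (id.sha512PrivateKey(keyHash)) {
        // keyHash now holds SHA512(private key)
    } else {
        // public-only identity: nothing was written to keyHash
    }
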
diff --git a/node/IncomingPacket.cpp b/node/IncomingPacket.cpp
index 305232ee..cffa0b9a 100644
--- a/node/IncomingPacket.cpp
+++ b/node/IncomingPacket.cpp
@@ -33,7 +33,6 @@
#include "../include/ZeroTierOne.h"
#include "Constants.hpp"
-#include "Defaults.hpp"
#include "RuntimeEnvironment.hpp"
#include "IncomingPacket.hpp"
#include "Topology.hpp"
@@ -41,44 +40,63 @@
#include "Peer.hpp"
#include "NetworkController.hpp"
#include "SelfAwareness.hpp"
+#include "Salsa20.hpp"
+#include "SHA512.hpp"
+#include "World.hpp"
+#include "Cluster.hpp"
+#include "Node.hpp"
+#include "AntiRecursion.hpp"
+#include "DeferredPackets.hpp"
namespace ZeroTier {
-bool IncomingPacket::tryDecode(const RuntimeEnvironment *RR)
+bool IncomingPacket::tryDecode(const RuntimeEnvironment *RR,bool deferred)
{
+ const Address sourceAddress(source());
try {
if ((cipher() == ZT_PROTO_CIPHER_SUITE__C25519_POLY1305_NONE)&&(verb() == Packet::VERB_HELLO)) {
- // Unencrypted HELLOs are handled here since they are used to
- // populate our identity cache in the first place. _doHELLO() is special
- // in that it contains its own authentication logic.
- return _doHELLO(RR);
+ // Unencrypted HELLOs require some potentially expensive verification, so
+ // do this in the background if background processing is enabled.
+ if ((RR->dpEnabled > 0)&&(!deferred)) {
+ RR->dp->enqueue(this);
+ return true; // 'handled' via deferring to background thread(s)
+ } else {
+ // A null pointer for peer to _doHELLO() tells it to run its own
+ // special internal authentication logic. This is done for unencrypted
+ // HELLOs to learn new identities, etc.
+ SharedPtr<Peer> tmp;
+ return _doHELLO(RR,tmp);
+ }
}
- SharedPtr<Peer> peer = RR->topology->getPeer(source());
+ SharedPtr<Peer> peer(RR->topology->getPeer(sourceAddress));
if (peer) {
if (!dearmor(peer->key())) {
- TRACE("dropped packet from %s(%s), MAC authentication failed (size: %u)",source().toString().c_str(),_remoteAddress.toString().c_str(),size());
+ TRACE("dropped packet from %s(%s), MAC authentication failed (size: %u)",peer->address().toString().c_str(),_remoteAddress.toString().c_str(),size());
return true;
}
if (!uncompress()) {
- TRACE("dropped packet from %s(%s), compressed data invalid",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped packet from %s(%s), compressed data invalid",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
return true;
}
- //TRACE("<< %s from %s(%s)",Packet::verbString(verb()),source().toString().c_str(),_remoteAddress.toString().c_str());
+ //TRACE("<< %s from %s(%s)",Packet::verbString(v),sourceAddress.toString().c_str(),_remoteAddress.toString().c_str());
- switch(verb()) {
+ const Packet::Verb v = verb();
+ switch(v) {
//case Packet::VERB_NOP:
default: // ignore unknown verbs, but if they pass auth check they are "received"
- peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),verb(),0,Packet::VERB_NOP);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),v,0,Packet::VERB_NOP);
return true;
- case Packet::VERB_HELLO: return _doHELLO(RR);
+
+ case Packet::VERB_HELLO: return _doHELLO(RR,peer);
case Packet::VERB_ERROR: return _doERROR(RR,peer);
case Packet::VERB_OK: return _doOK(RR,peer);
case Packet::VERB_WHOIS: return _doWHOIS(RR,peer);
case Packet::VERB_RENDEZVOUS: return _doRENDEZVOUS(RR,peer);
case Packet::VERB_FRAME: return _doFRAME(RR,peer);
case Packet::VERB_EXT_FRAME: return _doEXT_FRAME(RR,peer);
+ case Packet::VERB_ECHO: return _doECHO(RR,peer);
case Packet::VERB_MULTICAST_LIKE: return _doMULTICAST_LIKE(RR,peer);
case Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE: return _doNETWORK_MEMBERSHIP_CERTIFICATE(RR,peer);
case Packet::VERB_NETWORK_CONFIG_REQUEST: return _doNETWORK_CONFIG_REQUEST(RR,peer);
@@ -88,15 +106,16 @@ bool IncomingPacket::tryDecode(const RuntimeEnvironment *RR)
case Packet::VERB_PUSH_DIRECT_PATHS: return _doPUSH_DIRECT_PATHS(RR,peer);
case Packet::VERB_CIRCUIT_TEST: return _doCIRCUIT_TEST(RR,peer);
case Packet::VERB_CIRCUIT_TEST_REPORT: return _doCIRCUIT_TEST_REPORT(RR,peer);
+ case Packet::VERB_REQUEST_PROOF_OF_WORK: return _doREQUEST_PROOF_OF_WORK(RR,peer);
}
} else {
- RR->sw->requestWhois(source());
+ RR->sw->requestWhois(sourceAddress);
return false;
}
} catch ( ... ) {
// Exceptions are more informatively caught in _do...() handlers but
// this outer try/catch will catch anything else odd.
- TRACE("dropped ??? from %s(%s): unexpected exception in tryDecode()",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped ??? from %s(%s): unexpected exception in tryDecode()",sourceAddress.toString().c_str(),_remoteAddress.toString().c_str());
return true;
}
}
@@ -108,17 +127,14 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
const uint64_t inRePacketId = at<uint64_t>(ZT_PROTO_VERB_ERROR_IDX_IN_RE_PACKET_ID);
const Packet::ErrorCode errorCode = (Packet::ErrorCode)(*this)[ZT_PROTO_VERB_ERROR_IDX_ERROR_CODE];
- //TRACE("ERROR %s from %s(%s) in-re %s",Packet::errorString(errorCode),source().toString().c_str(),_remoteAddress.toString().c_str(),Packet::verbString(inReVerb));
+ //TRACE("ERROR %s from %s(%s) in-re %s",Packet::errorString(errorCode),peer->address().toString().c_str(),_remoteAddress.toString().c_str(),Packet::verbString(inReVerb));
switch(errorCode) {
case Packet::ERROR_OBJ_NOT_FOUND:
- if (inReVerb == Packet::VERB_WHOIS) {
- if (RR->topology->isRoot(peer->identity()))
- RR->sw->cancelWhoisRequest(Address(field(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH));
- } else if (inReVerb == Packet::VERB_NETWORK_CONFIG_REQUEST) {
+ if (inReVerb == Packet::VERB_NETWORK_CONFIG_REQUEST) {
SharedPtr<Network> network(RR->node->network(at<uint64_t>(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD)));
- if ((network)&&(network->controller() == source()))
+ if ((network)&&(network->controller() == peer->address()))
network->setNotFound();
}
break;
@@ -126,7 +142,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
case Packet::ERROR_UNSUPPORTED_OPERATION:
if (inReVerb == Packet::VERB_NETWORK_CONFIG_REQUEST) {
SharedPtr<Network> network(RR->node->network(at<uint64_t>(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD)));
- if ((network)&&(network->controller() == source()))
+ if ((network)&&(network->controller() == peer->address()))
network->setNotFound();
}
break;
@@ -147,6 +163,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE);
nconf->com().serialize(outp);
outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
}
@@ -154,7 +171,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
case Packet::ERROR_NETWORK_ACCESS_DENIED_: {
SharedPtr<Network> network(RR->node->network(at<uint64_t>(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD)));
- if ((network)&&(network->controller() == source()))
+ if ((network)&&(network->controller() == peer->address()))
network->setAccessDenied();
} break;
@@ -169,15 +186,13 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
}
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_ERROR,inRePacketId,inReVerb);
- } catch (std::exception &ex) {
- TRACE("dropped ERROR from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
- TRACE("dropped ERROR from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped ERROR from %s(%s): unexpected exception",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
-bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
+bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR,SharedPtr<Peer> &peer)
{
/* Note: this is the only packet ever sent in the clear, and it's also
* the only packet that we authenticate via a different path. Authentication
@@ -187,141 +202,154 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
* in the first place. */
try {
+ const uint64_t pid = packetId();
+ const Address fromAddress(source());
const unsigned int protoVersion = (*this)[ZT_PROTO_VERB_HELLO_IDX_PROTOCOL_VERSION];
const unsigned int vMajor = (*this)[ZT_PROTO_VERB_HELLO_IDX_MAJOR_VERSION];
const unsigned int vMinor = (*this)[ZT_PROTO_VERB_HELLO_IDX_MINOR_VERSION];
const unsigned int vRevision = at<uint16_t>(ZT_PROTO_VERB_HELLO_IDX_REVISION);
const uint64_t timestamp = at<uint64_t>(ZT_PROTO_VERB_HELLO_IDX_TIMESTAMP);
+
Identity id;
- unsigned int destAddrPtr = id.deserialize(*this,ZT_PROTO_VERB_HELLO_IDX_IDENTITY) + ZT_PROTO_VERB_HELLO_IDX_IDENTITY;
+ InetAddress externalSurfaceAddress;
+ uint64_t worldId = ZT_WORLD_ID_NULL;
+ uint64_t worldTimestamp = 0;
+ {
+ unsigned int ptr = ZT_PROTO_VERB_HELLO_IDX_IDENTITY + id.deserialize(*this,ZT_PROTO_VERB_HELLO_IDX_IDENTITY);
+ if (ptr < size()) // ZeroTier One < 1.0.3 did not include physical destination address info
+ ptr += externalSurfaceAddress.deserialize(*this,ptr);
+ if ((ptr + 16) <= size()) { // older versions also did not include World IDs or timestamps
+ worldId = at<uint64_t>(ptr); ptr += 8;
+ worldTimestamp = at<uint64_t>(ptr);
+ }
+ }
if (protoVersion < ZT_PROTO_VERSION_MIN) {
TRACE("dropped HELLO from %s(%s): protocol version too old",id.address().toString().c_str(),_remoteAddress.toString().c_str());
return true;
}
- if (source() != id.address()) {
- TRACE("dropped HELLO from %s(%s): identity not for sending address",source().toString().c_str(),_remoteAddress.toString().c_str());
+ if (fromAddress != id.address()) {
+ TRACE("dropped HELLO from %s(%s): identity not for sending address",fromAddress.toString().c_str(),_remoteAddress.toString().c_str());
return true;
}
- InetAddress destAddr;
- if (destAddrPtr < size()) { // ZeroTier One < 1.0.3 did not include this field
- const unsigned int destAddrType = (*this)[destAddrPtr++];
- switch(destAddrType) {
- case ZT_PROTO_DEST_ADDRESS_TYPE_IPV4:
- destAddr.set(field(destAddrPtr,4),4,at<uint16_t>(destAddrPtr + 4));
- break;
- case ZT_PROTO_DEST_ADDRESS_TYPE_IPV6:
- destAddr.set(field(destAddrPtr,16),16,at<uint16_t>(destAddrPtr + 16));
- break;
- }
- }
-
- SharedPtr<Peer> peer(RR->topology->getPeer(id.address()));
- if (peer) {
- // We already have an identity with this address -- check for collisions
-
- if (peer->identity() != id) {
- // Identity is different from the one we already have -- address collision
-
- unsigned char key[ZT_PEER_SECRET_KEY_LENGTH];
- if (RR->identity.agree(id,key,ZT_PEER_SECRET_KEY_LENGTH)) {
- if (dearmor(key)) { // ensure packet is authentic, otherwise drop
- RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
- TRACE("rejected HELLO from %s(%s): address already claimed",id.address().toString().c_str(),_remoteAddress.toString().c_str());
- Packet outp(id.address(),RR->identity.address(),Packet::VERB_ERROR);
- outp.append((unsigned char)Packet::VERB_HELLO);
- outp.append(packetId());
- outp.append((unsigned char)Packet::ERROR_IDENTITY_COLLISION);
- outp.armor(key,true);
- RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
+ if (!peer) { // peer == NULL is the normal case here
+ peer = RR->topology->getPeer(id.address());
+ if (peer) {
+ // We already have an identity with this address -- check for collisions
+
+ if (peer->identity() != id) {
+ // Identity is different from the one we already have -- address collision
+
+ unsigned char key[ZT_PEER_SECRET_KEY_LENGTH];
+ if (RR->identity.agree(id,key,ZT_PEER_SECRET_KEY_LENGTH)) {
+ if (dearmor(key)) { // ensure packet is authentic, otherwise drop
+ TRACE("rejected HELLO from %s(%s): address already claimed",id.address().toString().c_str(),_remoteAddress.toString().c_str());
+ Packet outp(id.address(),RR->identity.address(),Packet::VERB_ERROR);
+ outp.append((unsigned char)Packet::VERB_HELLO);
+ outp.append((uint64_t)pid);
+ outp.append((unsigned char)Packet::ERROR_IDENTITY_COLLISION);
+ outp.armor(key,true);
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
+ } else {
+ TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
+ }
} else {
- RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
- TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("rejected HELLO from %s(%s): key agreement failed",id.address().toString().c_str(),_remoteAddress.toString().c_str());
}
+
+ return true;
} else {
- RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
- TRACE("rejected HELLO from %s(%s): key agreement failed",id.address().toString().c_str(),_remoteAddress.toString().c_str());
- }
+ // Identity is the same as the one we already have -- check packet integrity
- return true;
+ if (!dearmor(peer->key())) {
+ TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
+ return true;
+ }
+
+ // Continue at // VALID
+ }
} else {
- // Identity is the same as the one we already have -- check packet integrity
+ // We don't already have an identity with this address -- validate and learn it
+
+ // Check identity proof of work
+ if (!id.locallyValidate()) {
+ TRACE("dropped HELLO from %s(%s): identity invalid",id.address().toString().c_str(),_remoteAddress.toString().c_str());
+ return true;
+ }
- if (!dearmor(peer->key())) {
- RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
+ // Check packet integrity and authentication
+ SharedPtr<Peer> newPeer(new Peer(RR->identity,id));
+ if (!dearmor(newPeer->key())) {
TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
return true;
}
+ peer = RR->topology->addPeer(newPeer);
// Continue at // VALID
}
- } else {
- // We don't already have an identity with this address -- validate and learn it
-
- // Check identity proof of work
- if (!id.locallyValidate()) {
- RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
- TRACE("dropped HELLO from %s(%s): identity invalid",id.address().toString().c_str(),_remoteAddress.toString().c_str());
- return true;
- }
-
- // Check packet integrity and authentication
- SharedPtr<Peer> newPeer(new Peer(RR->identity,id));
- if (!dearmor(newPeer->key())) {
- RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
- TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
- return true;
- }
-
- peer = RR->topology->addPeer(newPeer);
- // Continue at // VALID
+ // VALID -- if we made it here, packet passed identity and authenticity checks!
}
- // VALID -- continues here
-
- peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_HELLO,0,Packet::VERB_NOP);
- peer->setRemoteVersion(protoVersion,vMajor,vMinor,vRevision);
-
- bool trusted = false;
- if (RR->topology->isRoot(id)) {
- RR->node->postNewerVersionIfNewer(vMajor,vMinor,vRevision);
- trusted = true;
- }
- if (destAddr)
- RR->sa->iam(id.address(),_remoteAddress,destAddr,trusted,RR->node->now());
+ if (externalSurfaceAddress)
+ RR->sa->iam(id.address(),_remoteAddress,externalSurfaceAddress,RR->topology->isRoot(id),RR->node->now());
Packet outp(id.address(),RR->identity.address(),Packet::VERB_OK);
-
outp.append((unsigned char)Packet::VERB_HELLO);
- outp.append(packetId());
- outp.append(timestamp);
+ outp.append((uint64_t)pid);
+ outp.append((uint64_t)timestamp);
outp.append((unsigned char)ZT_PROTO_VERSION);
outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
+ if (protoVersion >= 5) {
+ _remoteAddress.serialize(outp);
+ } else {
+ /* LEGACY COMPATIBILITY HACK:
+ *
+ * For a while now (since 1.0.3), ZeroTier has recognized changes in
+ * its network environment empirically by examining its external network
+ * address as reported by trusted peers. In versions prior to 1.1.0
+ * (protocol version < 5), they did this by saving a snapshot of this
+ * information (in SelfAwareness.hpp) keyed by reporting device ID and
+ * address type.
+ *
+ * This causes problems when clustering is combined with symmetric NAT.
+ * Symmetric NAT remaps ports, so different endpoints in a cluster will
+ * report back different exterior addresses. Since the old code keys
+ * this by device ID rather than by sending physical address, and compares the
+ * entire address including port, it constantly thinks its external
+ * surface is changing and resets connections when talking to a cluster.
+ *
+ * In new code we key by sending physical address and device and we also
+ * take the more conservative position of only interpreting changes in
+ * IP address (neglecting port) as a change in network topology that
+ * necessitates a reset. But we can make older clients work here by
+ * nulling out the port field. Since this info is only used for empirical
+ * detection of link changes, it doesn't break anything else.
+ */
+ InetAddress tmpa(_remoteAddress);
+ tmpa.setPort(0);
+ tmpa.serialize(outp);
+ }
- switch(_remoteAddress.ss_family) {
- case AF_INET:
- outp.append((unsigned char)ZT_PROTO_DEST_ADDRESS_TYPE_IPV4);
- outp.append(_remoteAddress.rawIpData(),4);
- outp.append((uint16_t)_remoteAddress.port());
- break;
- case AF_INET6:
- outp.append((unsigned char)ZT_PROTO_DEST_ADDRESS_TYPE_IPV6);
- outp.append(_remoteAddress.rawIpData(),16);
- outp.append((uint16_t)_remoteAddress.port());
- break;
- default:
- outp.append((unsigned char)ZT_PROTO_DEST_ADDRESS_TYPE_NONE);
- break;
+ if ((worldId != ZT_WORLD_ID_NULL)&&(RR->topology->worldTimestamp() > worldTimestamp)&&(worldId == RR->topology->worldId())) {
+ World w(RR->topology->world());
+ const unsigned int sizeAt = outp.size();
+ outp.addSize(2); // make room for 16-bit size field
+ w.serialize(outp,false);
+ outp.setAt<uint16_t>(sizeAt,(uint16_t)(outp.size() - (sizeAt + 2)));
+ } else {
+ outp.append((uint16_t)0); // no world update needed
}
outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
- } catch (std::exception &ex) {
- TRACE("dropped HELLO from %s(%s): %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
+
+ peer->setRemoteVersion(protoVersion,vMajor,vMinor,vRevision);
+ peer->received(RR,_localAddress,_remoteAddress,hops(),pid,Packet::VERB_HELLO,0,Packet::VERB_NOP);
} catch ( ... ) {
TRACE("dropped HELLO from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
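
The legacy note above reduces to two behaviors: protocol >= 5 peers get the full observed address echoed back, while older peers get it with the port zeroed so that their stricter, port-inclusive comparison cannot misfire behind symmetric NAT. Below is a sketch of the newer IP-only change test the comment describes; it assumes InetAddress::ipsEqual() compares only the IP portion, and the helper name is illustrative (the real bookkeeping lives in SelfAwareness, not shown in this diff).

    #include "InetAddress.hpp"

    // Treat a surface change as real only if the IP itself changed; port
    // remappings from symmetric NAT are ignored, per the comment above.
    // Assumes InetAddress::ipsEqual() (IP-only comparison); if that helper
    // differs in this tree, substitute an explicit family+address compare.
    static inline bool surfaceChanged(const ZeroTier::InetAddress &before,const ZeroTier::InetAddress &after)
    {
        return (!after.ipsEqual(before));
    }
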
@@ -345,46 +373,43 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
const unsigned int vMinor = (*this)[ZT_PROTO_VERB_HELLO__OK__IDX_MINOR_VERSION];
const unsigned int vRevision = at<uint16_t>(ZT_PROTO_VERB_HELLO__OK__IDX_REVISION);
- InetAddress destAddr;
- unsigned int destAddrPtr = ZT_PROTO_VERB_HELLO__OK__IDX_REVISION + 2; // dest address, if present, will start after 16-bit revision
- if (destAddrPtr < size()) { // ZeroTier One < 1.0.3 did not include this field
- const unsigned int destAddrType = (*this)[destAddrPtr++];
- switch(destAddrType) {
- case ZT_PROTO_DEST_ADDRESS_TYPE_IPV4:
- destAddr.set(field(destAddrPtr,4),4,at<uint16_t>(destAddrPtr + 4));
- break;
- case ZT_PROTO_DEST_ADDRESS_TYPE_IPV6:
- destAddr.set(field(destAddrPtr,16),16,at<uint16_t>(destAddrPtr + 16));
- break;
- }
- }
-
if (vProto < ZT_PROTO_VERSION_MIN) {
TRACE("%s(%s): OK(HELLO) dropped, protocol version too old",source().toString().c_str(),_remoteAddress.toString().c_str());
return true;
}
- TRACE("%s(%s): OK(HELLO), version %u.%u.%u, latency %u, reported external address %s",source().toString().c_str(),_remoteAddress.toString().c_str(),vMajor,vMinor,vRevision,latency,((destAddr) ? destAddr.toString().c_str() : "(none)"));
+ const bool trusted = RR->topology->isRoot(peer->identity());
+
+ InetAddress externalSurfaceAddress;
+ unsigned int ptr = ZT_PROTO_VERB_HELLO__OK__IDX_REVISION + 2;
+ if (ptr < size()) // ZeroTier One < 1.0.3 did not include this field
+ ptr += externalSurfaceAddress.deserialize(*this,ptr);
+ if ((trusted)&&((ptr + 2) <= size())) { // older versions also did not include this field, and right now we only use it from a root
+ World worldUpdate;
+ const unsigned int worldLen = at<uint16_t>(ptr); ptr += 2;
+ if (worldLen > 0) {
+ World w;
+ w.deserialize(*this,ptr);
+ RR->topology->worldUpdateIfValid(w);
+ }
+ }
+
+ TRACE("%s(%s): OK(HELLO), version %u.%u.%u, latency %u, reported external address %s",source().toString().c_str(),_remoteAddress.toString().c_str(),vMajor,vMinor,vRevision,latency,((externalSurfaceAddress) ? externalSurfaceAddress.toString().c_str() : "(none)"));
peer->addDirectLatencyMeasurment(latency);
peer->setRemoteVersion(vProto,vMajor,vMinor,vRevision);
- bool trusted = false;
- if (RR->topology->isRoot(peer->identity())) {
- RR->node->postNewerVersionIfNewer(vMajor,vMinor,vRevision);
- trusted = true;
- }
- if (destAddr)
- RR->sa->iam(peer->address(),_remoteAddress,destAddr,trusted,RR->node->now());
+ if (externalSurfaceAddress)
+ RR->sa->iam(peer->address(),_remoteAddress,externalSurfaceAddress,trusted,RR->node->now());
} break;
case Packet::VERB_WHOIS: {
- /* Right now only root servers are allowed to send OK(WHOIS) to prevent
- * poisoning attacks. Further decentralization will require some other
- * kind of trust mechanism. */
if (RR->topology->isRoot(peer->identity())) {
const Identity id(*this,ZT_PROTO_VERB_WHOIS__OK__IDX_IDENTITY);
- if (id.locallyValidate())
+ // Right now we can skip this since OK(WHOIS) is only accepted from
+ // roots. In the future it should be done if we query less trusted
+ // sources.
+ //if (id.locallyValidate())
RR->sw->doAnythingWaitingForPeer(RR->topology->addPeer(SharedPtr<Peer>(new Peer(RR->identity,id))));
}
} break;
@@ -438,10 +463,8 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
}
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_OK,inRePacketId,inReVerb);
- } catch (std::exception &ex) {
- TRACE("dropped OK from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
- TRACE("dropped OK from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped OK from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -450,22 +473,20 @@ bool IncomingPacket::_doWHOIS(const RuntimeEnvironment *RR,const SharedPtr<Peer>
{
try {
if (payloadLength() == ZT_ADDRESS_LENGTH) {
- const SharedPtr<Peer> queried(RR->topology->getPeer(Address(payload(),ZT_ADDRESS_LENGTH)));
+ Identity queried(RR->topology->getIdentity(Address(payload(),ZT_ADDRESS_LENGTH)));
if (queried) {
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_OK);
outp.append((unsigned char)Packet::VERB_WHOIS);
outp.append(packetId());
- queried->identity().serialize(outp,false);
+ queried.serialize(outp,false);
outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
} else {
- Packet outp(peer->address(),RR->identity.address(),Packet::VERB_ERROR);
- outp.append((unsigned char)Packet::VERB_WHOIS);
- outp.append(packetId());
- outp.append((unsigned char)Packet::ERROR_OBJ_NOT_FOUND);
- outp.append(payload(),ZT_ADDRESS_LENGTH);
- outp.armor(peer->key(),true);
- RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
+#ifdef ZT_ENABLE_CLUSTER
+ if (RR->cluster)
+ RR->cluster->sendDistributedQuery(*this);
+#endif
}
} else {
TRACE("dropped WHOIS from %s(%s): missing or invalid address",source().toString().c_str(),_remoteAddress.toString().c_str());
@@ -480,24 +501,27 @@ bool IncomingPacket::_doWHOIS(const RuntimeEnvironment *RR,const SharedPtr<Peer>
bool IncomingPacket::_doRENDEZVOUS(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer)
{
try {
- const Address with(field(ZT_PROTO_VERB_RENDEZVOUS_IDX_ZTADDRESS,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH);
- const SharedPtr<Peer> withPeer(RR->topology->getPeer(with));
- if (withPeer) {
- const unsigned int port = at<uint16_t>(ZT_PROTO_VERB_RENDEZVOUS_IDX_PORT);
- const unsigned int addrlen = (*this)[ZT_PROTO_VERB_RENDEZVOUS_IDX_ADDRLEN];
- if ((port > 0)&&((addrlen == 4)||(addrlen == 16))) {
- InetAddress atAddr(field(ZT_PROTO_VERB_RENDEZVOUS_IDX_ADDRESS,addrlen),addrlen,port);
- TRACE("RENDEZVOUS from %s says %s might be at %s, starting NAT-t",peer->address().toString().c_str(),with.toString().c_str(),atAddr.toString().c_str());
- peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_RENDEZVOUS,0,Packet::VERB_NOP);
- RR->sw->rendezvous(withPeer,_localAddress,atAddr);
+ if (RR->topology->isUpstream(peer->identity())) {
+ const Address with(field(ZT_PROTO_VERB_RENDEZVOUS_IDX_ZTADDRESS,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH);
+ const SharedPtr<Peer> withPeer(RR->topology->getPeer(with));
+ if (withPeer) {
+ const unsigned int port = at<uint16_t>(ZT_PROTO_VERB_RENDEZVOUS_IDX_PORT);
+ const unsigned int addrlen = (*this)[ZT_PROTO_VERB_RENDEZVOUS_IDX_ADDRLEN];
+ if ((port > 0)&&((addrlen == 4)||(addrlen == 16))) {
+ InetAddress atAddr(field(ZT_PROTO_VERB_RENDEZVOUS_IDX_ADDRESS,addrlen),addrlen,port);
+ TRACE("RENDEZVOUS from %s says %s might be at %s, starting NAT-t",peer->address().toString().c_str(),with.toString().c_str(),atAddr.toString().c_str());
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_RENDEZVOUS,0,Packet::VERB_NOP);
+ RR->sw->rendezvous(withPeer,_localAddress,atAddr);
+ } else {
+ TRACE("dropped corrupt RENDEZVOUS from %s(%s) (bad address or port)",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
+ }
} else {
- TRACE("dropped corrupt RENDEZVOUS from %s(%s) (bad address or port)",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
+ RR->sw->requestWhois(with);
+ TRACE("ignored RENDEZVOUS from %s(%s) to meet unknown peer %s",peer->address().toString().c_str(),_remoteAddress.toString().c_str(),with.toString().c_str());
}
} else {
- TRACE("ignored RENDEZVOUS from %s(%s) to meet unknown peer %s",peer->address().toString().c_str(),_remoteAddress.toString().c_str(),with.toString().c_str());
+ TRACE("ignored RENDEZVOUS from %s(%s): not a root server or a network relay",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
}
- } catch (std::exception &ex) {
- TRACE("dropped RENDEZVOUS from %s(%s): %s",peer->address().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
TRACE("dropped RENDEZVOUS from %s(%s): unexpected exception",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
}
@@ -530,10 +554,8 @@ bool IncomingPacket::_doFRAME(const RuntimeEnvironment *RR,const SharedPtr<Peer>
} else {
TRACE("dropped FRAME from %s(%s): we are not connected to network %.16llx",source().toString().c_str(),_remoteAddress.toString().c_str(),at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID));
}
- } catch (std::exception &ex) {
- TRACE("dropped FRAME from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
- TRACE("dropped FRAME from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped FRAME from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -547,15 +569,13 @@ bool IncomingPacket::_doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<P
const unsigned int flags = (*this)[ZT_PROTO_VERB_EXT_FRAME_IDX_FLAGS];
unsigned int comLen = 0;
- bool comFailed = false;
if ((flags & 0x01) != 0) {
CertificateOfMembership com;
comLen = com.deserialize(*this,ZT_PROTO_VERB_EXT_FRAME_IDX_COM);
- if (!peer->validateAndSetNetworkMembershipCertificate(RR,network->id(),com))
- comFailed = true;
+ peer->validateAndSetNetworkMembershipCertificate(RR,network->id(),com);
}
- if ((comFailed)||(!network->isAllowed(peer))) {
+ if (!network->isAllowed(peer)) {
TRACE("dropped EXT_FRAME from %s(%s): not a member of private network %.16llx",peer->address().toString().c_str(),_remoteAddress.toString().c_str(),network->id());
_sendErrorNeedCertificate(RR,peer,network->id());
return true;
@@ -605,10 +625,25 @@ bool IncomingPacket::_doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<P
} else {
TRACE("dropped EXT_FRAME from %s(%s): we are not connected to network %.16llx",source().toString().c_str(),_remoteAddress.toString().c_str(),at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID));
}
- } catch (std::exception &ex) {
- TRACE("dropped EXT_FRAME from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
- TRACE("dropped EXT_FRAME from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped EXT_FRAME from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
+ }
+ return true;
+}
+
+bool IncomingPacket::_doECHO(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer)
+{
+ try {
+ const uint64_t pid = packetId();
+ Packet outp(peer->address(),RR->identity.address(),Packet::VERB_OK);
+ outp.append((unsigned char)Packet::VERB_ECHO);
+ outp.append((uint64_t)pid);
+ outp.append(field(ZT_PACKET_IDX_PAYLOAD,size() - ZT_PACKET_IDX_PAYLOAD),size() - ZT_PACKET_IDX_PAYLOAD);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
+ peer->received(RR,_localAddress,_remoteAddress,hops(),pid,Packet::VERB_ECHO,0,Packet::VERB_NOP);
+ } catch ( ... ) {
+ TRACE("dropped ECHO from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -619,14 +654,15 @@ bool IncomingPacket::_doMULTICAST_LIKE(const RuntimeEnvironment *RR,const Shared
const uint64_t now = RR->node->now();
// Iterate through 18-byte network,MAC,ADI tuples
- for(unsigned int ptr=ZT_PACKET_IDX_PAYLOAD;ptr<size();ptr+=18)
- RR->mc->add(now,at<uint64_t>(ptr),MulticastGroup(MAC(field(ptr + 8,6),6),at<uint32_t>(ptr + 14)),peer->address());
+ for(unsigned int ptr=ZT_PACKET_IDX_PAYLOAD;ptr<size();ptr+=18) {
+ const uint64_t nwid = at<uint64_t>(ptr);
+ const MulticastGroup group(MAC(field(ptr + 8,6),6),at<uint32_t>(ptr + 14));
+ RR->mc->add(now,nwid,group,peer->address());
+ }
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_LIKE,0,Packet::VERB_NOP);
- } catch (std::exception &ex) {
- TRACE("dropped MULTICAST_LIKE from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
- TRACE("dropped MULTICAST_LIKE from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped MULTICAST_LIKE from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -643,10 +679,8 @@ bool IncomingPacket::_doNETWORK_MEMBERSHIP_CERTIFICATE(const RuntimeEnvironment
}
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE,0,Packet::VERB_NOP);
- } catch (std::exception &ex) {
- TRACE("dropped NETWORK_MEMBERSHIP_CERTIFICATE from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
} catch ( ... ) {
- TRACE("dropped NETWORK_MEMBERSHIP_CERTIFICATE from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped NETWORK_MEMBERSHIP_CERTIFICATE from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -680,9 +714,10 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
outp.append(netconfStr.data(),(unsigned int)netconfStr.length());
outp.compress();
outp.armor(peer->key(),true);
- if (outp.size() > ZT_PROTO_MAX_PACKET_LENGTH) {
+ if (outp.size() > ZT_PROTO_MAX_PACKET_LENGTH) { // sanity check
TRACE("NETWORK_CONFIG_REQUEST failed: internal error: netconf size %u is too large",(unsigned int)netconfStr.length());
} else {
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
}
@@ -695,6 +730,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
outp.append((unsigned char)Packet::ERROR_OBJ_NOT_FOUND);
outp.append(nwid);
outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
} break;
@@ -705,6 +741,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
outp.append((unsigned char)Packet::ERROR_NETWORK_ACCESS_DENIED_);
outp.append(nwid);
outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
} break;
@@ -727,12 +764,11 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
outp.append((unsigned char)Packet::ERROR_UNSUPPORTED_OPERATION);
outp.append(nwid);
outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
- } catch (std::exception &exc) {
- TRACE("dropped NETWORK_CONFIG_REQUEST from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
} catch ( ... ) {
- TRACE("dropped NETWORK_CONFIG_REQUEST from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped NETWORK_CONFIG_REQUEST from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -749,10 +785,8 @@ bool IncomingPacket::_doNETWORK_CONFIG_REFRESH(const RuntimeEnvironment *RR,cons
ptr += 8;
}
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_CONFIG_REFRESH,0,Packet::VERB_NOP);
- } catch (std::exception &exc) {
- TRACE("dropped NETWORK_CONFIG_REFRESH from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
} catch ( ... ) {
- TRACE("dropped NETWORK_CONFIG_REFRESH from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped NETWORK_CONFIG_REFRESH from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -773,17 +807,22 @@ bool IncomingPacket::_doMULTICAST_GATHER(const RuntimeEnvironment *RR,const Shar
outp.append(nwid);
mg.mac().appendTo(outp);
outp.append((uint32_t)mg.adi());
- if (RR->mc->gather(peer->address(),nwid,mg,outp,gatherLimit)) {
+ const unsigned int gatheredLocally = RR->mc->gather(peer->address(),nwid,mg,outp,gatherLimit);
+ if (gatheredLocally) {
outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
+
+#ifdef ZT_ENABLE_CLUSTER
+ if ((RR->cluster)&&(gatheredLocally < gatherLimit))
+ RR->cluster->sendDistributedQuery(*this);
+#endif
}
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_GATHER,0,Packet::VERB_NOP);
- } catch (std::exception &exc) {
- TRACE("dropped MULTICAST_GATHER from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
} catch ( ... ) {
- TRACE("dropped MULTICAST_GATHER from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped MULTICAST_GATHER from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -865,16 +904,15 @@ bool IncomingPacket::_doMULTICAST_FRAME(const RuntimeEnvironment *RR,const Share
outp.append((unsigned char)0x02); // flag 0x02 = contains gather results
if (RR->mc->gather(peer->address(),nwid,to,outp,gatherLimit)) {
outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
}
} // else ignore -- not a member of this network
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_FRAME,0,Packet::VERB_NOP);
- } catch (std::exception &exc) {
- TRACE("dropped MULTICAST_FRAME from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
} catch ( ... ) {
- TRACE("dropped MULTICAST_FRAME from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped MULTICAST_FRAME from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -882,11 +920,23 @@ bool IncomingPacket::_doMULTICAST_FRAME(const RuntimeEnvironment *RR,const Share
bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer)
{
try {
+ const uint64_t now = RR->node->now();
+
+ // First, subject this to a rate limit
+ if (!peer->shouldRespondToDirectPathPush(now)) {
+ TRACE("dropped PUSH_DIRECT_PATHS from %s(%s): circuit breaker tripped",source().toString().c_str(),_remoteAddress.toString().c_str());
+ return true;
+ }
+
+ // Second, limit addresses by scope and type
+ uint8_t countPerScope[ZT_INETADDRESS_MAX_SCOPE+1][2]; // [][0] is v4, [][1] is v6
+ memset(countPerScope,0,sizeof(countPerScope));
+
unsigned int count = at<uint16_t>(ZT_PACKET_IDX_PAYLOAD);
unsigned int ptr = ZT_PACKET_IDX_PAYLOAD + 2;
while (count--) { // if ptr overflows Buffer will throw
- // TODO: properly handle blacklisting, support other features... see Packet.hpp.
+ // TODO: some flags are not yet implemented
unsigned int flags = (*this)[ptr++];
unsigned int extLen = at<uint16_t>(ptr); ptr += 2;
@@ -897,25 +947,33 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
switch(addrType) {
case 4: {
InetAddress a(field(ptr,4),4,at<uint16_t>(ptr + 4));
- if ( ((flags & (0x01 | 0x02)) == 0) && (Path::isAddressValidForPath(a)) ) {
- TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
- peer->attemptToContactAt(RR,_localAddress,a,RR->node->now());
+ if ( ((flags & 0x01) == 0) && (Path::isAddressValidForPath(a)) ) {
+ if (++countPerScope[(int)a.ipScope()][0] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
+ TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
+ peer->sendHELLO(RR,_localAddress,a,now);
+ } else {
+ TRACE("ignoring contact for %s at %s -- too many per scope",peer->address().toString().c_str(),a.toString().c_str());
+ }
}
} break;
case 6: {
InetAddress a(field(ptr,16),16,at<uint16_t>(ptr + 16));
- if ( ((flags & (0x01 | 0x02)) == 0) && (Path::isAddressValidForPath(a)) ) {
- TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
- peer->attemptToContactAt(RR,_localAddress,a,RR->node->now());
+ if ( ((flags & 0x01) == 0) && (Path::isAddressValidForPath(a)) ) {
+ if (++countPerScope[(int)a.ipScope()][1] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
+ TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
+ peer->sendHELLO(RR,_localAddress,a,now);
+ } else {
+ TRACE("ignoring contact for %s at %s -- too many per scope",peer->address().toString().c_str(),a.toString().c_str());
+ }
}
} break;
}
ptr += addrLen;
}
- } catch (std::exception &exc) {
- TRACE("dropped PUSH_DIRECT_PATHS from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
+
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_PUSH_DIRECT_PATHS,0,Packet::VERB_NOP);
} catch ( ... ) {
- TRACE("dropped PUSH_DIRECT_PATHS from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped PUSH_DIRECT_PATHS from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
@@ -1021,7 +1079,7 @@ bool IncomingPacket::_doCIRCUIT_TEST(const RuntimeEnvironment *RR,const SharedPt
remainingHopsPtr += ZT_ADDRESS_LENGTH;
SharedPtr<Peer> nhp(RR->topology->getPeer(nextHop[h]));
if (nhp) {
- RemotePath *const rp = nhp->getBestPath(now);
+ Path *const rp = nhp->getBestPath(now);
if (rp)
nextHopBestPathAddress[h] = rp->address();
}
@@ -1044,6 +1102,7 @@ bool IncomingPacket::_doCIRCUIT_TEST(const RuntimeEnvironment *RR,const SharedPt
outp.append((uint16_t)0); // error code, currently unused
outp.append((uint64_t)0); // flags, currently unused
outp.append((uint64_t)packetId());
+ peer->address().appendTo(outp);
outp.append((uint8_t)hops());
_localAddress.serialize(outp);
_remoteAddress.serialize(outp);
@@ -1071,21 +1130,194 @@ bool IncomingPacket::_doCIRCUIT_TEST(const RuntimeEnvironment *RR,const SharedPt
outp.append(field(remainingHopsPtr,size() - remainingHopsPtr),size() - remainingHopsPtr);
for(unsigned int h=0;h<breadth;++h) {
- outp.newInitializationVector();
- outp.setDestination(nextHop[h]);
- RR->sw->send(outp,true,originatorCredentialNetworkId);
+ if (RR->identity.address() != nextHop[h]) { // next hops that loop back to the current hop are not valid
+ outp.newInitializationVector();
+ outp.setDestination(nextHop[h]);
+ RR->sw->send(outp,true,originatorCredentialNetworkId);
+ }
}
}
- } catch (std::exception &exc) {
- TRACE("dropped CIRCUIT_TEST from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
+
+ peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_CIRCUIT_TEST,0,Packet::VERB_NOP);
} catch ( ... ) {
- TRACE("dropped CIRCUIT_TEST from %s(%s): unexpected exception: (unknown)",source().toString().c_str(),_remoteAddress.toString().c_str());
+ TRACE("dropped CIRCUIT_TEST from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
}
return true;
}
bool IncomingPacket::_doCIRCUIT_TEST_REPORT(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer)
{
+ try {
+ ZT_CircuitTestReport report;
+ memset(&report,0,sizeof(report));
+
+ report.current = peer->address().toInt();
+ report.upstream = Address(field(ZT_PACKET_IDX_PAYLOAD + 52,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH).toInt();
+ report.testId = at<uint64_t>(ZT_PACKET_IDX_PAYLOAD + 8);
+ report.timestamp = at<uint64_t>(ZT_PACKET_IDX_PAYLOAD);
+ report.remoteTimestamp = at<uint64_t>(ZT_PACKET_IDX_PAYLOAD + 16);
+ report.sourcePacketId = at<uint64_t>(ZT_PACKET_IDX_PAYLOAD + 44);
+ report.flags = at<uint64_t>(ZT_PACKET_IDX_PAYLOAD + 36);
+ report.sourcePacketHopCount = (*this)[ZT_PACKET_IDX_PAYLOAD + 57]; // end of fixed length headers: 58
+ report.errorCode = at<uint16_t>(ZT_PACKET_IDX_PAYLOAD + 34);
+ report.vendor = (enum ZT_Vendor)((*this)[ZT_PACKET_IDX_PAYLOAD + 24]);
+ report.protocolVersion = (*this)[ZT_PACKET_IDX_PAYLOAD + 25];
+ report.majorVersion = (*this)[ZT_PACKET_IDX_PAYLOAD + 26];
+ report.minorVersion = (*this)[ZT_PACKET_IDX_PAYLOAD + 27];
+ report.revision = at<uint16_t>(ZT_PACKET_IDX_PAYLOAD + 28);
+ report.platform = (enum ZT_Platform)at<uint16_t>(ZT_PACKET_IDX_PAYLOAD + 30);
+ report.architecture = (enum ZT_Architecture)at<uint16_t>(ZT_PACKET_IDX_PAYLOAD + 32);
+
+ const unsigned int receivedOnLocalAddressLen = reinterpret_cast<InetAddress *>(&(report.receivedOnLocalAddress))->deserialize(*this,ZT_PACKET_IDX_PAYLOAD + 58);
+ const unsigned int receivedFromRemoteAddressLen = reinterpret_cast<InetAddress *>(&(report.receivedFromRemoteAddress))->deserialize(*this,ZT_PACKET_IDX_PAYLOAD + 58 + receivedOnLocalAddressLen);
+
+ unsigned int nhptr = ZT_PACKET_IDX_PAYLOAD + 58 + receivedOnLocalAddressLen + receivedFromRemoteAddressLen;
+ nhptr += at<uint16_t>(nhptr) + 2; // add "additional field" length, which right now will be zero
+
+ report.nextHopCount = (*this)[nhptr++];
+ if (report.nextHopCount > ZT_CIRCUIT_TEST_MAX_HOP_BREADTH) // sanity check, shouldn't be possible
+ report.nextHopCount = ZT_CIRCUIT_TEST_MAX_HOP_BREADTH;
+ for(unsigned int h=0;h<report.nextHopCount;++h) {
+ report.nextHops[h].address = Address(field(nhptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH).toInt(); nhptr += ZT_ADDRESS_LENGTH;
+ nhptr += reinterpret_cast<InetAddress *>(&(report.nextHops[h].physicalAddress))->deserialize(*this,nhptr);
+ }
+
+ RR->node->postCircuitTestReport(&report);
+ } catch ( ... ) {
+ TRACE("dropped CIRCUIT_TEST_REPORT from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
+ }
+ return true;
+}
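
As a reading aid, the fixed byte offsets consumed by the report parser above, collected in one place. All values are taken directly from the at<>()/field() calls in this hunk; the RPT_* names are ours, and offsets are relative to ZT_PACKET_IDX_PAYLOAD.

// Offsets relative to ZT_PACKET_IDX_PAYLOAD, per the parser above.
static const unsigned int RPT_TIMESTAMP          = 0;  // uint64
static const unsigned int RPT_TEST_ID            = 8;  // uint64
static const unsigned int RPT_REMOTE_TIMESTAMP   = 16; // uint64
static const unsigned int RPT_VENDOR             = 24; // uint8
static const unsigned int RPT_PROTOCOL_VERSION   = 25; // uint8
static const unsigned int RPT_MAJOR_VERSION      = 26; // uint8
static const unsigned int RPT_MINOR_VERSION      = 27; // uint8
static const unsigned int RPT_REVISION           = 28; // uint16
static const unsigned int RPT_PLATFORM           = 30; // uint16
static const unsigned int RPT_ARCHITECTURE       = 32; // uint16
static const unsigned int RPT_ERROR_CODE         = 34; // uint16
static const unsigned int RPT_FLAGS              = 36; // uint64
static const unsigned int RPT_SOURCE_PACKET_ID   = 44; // uint64
static const unsigned int RPT_UPSTREAM_ADDRESS   = 52; // 5-byte ZeroTier address
static const unsigned int RPT_SOURCE_PACKET_HOPS = 57; // uint8; fixed-length fields end at 58

The variable-length fields (receivedOnLocalAddress, receivedFromRemoteAddress, a 16-bit "additional fields" length, then the next-hop list) follow at offset 58, as decoded above.
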
+
+bool IncomingPacket::_doREQUEST_PROOF_OF_WORK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer)
+{
+ try {
+ // Right now this is only allowed from root servers -- may be allowed from controllers and relays later.
+ if (RR->topology->isRoot(peer->identity())) {
+ const uint64_t pid = packetId();
+ const unsigned int difficulty = (*this)[ZT_PACKET_IDX_PAYLOAD + 1];
+ const unsigned int challengeLength = at<uint16_t>(ZT_PACKET_IDX_PAYLOAD + 2);
+ if (challengeLength > ZT_PROTO_MAX_PACKET_LENGTH)
+ return true; // sanity check, drop invalid size
+ const unsigned char *challenge = field(ZT_PACKET_IDX_PAYLOAD + 4,challengeLength);
+
+ switch((*this)[ZT_PACKET_IDX_PAYLOAD]) {
+
+ // Salsa20/12+SHA512 hashcash
+ case 0x01: {
+ if (difficulty <= 14) {
+ unsigned char result[16];
+ computeSalsa2012Sha512ProofOfWork(difficulty,challenge,challengeLength,result);
+ TRACE("PROOF_OF_WORK computed for %s: difficulty==%u, challengeLength==%u, result: %.16llx%.16llx",peer->address().toString().c_str(),difficulty,challengeLength,Utils::ntoh(*(reinterpret_cast<const uint64_t *>(result))),Utils::ntoh(*(reinterpret_cast<const uint64_t *>(result + 8))));
+ Packet outp(peer->address(),RR->identity.address(),Packet::VERB_OK);
+ outp.append((unsigned char)Packet::VERB_REQUEST_PROOF_OF_WORK);
+ outp.append(pid);
+ outp.append((uint16_t)sizeof(result));
+ outp.append(result,sizeof(result));
+ outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
+ } else {
+ Packet outp(peer->address(),RR->identity.address(),Packet::VERB_ERROR);
+ outp.append((unsigned char)Packet::VERB_REQUEST_PROOF_OF_WORK);
+ outp.append(pid);
+ outp.append((unsigned char)Packet::ERROR_INVALID_REQUEST);
+ outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
+ RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
+ }
+ } break;
+
+ default:
+ TRACE("dropped REQUEST_PROOF_OF_WORK from %s(%s): unrecognized proof of work type",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
+ break;
+ }
+
+ peer->received(RR,_localAddress,_remoteAddress,hops(),pid,Packet::VERB_REQUEST_PROOF_OF_WORK,0,Packet::VERB_NOP);
+ } else {
+ TRACE("dropped REQUEST_PROOF_OF_WORK from %s(%s): not trusted enough",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
+ }
+ } catch ( ... ) {
+ TRACE("dropped REQUEST_PROOF_OF_WORK from %s(%s): unexpected exception",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
+ }
+ return true;
+}
+
+void IncomingPacket::computeSalsa2012Sha512ProofOfWork(unsigned int difficulty,const void *challenge,unsigned int challengeLength,unsigned char result[16])
+{
+ unsigned char salsabuf[131072]; // 131072 == protocol constant, size of memory buffer for this proof of work function
+ char candidatebuf[ZT_PROTO_MAX_PACKET_LENGTH + 256];
+ unsigned char shabuf[ZT_SHA512_DIGEST_LEN];
+ const uint64_t s20iv = 0; // zero IV for Salsa20
+ char *const candidate = (char *)(( ((uintptr_t)&(candidatebuf[0])) | 0xf ) + 1); // align to 16-byte boundary to ensure that uint64_t type punning of initial nonce is okay
+ Salsa20 s20;
+ unsigned int d;
+ unsigned char *p;
+
+ Utils::getSecureRandom(candidate,16);
+ memcpy(candidate + 16,challenge,challengeLength);
+
+ if (difficulty > 512)
+ difficulty = 512; // sanity check
+
+try_salsa2012sha512_again:
+ ++*(reinterpret_cast<volatile uint64_t *>(candidate));
+
+ SHA512::hash(shabuf,candidate,16 + challengeLength);
+ s20.init(shabuf,256,&s20iv);
+ memset(salsabuf,0,sizeof(salsabuf));
+ s20.encrypt12(salsabuf,salsabuf,sizeof(salsabuf));
+ SHA512::hash(shabuf,salsabuf,sizeof(salsabuf));
+
+ d = difficulty;
+ p = shabuf;
+ while (d >= 8) {
+ if (*(p++))
+ goto try_salsa2012sha512_again;
+ d -= 8;
+ }
+ if (d > 0) {
+ if ( ((((unsigned int)*p) << d) & 0xff00) != 0 )
+ goto try_salsa2012sha512_again;
+ }
+
+ memcpy(result,candidate,16);
+}
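
The success condition inside the search loop above is "the first `difficulty` bits of the final SHA-512 digest are zero": whole bytes are checked first, then a partial byte via a shift-and-mask. The same predicate as a stand-alone helper (the name is ours), which may make the bit arithmetic easier to follow:

#include <stdint.h>

// True if the first 'difficulty' bits of 'digest' (digestLen bytes) are all zero.
// This restates the d >= 8 loop and the ((b << d) & 0xff00) partial-byte test above.
static bool firstBitsAreZero(const uint8_t *digest,unsigned int digestLen,unsigned int difficulty)
{
	if (difficulty > (digestLen * 8))
		difficulty = digestLen * 8; // clamp, as the callers above clamp to 512 (SHA-512 output)
	unsigned int d = difficulty;
	const uint8_t *p = digest;
	while (d >= 8) {
		if (*(p++) != 0)
			return false;
		d -= 8;
	}
	if (d > 0) {
		// Shifting the next byte left by the remaining bit count moves exactly its
		// top 'd' bits into the 0xff00 region; any of them being set fails the test.
		if (((((unsigned int)*p) << d) & 0xff00) != 0)
			return false;
	}
	return true;
}
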
+
+bool IncomingPacket::testSalsa2012Sha512ProofOfWorkResult(unsigned int difficulty,const void *challenge,unsigned int challengeLength,const unsigned char proposedResult[16])
+{
+ unsigned char salsabuf[131072]; // 131072 == protocol constant, size of memory buffer for this proof of work function
+ char candidate[ZT_PROTO_MAX_PACKET_LENGTH + 256];
+ unsigned char shabuf[ZT_SHA512_DIGEST_LEN];
+ const uint64_t s20iv = 0; // zero IV for Salsa20
+ Salsa20 s20;
+ unsigned int d;
+ unsigned char *p;
+
+ if (difficulty > 512)
+ difficulty = 512; // sanity check
+
+ memcpy(candidate,proposedResult,16);
+ memcpy(candidate + 16,challenge,challengeLength);
+
+ SHA512::hash(shabuf,candidate,16 + challengeLength);
+ s20.init(shabuf,256,&s20iv);
+ memset(salsabuf,0,sizeof(salsabuf));
+ s20.encrypt12(salsabuf,salsabuf,sizeof(salsabuf));
+ SHA512::hash(shabuf,salsabuf,sizeof(salsabuf));
+
+ d = difficulty;
+ p = shabuf;
+ while (d >= 8) {
+ if (*(p++))
+ return false;
+ d -= 8;
+ }
+ if (d > 0) {
+ if ( ((((unsigned int)*p) << d) & 0xff00) != 0 )
+ return false;
+ }
+
return true;
}
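
A minimal round trip through the two new static helpers, e.g. what a verifier does with a submitted result. The challenge bytes and difficulty are placeholders; note that each attempt runs Salsa20/12 over a 128 KiB buffer, so keep the difficulty small when experimenting.

#include "IncomingPacket.hpp" // assumed to be on the include path
#include <cassert>

inline void proofOfWorkRoundTrip()
{
	const unsigned char challenge[16] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
	const unsigned int difficulty = 12; // the handler above only computes for difficulty <= 14

	unsigned char result[16];
	ZeroTier::IncomingPacket::computeSalsa2012Sha512ProofOfWork(difficulty,challenge,sizeof(challenge),result);

	// The verifier re-derives the digest from (result || challenge) and checks the leading zero bits.
	assert(ZeroTier::IncomingPacket::testSalsa2012Sha512ProofOfWorkResult(difficulty,challenge,sizeof(challenge),result));
}
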
@@ -1097,6 +1329,7 @@ void IncomingPacket::_sendErrorNeedCertificate(const RuntimeEnvironment *RR,cons
outp.append((unsigned char)Packet::ERROR_NEED_MEMBERSHIP_CERTIFICATE);
outp.append(nwid);
outp.armor(peer->key(),true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
}
diff --git a/node/IncomingPacket.hpp b/node/IncomingPacket.hpp
index 06220c4b..7fb7dbd3 100644
--- a/node/IncomingPacket.hpp
+++ b/node/IncomingPacket.hpp
@@ -93,30 +93,59 @@ public:
* about whether the packet was valid. A rejection is 'complete.'
*
* Once true is returned, this must not be called again. The packet's state
- * may no longer be valid.
+ * may no longer be valid. The only exception is deferred decoding. In this
+ * case true is returned to indicate to the normal decode path that it is
+ * finished with the packet. The packet will have added itself to the
+ * deferred queue and will expect tryDecode() to be called one more time
+ * with deferred set to true.
+ *
+ * Deferred decoding is performed by DeferredPackets.cpp and should not be
+ * done elsewhere. Under deferred decoding packets only get one shot and
+ * so the return value of tryDecode() is ignored.
*
* @param RR Runtime environment
+ * @param deferred If true, this is a deferred decode and the return is ignored
* @return True if decoding and processing is complete, false if caller should try again
- * @throws std::out_of_range Range error processing packet (should be discarded)
- * @throws std::runtime_error Other error processing packet (should be discarded)
*/
- bool tryDecode(const RuntimeEnvironment *RR);
+ bool tryDecode(const RuntimeEnvironment *RR,bool deferred);
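
The two-phase contract described above is easier to see as a call sequence. Below is a self-contained stand-in (not the real IncomingPacket or DeferredPackets classes, whose internals are not part of this hunk) showing the normal pass, the self-enqueue, and the single deferred pass whose return value is ignored.

#include <memory>
#include <queue>

struct SketchPacket // stand-in for IncomingPacket
{
	bool deferMe; // pretend classification decided this verb needs deferred work
	bool tryDecode(bool deferred)
	{
		if (deferred) {
			// expensive work (e.g. proof-of-work verification) would run here;
			// the caller ignores this return value on the deferred pass
			return true;
		}
		// Returning true on the normal pass means "done with me here": either
		// fully handled, or queued for exactly one more deferred call.
		return true;
	}
};

inline void deferredDecodeSketch()
{
	std::queue< std::shared_ptr<SketchPacket> > dq;

	std::shared_ptr<SketchPacket> pkt(new SketchPacket());
	pkt->deferMe = true;
	if (pkt->tryDecode(false) && pkt->deferMe)
		dq.push(pkt); // in the real code the packet enqueues itself inside tryDecode()

	// Background thread side, cf. Node::backgroundThreadMain() -> RR->dp->process():
	while (!dq.empty()) {
		dq.front()->tryDecode(true); // one shot, return value ignored
		dq.pop();
	}
}
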
/**
* @return Time of packet receipt / start of decode
*/
inline uint64_t receiveTime() const throw() { return _receiveTime; }
+ /**
+ * Compute the Salsa20/12+SHA512 proof of work function
+ *
+ * @param difficulty Difficulty in bits (max: 64)
+ * @param challenge Challenge string
+ * @param challengeLength Length of challenge in bytes (max allowed: ZT_PROTO_MAX_PACKET_LENGTH)
+ * @param result Buffer to fill with 16-byte result
+ */
+ static void computeSalsa2012Sha512ProofOfWork(unsigned int difficulty,const void *challenge,unsigned int challengeLength,unsigned char result[16]);
+
+ /**
+ * Verify the result of Salsa20/12+SHA512 proof of work
+ *
+ * @param difficulty Difficulty in bits (max: 64)
+ * @param challenge Challenge bytes
+ * @param challengeLength Length of challenge in bytes (max allowed: ZT_PROTO_MAX_PACKET_LENGTH)
+ * @param proposedResult Result supplied by client
+ * @return True if result is valid
+ */
+ static bool testSalsa2012Sha512ProofOfWorkResult(unsigned int difficulty,const void *challenge,unsigned int challengeLength,const unsigned char proposedResult[16]);
+
private:
// These are called internally to handle packet contents once it has
// been authenticated, decrypted, decompressed, and classified.
bool _doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
- bool _doHELLO(const RuntimeEnvironment *RR);
+ bool _doHELLO(const RuntimeEnvironment *RR,SharedPtr<Peer> &peer); // can be called with NULL peer, while all others cannot
bool _doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
bool _doWHOIS(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
bool _doRENDEZVOUS(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
bool _doFRAME(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
bool _doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
+ bool _doECHO(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
bool _doMULTICAST_LIKE(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
bool _doNETWORK_MEMBERSHIP_CERTIFICATE(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
bool _doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
@@ -126,6 +155,7 @@ private:
bool _doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
bool _doCIRCUIT_TEST(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
bool _doCIRCUIT_TEST_REPORT(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
+ bool _doREQUEST_PROOF_OF_WORK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer);
// Send an ERROR_NEED_MEMBERSHIP_CERTIFICATE to a peer indicating that an updated cert is needed to communicate
void _sendErrorNeedCertificate(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer,uint64_t nwid);
diff --git a/node/InetAddress.cpp b/node/InetAddress.cpp
index e542f0d4..f35eb9c3 100644
--- a/node/InetAddress.cpp
+++ b/node/InetAddress.cpp
@@ -77,14 +77,12 @@ InetAddress::IpScope InetAddress::ipScope() const
if ((ip & 0xffff0000) == 0xc0a80000) return IP_SCOPE_PRIVATE; // 192.168.0.0/16
break;
case 0xff: return IP_SCOPE_NONE; // 255.0.0.0/8 (broadcast, or unused/unusable)
- default:
- switch(ip >> 28) {
- case 0xe: return IP_SCOPE_MULTICAST; // 224.0.0.0/4
- case 0xf: return IP_SCOPE_PSEUDOPRIVATE; // 240.0.0.0/4 ("reserved," usually unusable)
- default: return IP_SCOPE_GLOBAL; // everything else
- }
- break;
}
+ switch(ip >> 28) {
+ case 0xe: return IP_SCOPE_MULTICAST; // 224.0.0.0/4
+ case 0xf: return IP_SCOPE_PSEUDOPRIVATE; // 240.0.0.0/4 ("reserved," usually unusable)
+ }
+ return IP_SCOPE_GLOBAL;
} break;
case AF_INET6: {
@@ -236,7 +234,6 @@ void InetAddress::fromString(const std::string &ipSlashPort)
}
InetAddress InetAddress::netmask() const
- throw()
{
InetAddress r(*this);
switch(r.ss_family) {
@@ -244,36 +241,40 @@ InetAddress InetAddress::netmask() const
reinterpret_cast<struct sockaddr_in *>(&r)->sin_addr.s_addr = Utils::hton((uint32_t)(0xffffffff << (32 - netmaskBits())));
break;
case AF_INET6: {
- unsigned char *bf = reinterpret_cast<unsigned char *>(reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr);
- signed int bitsLeft = (signed int)netmaskBits();
- for(unsigned int i=0;i<16;++i) {
- if (bitsLeft > 0) {
- bf[i] |= (unsigned char)((bitsLeft >= 8) ? 0x00 : (0xff >> bitsLeft));
- bitsLeft -= 8;
- }
- }
+ uint64_t nm[2];
+ const unsigned int bits = netmaskBits();
+ nm[0] = Utils::hton((uint64_t)((bits >= 64) ? 0xffffffffffffffffULL : (0xffffffffffffffffULL << (64 - bits))));
+ nm[1] = Utils::hton((uint64_t)((bits <= 64) ? 0ULL : (0xffffffffffffffffULL << (128 - bits))));
+ memcpy(reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,nm,16);
} break;
}
return r;
}
InetAddress InetAddress::broadcast() const
- throw()
+{
+ if (ss_family == AF_INET) {
+ InetAddress r(*this);
+ reinterpret_cast<struct sockaddr_in *>(&r)->sin_addr.s_addr |= Utils::hton((uint32_t)(0xffffffff >> netmaskBits()));
+ return r;
+ }
+ return InetAddress();
+}
+
+InetAddress InetAddress::network() const
{
InetAddress r(*this);
switch(r.ss_family) {
case AF_INET:
- reinterpret_cast<struct sockaddr_in *>(&r)->sin_addr.s_addr |= Utils::hton((uint32_t)(0xffffffff >> netmaskBits()));
+ reinterpret_cast<struct sockaddr_in *>(&r)->sin_addr.s_addr &= Utils::hton((uint32_t)(0xffffffff << (32 - netmaskBits())));
break;
case AF_INET6: {
- unsigned char *bf = reinterpret_cast<unsigned char *>(reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr);
- signed int bitsLeft = (signed int)netmaskBits();
- for(unsigned int i=0;i<16;++i) {
- if (bitsLeft > 0) {
- bf[i] |= (unsigned char)((bitsLeft >= 8) ? 0x00 : (0xff >> bitsLeft));
- bitsLeft -= 8;
- }
- }
+ uint64_t nm[2];
+ const unsigned int bits = netmaskBits();
+ memcpy(nm,reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,16);
+ nm[0] &= Utils::hton((uint64_t)((bits >= 64) ? 0xffffffffffffffffULL : (0xffffffffffffffffULL << (64 - bits))));
+ nm[1] &= Utils::hton((uint64_t)((bits <= 64) ? 0ULL : (0xffffffffffffffffULL << (128 - bits))));
+ memcpy(reinterpret_cast<struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,nm,16);
} break;
}
return r;
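
A quick sketch of the reworked address math; fromString() is the existing "IP/bits" parser from this file, and the literal addresses are examples only.

#include "InetAddress.hpp"
#include <cstdio>

inline void inetAddressMathExample()
{
	ZeroTier::InetAddress a;
	a.fromString("10.0.1.200/24");

	ZeroTier::InetAddress mask(a.netmask());    // 255.255.255.0
	ZeroTier::InetAddress net(a.network());     // 10.0.1.0 -- IP ANDed with the netmask
	ZeroTier::InetAddress bcast(a.broadcast()); // 10.0.1.255 (IPv4 only; NULL InetAddress for IPv6)

	printf("%s -> net %s, mask %s, bcast %s\n",
		a.toString().c_str(),net.toString().c_str(),mask.toString().c_str(),bcast.toString().c_str());
}
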
diff --git a/node/InetAddress.hpp b/node/InetAddress.hpp
index c376a032..2573e694 100644
--- a/node/InetAddress.hpp
+++ b/node/InetAddress.hpp
@@ -43,12 +43,17 @@
namespace ZeroTier {
/**
+ * Maximum integer value of enum IpScope
+ */
+#define ZT_INETADDRESS_MAX_SCOPE 7
+
+/**
* Extends sockaddr_storage with friendly C++ methods
*
* This is basically a "mixin" for sockaddr_storage. It adds methods and
* operators, but does not modify the structure. This can be cast to/from
- * sockaddr_storage and used interchangeably. Don't change this as it's
- * used in a few places.
+ * sockaddr_storage and used interchangeably. DO NOT change this by e.g.
+ * adding non-static fields, since much code depends on this identity.
*/
struct InetAddress : public sockaddr_storage
{
@@ -66,7 +71,8 @@ struct InetAddress : public sockaddr_storage
* IP address scope
*
* Note that these values are in ascending order of path preference and
- * MUST remain that way or Path must be changed to reflect.
+ * MUST remain that way or Path must be changed to reflect. Also be sure
+ * to change ZT_INETADDRESS_MAX_SCOPE if the max changes.
*/
enum IpScope
{
@@ -100,74 +106,88 @@ struct InetAddress : public sockaddr_storage
inline InetAddress &operator=(const InetAddress &a)
throw()
{
- memcpy(this,&a,sizeof(InetAddress));
+ if (&a != this)
+ memcpy(this,&a,sizeof(InetAddress));
return *this;
}
inline InetAddress &operator=(const InetAddress *a)
throw()
{
- memcpy(this,a,sizeof(InetAddress));
+ if (a != this)
+ memcpy(this,a,sizeof(InetAddress));
return *this;
}
inline InetAddress &operator=(const struct sockaddr_storage &ss)
throw()
{
- memcpy(this,&ss,sizeof(InetAddress));
+ if (reinterpret_cast<const InetAddress *>(&ss) != this)
+ memcpy(this,&ss,sizeof(InetAddress));
return *this;
}
inline InetAddress &operator=(const struct sockaddr_storage *ss)
throw()
{
- memcpy(this,ss,sizeof(InetAddress));
+ if (reinterpret_cast<const InetAddress *>(ss) != this)
+ memcpy(this,ss,sizeof(InetAddress));
return *this;
}
inline InetAddress &operator=(const struct sockaddr_in &sa)
throw()
{
- memset(this,0,sizeof(InetAddress));
- memcpy(this,&sa,sizeof(struct sockaddr_in));
+ if (reinterpret_cast<const InetAddress *>(&sa) != this) {
+ memset(this,0,sizeof(InetAddress));
+ memcpy(this,&sa,sizeof(struct sockaddr_in));
+ }
return *this;
}
inline InetAddress &operator=(const struct sockaddr_in *sa)
throw()
{
- memset(this,0,sizeof(InetAddress));
- memcpy(this,sa,sizeof(struct sockaddr_in));
+ if (reinterpret_cast<const InetAddress *>(sa) != this) {
+ memset(this,0,sizeof(InetAddress));
+ memcpy(this,sa,sizeof(struct sockaddr_in));
+ }
return *this;
}
inline InetAddress &operator=(const struct sockaddr_in6 &sa)
throw()
{
- memset(this,0,sizeof(InetAddress));
- memcpy(this,&sa,sizeof(struct sockaddr_in6));
+ if (reinterpret_cast<const InetAddress *>(&sa) != this) {
+ memset(this,0,sizeof(InetAddress));
+ memcpy(this,&sa,sizeof(struct sockaddr_in6));
+ }
return *this;
}
inline InetAddress &operator=(const struct sockaddr_in6 *sa)
throw()
{
- memset(this,0,sizeof(InetAddress));
- memcpy(this,sa,sizeof(struct sockaddr_in6));
+ if (reinterpret_cast<const InetAddress *>(sa) != this) {
+ memset(this,0,sizeof(InetAddress));
+ memcpy(this,sa,sizeof(struct sockaddr_in6));
+ }
return *this;
}
inline InetAddress &operator=(const struct sockaddr &sa)
throw()
{
- memset(this,0,sizeof(InetAddress));
- switch(sa.sa_family) {
- case AF_INET:
- memcpy(this,&sa,sizeof(struct sockaddr_in));
- break;
- case AF_INET6:
- memcpy(this,&sa,sizeof(struct sockaddr_in6));
- break;
+ if (reinterpret_cast<const InetAddress *>(&sa) != this) {
+ memset(this,0,sizeof(InetAddress));
+ switch(sa.sa_family) {
+ case AF_INET:
+ memcpy(this,&sa,sizeof(struct sockaddr_in));
+ break;
+ case AF_INET6:
+ memcpy(this,&sa,sizeof(struct sockaddr_in6));
+ break;
+ }
}
return *this;
}
@@ -175,14 +195,16 @@ struct InetAddress : public sockaddr_storage
inline InetAddress &operator=(const struct sockaddr *sa)
throw()
{
- memset(this,0,sizeof(InetAddress));
- switch(sa->sa_family) {
- case AF_INET:
- memcpy(this,sa,sizeof(struct sockaddr_in));
- break;
- case AF_INET6:
- memcpy(this,sa,sizeof(struct sockaddr_in6));
- break;
+ if (reinterpret_cast<const InetAddress *>(sa) != this) {
+ memset(this,0,sizeof(InetAddress));
+ switch(sa->sa_family) {
+ case AF_INET:
+ memcpy(this,sa,sizeof(struct sockaddr_in));
+ break;
+ case AF_INET6:
+ memcpy(this,sa,sizeof(struct sockaddr_in6));
+ break;
+ }
}
return *this;
}
@@ -281,17 +303,27 @@ struct InetAddress : public sockaddr_storage
/**
* Construct a full netmask as an InetAddress
+ *
+ * @return Netmask such as 255.255.255.0 if this address is /24 (port field will be unchanged)
*/
- InetAddress netmask() const
- throw();
+ InetAddress netmask() const;
/**
* Constructs a broadcast address from a network/netmask address
*
+ * This is only valid for IPv4 and will return a NULL InetAddress for other
+ * address families.
+ *
* @return Broadcast address (only IP portion is meaningful)
*/
- InetAddress broadcast() const
- throw();
+ InetAddress broadcast() const;
+
+ /**
+ * Return the network -- a.k.a. the IP ANDed with the netmask
+ *
+ * @return Network e.g. 10.0.1.0/24 from 10.0.1.200/24
+ */
+ InetAddress network() const;
/**
* @return True if this is an IPv4 address
@@ -304,7 +336,7 @@ struct InetAddress : public sockaddr_storage
inline bool isV6() const throw() { return (ss_family == AF_INET6); }
/**
- * @return pointer to raw IP address bytes
+ * @return pointer to raw address bytes or NULL if not available
*/
inline const void *rawIpData() const
throw()
@@ -317,27 +349,19 @@ struct InetAddress : public sockaddr_storage
}
/**
- * @return pointer to raw IP address bytes
- */
- inline void *rawIpData()
- throw()
- {
- switch(ss_family) {
- case AF_INET: return (void *)&(reinterpret_cast<struct sockaddr_in *>(this)->sin_addr.s_addr);
- case AF_INET6: return (void *)(reinterpret_cast<struct sockaddr_in6 *>(this)->sin6_addr.s6_addr);
- default: return 0;
- }
- }
-
- /**
+ * Performs an IP-only comparison or, if that is impossible, a memcmp()
+ *
	 * @param a InetAddress to compare against
* @return True if only IP portions are equal (false for non-IP or null addresses)
*/
inline bool ipsEqual(const InetAddress &a) const
{
- switch(ss_family) {
- case AF_INET: return (reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr == reinterpret_cast<const struct sockaddr_in *>(&a)->sin_addr.s_addr);
- case AF_INET6: return (memcmp(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) == 0);
+ if (ss_family == a.ss_family) {
+ if (ss_family == AF_INET)
+ return (reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr == reinterpret_cast<const struct sockaddr_in *>(&a)->sin_addr.s_addr);
+ if (ss_family == AF_INET6)
+ return (memcmp(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr,reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr,16) == 0);
+ return (memcmp(this,&a,sizeof(InetAddress)) == 0);
}
return false;
}
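
For instance, two addresses that differ only in port now compare equal under ipsEqual() while remaining distinct to the full comparison operators (sketch, same assumptions as the example above):

#include "InetAddress.hpp"
#include <cassert>

inline void ipsEqualExample()
{
	ZeroTier::InetAddress a,b;
	a.fromString("192.168.1.10/9993");
	b.fromString("192.168.1.10/4242");
	assert(a.ipsEqual(b));  // IP portions match; ports are ignored
	assert(!(a == b));      // full equality still distinguishes the ports
}
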
@@ -366,7 +390,8 @@ struct InetAddress : public sockaddr_storage
template<unsigned int C>
inline void serialize(Buffer<C> &b) const
{
- // Format is the same as in VERB_HELLO in Packet.hpp
+		// This is used in the protocol and must be the same as described in places
+ // like VERB_HELLO in Packet.hpp.
switch(ss_family) {
case AF_INET:
b.append((uint8_t)0x04);
@@ -387,11 +412,21 @@ struct InetAddress : public sockaddr_storage
template<unsigned int C>
inline unsigned int deserialize(const Buffer<C> &b,unsigned int startAt = 0)
{
- unsigned int p = startAt;
memset(this,0,sizeof(InetAddress));
+ unsigned int p = startAt;
switch(b[p++]) {
case 0:
return 1;
+ case 0x01:
+ // TODO: Ethernet address (but accept for forward compatibility)
+ return 7;
+ case 0x02:
+ // TODO: Bluetooth address (but accept for forward compatibility)
+ return 7;
+ case 0x03:
+ // TODO: Other address types (but accept for forward compatibility)
+ // These could be extended/optional things like AF_UNIX, LTE Direct, shared memory, etc.
+ return (unsigned int)(b.template at<uint16_t>(p) + 3); // other addresses begin with 16-bit non-inclusive length
case 0x04:
ss_family = AF_INET;
memcpy(&(reinterpret_cast<struct sockaddr_in *>(this)->sin_addr.s_addr),b.field(p,4),4); p += 4;
diff --git a/node/Multicaster.cpp b/node/Multicaster.cpp
index 6a8d6379..fa9487ef 100644
--- a/node/Multicaster.cpp
+++ b/node/Multicaster.cpp
@@ -37,6 +37,7 @@
#include "Peer.hpp"
#include "C25519.hpp"
#include "CertificateOfMembership.hpp"
+#include "Node.hpp"
namespace ZeroTier {
@@ -77,7 +78,7 @@ void Multicaster::remove(uint64_t nwid,const MulticastGroup &mg,const Address &m
}
}
-unsigned int Multicaster::gather(const Address &queryingPeer,uint64_t nwid,const MulticastGroup &mg,Packet &appendTo,unsigned int limit) const
+unsigned int Multicaster::gather(const Address &queryingPeer,uint64_t nwid,const MulticastGroup &mg,Buffer<ZT_PROTO_MAX_PACKET_LENGTH> &appendTo,unsigned int limit) const
{
unsigned char *p;
unsigned int added = 0,i,k,rptr,totalKnown = 0;
@@ -174,129 +175,134 @@ void Multicaster::send(
unsigned long idxbuf[8194];
unsigned long *indexes = idxbuf;
- Mutex::Lock _l(_groups_m);
- MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];
-
- if (!gs.members.empty()) {
- // Allocate a memory buffer if group is monstrous
- if (gs.members.size() > (sizeof(idxbuf) / sizeof(unsigned long)))
- indexes = new unsigned long[gs.members.size()];
-
- // Generate a random permutation of member indexes
- for(unsigned long i=0;i<gs.members.size();++i)
- indexes[i] = i;
- for(unsigned long i=(unsigned long)gs.members.size()-1;i>0;--i) {
- unsigned long j = (unsigned long)RR->node->prng() % (i + 1);
- unsigned long tmp = indexes[j];
- indexes[j] = indexes[i];
- indexes[i] = tmp;
+ try {
+ Mutex::Lock _l(_groups_m);
+ MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];
+
+ if (!gs.members.empty()) {
+ // Allocate a memory buffer if group is monstrous
+ if (gs.members.size() > (sizeof(idxbuf) / sizeof(unsigned long)))
+ indexes = new unsigned long[gs.members.size()];
+
+ // Generate a random permutation of member indexes
+ for(unsigned long i=0;i<gs.members.size();++i)
+ indexes[i] = i;
+ for(unsigned long i=(unsigned long)gs.members.size()-1;i>0;--i) {
+ unsigned long j = (unsigned long)RR->node->prng() % (i + 1);
+ unsigned long tmp = indexes[j];
+ indexes[j] = indexes[i];
+ indexes[i] = tmp;
+ }
}
- }
- if (gs.members.size() >= limit) {
- // Skip queue if we already have enough members to complete the send operation
- OutboundMulticast out;
-
- out.init(
- RR,
- now,
- nwid,
- com,
- limit,
- 1, // we'll still gather a little from peers to keep multicast list fresh
- src,
- mg,
- etherType,
- data,
- len);
-
- unsigned int count = 0;
-
- for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
- if (*ast != RR->identity.address()) {
- out.sendOnly(RR,*ast);
- if (++count >= limit)
- break;
+ if (gs.members.size() >= limit) {
+ // Skip queue if we already have enough members to complete the send operation
+ OutboundMulticast out;
+
+ out.init(
+ RR,
+ now,
+ nwid,
+ com,
+ limit,
+ 1, // we'll still gather a little from peers to keep multicast list fresh
+ src,
+ mg,
+ etherType,
+ data,
+ len);
+
+ unsigned int count = 0;
+
+ for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
+ if (*ast != RR->identity.address()) {
+ out.sendOnly(RR,*ast); // optimization: don't use dedup log if it's a one-pass send
+ if (++count >= limit)
+ break;
+ }
}
- }
- unsigned long idx = 0;
- while ((count < limit)&&(idx < gs.members.size())) {
- Address ma(gs.members[indexes[idx++]].address);
- if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
- out.sendOnly(RR,ma);
- ++count;
+ unsigned long idx = 0;
+ while ((count < limit)&&(idx < gs.members.size())) {
+ Address ma(gs.members[indexes[idx++]].address);
+ if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
+ out.sendOnly(RR,ma); // optimization: don't use dedup log if it's a one-pass send
+ ++count;
+ }
}
- }
- } else {
- unsigned int gatherLimit = (limit - (unsigned int)gs.members.size()) + 1;
-
- if ((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY) {
- gs.lastExplicitGather = now;
- SharedPtr<Peer> sn(RR->topology->getBestRoot());
- if (sn) {
- TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
-
- const CertificateOfMembership *com = (CertificateOfMembership *)0;
- SharedPtr<NetworkConfig> nconf;
- if (sn->needsOurNetworkMembershipCertificate(nwid,now,true)) {
- SharedPtr<Network> nw(RR->node->network(nwid));
- if (nw) {
- nconf = nw->config2();
- if (nconf)
- com = &(nconf->com());
+ } else {
+ unsigned int gatherLimit = (limit - (unsigned int)gs.members.size()) + 1;
+
+ if ((gs.members.empty())||((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY)) {
+ gs.lastExplicitGather = now;
+ SharedPtr<Peer> explicitGatherPeers[2];
+ explicitGatherPeers[0] = RR->topology->getBestRoot();
+ explicitGatherPeers[1] = RR->topology->getPeer(Network::controllerFor(nwid));
+ for(unsigned int k=0;k<2;++k) {
+ const SharedPtr<Peer> &p = explicitGatherPeers[k];
+ if (!p)
+ continue;
+ //TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
+
+ const CertificateOfMembership *com = (CertificateOfMembership *)0;
+ {
+ SharedPtr<Network> nw(RR->node->network(nwid));
+ if (nw) {
+ SharedPtr<NetworkConfig> nconf(nw->config2());
+ if ((nconf)&&(nconf->com())&&(nconf->isPrivate())&&(p->needsOurNetworkMembershipCertificate(nwid,now,true)))
+ com = &(nconf->com());
+ }
}
- }
- Packet outp(sn->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
- outp.append(nwid);
- outp.append((uint8_t)(com ? 0x01 : 0x00));
- mg.mac().appendTo(outp);
- outp.append((uint32_t)mg.adi());
- outp.append((uint32_t)gatherLimit);
- if (com)
- com->serialize(outp);
- outp.armor(sn->key(),true);
- sn->send(RR,outp.data(),outp.size(),now);
+ Packet outp(p->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
+ outp.append(nwid);
+ outp.append((uint8_t)(com ? 0x01 : 0x00));
+ mg.mac().appendTo(outp);
+ outp.append((uint32_t)mg.adi());
+ outp.append((uint32_t)gatherLimit);
+ if (com)
+ com->serialize(outp);
+ RR->sw->send(outp,true,0);
+ }
+ gatherLimit = 0;
}
- gatherLimit = 0;
- }
- gs.txQueue.push_back(OutboundMulticast());
- OutboundMulticast &out = gs.txQueue.back();
-
- out.init(
- RR,
- now,
- nwid,
- com,
- limit,
- gatherLimit,
- src,
- mg,
- etherType,
- data,
- len);
-
- unsigned int count = 0;
-
- for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
- if (*ast != RR->identity.address()) {
- out.sendAndLog(RR,*ast);
- if (++count >= limit)
- break;
+ gs.txQueue.push_back(OutboundMulticast());
+ OutboundMulticast &out = gs.txQueue.back();
+
+ out.init(
+ RR,
+ now,
+ nwid,
+ com,
+ limit,
+ gatherLimit,
+ src,
+ mg,
+ etherType,
+ data,
+ len);
+
+ unsigned int count = 0;
+
+ for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
+ if (*ast != RR->identity.address()) {
+ out.sendAndLog(RR,*ast);
+ if (++count >= limit)
+ break;
+ }
}
- }
- unsigned long idx = 0;
- while ((count < limit)&&(idx < gs.members.size())) {
- Address ma(gs.members[indexes[idx++]].address);
- if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
- out.sendAndLog(RR,ma);
- ++count;
+ unsigned long idx = 0;
+ while ((count < limit)&&(idx < gs.members.size())) {
+ Address ma(gs.members[indexes[idx++]].address);
+ if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
+ out.sendAndLog(RR,ma);
+ ++count;
+ }
}
}
- }
+ } catch ( ... ) {} // this is a sanity check to catch any failures and make sure indexes[] still gets deleted
// Free allocated memory buffer if any
if (indexes != idxbuf)
diff --git a/node/Multicaster.hpp b/node/Multicaster.hpp
index 898c4db7..8e6a7556 100644
--- a/node/Multicaster.hpp
+++ b/node/Multicaster.hpp
@@ -146,7 +146,7 @@ public:
* @return Number of addresses appended
* @throws std::out_of_range Buffer overflow writing to packet
*/
- unsigned int gather(const Address &queryingPeer,uint64_t nwid,const MulticastGroup &mg,Packet &appendTo,unsigned int limit) const;
+ unsigned int gather(const Address &queryingPeer,uint64_t nwid,const MulticastGroup &mg,Buffer<ZT_PROTO_MAX_PACKET_LENGTH> &appendTo,unsigned int limit) const;
/**
* Get subscribers to a multicast group
diff --git a/node/Network.cpp b/node/Network.cpp
index 9ce58c63..afbe1074 100644
--- a/node/Network.cpp
+++ b/node/Network.cpp
@@ -37,6 +37,7 @@
#include "Packet.hpp"
#include "Buffer.hpp"
#include "NetworkController.hpp"
+#include "Node.hpp"
#include "../version.h"
@@ -144,7 +145,15 @@ void Network::multicastUnsubscribe(const MulticastGroup &mg)
bool Network::tryAnnounceMulticastGroupsTo(const SharedPtr<Peer> &peer)
{
Mutex::Lock _l(_lock);
- return _tryAnnounceMulticastGroupsTo(RR->topology->rootAddresses(),_allMulticastGroups(),peer,RR->node->now());
+ if (
+ (_isAllowed(peer)) ||
+ (peer->address() == this->controller()) ||
+ (RR->topology->isRoot(peer->identity()))
+ ) {
+ _announceMulticastGroupsTo(peer->address(),_allMulticastGroups());
+ return true;
+ }
+ return false;
}
bool Network::applyConfiguration(const SharedPtr<NetworkConfig> &conf)
@@ -400,79 +409,80 @@ bool Network::_isAllowed(const SharedPtr<Peer> &peer) const
return false; // default position on any failure
}
-bool Network::_tryAnnounceMulticastGroupsTo(const std::vector<Address> &alwaysAddresses,const std::vector<MulticastGroup> &allMulticastGroups,const SharedPtr<Peer> &peer,uint64_t now) const
-{
- // assumes _lock is locked
- if (
- (_isAllowed(peer)) ||
- (peer->address() == this->controller()) ||
- (std::find(alwaysAddresses.begin(),alwaysAddresses.end(),peer->address()) != alwaysAddresses.end())
- ) {
-
- if ((_config)&&(_config->com())&&(!_config->isPublic())&&(peer->needsOurNetworkMembershipCertificate(_id,now,true))) {
- Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE);
- _config->com().serialize(outp);
- outp.armor(peer->key(),true);
- peer->send(RR,outp.data(),outp.size(),now);
- }
-
- {
- Packet outp(peer->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
-
- for(std::vector<MulticastGroup>::const_iterator mg(allMulticastGroups.begin());mg!=allMulticastGroups.end();++mg) {
- if ((outp.size() + 18) >= ZT_UDP_DEFAULT_PAYLOAD_MTU) {
- outp.armor(peer->key(),true);
- peer->send(RR,outp.data(),outp.size(),now);
- outp.reset(peer->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
- }
-
- // network ID, MAC, ADI
- outp.append((uint64_t)_id);
- mg->mac().appendTo(outp);
- outp.append((uint32_t)mg->adi());
- }
-
- if (outp.size() > ZT_PROTO_MIN_PACKET_LENGTH) {
- outp.armor(peer->key(),true);
- peer->send(RR,outp.data(),outp.size(),now);
- }
- }
-
- return true;
- }
- return false;
-}
-
-class _AnnounceMulticastGroupsToAll
+class _GetPeersThatNeedMulticastAnnouncement
{
public:
- _AnnounceMulticastGroupsToAll(const RuntimeEnvironment *renv,Network *nw) :
+ _GetPeersThatNeedMulticastAnnouncement(const RuntimeEnvironment *renv,Network *nw) :
_now(renv->node->now()),
- RR(renv),
+ _controller(nw->controller()),
_network(nw),
- _rootAddresses(renv->topology->rootAddresses()),
- _allMulticastGroups(nw->_allMulticastGroups())
+ _rootAddresses(renv->topology->rootAddresses())
{}
-
- inline void operator()(Topology &t,const SharedPtr<Peer> &p) { _network->_tryAnnounceMulticastGroupsTo(_rootAddresses,_allMulticastGroups,p,_now); }
-
+ inline void operator()(Topology &t,const SharedPtr<Peer> &p)
+ {
+ if (
+ (_network->_isAllowed(p)) ||
+ (p->address() == _controller) ||
+ (std::find(_rootAddresses.begin(),_rootAddresses.end(),p->address()) != _rootAddresses.end())
+ ) {
+ peers.push_back(p->address());
+ }
+ }
+ std::vector<Address> peers;
private:
uint64_t _now;
- const RuntimeEnvironment *RR;
+ Address _controller;
Network *_network;
std::vector<Address> _rootAddresses;
- std::vector<MulticastGroup> _allMulticastGroups;
};
void Network::_announceMulticastGroups()
{
// Assumes _lock is locked
- _AnnounceMulticastGroupsToAll afunc(RR,this);
- RR->topology->eachPeer<_AnnounceMulticastGroupsToAll &>(afunc);
+
+ _GetPeersThatNeedMulticastAnnouncement gpfunc(RR,this);
+ RR->topology->eachPeer<_GetPeersThatNeedMulticastAnnouncement &>(gpfunc);
+
+ std::vector<MulticastGroup> allMulticastGroups(_allMulticastGroups());
+ for(std::vector<Address>::const_iterator pa(gpfunc.peers.begin());pa!=gpfunc.peers.end();++pa)
+ _announceMulticastGroupsTo(*pa,allMulticastGroups);
+}
+
+void Network::_announceMulticastGroupsTo(const Address &peerAddress,const std::vector<MulticastGroup> &allMulticastGroups) const
+{
+ // Assumes _lock is locked
+
+ // We push COMs ahead of MULTICAST_LIKE since they're used for access control -- a COM is a public
+ // credential so "over-sharing" isn't really an issue (and we only do so with roots).
+ if ((_config)&&(_config->com())&&(!_config->isPublic())) {
+ Packet outp(peerAddress,RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE);
+ _config->com().serialize(outp);
+ RR->sw->send(outp,true,0);
+ }
+
+ {
+ Packet outp(peerAddress,RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
+
+ for(std::vector<MulticastGroup>::const_iterator mg(allMulticastGroups.begin());mg!=allMulticastGroups.end();++mg) {
+ if ((outp.size() + 18) >= ZT_UDP_DEFAULT_PAYLOAD_MTU) {
+ RR->sw->send(outp,true,0);
+ outp.reset(peerAddress,RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
+ }
+
+ // network ID, MAC, ADI
+ outp.append((uint64_t)_id);
+ mg->mac().appendTo(outp);
+ outp.append((uint32_t)mg->adi());
+ }
+
+ if (outp.size() > ZT_PROTO_MIN_PACKET_LENGTH)
+ RR->sw->send(outp,true,0);
+ }
}
std::vector<MulticastGroup> Network::_allMulticastGroups() const
{
// Assumes _lock is locked
+
std::vector<MulticastGroup> mgs;
mgs.reserve(_myMulticastGroups.size() + _multicastGroupsBehindMe.size() + 1);
mgs.insert(mgs.end(),_myMulticastGroups.begin(),_myMulticastGroups.end());
@@ -481,6 +491,7 @@ std::vector<MulticastGroup> Network::_allMulticastGroups() const
mgs.push_back(Network::BROADCAST);
std::sort(mgs.begin(),mgs.end());
mgs.erase(std::unique(mgs.begin(),mgs.end()),mgs.end());
+
return mgs;
}
diff --git a/node/Network.hpp b/node/Network.hpp
index f7939323..0effa8e2 100644
--- a/node/Network.hpp
+++ b/node/Network.hpp
@@ -56,7 +56,7 @@ namespace ZeroTier {
class RuntimeEnvironment;
class Peer;
-class _AnnounceMulticastGroupsToAll; // internal function object in Network.cpp
+class _GetPeersThatNeedMulticastAnnouncement;
/**
* A virtual LAN
@@ -64,7 +64,7 @@ class _AnnounceMulticastGroupsToAll; // internal function object in Network.cpp
class Network : NonCopyable
{
friend class SharedPtr<Network>;
- friend class _AnnounceMulticastGroupsToAll;
+ friend class _GetPeersThatNeedMulticastAnnouncement; // internal function object
public:
/**
@@ -344,6 +344,7 @@ private:
bool _isAllowed(const SharedPtr<Peer> &peer) const;
bool _tryAnnounceMulticastGroupsTo(const std::vector<Address> &rootAddresses,const std::vector<MulticastGroup> &allMulticastGroups,const SharedPtr<Peer> &peer,uint64_t now) const;
void _announceMulticastGroups();
+ void _announceMulticastGroupsTo(const Address &peerAddress,const std::vector<MulticastGroup> &allMulticastGroups) const;
std::vector<MulticastGroup> _allMulticastGroups() const;
const RuntimeEnvironment *RR;
diff --git a/node/NetworkConfig.cpp b/node/NetworkConfig.cpp
index cd32600f..35e23837 100644
--- a/node/NetworkConfig.cpp
+++ b/node/NetworkConfig.cpp
@@ -55,6 +55,9 @@ SharedPtr<NetworkConfig> NetworkConfig::createTestNetworkConfig(const Address &s
if ((ip & 0x000000ff) == 0x00000000) ip ^= 0x00000001; // or .0
nc->_staticIps.push_back(InetAddress(Utils::hton(ip),8));
+ // Assign an RFC4193-compliant IPv6 address -- will never collide
+ nc->_staticIps.push_back(InetAddress::makeIpv6rfc4193(ZT_TEST_NETWORK_ID,self.toInt()));
+
return nc;
}
diff --git a/node/Node.cpp b/node/Node.cpp
index d5cc50b9..f077424b 100644
--- a/node/Node.cpp
+++ b/node/Node.cpp
@@ -46,7 +46,8 @@
#include "Address.hpp"
#include "Identity.hpp"
#include "SelfAwareness.hpp"
-#include "Defaults.hpp"
+#include "Cluster.hpp"
+#include "DeferredPackets.hpp"
const struct sockaddr_storage ZT_SOCKADDR_NULL = {0};
@@ -64,8 +65,7 @@ Node::Node(
ZT_WirePacketSendFunction wirePacketSendFunction,
ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
- ZT_EventCallback eventCallback,
- const char *overrideRootTopology) :
+ ZT_EventCallback eventCallback) :
_RR(this),
RR(&_RR),
_uPtr(uptr),
@@ -82,35 +82,33 @@ Node::Node(
_lastPingCheck(0),
_lastHousekeepingRun(0)
{
- _newestVersionSeen[0] = ZEROTIER_ONE_VERSION_MAJOR;
- _newestVersionSeen[1] = ZEROTIER_ONE_VERSION_MINOR;
- _newestVersionSeen[2] = ZEROTIER_ONE_VERSION_REVISION;
_online = false;
// Use Salsa20 alone as a high-quality non-crypto PRNG
{
char foo[32];
Utils::getSecureRandom(foo,32);
- _prng.init(foo,256,foo,8);
+ _prng.init(foo,256,foo);
memset(_prngStream,0,sizeof(_prngStream));
- _prng.encrypt(_prngStream,_prngStream,sizeof(_prngStream));
+ _prng.encrypt12(_prngStream,_prngStream,sizeof(_prngStream));
}
- std::string idtmp(dataStoreGet("identity.secret"));
- if ((!idtmp.length())||(!RR->identity.fromString(idtmp))||(!RR->identity.hasPrivate())) {
- TRACE("identity.secret not found, generating...");
- RR->identity.generate();
- idtmp = RR->identity.toString(true);
- if (!dataStorePut("identity.secret",idtmp,true))
- throw std::runtime_error("unable to write identity.secret");
- }
- RR->publicIdentityStr = RR->identity.toString(false);
- RR->secretIdentityStr = RR->identity.toString(true);
-
- idtmp = dataStoreGet("identity.public");
- if (idtmp != RR->publicIdentityStr) {
- if (!dataStorePut("identity.public",RR->publicIdentityStr,false))
- throw std::runtime_error("unable to write identity.public");
+ {
+ std::string idtmp(dataStoreGet("identity.secret"));
+ if ((!idtmp.length())||(!RR->identity.fromString(idtmp))||(!RR->identity.hasPrivate())) {
+ TRACE("identity.secret not found, generating...");
+ RR->identity.generate();
+ idtmp = RR->identity.toString(true);
+ if (!dataStorePut("identity.secret",idtmp,true))
+ throw std::runtime_error("unable to write identity.secret");
+ }
+ RR->publicIdentityStr = RR->identity.toString(false);
+ RR->secretIdentityStr = RR->identity.toString(true);
+ idtmp = dataStoreGet("identity.public");
+ if (idtmp != RR->publicIdentityStr) {
+ if (!dataStorePut("identity.public",RR->publicIdentityStr,false))
+ throw std::runtime_error("unable to write identity.public");
+ }
}
try {
@@ -119,7 +117,9 @@ Node::Node(
RR->antiRec = new AntiRecursion();
RR->topology = new Topology(RR);
RR->sa = new SelfAwareness(RR);
+ RR->dp = new DeferredPackets(RR);
} catch ( ... ) {
+ delete RR->dp;
delete RR->sa;
delete RR->topology;
delete RR->antiRec;
@@ -128,33 +128,25 @@ Node::Node(
throw;
}
- Dictionary rt;
- if (overrideRootTopology) {
- rt.fromString(std::string(overrideRootTopology));
- } else {
- std::string rttmp(dataStoreGet("root-topology"));
- if (rttmp.length() > 0) {
- rt.fromString(rttmp);
- if (!Topology::authenticateRootTopology(rt))
- rt.clear();
- }
- if ((!rt.size())||(!rt.contains("rootservers")))
- rt.fromString(ZT_DEFAULTS.defaultRootTopology);
- }
- RR->topology->setRootServers(Dictionary(rt.get("rootservers","")));
-
postEvent(ZT_EVENT_UP);
}
Node::~Node()
{
Mutex::Lock _l(_networks_m);
- _networks.clear(); // ensure that networks are destroyed before shutdown
+
+	_networks.clear(); // ensure that networks are destroyed before shutdown
+
+ RR->dpEnabled = 0;
+ delete RR->dp;
delete RR->sa;
delete RR->topology;
delete RR->antiRec;
delete RR->mc;
delete RR->sw;
+#ifdef ZT_ENABLE_CLUSTER
+ delete RR->cluster;
+#endif
}
ZT_ResultCode Node::processWirePacket(
@@ -197,29 +189,91 @@ public:
RR(renv),
_now(now),
_relays(relays),
- _rootAddresses(RR->topology->rootAddresses())
+ _world(RR->topology->world())
{
}
- uint64_t lastReceiveFromUpstream;
+ uint64_t lastReceiveFromUpstream; // tracks last time we got a packet from an 'upstream' peer like a root or a relay
inline void operator()(Topology &t,const SharedPtr<Peer> &p)
{
- bool isRelay = false;
- for(std::vector< std::pair<Address,InetAddress> >::const_iterator r(_relays.begin());r!=_relays.end();++r) {
- if (r->first == p->address()) {
- isRelay = true;
+ bool upstream = false;
+ InetAddress stableEndpoint4,stableEndpoint6;
+
+ // If this is a world root, pick (if possible) both an IPv4 and an IPv6 stable endpoint to use if link isn't currently alive.
+ for(std::vector<World::Root>::const_iterator r(_world.roots().begin());r!=_world.roots().end();++r) {
+ if (r->identity.address() == p->address()) {
+ upstream = true;
+ for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)r->stableEndpoints.size();++k) {
+ const InetAddress &addr = r->stableEndpoints[ptr++ % r->stableEndpoints.size()];
+ if (!stableEndpoint4) {
+ if (addr.ss_family == AF_INET)
+ stableEndpoint4 = addr;
+ }
+ if (!stableEndpoint6) {
+ if (addr.ss_family == AF_INET6)
+ stableEndpoint6 = addr;
+ }
+ }
break;
}
}
- if ((isRelay)||(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->address()) != _rootAddresses.end())) {
- p->doPingAndKeepalive(RR,_now);
- if (p->lastReceive() > lastReceiveFromUpstream)
- lastReceiveFromUpstream = p->lastReceive();
- } else {
- if (p->alive(_now))
- p->doPingAndKeepalive(RR,_now);
+ if (!upstream) {
+ // If I am a root server, only ping other root servers -- roots don't ping "down"
+ // since that would just be a waste of bandwidth and could potentially cause route
+ // flapping in Cluster mode.
+ if (RR->topology->amRoot())
+ return;
+
+ // Check for network preferred relays, also considered 'upstream' and thus always
+ // pinged to keep links up. If they have stable addresses we will try them there.
+ for(std::vector< std::pair<Address,InetAddress> >::const_iterator r(_relays.begin());r!=_relays.end();++r) {
+ if (r->first == p->address()) {
+ if (r->second.ss_family == AF_INET)
+ stableEndpoint4 = r->second;
+ else if (r->second.ss_family == AF_INET6)
+ stableEndpoint6 = r->second;
+ upstream = true;
+ break;
+ }
+ }
+ }
+
+ if (upstream) {
+ // "Upstream" devices are roots and relays and get special treatment -- they stay alive
+ // forever and we try to keep (if available) both IPv4 and IPv6 channels open to them.
+ bool needToContactIndirect = true;
+ if (p->doPingAndKeepalive(RR,_now,AF_INET)) {
+ needToContactIndirect = false;
+ } else {
+ if (stableEndpoint4) {
+ needToContactIndirect = false;
+ p->sendHELLO(RR,InetAddress(),stableEndpoint4,_now);
+ }
+ }
+ if (p->doPingAndKeepalive(RR,_now,AF_INET6)) {
+ needToContactIndirect = false;
+ } else {
+ if (stableEndpoint6) {
+ needToContactIndirect = false;
+ p->sendHELLO(RR,InetAddress(),stableEndpoint6,_now);
+ }
+ }
+
+ if (needToContactIndirect) {
+ // If this is an upstream and we have no stable endpoint for either IPv4 or IPv6,
+ // send a NOP indirectly if possible to see if we can get to this peer in any
+ // way whatsoever. This will e.g. find network preferred relays that lack
+ // stable endpoints by using root servers.
+ Packet outp(p->address(),RR->identity.address(),Packet::VERB_NOP);
+ RR->sw->send(outp,true,0);
+ }
+
+ lastReceiveFromUpstream = std::max(p->lastReceive(),lastReceiveFromUpstream);
+ } else if (p->activelyTransferringFrames(_now)) {
+ // Normal nodes get their preferred link kept alive if the node has generated frame traffic recently
+ p->doPingAndKeepalive(RR,_now,0);
}
}
@@ -227,7 +281,7 @@ private:
const RuntimeEnvironment *RR;
uint64_t _now;
const std::vector< std::pair<Address,InetAddress> > &_relays;
- std::vector<Address> _rootAddresses;
+ World _world;
};
ZT_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
@@ -259,24 +313,13 @@ ZT_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextB
for(std::vector< SharedPtr<Network> >::const_iterator n(needConfig.begin());n!=needConfig.end();++n)
(*n)->requestConfiguration();
- // Attempt to contact network preferred relays that we don't have direct links to
- std::sort(networkRelays.begin(),networkRelays.end());
- networkRelays.erase(std::unique(networkRelays.begin(),networkRelays.end()),networkRelays.end());
- for(std::vector< std::pair<Address,InetAddress> >::const_iterator nr(networkRelays.begin());nr!=networkRelays.end();++nr) {
- if (nr->second) {
- SharedPtr<Peer> rp(RR->topology->getPeer(nr->first));
- if ((rp)&&(!rp->hasActiveDirectPath(now)))
- rp->attemptToContactAt(RR,InetAddress(),nr->second,now);
- }
- }
-
- // Ping living or root server/relay peers
+ // Do pings and keepalives
_PingPeersThatNeedPing pfunc(RR,now,networkRelays);
RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);
// Update online status, post status change as event
- bool oldOnline = _online;
- _online = ((now - pfunc.lastReceiveFromUpstream) < ZT_PEER_ACTIVITY_TIMEOUT);
+ const bool oldOnline = _online;
+ _online = (((now - pfunc.lastReceiveFromUpstream) < ZT_PEER_ACTIVITY_TIMEOUT)||(RR->topology->amRoot()));
if (oldOnline != _online)
postEvent(_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
} catch ( ... ) {
@@ -298,7 +341,18 @@ ZT_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextB
}
try {
- *nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min(timeUntilNextPingCheck,RR->sw->doTimerTasks(now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
+#ifdef ZT_ENABLE_CLUSTER
+ // If clustering is enabled we have to call cluster->doPeriodicTasks() very often, so we override normal timer deadline behavior
+ if (RR->cluster) {
+ RR->sw->doTimerTasks(now);
+ RR->cluster->doPeriodicTasks();
+ *nextBackgroundTaskDeadline = now + ZT_CLUSTER_PERIODIC_TASK_PERIOD; // this is really short so just tick at this rate
+ } else {
+#endif
+ *nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min(timeUntilNextPingCheck,RR->sw->doTimerTasks(now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
+#ifdef ZT_ENABLE_CLUSTER
+ }
+#endif
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
@@ -355,6 +409,8 @@ uint64_t Node::address() const
void Node::status(ZT_NodeStatus *status) const
{
status->address = RR->identity.address().toInt();
+ status->worldId = RR->topology->worldId();
+ status->worldTimestamp = RR->topology->worldTimestamp();
status->publicIdentity = RR->publicIdentityStr.c_str();
status->secretIdentity = RR->secretIdentityStr.c_str();
status->online = _online ? 1 : 0;
@@ -389,14 +445,13 @@ ZT_PeerList *Node::peers() const
p->latency = pi->second->latency();
p->role = RR->topology->isRoot(pi->second->identity()) ? ZT_PEER_ROLE_ROOT : ZT_PEER_ROLE_LEAF;
- std::vector<RemotePath> paths(pi->second->paths());
- RemotePath *bestPath = pi->second->getBestPath(_now);
+ std::vector<Path> paths(pi->second->paths());
+ Path *bestPath = pi->second->getBestPath(_now);
p->pathCount = 0;
- for(std::vector<RemotePath>::iterator path(paths.begin());path!=paths.end();++path) {
+ for(std::vector<Path>::iterator path(paths.begin());path!=paths.end();++path) {
memcpy(&(p->paths[p->pathCount].address),&(path->address()),sizeof(struct sockaddr_storage));
p->paths[p->pathCount].lastSend = path->lastSend();
p->paths[p->pathCount].lastReceive = path->lastReceived();
- p->paths[p->pathCount].fixed = path->fixed() ? 1 : 0;
p->paths[p->pathCount].active = path->active(_now) ? 1 : 0;
p->paths[p->pathCount].preferred = ((bestPath)&&(*path == *bestPath)) ? 1 : 0;
++p->pathCount;
@@ -441,11 +496,11 @@ void Node::freeQueryResult(void *qr)
::free(qr);
}
-int Node::addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust)
+int Node::addLocalInterfaceAddress(const struct sockaddr_storage *addr)
{
if (Path::isAddressValidForPath(*(reinterpret_cast<const InetAddress *>(addr)))) {
Mutex::Lock _l(_directPaths_m);
- _directPaths.push_back(Path(*(reinterpret_cast<const InetAddress *>(addr)),metric,(Path::Trust)trust));
+ _directPaths.push_back(*(reinterpret_cast<const InetAddress *>(addr)));
std::sort(_directPaths.begin(),_directPaths.end());
_directPaths.erase(std::unique(_directPaths.begin(),_directPaths.end()),_directPaths.end());
return 1;
@@ -482,7 +537,7 @@ ZT_ResultCode Node::circuitTestBegin(ZT_CircuitTest *test,void (*reportCallback)
outp.append((uint16_t)0);
C25519::Signature sig(RR->identity.sign(reinterpret_cast<const char *>(outp.data()) + ZT_PACKET_IDX_PAYLOAD,outp.size() - ZT_PACKET_IDX_PAYLOAD));
outp.append((uint16_t)sig.size());
- outp.append(sig.data,sig.size());
+ outp.append(sig.data,(unsigned int)sig.size());
outp.append((uint16_t)0); // originator doesn't need an extra credential, since it's the originator
for(unsigned int h=1;h<test->hopCount;++h) {
outp.append((uint8_t)0);
@@ -494,7 +549,7 @@ ZT_ResultCode Node::circuitTestBegin(ZT_CircuitTest *test,void (*reportCallback)
for(unsigned int a=0;a<test->hops[0].breadth;++a) {
outp.newInitializationVector();
outp.setDestination(Address(test->hops[0].addresses[a]));
- RR->sw->send(outp,true,test->credentialNetworkId);
+ RR->sw->send(outp,true,0);
}
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL; // probably indicates FIFO too big for packet
@@ -522,13 +577,93 @@ void Node::circuitTestEnd(ZT_CircuitTest *test)
}
}
+ZT_ResultCode Node::clusterInit(
+ unsigned int myId,
+ const struct sockaddr_storage *zeroTierPhysicalEndpoints,
+ unsigned int numZeroTierPhysicalEndpoints,
+ int x,
+ int y,
+ int z,
+ void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
+ void *sendFunctionArg,
+ int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
+ void *addressToLocationFunctionArg)
+{
+#ifdef ZT_ENABLE_CLUSTER
+ if (RR->cluster)
+ return ZT_RESULT_ERROR_BAD_PARAMETER;
+
+ std::vector<InetAddress> eps;
+ for(unsigned int i=0;i<numZeroTierPhysicalEndpoints;++i)
+ eps.push_back(InetAddress(zeroTierPhysicalEndpoints[i]));
+ std::sort(eps.begin(),eps.end());
+ RR->cluster = new Cluster(RR,myId,eps,x,y,z,sendFunction,sendFunctionArg,addressToLocationFunction,addressToLocationFunctionArg);
+
+ return ZT_RESULT_OK;
+#else
+ return ZT_RESULT_ERROR_UNSUPPORTED_OPERATION;
+#endif
+}
+
+ZT_ResultCode Node::clusterAddMember(unsigned int memberId)
+{
+#ifdef ZT_ENABLE_CLUSTER
+ if (!RR->cluster)
+ return ZT_RESULT_ERROR_BAD_PARAMETER;
+ RR->cluster->addMember((uint16_t)memberId);
+ return ZT_RESULT_OK;
+#else
+ return ZT_RESULT_ERROR_UNSUPPORTED_OPERATION;
+#endif
+}
+
+void Node::clusterRemoveMember(unsigned int memberId)
+{
+#ifdef ZT_ENABLE_CLUSTER
+ if (RR->cluster)
+ RR->cluster->removeMember((uint16_t)memberId);
+#endif
+}
+
+void Node::clusterHandleIncomingMessage(const void *msg,unsigned int len)
+{
+#ifdef ZT_ENABLE_CLUSTER
+ if (RR->cluster)
+ RR->cluster->handleIncomingStateMessage(msg,len);
+#endif
+}
+
+void Node::clusterStatus(ZT_ClusterStatus *cs)
+{
+ if (!cs)
+ return;
+#ifdef ZT_ENABLE_CLUSTER
+ if (RR->cluster)
+ RR->cluster->status(*cs);
+ else
+#endif
+ memset(cs,0,sizeof(ZT_ClusterStatus));
+}
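
A hedged sketch of how an embedding host might drive the new cluster entry points; the callbacks are placeholders and only the signatures introduced in this commit are relied on (geo coordinates are passed as zero, i.e. unknown, in this sketch).

#include "Node.hpp" // for ZeroTier::Node and the ZT_ResultCode values used below

static void exampleClusterSend(void *arg,unsigned int toMemberId,const void *msg,unsigned int len)
{
	// Host-specific transport: deliver msg/len to cluster member toMemberId.
	(void)arg; (void)toMemberId; (void)msg; (void)len;
}

static int exampleAddressToLocation(void *arg,const struct sockaddr_storage *addr,int *x,int *y,int *z)
{
	(void)arg; (void)addr; *x = 0; *y = 0; *z = 0;
	return 0; // this sketch reports "no location data"
}

inline void clusterSetupExample(ZeroTier::Node *node,const struct sockaddr_storage *endpoints,unsigned int numEndpoints)
{
	if (node->clusterInit(0,endpoints,numEndpoints,0,0,0,&exampleClusterSend,(void *)0,&exampleAddressToLocation,(void *)0) == ZT_RESULT_OK) {
		node->clusterAddMember(1); // tell member 0 about member 1
		// Incoming cluster traffic from other members is then fed to:
		//   node->clusterHandleIncomingMessage(msg,len);
	}
}
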
+
+void Node::backgroundThreadMain()
+{
+ ++RR->dpEnabled;
+ for(;;) {
+ try {
+ if (RR->dp->process() < 0)
+ break;
+ } catch ( ... ) {} // sanity check -- should not throw
+ }
+ --RR->dpEnabled;
+}
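
backgroundThreadMain() is intended to run on one or more host-supplied threads; it spins on RR->dp->process() until that call signals shutdown by returning a negative value (the shutdown trigger itself is outside this hunk). A minimal hosting sketch, assuming the host arranges that shutdown before joining:

#include "Node.hpp"
#include <thread>

inline std::thread startDeferredDecodeThread(ZeroTier::Node *node)
{
	// The thread blocks inside backgroundThreadMain() until DeferredPackets::process()
	// returns < 0; join it only after shutdown has been initiated elsewhere.
	return std::thread([node]() { node->backgroundThreadMain(); });
}
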
+
/****************************************************************************/
/* Node methods used only within node/ */
/****************************************************************************/
std::string Node::dataStoreGet(const char *name)
{
- char buf[16384];
+ char buf[1024];
std::string r;
unsigned long olen = 0;
do {
@@ -540,16 +675,6 @@ std::string Node::dataStoreGet(const char *name)
return r;
}
-void Node::postNewerVersionIfNewer(unsigned int major,unsigned int minor,unsigned int rev)
-{
- if (Utils::compareVersion(major,minor,rev,_newestVersionSeen[0],_newestVersionSeen[1],_newestVersionSeen[2]) > 0) {
- _newestVersionSeen[0] = major;
- _newestVersionSeen[1] = minor;
- _newestVersionSeen[2] = rev;
- this->postEvent(ZT_EVENT_SAW_MORE_RECENT_VERSION,(const void *)_newestVersionSeen);
- }
-}
-
#ifdef ZT_TRACE
void Node::postTrace(const char *module,unsigned int line,const char *fmt,...)
{
@@ -587,7 +712,7 @@ uint64_t Node::prng()
{
unsigned int p = (++_prngStreamPtr % (sizeof(_prngStream) / sizeof(uint64_t)));
if (!p)
- _prng.encrypt(_prngStream,_prngStream,sizeof(_prngStream));
+ _prng.encrypt12(_prngStream,_prngStream,sizeof(_prngStream));
return _prngStream[p];
}
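
The refactor above treats Salsa20/12 purely as a keystream generator: a random key at startup, then a zero buffer is repeatedly re-encrypted and handed out 64 bits at a time, refilling whenever the index wraps. A self-contained toy with the same refill-on-wrap shape (the stand-in cipher below is NOT Salsa20 and is for illustration only):

#include <stdint.h>
#include <string.h>

struct ToyKeystream // stand-in for Salsa20::encrypt12() in this pattern
{
	uint64_t ctr;
	ToyKeystream() : ctr(0) {}
	void encrypt(void *buf,unsigned int len)
	{
		uint8_t *b = reinterpret_cast<uint8_t *>(buf);
		for(unsigned int i=0;i<len;++i)
			b[i] ^= (uint8_t)((ctr += 0x9e3779b97f4a7c15ULL) >> 56);
	}
};

struct ToyPrng
{
	ToyKeystream ks;
	uint64_t stream[64];  // plays the role of _prngStream
	unsigned int ptr;     // plays the role of _prngStreamPtr
	ToyPrng() : ptr(0) { memset(stream,0,sizeof(stream)); ks.encrypt(stream,sizeof(stream)); }

	uint64_t next()
	{
		const unsigned int p = (++ptr % (sizeof(stream) / sizeof(uint64_t)));
		if (!p) // wrapped around: overwrite the buffer with fresh keystream, as prng() does above
			ks.encrypt(stream,sizeof(stream));
		return stream[p];
	}
};
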
@@ -622,12 +747,11 @@ enum ZT_ResultCode ZT_Node_new(
ZT_WirePacketSendFunction wirePacketSendFunction,
ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
- ZT_EventCallback eventCallback,
- const char *overrideRootTopology)
+ ZT_EventCallback eventCallback)
{
*node = (ZT_Node *)0;
try {
- *node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(now,uptr,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,eventCallback,overrideRootTopology));
+ *node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(now,uptr,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,eventCallback));
return ZT_RESULT_OK;
} catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
@@ -659,8 +783,7 @@ enum ZT_ResultCode ZT_Node_processWirePacket(
} catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
- reinterpret_cast<ZeroTier::Node *>(node)->postEvent(ZT_EVENT_INVALID_PACKET,(const void *)remoteAddress);
- return ZT_RESULT_OK;
+ return ZT_RESULT_OK; // "OK" since invalid packets are simply dropped, but the system is still up
}
}
@@ -786,6 +909,22 @@ void ZT_Node_freeQueryResult(ZT_Node *node,void *qr)
} catch ( ... ) {}
}
+int ZT_Node_addLocalInterfaceAddress(ZT_Node *node,const struct sockaddr_storage *addr)
+{
+ try {
+ return reinterpret_cast<ZeroTier::Node *>(node)->addLocalInterfaceAddress(addr);
+ } catch ( ... ) {
+ return 0;
+ }
+}
+
+void ZT_Node_clearLocalInterfaceAddresses(ZT_Node *node)
+{
+ try {
+ reinterpret_cast<ZeroTier::Node *>(node)->clearLocalInterfaceAddresses();
+ } catch ( ... ) {}
+}
+
void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkControllerInstance)
{
try {
@@ -793,7 +932,7 @@ void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkControllerInstance)
} catch ( ... ) {}
}
-ZT_ResultCode ZT_Node_circuitTestBegin(ZT_Node *node,ZT_CircuitTest *test,void (*reportCallback)(ZT_Node *,ZT_CircuitTest *,const ZT_CircuitTestReport *))
+enum ZT_ResultCode ZT_Node_circuitTestBegin(ZT_Node *node,ZT_CircuitTest *test,void (*reportCallback)(ZT_Node *,ZT_CircuitTest *,const ZT_CircuitTestReport *))
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->circuitTestBegin(test,reportCallback);
@@ -809,19 +948,60 @@ void ZT_Node_circuitTestEnd(ZT_Node *node,ZT_CircuitTest *test)
} catch ( ... ) {}
}
-int ZT_Node_addLocalInterfaceAddress(ZT_Node *node,const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust)
+enum ZT_ResultCode ZT_Node_clusterInit(
+ ZT_Node *node,
+ unsigned int myId,
+ const struct sockaddr_storage *zeroTierPhysicalEndpoints,
+ unsigned int numZeroTierPhysicalEndpoints,
+ int x,
+ int y,
+ int z,
+ void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
+ void *sendFunctionArg,
+ int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
+ void *addressToLocationFunctionArg)
{
try {
- return reinterpret_cast<ZeroTier::Node *>(node)->addLocalInterfaceAddress(addr,metric,trust);
+ return reinterpret_cast<ZeroTier::Node *>(node)->clusterInit(myId,zeroTierPhysicalEndpoints,numZeroTierPhysicalEndpoints,x,y,z,sendFunction,sendFunctionArg,addressToLocationFunction,addressToLocationFunctionArg);
} catch ( ... ) {
- return 0;
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
-void ZT_Node_clearLocalInterfaceAddresses(ZT_Node *node)
+enum ZT_ResultCode ZT_Node_clusterAddMember(ZT_Node *node,unsigned int memberId)
{
try {
- reinterpret_cast<ZeroTier::Node *>(node)->clearLocalInterfaceAddresses();
+ return reinterpret_cast<ZeroTier::Node *>(node)->clusterAddMember(memberId);
+ } catch ( ... ) {
+ return ZT_RESULT_FATAL_ERROR_INTERNAL;
+ }
+}
+
+void ZT_Node_clusterRemoveMember(ZT_Node *node,unsigned int memberId)
+{
+ try {
+ reinterpret_cast<ZeroTier::Node *>(node)->clusterRemoveMember(memberId);
+ } catch ( ... ) {}
+}
+
+void ZT_Node_clusterHandleIncomingMessage(ZT_Node *node,const void *msg,unsigned int len)
+{
+ try {
+ reinterpret_cast<ZeroTier::Node *>(node)->clusterHandleIncomingMessage(msg,len);
+ } catch ( ... ) {}
+}
+
+void ZT_Node_clusterStatus(ZT_Node *node,ZT_ClusterStatus *cs)
+{
+ try {
+ reinterpret_cast<ZeroTier::Node *>(node)->clusterStatus(cs);
+ } catch ( ... ) {}
+}
+
+void ZT_Node_backgroundThreadMain(ZT_Node *node)
+{
+ try {
+ reinterpret_cast<ZeroTier::Node *>(node)->backgroundThreadMain();
} catch ( ... ) {}
}
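
Taken together, the new C API above gives an embedding service everything it needs for clustering: ZT_Node_clusterInit() supplies the member ID, this member's physical endpoints, optional coordinates and two callbacks; ZT_Node_clusterAddMember() announces other members; ZT_Node_clusterHandleIncomingMessage() feeds back frames received from them; and ZT_Node_backgroundThreadMain() runs the deferred packet loop. A minimal usage sketch, assuming a node already created with ZT_Node_new() and hypothetical clusterSend()/geoLookup() callbacks supplied by the embedder (neither callback is part of this commit):

    #include <ZeroTierOne.h> // public C API header from include/

    // Hypothetical embedder callbacks.
    static void clusterSend(void *arg,unsigned int toMemberId,const void *msg,unsigned int len)
    {
        // Deliver 'msg' over whatever backplane channel links this member to 'toMemberId'.
    }
    static int geoLookup(void *arg,const struct sockaddr_storage *addr,int *x,int *y,int *z)
    {
        return 0; // 0 == no location known; nonzero would mean x/y/z were filled in
    }

    static void startClustering(ZT_Node *node,const struct sockaddr_storage *myEndpoints,unsigned int numEndpoints)
    {
        if (ZT_Node_clusterInit(node,0,myEndpoints,numEndpoints,0,0,0,&clusterSend,(void *)0,&geoLookup,(void *)0) == ZT_RESULT_OK) {
            ZT_Node_clusterAddMember(node,1); // member 0 learns that member 1 exists
            // Frames arriving from other members are handed back with:
            //   ZT_Node_clusterHandleIncomingMessage(node,msg,len);
        }
    }
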
diff --git a/node/Node.hpp b/node/Node.hpp
index 20c54471..15295139 100644
--- a/node/Node.hpp
+++ b/node/Node.hpp
@@ -71,8 +71,7 @@ public:
ZT_WirePacketSendFunction wirePacketSendFunction,
ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
- ZT_EventCallback eventCallback,
- const char *overrideRootTopology);
+ ZT_EventCallback eventCallback);
~Node();
@@ -106,15 +105,39 @@ public:
ZT_VirtualNetworkConfig *networkConfig(uint64_t nwid) const;
ZT_VirtualNetworkList *networks() const;
void freeQueryResult(void *qr);
- int addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust);
+ int addLocalInterfaceAddress(const struct sockaddr_storage *addr);
void clearLocalInterfaceAddresses();
void setNetconfMaster(void *networkControllerInstance);
ZT_ResultCode circuitTestBegin(ZT_CircuitTest *test,void (*reportCallback)(ZT_Node *,ZT_CircuitTest *,const ZT_CircuitTestReport *));
void circuitTestEnd(ZT_CircuitTest *test);
+ ZT_ResultCode clusterInit(
+ unsigned int myId,
+ const struct sockaddr_storage *zeroTierPhysicalEndpoints,
+ unsigned int numZeroTierPhysicalEndpoints,
+ int x,
+ int y,
+ int z,
+ void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
+ void *sendFunctionArg,
+ int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
+ void *addressToLocationFunctionArg);
+ ZT_ResultCode clusterAddMember(unsigned int memberId);
+ void clusterRemoveMember(unsigned int memberId);
+ void clusterHandleIncomingMessage(const void *msg,unsigned int len);
+ void clusterStatus(ZT_ClusterStatus *cs);
+ void backgroundThreadMain();
// Internal functions ------------------------------------------------------
/**
+ * Convenience threadMain() for easy background thread launch
+ *
+ * This allows background threads to be launched with Thread::start
+ * that will run against this node.
+ */
+ inline void threadMain() throw() { this->backgroundThreadMain(); }
+
+ /**
* @return Time as of last call to run()
*/
inline uint64_t now() const throw() { return _now; }
@@ -126,9 +149,10 @@ public:
* @param addr Destination address
* @param data Packet data
* @param len Packet length
+ * @param ttl Desired TTL (default: 0 for unchanged/default TTL)
* @return True if packet appears to have been sent
*/
- inline bool putPacket(const InetAddress &localAddress,const InetAddress &addr,const void *data,unsigned int len)
+ inline bool putPacket(const InetAddress &localAddress,const InetAddress &addr,const void *data,unsigned int len,unsigned int ttl = 0)
{
return (_wirePacketSendFunction(
reinterpret_cast<ZT_Node *>(this),
@@ -136,7 +160,8 @@ public:
reinterpret_cast<const struct sockaddr_storage *>(&localAddress),
reinterpret_cast<const struct sockaddr_storage *>(&addr),
data,
- len) == 0);
+ len,
+ ttl) == 0);
}
/**
@@ -193,7 +218,7 @@ public:
/**
* @return Potential direct paths to me a.k.a. local interface addresses
*/
- inline std::vector<Path> directPaths() const
+ inline std::vector<InetAddress> directPaths() const
{
Mutex::Lock _l(_directPaths_m);
return _directPaths;
@@ -226,11 +251,6 @@ public:
*/
inline bool online() const throw() { return _online; }
- /**
- * If this version is newer than the newest we've seen, post a new version seen event
- */
- void postNewerVersionIfNewer(unsigned int major,unsigned int minor,unsigned int rev);
-
#ifdef ZT_TRACE
void postTrace(const char *module,unsigned int line,const char *fmt,...);
#endif
@@ -276,7 +296,7 @@ private:
std::vector< ZT_CircuitTest * > _circuitTests;
Mutex _circuitTests_m;
- std::vector<Path> _directPaths;
+ std::vector<InetAddress> _directPaths;
Mutex _directPaths_m;
Mutex _backgroundTasksLock;
@@ -288,7 +308,6 @@ private:
uint64_t _now;
uint64_t _lastPingCheck;
uint64_t _lastHousekeepingRun;
- unsigned int _newestVersionSeen[3]; // major, minor, revision
bool _online;
};
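
The threadMain() shim added above exists purely so a Node pointer can be handed straight to the OS-layer thread helper its comment mentions. A rough sketch of the intended call pattern, with the osdep Thread wrapper's start()/join() signatures assumed rather than taken from this diff:

    static void launchBackgroundThread(ZeroTier::Node *n)
    {
        // Thread::start() is assumed to invoke n->threadMain() on the new thread, which
        // forwards to backgroundThreadMain() and loops until the deferred packet
        // processor reports shutdown (process() < 0).
        ZeroTier::Thread t(ZeroTier::Thread::start(n));
        ZeroTier::Thread::join(t); // block here until the background loop exits
    }
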
diff --git a/node/Packet.cpp b/node/Packet.cpp
index f69e4e79..f11ae1b8 100644
--- a/node/Packet.cpp
+++ b/node/Packet.cpp
@@ -45,7 +45,7 @@ const char *Packet::verbString(Verb v)
case VERB_RENDEZVOUS: return "RENDEZVOUS";
case VERB_FRAME: return "FRAME";
case VERB_EXT_FRAME: return "EXT_FRAME";
- case VERB_P5_MULTICAST_FRAME: return "P5_MULTICAST_FRAME";
+ case VERB_ECHO: return "ECHO";
case VERB_MULTICAST_LIKE: return "MULTICAST_LIKE";
case VERB_NETWORK_MEMBERSHIP_CERTIFICATE: return "NETWORK_MEMBERSHIP_CERTIFICATE";
case VERB_NETWORK_CONFIG_REQUEST: return "NETWORK_CONFIG_REQUEST";
@@ -56,6 +56,7 @@ const char *Packet::verbString(Verb v)
case VERB_PUSH_DIRECT_PATHS: return "PUSH_DIRECT_PATHS";
case VERB_CIRCUIT_TEST: return "CIRCUIT_TEST";
case VERB_CIRCUIT_TEST_REPORT: return "CIRCUIT_TEST_REPORT";
+ case VERB_REQUEST_PROOF_OF_WORK: return "REQUEST_PROOF_OF_WORK";
}
return "(unknown)";
}
@@ -91,14 +92,14 @@ void Packet::armor(const void *key,bool encryptPayload)
setCipher(encryptPayload ? ZT_PROTO_CIPHER_SUITE__C25519_POLY1305_SALSA2012 : ZT_PROTO_CIPHER_SUITE__C25519_POLY1305_NONE);
_salsa20MangleKey((const unsigned char *)key,mangledKey);
- Salsa20 s20(mangledKey,256,field(ZT_PACKET_IDX_IV,8),ZT_PROTO_SALSA20_ROUNDS);
+ Salsa20 s20(mangledKey,256,field(ZT_PACKET_IDX_IV,8)/*,ZT_PROTO_SALSA20_ROUNDS*/);
// MAC key is always the first 32 bytes of the Salsa20 key stream
// This is the same construction DJB's NaCl library uses
- s20.encrypt(ZERO_KEY,macKey,sizeof(macKey));
+ s20.encrypt12(ZERO_KEY,macKey,sizeof(macKey));
if (encryptPayload)
- s20.encrypt(payload,payload,payloadLen);
+ s20.encrypt12(payload,payload,payloadLen);
Poly1305::compute(mac,payload,payloadLen,macKey);
memcpy(field(ZT_PACKET_IDX_MAC,8),mac,8);
@@ -115,15 +116,15 @@ bool Packet::dearmor(const void *key)
if ((cs == ZT_PROTO_CIPHER_SUITE__C25519_POLY1305_NONE)||(cs == ZT_PROTO_CIPHER_SUITE__C25519_POLY1305_SALSA2012)) {
_salsa20MangleKey((const unsigned char *)key,mangledKey);
- Salsa20 s20(mangledKey,256,field(ZT_PACKET_IDX_IV,8),ZT_PROTO_SALSA20_ROUNDS);
+ Salsa20 s20(mangledKey,256,field(ZT_PACKET_IDX_IV,8)/*,ZT_PROTO_SALSA20_ROUNDS*/);
- s20.encrypt(ZERO_KEY,macKey,sizeof(macKey));
+ s20.encrypt12(ZERO_KEY,macKey,sizeof(macKey));
Poly1305::compute(mac,payload,payloadLen,macKey);
if (!Utils::secureEq(mac,field(ZT_PACKET_IDX_MAC,8),8))
return false;
if (cs == ZT_PROTO_CIPHER_SUITE__C25519_POLY1305_SALSA2012)
- s20.decrypt(payload,payload,payloadLen);
+ s20.decrypt12(payload,payload,payloadLen);
return true;
} else return false; // unrecognized cipher suite
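
The armor()/dearmor() hunks above pin packet crypto to Salsa20/12 via the new encrypt12()/decrypt12() entry points; the MAC construction itself is unchanged. Condensed into one place (using the same locals as armor() above, with ZERO_KEY an all-zero input block), both routines amount to:

    unsigned char macKey[32],mac[16];
    Salsa20 s20(mangledKey,256,packetIv);              // packetIv = the packet's 8-byte IV field
    s20.encrypt12(ZERO_KEY,macKey,sizeof(macKey));     // first 32 bytes of keystream become the one-time Poly1305 key
    if (encryptPayload)
        s20.encrypt12(payload,payload,payloadLen);     // the rest of the same keystream encrypts the payload
    Poly1305::compute(mac,payload,payloadLen,macKey);  // MAC over ciphertext (or plaintext for unencrypted packets)
    // armor():   copy mac[0..7] into the packet's MAC field
    // dearmor(): recompute, compare with Utils::secureEq(mac,packetMacField,8), then decrypt if needed
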
diff --git a/node/Packet.hpp b/node/Packet.hpp
index 409762c7..ef0251e3 100644
--- a/node/Packet.hpp
+++ b/node/Packet.hpp
@@ -46,22 +46,24 @@
#include "../ext/lz4/lz4.h"
/**
- * Protocol version -- incremented only for MAJOR changes
+ * Protocol version -- incremented only for major changes
*
* 1 - 0.2.0 ... 0.2.5
* 2 - 0.3.0 ... 0.4.5
- * * Added signature and originating peer to multicast frame
- * * Double size of multicast frame bloom filter
+ * + Added signature and originating peer to multicast frame
+ * + Double size of multicast frame bloom filter
* 3 - 0.5.0 ... 0.6.0
- * * Yet another multicast redesign
- * * New crypto completely changes key agreement cipher
- * 4 - 0.6.0 ... CURRENT
- * * New identity format based on hashcash design
- *
- * This isn't going to change again for a long time unless your
- * author wakes up again at 4am with another great idea. :P
+ * + Yet another multicast redesign
+ * + New crypto completely changes key agreement cipher
+ * 4 - 0.6.0 ... 1.0.6
+ * + New identity format based on hashcash design
+ * 5 - 1.1.0 ... CURRENT
+ * + Supports circuit test, proof of work, and echo
+ * + Supports in-band world (root server definition) updates
+ * + Clustering! (Though this will work with protocol v4 clients.)
+ * + Otherwise backward compatible with protocol v4
*/
-#define ZT_PROTO_VERSION 4
+#define ZT_PROTO_VERSION 5
/**
* Minimum supported protocol version
@@ -233,15 +235,6 @@
*/
#define ZT_PROTO_MIN_FRAGMENT_LENGTH ZT_PACKET_FRAGMENT_IDX_PAYLOAD
-// Destination address types from HELLO, OK(HELLO), and other message types
-#define ZT_PROTO_DEST_ADDRESS_TYPE_NONE 0
-#define ZT_PROTO_DEST_ADDRESS_TYPE_ZEROTIER 1 // reserved but unused
-#define ZT_PROTO_DEST_ADDRESS_TYPE_ETHERNET 2 // future use
-#define ZT_PROTO_DEST_ADDRESS_TYPE_BLUETOOTH 3 // future use
-#define ZT_PROTO_DEST_ADDRESS_TYPE_IPV4 4
-#define ZT_PROTO_DEST_ADDRESS_TYPE_LTE_DIRECT 5 // future use
-#define ZT_PROTO_DEST_ADDRESS_TYPE_IPV6 6
-
// Ephemeral key record flags
#define ZT_PROTO_EPHEMERAL_KEY_FLAG_FIPS 0x01 // future use
@@ -329,8 +322,6 @@
#define ZT_PROTO_VERB_WHOIS__OK__IDX_IDENTITY (ZT_PROTO_VERB_OK_IDX_PAYLOAD)
-#define ZT_PROTO_VERB_WHOIS__ERROR__IDX_ZTADDRESS (ZT_PROTO_VERB_ERROR_IDX_PAYLOAD)
-
#define ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST__OK__IDX_NETWORK_ID (ZT_PROTO_VERB_OK_IDX_PAYLOAD)
#define ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST__OK__IDX_DICT_LEN (ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST__OK__IDX_NETWORK_ID + 8)
#define ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST__OK__IDX_DICT (ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST__OK__IDX_DICT_LEN + 2)
@@ -354,11 +345,11 @@ namespace ZeroTier {
* ZeroTier packet
*
* Packet format:
- * <[8] random initialization vector (doubles as 64-bit packet ID)>
+ * <[8] 64-bit random packet ID and crypto initialization vector>
* <[5] destination ZT address>
* <[5] source ZT address>
* <[1] flags/cipher (top 5 bits) and ZT hop count (last 3 bits)>
- * <[8] 8-bit MAC (currently first 8 bytes of poly1305 tag)>
+ * <[8] 64-bit MAC>
* [... -- begin encryption envelope -- ...]
* <[1] encrypted flags (top 3 bits) and verb (last 5 bits)>
* [... verb-specific payload ...]
@@ -374,6 +365,10 @@ namespace ZeroTier {
* immutable. This is because intermediate nodes can increment the hop
* count up to 7 (protocol max).
*
+ * A hop count of 7 also indicates that receiving peers should not attempt
+ * to learn direct paths from this packet. (Right now direct paths are only
+ * learned from direct packets anyway.)
+ *
* http://tonyarcieri.com/all-the-crypto-code-youve-ever-written-is-probably-broken
*
* For unencrypted packets, MAC is computed on plaintext. Only HELLO is ever
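
From the sizes listed above, the cleartext header is 27 bytes followed by the first byte of the encryption envelope. A small sketch of pulling those fields out of a raw datagram; offsets are derived from the listing rather than from the named ZT_PACKET_IDX_* constants, and <stdint.h> is assumed:

    static void inspectHeader(const void *data,unsigned int len)
    {
        if (len < 28)
            return;                                        // not even a full header + verb byte
        const uint8_t *pkt = reinterpret_cast<const uint8_t *>(data);
        uint64_t packetId = 0;
        for(int i=0;i<8;++i)
            packetId = (packetId << 8) | (uint64_t)pkt[i]; // bytes 0..7: IV, doubling as the packet ID
        // bytes 8..12  : destination ZeroTier address (40 bits)
        // bytes 13..17 : source ZeroTier address (40 bits)
        const unsigned int hops = pkt[18] & 0x07;          // ZT hop count: last 3 bits of the flags/cipher byte
        // bytes 19..26 : MAC (first 8 bytes of the Poly1305 tag)
        const unsigned int verb = pkt[27] & 0x1f;          // last 5 bits of the first envelope byte, meaningful only after dearmor()
        (void)packetId; (void)hops; (void)verb;
    }
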
@@ -530,10 +525,13 @@ public:
*/
enum Verb /* Max value: 32 (5 bits) */
{
- /* No operation, payload ignored, no reply */
+ /**
+ * No operation (ignored, no reply)
+ */
VERB_NOP = 0,
- /* Announcement of a node's existence:
+ /**
+ * Announcement of a node's existence:
* <[1] protocol version>
* <[1] software major version>
* <[1] software minor version>
@@ -542,6 +540,8 @@ public:
* <[...] binary serialized identity (see Identity)>
* <[1] destination address type>
* [<[...] destination address>]
+ * <[8] 64-bit world ID of current world>
+ * <[8] 64-bit timestamp of current world>
*
* This is the only message that ever must be sent in the clear, since it
* is used to push an identity to a new peer.
@@ -566,12 +566,15 @@ public:
* <[2] software revision (of responder)>
* <[1] destination address type (for this OK, not copied from HELLO)>
* [<[...] destination address>]
+ * <[2] 16-bit length of world update or 0 if none>
+ * [[...] world update]
*
* ERROR has no payload.
*/
VERB_HELLO = 1,
- /* Error response:
+ /**
+ * Error response:
* <[1] in-re verb>
* <[8] in-re packet ID>
* <[1] error code>
@@ -579,25 +582,31 @@ public:
*/
VERB_ERROR = 2,
- /* Success response:
+ /**
+ * Success response:
* <[1] in-re verb>
* <[8] in-re packet ID>
* <[...] request-specific payload>
*/
VERB_OK = 3,
- /* Query an identity by address:
+ /**
+ * Query an identity by address:
* <[5] address to look up>
*
* OK response payload:
* <[...] binary serialized identity>
*
- * ERROR response payload:
- * <[5] address>
+ * If querying a cluster, duplicate OK responses may occasionally occur.
+ * These should be discarded.
+ *
+ * If the address is not found, no response is generated. WHOIS requests
+ * will time out much like ARP requests and similar do in L2.
*/
VERB_WHOIS = 4,
- /* Meet another node at a given protocol address:
+ /**
+ * Meet another node at a given protocol address:
* <[1] flags (unused, currently 0)>
* <[5] ZeroTier address of peer that might be found at this address>
* <[2] 16-bit protocol address port>
@@ -616,11 +625,16 @@ public:
* may also ignore these messages if a peer is not known or is not being
* actively communicated with.
*
+ * Unfortunately the physical address format in this message pre-dates
+ * InetAddress's serialization format. :( ZeroTier is four years old and
+ * yes we've accumulated a tiny bit of cruft here and there.
+ *
* No OK or ERROR is generated.
*/
VERB_RENDEZVOUS = 5,
- /* ZT-to-ZT unicast ethernet frame (shortened EXT_FRAME):
+ /**
+ * ZT-to-ZT unicast ethernet frame (shortened EXT_FRAME):
* <[8] 64-bit network ID>
* <[2] 16-bit ethertype>
* <[...] ethernet payload>
@@ -635,7 +649,8 @@ public:
*/
VERB_FRAME = 6,
- /* Full Ethernet frame with MAC addressing and optional fields:
+ /**
+ * Full Ethernet frame with MAC addressing and optional fields:
* <[8] 64-bit network ID>
* <[1] flags>
* [<[...] certificate of network membership>]
@@ -658,23 +673,44 @@ public:
*/
VERB_EXT_FRAME = 7,
- /* DEPRECATED */
- VERB_P5_MULTICAST_FRAME = 8,
+ /**
+ * ECHO request (a.k.a. ping):
+ * <[...] arbitrary payload to be echoed back>
+ *
+ * This generates OK with a copy of the transmitted payload. No ERROR
+ * is generated. Response to ECHO requests is optional.
+ *
+ * Support for fragmented echo packets is optional and their use is not
+ * recommended.
+ */
+ VERB_ECHO = 8,
- /* Announce interest in multicast group(s):
+ /**
+ * Announce interest in multicast group(s):
* <[8] 64-bit network ID>
* <[6] multicast Ethernet address>
* <[4] multicast additional distinguishing information (ADI)>
* [... additional tuples of network/address/adi ...]
*
- * LIKEs are sent to peers with whom you have a direct peer to peer
- * connection, and always including root servers.
+ * LIKEs may be sent to any peer, though a good implementation should
+ * restrict them to peers on the same network they're for and to network
+ * controllers and root servers. In the current network, root servers
+ * will provide the service of final multicast cache.
+ *
+ * It is recommended that NETWORK_MEMBERSHIP_CERTIFICATE pushes be sent
+ * along with MULTICAST_LIKE when pushing LIKEs to peers that do not
+ * share a network membership (such as root servers), since this can be
+ * used to authenticate GATHER requests and limit responses to peers
+ * authorized to talk on a network. (Should be an optional field here,
+ * but saving one or two packets every five minutes is not worth an
+ * ugly hack or protocol rev.)
*
* OK/ERROR are not generated.
*/
VERB_MULTICAST_LIKE = 9,
- /* Network member certificate replication/push:
+ /**
+ * Network member certificate replication/push:
* <[...] serialized certificate of membership>
* [ ... additional certificates may follow ...]
*
@@ -685,7 +721,8 @@ public:
*/
VERB_NETWORK_MEMBERSHIP_CERTIFICATE = 10,
- /* Network configuration request:
+ /**
+ * Network configuration request:
* <[8] 64-bit network ID>
* <[2] 16-bit length of request meta-data dictionary>
* <[...] string-serialized request meta-data>
@@ -720,7 +757,8 @@ public:
*/
VERB_NETWORK_CONFIG_REQUEST = 11,
- /* Network configuration refresh request:
+ /**
+ * Network configuration refresh request:
* <[...] array of 64-bit network IDs>
*
* This can be sent by the network controller to inform a node that it
@@ -731,7 +769,8 @@ public:
*/
VERB_NETWORK_CONFIG_REFRESH = 12,
- /* Request endpoints for multicast distribution:
+ /**
+ * Request endpoints for multicast distribution:
* <[8] 64-bit network ID>
* <[1] flags>
* <[6] MAC address of multicast group being queried>
@@ -747,6 +786,9 @@ public:
* to send multicast but does not have the desired number of recipient
* peers.
*
+ * More than one OK response can occur if the response is broken up across
+ * multiple packets or if querying a clustered node.
+ *
* OK response payload:
* <[8] 64-bit network ID>
* <[6] MAC address of multicast group being queried>
@@ -756,20 +798,12 @@ public:
* <[2] 16-bit number of members enumerated in this packet>
* <[...] series of 5-byte ZeroTier addresses of enumerated members>
*
- * If no endpoints are known, OK and ERROR are both optional. It's okay
- * to return nothing in that case since gathering is "lazy."
- *
- * ERROR response payload:
- * <[8] 64-bit network ID>
- * <[6] MAC address of multicast group being queried>
- * <[4] 32-bit ADI for multicast group being queried>
- *
- * ERRORs are optional and are only generated if permission is denied,
- * certificate of membership is out of date, etc.
+ * ERROR is not generated; queries that return no response are dropped.
*/
VERB_MULTICAST_GATHER = 13,
- /* Multicast frame:
+ /**
+ * Multicast frame:
* <[8] 64-bit network ID>
* <[1] flags>
* [<[...] network certificate of membership>]
@@ -810,7 +844,8 @@ public:
*/
VERB_MULTICAST_FRAME = 14,
- /* Ephemeral (PFS) key push: (UNFINISHED, NOT IMPLEMENTED YET)
+ /**
+ * Ephemeral (PFS) key push: (UNFINISHED, NOT IMPLEMENTED YET)
* <[2] flags (unused and reserved, must be 0)>
* <[2] length of padding / extra field section>
* <[...] padding / extra field section>
@@ -866,7 +901,8 @@ public:
*/
VERB_SET_EPHEMERAL_KEY = 15,
- /* Push of potential endpoints for direct communication:
+ /**
+ * Push of potential endpoints for direct communication:
* <[2] 16-bit number of paths>
* <[...] paths>
*
@@ -880,13 +916,10 @@ public:
*
* Path record flags:
* 0x01 - Forget this path if it is currently known
- * 0x02 - Blacklist this path, do not use
+ * 0x02 - (Unused)
* 0x04 - Disable encryption (trust: privacy)
* 0x08 - Disable encryption and authentication (trust: ultimate)
*
- * Address types and addresses are of the same format as the destination
- * address type and address in HELLO.
- *
* The receiver may, upon receiving a push, attempt to establish a
* direct link to one or more of the indicated addresses. It is the
* responsibility of the sender to limit which peers it pushes direct
@@ -906,7 +939,8 @@ public:
*/
VERB_PUSH_DIRECT_PATHS = 16,
- /* Source-routed circuit test message:
+ /**
+ * Source-routed circuit test message:
* <[5] address of originator of circuit test>
* <[2] 16-bit flags>
* <[8] 64-bit timestamp>
@@ -984,7 +1018,8 @@ public:
*/
VERB_CIRCUIT_TEST = 17,
- /* Circuit test hop report:
+ /**
+ * Circuit test hop report:
* <[8] 64-bit timestamp (from original test)>
* <[8] 64-bit test ID (from original test)>
* <[8] 64-bit reporter timestamp (reporter's clock, 0 if unspec)>
@@ -998,6 +1033,7 @@ public:
* <[2] 16-bit error code (set to 0, currently unused)>
* <[8] 64-bit report flags (set to 0, currently unused)>
* <[8] 64-bit source packet ID>
+ * <[5] upstream ZeroTier address from which test was received>
* <[1] 8-bit source packet hop count (ZeroTier hop count)>
* <[...] local wire address on which packet was received>
* <[...] remote wire address from which packet was received>
@@ -1017,7 +1053,50 @@ public:
* If a test report is received and no circuit test was sent, it should be
* ignored. This message generates no OK or ERROR response.
*/
- VERB_CIRCUIT_TEST_REPORT = 18
+ VERB_CIRCUIT_TEST_REPORT = 18,
+
+ /**
+ * Request proof of work:
+ * <[1] 8-bit proof of work type>
+ * <[1] 8-bit proof of work difficulty>
+ * <[2] 16-bit length of proof of work challenge>
+ * <[...] proof of work challenge>
+ *
+	 * This requests that a peer perform a proof of work calculation. It can be
+ * sent by highly trusted peers (e.g. root servers, network controllers)
+ * under suspected denial of service conditions in an attempt to filter
+ * out "non-serious" peers and remain responsive to those proving their
+ * intent to actually communicate.
+ *
+	 * If the peer agrees to perform the work, it does so and responds with
+	 * an OK containing the result. Otherwise it may ignore the message or
+	 * respond with an ERROR_INVALID_REQUEST or ERROR_UNSUPPORTED_OPERATION.
+ *
+ * Proof of work type IDs:
+ * 0x01 - Salsa20/12+SHA512 hashcash function
+ *
+ * Salsa20/12+SHA512 is based on the following composite hash function:
+ *
+ * (1) Compute SHA512(candidate)
+ * (2) Use the first 256 bits of the result of #1 as a key to encrypt
+ * 131072 zero bytes with Salsa20/12 (with a zero IV).
+ * (3) Compute SHA512(the result of step #2)
+	 *     (4) Accept this candidate if the first [difficulty] bits of the result
+ * from step #3 are zero. Otherwise generate a new candidate and try
+ * again.
+ *
+ * This is performed repeatedly on candidates generated by appending the
+ * supplied challenge to an arbitrary nonce until a valid candidate
+	 * is found. The chosen nonce (prepended to the challenge) is then
+	 * returned as the result in OK.
+ *
+ * OK payload:
+ * <[2] 16-bit length of result>
+ * <[...] computed proof of work>
+ *
+ * ERROR has no payload.
+ */
+ VERB_REQUEST_PROOF_OF_WORK = 19
};
/**
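
The Salsa20/12+SHA512 hashcash described above maps directly to code. A hedged sketch of the search loop, assuming a SHA512::hash(out,in,len) helper alongside the Salsa20 class used elsewhere in this diff (the helper name and the string-based nonce are illustrative only):

    #include <string>
    #include <vector>
    #include <stdint.h>

    static std::string computeProofOfWork(const std::string &challenge,unsigned int difficulty)
    {
        static const uint8_t zeroIv[8] = {0,0,0,0,0,0,0,0};
        for(uint64_t n=0;;++n) {
            const std::string nonce(std::to_string(n));        // arbitrary nonce, prepended to the challenge
            const std::string candidate(nonce + challenge);

            uint8_t d1[64];
            SHA512::hash(d1,candidate.data(),(unsigned int)candidate.length());  // step 1

            std::vector<uint8_t> buf(131072,0);
            Salsa20 s20(d1,256,zeroIv);                        // first 256 bits of d1 as key, zero IV
            s20.encrypt12(&buf[0],&buf[0],(unsigned int)buf.size());             // step 2

            uint8_t d2[64];
            SHA512::hash(d2,&buf[0],(unsigned int)buf.size()); // step 3

            unsigned int zeroBits = 0;                         // step 4: count leading zero bits of d2
            for(unsigned int i=0;(i<64)&&(zeroBits<difficulty);++i) {
                if (d2[i] == 0) { zeroBits += 8; continue; }
                for(uint8_t b=d2[i];!(b & 0x80);b<<=1) ++zeroBits;
                break;
            }
            if (zeroBits >= difficulty)
                return nonce;                                  // this goes back in the OK payload
        }
    }
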
@@ -1034,7 +1113,7 @@ public:
/* Bad/unsupported protocol version */
ERROR_BAD_PROTOCOL_VERSION = 2,
- /* Unknown object queried (e.g. with WHOIS) */
+ /* Unknown object queried */
ERROR_OBJ_NOT_FOUND = 3,
/* HELLO pushed an identity whose address is already claimed */
diff --git a/node/Defaults.hpp b/node/Path.cpp
index c1df919b..e2475751 100644
--- a/node/Defaults.hpp
+++ b/node/Path.cpp
@@ -25,50 +25,21 @@
* LLC. Start here: http://www.zerotier.com/
*/
-#ifndef ZT_DEFAULTS_HPP
-#define ZT_DEFAULTS_HPP
-
-#include <stdexcept>
-#include <string>
-#include <vector>
-#include <map>
-
-#include "Constants.hpp"
-#include "Identity.hpp"
-#include "InetAddress.hpp"
+#include "Path.hpp"
+#include "AntiRecursion.hpp"
+#include "RuntimeEnvironment.hpp"
+#include "Node.hpp"
namespace ZeroTier {
-/**
- * Static configuration defaults
- *
- * These are the default values that ship baked into the ZeroTier binary. They
- * define the basic parameters required for it to connect to the rest of the
- * network and obtain software updates.
- */
-class Defaults
+bool Path::send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now)
{
-public:
- Defaults();
-
- /**
- * Default root topology dictionary
- */
- const std::string defaultRootTopology;
-
- /**
- * Identities permitted to sign root topology dictionaries
- */
- const std::map< Address,Identity > rootTopologyAuthorities;
-
- /**
- * Address for IPv4 LAN auto-location broadcasts: 255.255.255.255:9993
- */
- const InetAddress v4Broadcast;
-};
-
-extern const Defaults ZT_DEFAULTS;
+ if (RR->node->putPacket(_localAddress,address(),data,len)) {
+ sent(now);
+ RR->antiRec->logOutgoingZT(data,len);
+ return true;
+ }
+ return false;
+}
} // namespace ZeroTier
-
-#endif
diff --git a/node/Path.hpp b/node/Path.hpp
index 6a69e071..00f8ed36 100644
--- a/node/Path.hpp
+++ b/node/Path.hpp
@@ -28,12 +28,29 @@
#ifndef ZT_PATH_HPP
#define ZT_PATH_HPP
+#include <stdint.h>
+#include <string.h>
+
+#include <stdexcept>
+#include <algorithm>
+
#include "Constants.hpp"
#include "InetAddress.hpp"
-#include "Utils.hpp"
+
+/**
+ * Flag indicating that this path is suboptimal
+ *
+ * This is used in cluster mode to indicate that the peer has been directed
+ * to a better path. This path can continue to be used but shouldn't be kept
+ * or advertised to other cluster members. Not used if clustering is not
+ * built and enabled.
+ */
+#define ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL 0x0001
namespace ZeroTier {
+class RuntimeEnvironment;
+
/**
* Base class for paths
*
@@ -42,43 +59,84 @@ namespace ZeroTier {
class Path
{
public:
+ Path() :
+ _lastSend(0),
+ _lastReceived(0),
+ _addr(),
+ _localAddress(),
+ _flags(0),
+ _ipScope(InetAddress::IP_SCOPE_NONE)
+ {
+ }
+
+ Path(const InetAddress &localAddress,const InetAddress &addr) :
+ _lastSend(0),
+ _lastReceived(0),
+ _addr(addr),
+ _localAddress(localAddress),
+ _flags(0),
+ _ipScope(addr.ipScope())
+ {
+ }
+
+ inline Path &operator=(const Path &p)
+ {
+ if (this != &p)
+ memcpy(this,&p,sizeof(Path));
+ return *this;
+ }
+
/**
- * Path trust category
+ * Called when a packet is sent to this remote path
*
- * Note that this is NOT peer trust and has nothing to do with root server
- * designations or other trust metrics. This indicates how much we trust
- * this path to be secure and/or private. A trust level of normal means
- * encrypt and authenticate all traffic. Privacy trust means we can send
- * traffic in the clear. Ultimate trust means we don't even need
- * authentication. Generally a private path would be a hard-wired local
- * LAN, while an ultimate trust path would be a physically isolated private
- * server backplane.
+ * This is called automatically by Path::send().
*
- * Nearly all paths will be normal trust. The other levels are for high
- * performance local SDN use only.
+ * @param t Time of send
+ */
+ inline void sent(uint64_t t) { _lastSend = t; }
+
+ /**
+ * Called when a packet is received from this remote path
*
- * These values MUST match ZT_LocalInterfaceAddressTrust in ZeroTierOne.h
+ * @param t Time of receive
*/
- enum Trust // NOTE: max 255
- {
- TRUST_NORMAL = 0,
- TRUST_PRIVACY = 10,
- TRUST_ULTIMATE = 20
- };
+ inline void received(uint64_t t) { _lastReceived = t; }
- Path() :
- _addr(),
- _ipScope(InetAddress::IP_SCOPE_NONE),
- _trust(TRUST_NORMAL)
+ /**
+ * @param now Current time
+ * @return True if this path appears active
+ */
+ inline bool active(uint64_t now) const
+ throw()
{
+ return ((now - _lastReceived) < ZT_PEER_ACTIVITY_TIMEOUT);
}
- Path(const InetAddress &addr,int metric,Trust trust) :
- _addr(addr),
- _ipScope(addr.ipScope()),
- _trust(trust)
- {
- }
+ /**
+ * Send a packet via this path
+ *
+ * @param RR Runtime environment
+ * @param data Packet data
+ * @param len Packet length
+ * @param now Current time
+ * @return True if transport reported success
+ */
+ bool send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now);
+
+ /**
+ * @return Address of local side of this path or NULL if unspecified
+ */
+ inline const InetAddress &localAddress() const throw() { return _localAddress; }
+
+ /**
+ * @return Time of last send to this path
+ */
+ inline uint64_t lastSend() const throw() { return _lastSend; }
+
+ /**
+ * @return Time of last receive from this path
+ */
+ inline uint64_t lastReceived() const throw() { return _lastReceived; }
/**
* @return Physical address
@@ -105,16 +163,13 @@ public:
}
/**
- * @return Path trust level
- */
- inline Trust trust() const throw() { return _trust; }
-
- /**
* @return True if path is considered reliable (no NAT keepalives etc. are needed)
*/
inline bool reliable() const throw()
{
- return ( (_addr.ss_family == AF_INET6) || ((_ipScope != InetAddress::IP_SCOPE_GLOBAL)&&(_ipScope != InetAddress::IP_SCOPE_PSEUDOPRIVATE)) );
+ if (_addr.ss_family == AF_INET)
+ return ((_ipScope != InetAddress::IP_SCOPE_GLOBAL)&&(_ipScope != InetAddress::IP_SCOPE_PSEUDOPRIVATE));
+ return true;
}
/**
@@ -155,10 +210,51 @@ public:
return false;
}
-protected:
+#ifdef ZT_ENABLE_CLUSTER
+ /**
+ * @param f New value of ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL
+ */
+ inline void setClusterSuboptimal(bool f) { _flags = ((f) ? (_flags | ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) : (_flags & (~ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL))); }
+
+ /**
+ * @return True if ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL is set
+ */
+ inline bool isClusterSuboptimal() const { return ((_flags & ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) != 0); }
+#endif
+
+ template<unsigned int C>
+ inline void serialize(Buffer<C> &b) const
+ {
+ b.append((uint8_t)0); // version
+ b.append((uint64_t)_lastSend);
+ b.append((uint64_t)_lastReceived);
+ _addr.serialize(b);
+ _localAddress.serialize(b);
+ b.append((uint16_t)_flags);
+ }
+
+ template<unsigned int C>
+ inline unsigned int deserialize(const Buffer<C> &b,unsigned int startAt = 0)
+ {
+ unsigned int p = startAt;
+ if (b[p++] != 0)
+ throw std::invalid_argument("invalid serialized Path");
+ _lastSend = b.template at<uint64_t>(p); p += 8;
+ _lastReceived = b.template at<uint64_t>(p); p += 8;
+ p += _addr.deserialize(b,p);
+ p += _localAddress.deserialize(b,p);
+ _flags = b.template at<uint16_t>(p); p += 2;
+ _ipScope = _addr.ipScope();
+ return (p - startAt);
+ }
+
+private:
+ uint64_t _lastSend;
+ uint64_t _lastReceived;
InetAddress _addr;
+ InetAddress _localAddress;
+ unsigned int _flags;
InetAddress::IpScope _ipScope; // memoize this since it's a computed value checked often
- Trust _trust;
};
} // namespace ZeroTier
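
Since Path now carries its own serialize()/deserialize() (consumed by the Peer serialization further down), a quick round trip looks like the sketch below; the Buffer capacity and the input addresses are arbitrary, and the function name is illustrative:

    static unsigned int pathRoundTrip(const ZeroTier::InetAddress &localAddr,const ZeroTier::InetAddress &remoteAddr,uint64_t now)
    {
        ZeroTier::Buffer<1024> b;
        ZeroTier::Path p1(localAddr,remoteAddr);
        p1.sent(now);
        p1.received(now);
        p1.serialize(b);            // version byte, both timestamps, both addresses, flags

        ZeroTier::Path p2;
        return p2.deserialize(b,0); // == b.size(); p2 now mirrors p1, with its IP scope recomputed from the remote address
    }
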
diff --git a/node/Peer.cpp b/node/Peer.cpp
index 757f822c..f0f43399 100644
--- a/node/Peer.cpp
+++ b/node/Peer.cpp
@@ -34,6 +34,8 @@
#include "Network.hpp"
#include "AntiRecursion.hpp"
#include "SelfAwareness.hpp"
+#include "Cluster.hpp"
+#include "Packet.hpp"
#include <algorithm>
@@ -52,14 +54,17 @@ Peer::Peer(const Identity &myIdentity,const Identity &peerIdentity)
_lastMulticastFrame(0),
_lastAnnouncedTo(0),
_lastPathConfirmationSent(0),
- _lastDirectPathPush(0),
+ _lastDirectPathPushSent(0),
+ _lastDirectPathPushReceive(0),
_lastPathSort(0),
+ _vProto(0),
_vMajor(0),
_vMinor(0),
_vRevision(0),
_id(peerIdentity),
_numPaths(0),
_latency(0),
+ _directPathPushCutoffCount(0),
_networkComs(4),
_lastPushedComs(4)
{
@@ -77,81 +82,132 @@ void Peer::received(
uint64_t inRePacketId,
Packet::Verb inReVerb)
{
+#ifdef ZT_ENABLE_CLUSTER
+ bool suboptimalPath = false;
+ if ((RR->cluster)&&(hops == 0)) {
+ // Note: findBetterEndpoint() is first since we still want to check
+ // for a better endpoint even if we don't actually send a redirect.
+ InetAddress redirectTo;
+ if ( (RR->cluster->findBetterEndpoint(redirectTo,_id.address(),remoteAddr,false)) && (verb != Packet::VERB_OK)&&(verb != Packet::VERB_ERROR)&&(verb != Packet::VERB_RENDEZVOUS)&&(verb != Packet::VERB_PUSH_DIRECT_PATHS) ) {
+ if (_vProto >= 5) {
+ // For newer peers we can send a more idiomatic verb: PUSH_DIRECT_PATHS.
+ Packet outp(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS);
+ outp.append((uint16_t)1); // count == 1
+ outp.append((uint8_t)0); // no flags
+ outp.append((uint16_t)0); // no extensions
+ if (redirectTo.ss_family == AF_INET) {
+ outp.append((uint8_t)4);
+ outp.append((uint8_t)6);
+ outp.append(redirectTo.rawIpData(),4);
+ } else {
+ outp.append((uint8_t)6);
+ outp.append((uint8_t)18);
+ outp.append(redirectTo.rawIpData(),16);
+ }
+ outp.append((uint16_t)redirectTo.port());
+ outp.armor(_key,true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
+ RR->node->putPacket(localAddr,remoteAddr,outp.data(),outp.size());
+ } else {
+ // For older peers we use RENDEZVOUS to coax them into contacting us elsewhere.
+ Packet outp(_id.address(),RR->identity.address(),Packet::VERB_RENDEZVOUS);
+ outp.append((uint8_t)0); // no flags
+ RR->identity.address().appendTo(outp);
+ outp.append((uint16_t)redirectTo.port());
+ if (redirectTo.ss_family == AF_INET) {
+ outp.append((uint8_t)4);
+ outp.append(redirectTo.rawIpData(),4);
+ } else {
+ outp.append((uint8_t)16);
+ outp.append(redirectTo.rawIpData(),16);
+ }
+ outp.armor(_key,true);
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
+ RR->node->putPacket(localAddr,remoteAddr,outp.data(),outp.size());
+ }
+ suboptimalPath = true;
+ }
+ }
+#endif
+
const uint64_t now = RR->node->now();
bool needMulticastGroupAnnounce = false;
+ bool pathIsConfirmed = false;
- {
+ { // begin _lock
Mutex::Lock _l(_lock);
_lastReceive = now;
+ if ((verb == Packet::VERB_FRAME)||(verb == Packet::VERB_EXT_FRAME))
+ _lastUnicastFrame = now;
+ else if (verb == Packet::VERB_MULTICAST_FRAME)
+ _lastMulticastFrame = now;
- if (!hops) {
- bool pathIsConfirmed = false;
+ if ((now - _lastAnnouncedTo) >= ((ZT_MULTICAST_LIKE_EXPIRE / 2) - 1000)) {
+ _lastAnnouncedTo = now;
+ needMulticastGroupAnnounce = true;
+ }
- /* Learn new paths from direct (hops == 0) packets */
- {
- unsigned int np = _numPaths;
- for(unsigned int p=0;p<np;++p) {
- if ((_paths[p].address() == remoteAddr)&&(_paths[p].localAddress() == localAddr)) {
- _paths[p].received(now);
- pathIsConfirmed = true;
- break;
- }
+ if (hops == 0) {
+ unsigned int np = _numPaths;
+ for(unsigned int p=0;p<np;++p) {
+ if ((_paths[p].address() == remoteAddr)&&(_paths[p].localAddress() == localAddr)) {
+ _paths[p].received(now);
+#ifdef ZT_ENABLE_CLUSTER
+ _paths[p].setClusterSuboptimal(suboptimalPath);
+#endif
+ pathIsConfirmed = true;
+ break;
}
+ }
+
+ if (!pathIsConfirmed) {
+ if (verb == Packet::VERB_OK) {
- if (!pathIsConfirmed) {
- if ((verb == Packet::VERB_OK)&&(inReVerb == Packet::VERB_HELLO)) {
-
- // Learn paths if they've been confirmed via a HELLO
- RemotePath *slot = (RemotePath *)0;
- if (np < ZT_MAX_PEER_NETWORK_PATHS) {
- // Add new path
- slot = &(_paths[np++]);
- } else {
- // Replace oldest non-fixed path
- uint64_t slotLRmin = 0xffffffffffffffffULL;
- for(unsigned int p=0;p<ZT_MAX_PEER_NETWORK_PATHS;++p) {
- if ((!_paths[p].fixed())&&(_paths[p].lastReceived() <= slotLRmin)) {
- slotLRmin = _paths[p].lastReceived();
- slot = &(_paths[p]);
- }
+ Path *slot = (Path *)0;
+ if (np < ZT_MAX_PEER_NETWORK_PATHS) {
+ slot = &(_paths[np++]);
+ } else {
+ uint64_t slotLRmin = 0xffffffffffffffffULL;
+ for(unsigned int p=0;p<ZT_MAX_PEER_NETWORK_PATHS;++p) {
+ if (_paths[p].lastReceived() <= slotLRmin) {
+ slotLRmin = _paths[p].lastReceived();
+ slot = &(_paths[p]);
}
}
- if (slot) {
- *slot = RemotePath(localAddr,remoteAddr,false);
- slot->received(now);
- _numPaths = np;
- pathIsConfirmed = true;
- _sortPaths(now);
- }
+ }
+ if (slot) {
+ *slot = Path(localAddr,remoteAddr);
+ slot->received(now);
+#ifdef ZT_ENABLE_CLUSTER
+ slot->setClusterSuboptimal(suboptimalPath);
+#endif
+ _numPaths = np;
+ pathIsConfirmed = true;
+ _sortPaths(now);
+ }
- } else {
+#ifdef ZT_ENABLE_CLUSTER
+ if (RR->cluster)
+ RR->cluster->broadcastHavePeer(_id);
+#endif
- /* If this path is not known, send a HELLO. We don't learn
- * paths without confirming that a bidirectional link is in
- * fact present, but any packet that decodes and authenticates
- * correctly is considered valid. */
- if ((now - _lastPathConfirmationSent) >= ZT_MIN_PATH_CONFIRMATION_INTERVAL) {
- _lastPathConfirmationSent = now;
- TRACE("got %s via unknown path %s(%s), confirming...",Packet::verbString(verb),_id.address().toString().c_str(),remoteAddr.toString().c_str());
- attemptToContactAt(RR,localAddr,remoteAddr,now);
- }
+ } else {
+ /* If this path is not known, send a HELLO. We don't learn
+ * paths without confirming that a bidirectional link is in
+ * fact present, but any packet that decodes and authenticates
+ * correctly is considered valid. */
+ if ((now - _lastPathConfirmationSent) >= ZT_MIN_PATH_CONFIRMATION_INTERVAL) {
+ _lastPathConfirmationSent = now;
+ TRACE("got %s via unknown path %s(%s), confirming...",Packet::verbString(verb),_id.address().toString().c_str(),remoteAddr.toString().c_str());
+ sendHELLO(RR,localAddr,remoteAddr,now);
}
+
}
}
}
-
- if ((now - _lastAnnouncedTo) >= ((ZT_MULTICAST_LIKE_EXPIRE / 2) - 1000)) {
- _lastAnnouncedTo = now;
- needMulticastGroupAnnounce = true;
- }
-
- if ((verb == Packet::VERB_FRAME)||(verb == Packet::VERB_EXT_FRAME))
- _lastUnicastFrame = now;
- else if (verb == Packet::VERB_MULTICAST_FRAME)
- _lastMulticastFrame = now;
- }
+ } // end _lock
if (needMulticastGroupAnnounce) {
const std::vector< SharedPtr<Network> > networks(RR->node->allNetworks());
@@ -160,7 +216,7 @@ void Peer::received(
}
}
-void Peer::attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now)
+void Peer::sendHELLO(const RuntimeEnvironment *RR,const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now,unsigned int ttl)
{
// _lock not required here since _id is immutable and nothing else is accessed
@@ -170,69 +226,76 @@ void Peer::attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &lo
outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
outp.append(now);
-
RR->identity.serialize(outp,false);
-
- switch(atAddress.ss_family) {
- case AF_INET:
- outp.append((unsigned char)ZT_PROTO_DEST_ADDRESS_TYPE_IPV4);
- outp.append(atAddress.rawIpData(),4);
- outp.append((uint16_t)atAddress.port());
- break;
- case AF_INET6:
- outp.append((unsigned char)ZT_PROTO_DEST_ADDRESS_TYPE_IPV6);
- outp.append(atAddress.rawIpData(),16);
- outp.append((uint16_t)atAddress.port());
- break;
- default:
- outp.append((unsigned char)ZT_PROTO_DEST_ADDRESS_TYPE_NONE);
- break;
- }
+ atAddress.serialize(outp);
+ outp.append((uint64_t)RR->topology->worldId());
+ outp.append((uint64_t)RR->topology->worldTimestamp());
outp.armor(_key,false); // HELLO is sent in the clear
- RR->node->putPacket(localAddr,atAddress,outp.data(),outp.size());
+ RR->antiRec->logOutgoingZT(outp.data(),outp.size());
+ RR->node->putPacket(localAddr,atAddress,outp.data(),outp.size(),ttl);
}
-void Peer::doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now)
+bool Peer::doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now,int inetAddressFamily)
{
+ Path *p = (Path *)0;
+
Mutex::Lock _l(_lock);
- RemotePath *const bestPath = _getBestPath(now);
- if (bestPath) {
- if ((now - bestPath->lastReceived()) >= ZT_PEER_DIRECT_PING_DELAY) {
- TRACE("PING %s(%s)",_id.address().toString().c_str(),bestPath->address().toString().c_str());
- attemptToContactAt(RR,bestPath->localAddress(),bestPath->address(),now);
- bestPath->sent(now);
- } else if (((now - bestPath->lastSend()) >= ZT_NAT_KEEPALIVE_DELAY)&&(!bestPath->reliable())) {
+ if (inetAddressFamily != 0) {
+ p = _getBestPath(now,inetAddressFamily);
+ } else {
+ p = _getBestPath(now);
+ }
+
+ if (p) {
+ if ((now - p->lastReceived()) >= ZT_PEER_DIRECT_PING_DELAY) {
+ //TRACE("PING %s(%s) after %llums/%llums send/receive inactivity",_id.address().toString().c_str(),p->address().toString().c_str(),now - p->lastSend(),now - p->lastReceived());
+ sendHELLO(RR,p->localAddress(),p->address(),now);
+ p->sent(now);
+ } else if (((now - p->lastSend()) >= ZT_NAT_KEEPALIVE_DELAY)&&(!p->reliable())) {
+ //TRACE("NAT keepalive %s(%s) after %llums/%llums send/receive inactivity",_id.address().toString().c_str(),p->address().toString().c_str(),now - p->lastSend(),now - p->lastReceived());
_natKeepaliveBuf += (uint32_t)((now * 0x9e3779b1) >> 1); // tumble this around to send constantly varying (meaningless) payloads
- TRACE("NAT keepalive %s(%s)",_id.address().toString().c_str(),bestPath->address().toString().c_str());
- RR->node->putPacket(bestPath->localAddress(),bestPath->address(),&_natKeepaliveBuf,sizeof(_natKeepaliveBuf));
- bestPath->sent(now);
+ RR->node->putPacket(p->localAddress(),p->address(),&_natKeepaliveBuf,sizeof(_natKeepaliveBuf));
+ p->sent(now);
+ } else {
+ //TRACE("no PING or NAT keepalive: addr==%s reliable==%d %llums/%llums send/receive inactivity",p->address().toString().c_str(),(int)p->reliable(),now - p->lastSend(),now - p->lastReceived());
}
+ return true;
}
+
+ return false;
}
-void Peer::pushDirectPaths(const RuntimeEnvironment *RR,RemotePath *path,uint64_t now,bool force)
+void Peer::pushDirectPaths(const RuntimeEnvironment *RR,Path *path,uint64_t now,bool force)
{
+#ifdef ZT_ENABLE_CLUSTER
+ // Cluster mode disables normal PUSH_DIRECT_PATHS in favor of cluster-based peer redirection
+ if (RR->cluster)
+ return;
+#endif
+
Mutex::Lock _l(_lock);
- if (((now - _lastDirectPathPush) >= ZT_DIRECT_PATH_PUSH_INTERVAL)||(force)) {
- _lastDirectPathPush = now;
+ if (((now - _lastDirectPathPushSent) >= ZT_DIRECT_PATH_PUSH_INTERVAL)||(force)) {
+ _lastDirectPathPushSent = now;
- std::vector<Path> dps(RR->node->directPaths());
+ std::vector<InetAddress> dps(RR->node->directPaths());
+ if (dps.empty())
+ return;
#ifdef ZT_TRACE
{
std::string ps;
- for(std::vector<Path>::const_iterator p(dps.begin());p!=dps.end();++p) {
+ for(std::vector<InetAddress>::const_iterator p(dps.begin());p!=dps.end();++p) {
if (ps.length() > 0)
ps.push_back(',');
- ps.append(p->address().toString());
+ ps.append(p->toString());
}
TRACE("pushing %u direct paths to %s: %s",(unsigned int)dps.size(),_id.address().toString().c_str(),ps.c_str());
}
#endif
- std::vector<Path>::const_iterator p(dps.begin());
+ std::vector<InetAddress>::const_iterator p(dps.begin());
while (p != dps.end()) {
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS);
outp.addSize(2); // leave room for count
@@ -240,7 +303,7 @@ void Peer::pushDirectPaths(const RuntimeEnvironment *RR,RemotePath *path,uint64_
unsigned int count = 0;
while ((p != dps.end())&&((outp.size() + 24) < ZT_PROTO_MAX_PACKET_LENGTH)) {
uint8_t addressType = 4;
- switch(p->address().ss_family) {
+ switch(p->ss_family) {
case AF_INET:
break;
case AF_INET6:
@@ -252,6 +315,7 @@ void Peer::pushDirectPaths(const RuntimeEnvironment *RR,RemotePath *path,uint64_
}
uint8_t flags = 0;
+ /* TODO: path trust is not implemented yet
switch(p->trust()) {
default:
break;
@@ -262,13 +326,14 @@ void Peer::pushDirectPaths(const RuntimeEnvironment *RR,RemotePath *path,uint64_
flags |= (0x04 | 0x08); // no encryption, no authentication (redundant but go ahead and set both)
break;
}
+ */
outp.append(flags);
outp.append((uint16_t)0); // no extensions
outp.append(addressType);
outp.append((uint8_t)((addressType == 4) ? 6 : 18));
- outp.append(p->address().rawIpData(),((addressType == 4) ? 4 : 16));
- outp.append((uint16_t)p->address().port());
+ outp.append(p->rawIpData(),((addressType == 4) ? 4 : 16));
+ outp.append((uint16_t)p->port());
++count;
++p;
@@ -283,59 +348,6 @@ void Peer::pushDirectPaths(const RuntimeEnvironment *RR,RemotePath *path,uint64_
}
}
-void Peer::addPath(const RemotePath &newp,uint64_t now)
-{
- Mutex::Lock _l(_lock);
-
- unsigned int np = _numPaths;
-
- for(unsigned int p=0;p<np;++p) {
- if (_paths[p].address() == newp.address()) {
- _paths[p].setFixed(newp.fixed());
- _sortPaths(now);
- return;
- }
- }
-
- RemotePath *slot = (RemotePath *)0;
- if (np < ZT_MAX_PEER_NETWORK_PATHS) {
- // Add new path
- slot = &(_paths[np++]);
- } else {
- // Replace oldest non-fixed path
- uint64_t slotLRmin = 0xffffffffffffffffULL;
- for(unsigned int p=0;p<ZT_MAX_PEER_NETWORK_PATHS;++p) {
- if ((!_paths[p].fixed())&&(_paths[p].lastReceived() <= slotLRmin)) {
- slotLRmin = _paths[p].lastReceived();
- slot = &(_paths[p]);
- }
- }
- }
- if (slot) {
- *slot = newp;
- _numPaths = np;
- }
-
- _sortPaths(now);
-}
-
-void Peer::clearPaths(bool fixedToo)
-{
- if (fixedToo) {
- _numPaths = 0;
- } else {
- unsigned int np = _numPaths;
- unsigned int x = 0;
- unsigned int y = 0;
- while (x < np) {
- if (_paths[x].fixed())
- _paths[y++] = _paths[x];
- ++x;
- }
- _numPaths = y;
- }
-}
-
bool Peer::resetWithinScope(const RuntimeEnvironment *RR,InetAddress::IpScope scope,uint64_t now)
{
Mutex::Lock _l(_lock);
@@ -344,12 +356,9 @@ bool Peer::resetWithinScope(const RuntimeEnvironment *RR,InetAddress::IpScope sc
unsigned int y = 0;
while (x < np) {
if (_paths[x].address().ipScope() == scope) {
- if (_paths[x].fixed()) {
- attemptToContactAt(RR,_paths[x].localAddress(),_paths[x].address(),now);
- _paths[y++] = _paths[x]; // keep fixed paths
- }
+ sendHELLO(RR,_paths[x].localAddress(),_paths[x].address(),now);
} else {
- _paths[y++] = _paths[x]; // keep paths not in this scope
+ _paths[y++] = _paths[x];
}
++x;
}
@@ -497,7 +506,7 @@ struct _SortPathsByQuality
{
uint64_t _now;
_SortPathsByQuality(const uint64_t now) : _now(now) {}
- inline bool operator()(const RemotePath &a,const RemotePath &b) const
+ inline bool operator()(const Path &a,const Path &b) const
{
const uint64_t qa = (
((uint64_t)a.active(_now) << 63) |
@@ -517,7 +526,7 @@ void Peer::_sortPaths(const uint64_t now)
std::sort(&(_paths[0]),&(_paths[_numPaths]),_SortPathsByQuality(now));
}
-RemotePath *Peer::_getBestPath(const uint64_t now)
+Path *Peer::_getBestPath(const uint64_t now)
{
// assumes _lock is locked
if ((now - _lastPathSort) >= ZT_PEER_PATH_SORT_INTERVAL)
@@ -529,7 +538,22 @@ RemotePath *Peer::_getBestPath(const uint64_t now)
if (_paths[0].active(now))
return &(_paths[0]);
}
- return (RemotePath *)0;
+ return (Path *)0;
+}
+
+Path *Peer::_getBestPath(const uint64_t now,int inetAddressFamily)
+{
+ // assumes _lock is locked
+ if ((now - _lastPathSort) >= ZT_PEER_PATH_SORT_INTERVAL)
+ _sortPaths(now);
+ for(int k=0;k<2;++k) { // try once, and if it fails sort and try one more time
+ for(unsigned int i=0;i<_numPaths;++i) {
+ if ((_paths[i].active(now))&&((int)_paths[i].address().ss_family == inetAddressFamily))
+ return &(_paths[i]);
+ }
+ _sortPaths(now);
+ }
+ return (Path *)0;
}
} // namespace ZeroTier
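
Because doPingAndKeepalive() now takes an address family and reports whether any direct path in that family is alive, a caller can keep IPv4 and IPv6 paths alive independently. A hedged sketch of such a caller (not the actual ping loop in this commit):

    #include <sys/socket.h> // AF_INET / AF_INET6

    static bool keepBothFamiliesAlive(const ZeroTier::RuntimeEnvironment *RR,const ZeroTier::SharedPtr<ZeroTier::Peer> &peer,uint64_t now)
    {
        const bool v4 = peer->doPingAndKeepalive(RR,now,AF_INET);
        const bool v6 = peer->doPingAndKeepalive(RR,now,AF_INET6);
        if (!(v4||v6))
            return peer->doPingAndKeepalive(RR,now,0); // fall back to the overall best path, any family
        return true;
    }
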
diff --git a/node/Peer.hpp b/node/Peer.hpp
index 0988561a..7b8d18ea 100644
--- a/node/Peer.hpp
+++ b/node/Peer.hpp
@@ -41,7 +41,7 @@
#include "RuntimeEnvironment.hpp"
#include "CertificateOfMembership.hpp"
-#include "RemotePath.hpp"
+#include "Path.hpp"
#include "Address.hpp"
#include "Utils.hpp"
#include "Identity.hpp"
@@ -130,12 +130,12 @@ public:
Packet::Verb inReVerb = Packet::VERB_NOP);
/**
- * Get the best direct path to this peer
+ * Get the current best direct path to this peer
*
* @param now Current time
- * @return Best path or NULL if there are no active (or fixed) direct paths
+ * @return Best path or NULL if there are no active direct paths
*/
- inline RemotePath *getBestPath(uint64_t now)
+ inline Path *getBestPath(uint64_t now)
{
Mutex::Lock _l(_lock);
return _getBestPath(now);
@@ -150,14 +150,14 @@ public:
* @param now Current time
* @return Path used on success or NULL on failure
*/
- inline RemotePath *send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now)
+ inline Path *send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now)
{
- RemotePath *bestPath = getBestPath(now);
+ Path *bestPath = getBestPath(now);
if (bestPath) {
if (bestPath->send(RR,data,len,now))
return bestPath;
}
- return (RemotePath *)0;
+ return (Path *)0;
}
/**
@@ -170,16 +170,19 @@ public:
* @param localAddr Local address
* @param atAddress Destination address
* @param now Current time
+ * @param ttl Desired IP TTL (default: 0 to leave alone)
*/
- void attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now);
+ void sendHELLO(const RuntimeEnvironment *RR,const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now,unsigned int ttl = 0);
/**
* Send pings or keepalives depending on configured timeouts
*
* @param RR Runtime environment
* @param now Current time
+ * @param inetAddressFamily Keep this address family alive, or 0 to simply pick current best ignoring family
+ * @return True if at least one direct path seems alive
*/
- void doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now);
+ bool doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now,int inetAddressFamily);
/**
* Push direct paths if we haven't done so in [rate limit] milliseconds
@@ -189,14 +192,14 @@ public:
* @param now Current time
* @param force If true, push regardless of rate limit
*/
- void pushDirectPaths(const RuntimeEnvironment *RR,RemotePath *path,uint64_t now,bool force);
+ void pushDirectPaths(const RuntimeEnvironment *RR,Path *path,uint64_t now,bool force);
/**
* @return All known direct paths to this peer
*/
- inline std::vector<RemotePath> paths() const
+ inline std::vector<Path> paths() const
{
- std::vector<RemotePath> pp;
+ std::vector<Path> pp;
Mutex::Lock _l(_lock);
for(unsigned int p=0,np=_numPaths;p<np;++p)
pp.push_back(_paths[p]);
@@ -204,32 +207,6 @@ public:
}
/**
- * @return Time of last direct packet receive for any path
- */
- inline uint64_t lastDirectReceive() const
- throw()
- {
- Mutex::Lock _l(_lock);
- uint64_t x = 0;
- for(unsigned int p=0,np=_numPaths;p<np;++p)
- x = std::max(x,_paths[p].lastReceived());
- return x;
- }
-
- /**
- * @return Time of last direct packet send for any path
- */
- inline uint64_t lastDirectSend() const
- throw()
- {
- Mutex::Lock _l(_lock);
- uint64_t x = 0;
- for(unsigned int p=0,np=_numPaths;p<np;++p)
- x = std::max(x,_paths[p].lastSend());
- return x;
- }
-
- /**
* @return Time of last receive of anything, whether direct or relayed
*/
inline uint64_t lastReceive() const throw() { return _lastReceive; }
@@ -255,27 +232,44 @@ public:
inline uint64_t lastAnnouncedTo() const throw() { return _lastAnnouncedTo; }
/**
- * @return True if peer has received an actual data frame within ZT_PEER_ACTIVITY_TIMEOUT milliseconds
+ * @return True if this peer is actively sending real network frames
+ */
+ inline uint64_t activelyTransferringFrames(uint64_t now) const throw() { return ((now - lastFrame()) < ZT_PEER_ACTIVITY_TIMEOUT); }
+
+ /**
+ * @return Latency in milliseconds or 0 if unknown
*/
- inline uint64_t alive(uint64_t now) const throw() { return ((now - lastFrame()) < ZT_PEER_ACTIVITY_TIMEOUT); }
+ inline unsigned int latency() const { return _latency; }
/**
- * @return Current latency or 0 if unknown (max: 65535)
+ * This computes a quality score for relays and root servers
+ *
+ * If we haven't heard anything from these in ZT_PEER_ACTIVITY_TIMEOUT, they
+ * receive the worst possible quality (max unsigned int). Otherwise the
+ * quality is a product of latency and the number of potential missed
+ * pings. This causes roots and relays to switch over a bit faster if they
+ * fail.
+ *
+ * @return Relay quality score computed from latency and other factors, lower is better
*/
- inline unsigned int latency() const
- throw()
+ inline unsigned int relayQuality(const uint64_t now) const
{
+ const uint64_t tsr = now - _lastReceive;
+ if (tsr >= ZT_PEER_ACTIVITY_TIMEOUT)
+ return (~(unsigned int)0);
unsigned int l = _latency;
- return std::min(l,(unsigned int)65535);
+ if (!l)
+ l = 0xffff;
+ return (l * (((unsigned int)tsr / (ZT_PEER_DIRECT_PING_DELAY + 1000)) + 1));
}
+
/**
* Update latency with a new direct measurment
*
* @param l Direct latency measurment in ms
*/
inline void addDirectLatencyMeasurment(unsigned int l)
- throw()
{
unsigned int ol = _latency;
if ((ol > 0)&&(ol < 10000))
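
For a sense of scale on the relayQuality() scoring a few lines above, here is the arithmetic with illustrative numbers (the real constants live in Constants.hpp; 61000 ms merely stands in for ZT_PEER_DIRECT_PING_DELAY + 1000):

    // latency == 80 ms, integer division throughout:
    //   last heard  10 s ago -> 80 * ( 10000/61000 + 1) == 80 * 1 ==  80
    //   last heard  70 s ago -> 80 * ( 70000/61000 + 1) == 80 * 2 == 160
    //   last heard 130 s ago -> 80 * (130000/61000 + 1) == 80 * 3 == 240
    //   silent for ZT_PEER_ACTIVITY_TIMEOUT or longer  -> ~(unsigned int)0 (worst possible score)
    //   latency unknown (0)                            -> treated as 0xffff before scaling
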
@@ -284,16 +278,10 @@ public:
}
/**
- * @return True if this peer has at least one direct IP address path
- */
- inline bool hasDirectPath() const throw() { return (_numPaths != 0); }
-
- /**
* @param now Current time
- * @return True if this peer has at least one active or fixed direct path
+ * @return True if this peer has at least one active direct path
*/
inline bool hasActiveDirectPath(uint64_t now) const
- throw()
{
Mutex::Lock _l(_lock);
for(unsigned int p=0,np=_numPaths;p<np;++p) {
@@ -303,27 +291,25 @@ public:
return false;
}
+#ifdef ZT_ENABLE_CLUSTER
/**
- * Add a path (if we don't already have it)
- *
- * @param p New path to add
* @param now Current time
+ * @return True if this peer has at least one active direct path that is not cluster-suboptimal
*/
- void addPath(const RemotePath &newp,uint64_t now);
-
- /**
- * Clear paths
- *
- * @param fixedToo If true, clear fixed paths as well as learned ones
- */
- void clearPaths(bool fixedToo);
+ inline bool hasClusterOptimalPath(uint64_t now) const
+ {
+ Mutex::Lock _l(_lock);
+ for(unsigned int p=0,np=_numPaths;p<np;++p) {
+ if ((_paths[p].active(now))&&(!_paths[p].isClusterSuboptimal()))
+ return true;
+ }
+ return false;
+ }
+#endif
/**
* Reset paths within a given scope
*
- * For fixed paths in this scope, a packet is sent. Non-fixed paths in this
- * scope are forgotten.
- *
* @param RR Runtime environment
* @param scope IP scope of paths to reset
* @param now Current time
@@ -346,7 +332,6 @@ public:
*/
inline void setRemoteVersion(unsigned int vproto,unsigned int vmaj,unsigned int vmin,unsigned int vrev)
{
- Mutex::Lock _l(_lock);
_vProto = (uint16_t)vproto;
_vMajor = (uint16_t)vmaj;
_vMinor = (uint16_t)vmin;
@@ -360,33 +345,6 @@ public:
inline bool remoteVersionKnown() const throw() { return ((_vMajor > 0)||(_vMinor > 0)||(_vRevision > 0)); }
/**
- * Check whether this peer's version is both known and is at least what is specified
- *
- * @param major Major version to check against
- * @param minor Minor version
- * @param rev Revision
- * @return True if peer's version is at least supplied tuple
- */
- inline bool atLeastVersion(unsigned int major,unsigned int minor,unsigned int rev)
- throw()
- {
- Mutex::Lock _l(_lock);
- if ((_vMajor > 0)||(_vMinor > 0)||(_vRevision > 0)) {
- if (_vMajor > major)
- return true;
- else if (_vMajor == major) {
- if (_vMinor > minor)
- return true;
- else if (_vMinor == minor) {
- if (_vRevision >= rev)
- return true;
- }
- }
- }
- return false;
- }
-
- /**
* Get most recently active path addresses for IPv4 and/or IPv6
*
* Note that v4 and v6 are not modified if they are not found, so
@@ -430,6 +388,46 @@ public:
void clean(const RuntimeEnvironment *RR,uint64_t now);
/**
+ * Remove all paths with this remote address
+ *
+ * @param addr Remote address to remove
+ */
+ inline void removePathByAddress(const InetAddress &addr)
+ {
+ Mutex::Lock _l(_lock);
+ unsigned int np = _numPaths;
+ unsigned int x = 0;
+ unsigned int y = 0;
+ while (x < np) {
+ if (_paths[x].address() != addr)
+ _paths[y++] = _paths[x];
+ ++x;
+ }
+ _numPaths = y;
+ }
+
+ /**
+ * Update direct path push stats and return true if we should respond
+ *
+	 * This is a circuit breaker that keeps VERB_PUSH_DIRECT_PATHS from being
+	 * useful as a DDoS amplification vector. Without it, a malicious peer could
+	 * send large numbers of these messages and cause other nodes to bombard
+	 * arbitrary IPs with traffic.
+ *
+ * @param now Current time
+ * @return True if we should respond
+ */
+ inline bool shouldRespondToDirectPathPush(const uint64_t now)
+ {
+ Mutex::Lock _l(_lock);
+ if ((now - _lastDirectPathPushReceive) <= ZT_PUSH_DIRECT_PATHS_CUTOFF_TIME)
+ ++_directPathPushCutoffCount;
+ else _directPathPushCutoffCount = 0;
+ _lastDirectPathPushReceive = now;
+ return (_directPathPushCutoffCount < ZT_PUSH_DIRECT_PATHS_CUTOFF_LIMIT);
+ }
+
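A minimal standalone sketch of the rate-limit logic above, using illustrative cutoffs; the real values (ZT_PUSH_DIRECT_PATHS_CUTOFF_TIME and ZT_PUSH_DIRECT_PATHS_CUTOFF_LIMIT) are defined elsewhere in this change and may differ:

#include <stdint.h>

// Sketch of the PUSH_DIRECT_PATHS circuit breaker. Window and limit below
// are assumed values for illustration only.
struct PushRateLimiter
{
	PushRateLimiter() : lastReceive(0),cutoffCount(0) {}

	bool shouldRespond(uint64_t now)
	{
		const uint64_t cutoffWindowMs = 60000;  // assumed window (ms)
		const unsigned int cutoffLimit = 5;     // assumed max pushes per window
		if ((now - lastReceive) <= cutoffWindowMs)
			++cutoffCount;
		else cutoffCount = 0;
		lastReceive = now;
		return (cutoffCount < cutoffLimit);     // respond only while under the limit
	}

	uint64_t lastReceive;        // time of last push received (ms)
	unsigned int cutoffCount;    // pushes seen within the current window
};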
+ /**
* Find a common set of addresses by which two peers can link, if any
*
* @param a Peer A
@@ -454,10 +452,10 @@ public:
{
Mutex::Lock _l(_lock);
- const unsigned int atPos = b.size();
+ const unsigned int recSizePos = b.size();
b.addSize(4); // space for uint32_t field length
- b.append((uint32_t)1); // version of serialized Peer data
+ b.append((uint16_t)0); // version of serialized Peer data
_id.serialize(b,false);
@@ -467,15 +465,17 @@ public:
b.append((uint64_t)_lastMulticastFrame);
b.append((uint64_t)_lastAnnouncedTo);
b.append((uint64_t)_lastPathConfirmationSent);
- b.append((uint64_t)_lastDirectPathPush);
+ b.append((uint64_t)_lastDirectPathPushSent);
+ b.append((uint64_t)_lastDirectPathPushReceive);
b.append((uint64_t)_lastPathSort);
b.append((uint16_t)_vProto);
b.append((uint16_t)_vMajor);
b.append((uint16_t)_vMinor);
b.append((uint16_t)_vRevision);
b.append((uint32_t)_latency);
+ b.append((uint16_t)_directPathPushCutoffCount);
- b.append((uint32_t)_numPaths);
+ b.append((uint16_t)_numPaths);
for(unsigned int i=0;i<_numPaths;++i)
_paths[i].serialize(b);
@@ -502,7 +502,7 @@ public:
}
}
- b.setAt(atPos,(uint32_t)(b.size() - atPos)); // set size
+ b.template setAt<uint32_t>(recSizePos,(uint32_t)(b.size() - (recSizePos + 4))); // set size
}
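A sketch of the length-prefix framing used above: reserve four bytes, append the record, then backfill the length so it counts only the bytes after the field itself. The real Buffer<C> class is replaced here by a plain std::vector, and big-endian (network) byte order is assumed for illustration:

#include <stdint.h>
#include <stddef.h>
#include <vector>

// Append v in big-endian order (assumed to match the real Buffer behavior).
static void appendU32(std::vector<unsigned char> &b,uint32_t v)
{
	for(int i=3;i>=0;--i)
		b.push_back((unsigned char)(v >> (i * 8)));
}

static void writeRecord(std::vector<unsigned char> &b,const unsigned char *payload,size_t len)
{
	const size_t recSizePos = b.size();
	appendU32(b,0);                       // placeholder for the record size
	b.push_back(0); b.push_back(0);       // uint16_t serialization version (0)
	b.insert(b.end(),payload,payload + len);
	const uint32_t recSize = (uint32_t)(b.size() - (recSizePos + 4)); // excludes the 4-byte field
	for(int i=0;i<4;++i)
		b[recSizePos + i] = (unsigned char)(recSize >> ((3 - i) * 8)); // backfill in place
}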
/**
@@ -516,13 +516,12 @@ public:
template<unsigned int C>
static inline SharedPtr<Peer> deserializeNew(const Identity &myIdentity,const Buffer<C> &b,unsigned int &p)
{
- const uint32_t recSize = b.template at<uint32_t>(p);
+ const unsigned int recSize = b.template at<uint32_t>(p); p += 4;
if ((p + recSize) > b.size())
return SharedPtr<Peer>(); // size invalid
- p += 4;
- if (b.template at<uint32_t>(p) != 1)
+ if (b.template at<uint16_t>(p) != 0)
return SharedPtr<Peer>(); // version mismatch
- p += 4;
+ p += 2;
Identity npid;
p += npid.deserialize(b,p);
@@ -537,21 +536,23 @@ public:
np->_lastMulticastFrame = b.template at<uint64_t>(p); p += 8;
np->_lastAnnouncedTo = b.template at<uint64_t>(p); p += 8;
np->_lastPathConfirmationSent = b.template at<uint64_t>(p); p += 8;
- np->_lastDirectPathPush = b.template at<uint64_t>(p); p += 8;
+ np->_lastDirectPathPushSent = b.template at<uint64_t>(p); p += 8;
+ np->_lastDirectPathPushReceive = b.template at<uint64_t>(p); p += 8;
np->_lastPathSort = b.template at<uint64_t>(p); p += 8;
np->_vProto = b.template at<uint16_t>(p); p += 2;
np->_vMajor = b.template at<uint16_t>(p); p += 2;
np->_vMinor = b.template at<uint16_t>(p); p += 2;
np->_vRevision = b.template at<uint16_t>(p); p += 2;
np->_latency = b.template at<uint32_t>(p); p += 4;
+ np->_directPathPushCutoffCount = b.template at<uint16_t>(p); p += 2;
- const unsigned int numPaths = b.template at<uint32_t>(p); p += 4;
+ const unsigned int numPaths = b.template at<uint16_t>(p); p += 2;
for(unsigned int i=0;i<numPaths;++i) {
if (i < ZT_MAX_PEER_NETWORK_PATHS) {
p += np->_paths[np->_numPaths++].deserialize(b,p);
} else {
// Skip any paths beyond max, but still read stream
- RemotePath foo;
+ Path foo;
p += foo.deserialize(b,p);
}
}
@@ -575,25 +576,29 @@ public:
private:
void _sortPaths(const uint64_t now);
- RemotePath *_getBestPath(const uint64_t now);
+ Path *_getBestPath(const uint64_t now);
+ Path *_getBestPath(const uint64_t now,int inetAddressFamily);
+
+ unsigned char _key[ZT_PEER_SECRET_KEY_LENGTH]; // computed with key agreement, not serialized
- unsigned char _key[ZT_PEER_SECRET_KEY_LENGTH];
uint64_t _lastUsed;
uint64_t _lastReceive; // direct or indirect
uint64_t _lastUnicastFrame;
uint64_t _lastMulticastFrame;
uint64_t _lastAnnouncedTo;
uint64_t _lastPathConfirmationSent;
- uint64_t _lastDirectPathPush;
+ uint64_t _lastDirectPathPushSent;
+ uint64_t _lastDirectPathPushReceive;
uint64_t _lastPathSort;
uint16_t _vProto;
uint16_t _vMajor;
uint16_t _vMinor;
uint16_t _vRevision;
Identity _id;
- RemotePath _paths[ZT_MAX_PEER_NETWORK_PATHS];
+ Path _paths[ZT_MAX_PEER_NETWORK_PATHS];
unsigned int _numPaths;
unsigned int _latency;
+ unsigned int _directPathPushCutoffCount;
struct _NetworkCom
{
diff --git a/node/Poly1305.cpp b/node/Poly1305.cpp
index 77b32a80..b78071f6 100644
--- a/node/Poly1305.cpp
+++ b/node/Poly1305.cpp
@@ -20,6 +20,9 @@ namespace ZeroTier {
#if 0
+// "Naive" implementation, which is slower... might still want this on some older
+// or weird platforms if the later versions have issues.
+
static inline void add(unsigned int h[17],const unsigned int c[17])
{
unsigned int j;
@@ -132,9 +135,236 @@ typedef struct poly1305_context {
unsigned char opaque[136];
} poly1305_context;
-/*
- poly1305 implementation using 32 bit * 32 bit = 64 bit multiplication and 64 bit addition
-*/
+#if (defined(_MSC_VER) || defined(__GNUC__)) && (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__))
+
+//////////////////////////////////////////////////////////////////////////////
+// 128-bit implementation for MSC and GCC from Poly1305-donna
+
+#if defined(_MSC_VER)
+ #include <intrin.h>
+
+ typedef struct uint128_t {
+ unsigned long long lo;
+ unsigned long long hi;
+ } uint128_t;
+
+ #define MUL(out, x, y) out.lo = _umul128((x), (y), &out.hi)
+ #define ADD(out, in) { unsigned long long t = out.lo; out.lo += in.lo; out.hi += (out.lo < t) + in.hi; }
+ #define ADDLO(out, in) { unsigned long long t = out.lo; out.lo += in; out.hi += (out.lo < t); }
+ #define SHR(in, shift) (__shiftright128(in.lo, in.hi, (shift)))
+ #define LO(in) (in.lo)
+
+// #define POLY1305_NOINLINE __declspec(noinline)
+#elif defined(__GNUC__)
+ #if defined(__SIZEOF_INT128__)
+ typedef unsigned __int128 uint128_t;
+ #else
+ typedef unsigned uint128_t __attribute__((mode(TI)));
+ #endif
+
+ #define MUL(out, x, y) out = ((uint128_t)x * y)
+ #define ADD(out, in) out += in
+ #define ADDLO(out, in) out += in
+ #define SHR(in, shift) (unsigned long long)(in >> (shift))
+ #define LO(in) (unsigned long long)(in)
+
+// #define POLY1305_NOINLINE __attribute__((noinline))
+#endif
+
+#define poly1305_block_size 16
+
+/* 17 + sizeof(size_t) + 8*sizeof(unsigned long long) */
+typedef struct poly1305_state_internal_t {
+ unsigned long long r[3];
+ unsigned long long h[3];
+ unsigned long long pad[2];
+ size_t leftover;
+ unsigned char buffer[poly1305_block_size];
+ unsigned char final;
+} poly1305_state_internal_t;
+
+/* interpret eight 8 bit unsigned integers as a 64 bit unsigned integer in little endian */
+static inline unsigned long long
+U8TO64(const unsigned char *p) {
+ return
+ (((unsigned long long)(p[0] & 0xff) ) |
+ ((unsigned long long)(p[1] & 0xff) << 8) |
+ ((unsigned long long)(p[2] & 0xff) << 16) |
+ ((unsigned long long)(p[3] & 0xff) << 24) |
+ ((unsigned long long)(p[4] & 0xff) << 32) |
+ ((unsigned long long)(p[5] & 0xff) << 40) |
+ ((unsigned long long)(p[6] & 0xff) << 48) |
+ ((unsigned long long)(p[7] & 0xff) << 56));
+}
+
+/* store a 64 bit unsigned integer as eight 8 bit unsigned integers in little endian */
+static inline void
+U64TO8(unsigned char *p, unsigned long long v) {
+ p[0] = (v ) & 0xff;
+ p[1] = (v >> 8) & 0xff;
+ p[2] = (v >> 16) & 0xff;
+ p[3] = (v >> 24) & 0xff;
+ p[4] = (v >> 32) & 0xff;
+ p[5] = (v >> 40) & 0xff;
+ p[6] = (v >> 48) & 0xff;
+ p[7] = (v >> 56) & 0xff;
+}
+
+static inline void
+poly1305_init(poly1305_context *ctx, const unsigned char key[32]) {
+ poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
+ unsigned long long t0,t1;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ t0 = U8TO64(&key[0]);
+ t1 = U8TO64(&key[8]);
+
+ st->r[0] = ( t0 ) & 0xffc0fffffff;
+ st->r[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffff;
+ st->r[2] = ((t1 >> 24) ) & 0x00ffffffc0f;
+
+ /* h = 0 */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+
+ /* save pad for later */
+ st->pad[0] = U8TO64(&key[16]);
+ st->pad[1] = U8TO64(&key[24]);
+
+ st->leftover = 0;
+ st->final = 0;
+}
+
+static inline void
+poly1305_blocks(poly1305_state_internal_t *st, const unsigned char *m, size_t bytes) {
+ const unsigned long long hibit = (st->final) ? 0 : ((unsigned long long)1 << 40); /* 1 << 128 */
+ unsigned long long r0,r1,r2;
+ unsigned long long s1,s2;
+ unsigned long long h0,h1,h2;
+ unsigned long long c;
+ uint128_t d0,d1,d2,d;
+
+ r0 = st->r[0];
+ r1 = st->r[1];
+ r2 = st->r[2];
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+
+ s1 = r1 * (5 << 2);
+ s2 = r2 * (5 << 2);
+
+ while (bytes >= poly1305_block_size) {
+ unsigned long long t0,t1;
+
+ /* h += m[i] */
+ t0 = U8TO64(&m[0]);
+ t1 = U8TO64(&m[8]);
+
+ h0 += (( t0 ) & 0xfffffffffff);
+ h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff);
+ h2 += (((t1 >> 24) ) & 0x3ffffffffff) | hibit;
+
+ /* h *= r */
+ MUL(d0, h0, r0); MUL(d, h1, s2); ADD(d0, d); MUL(d, h2, s1); ADD(d0, d);
+ MUL(d1, h0, r1); MUL(d, h1, r0); ADD(d1, d); MUL(d, h2, s2); ADD(d1, d);
+ MUL(d2, h0, r2); MUL(d, h1, r1); ADD(d2, d); MUL(d, h2, r0); ADD(d2, d);
+
+ /* (partial) h %= p */
+ c = SHR(d0, 44); h0 = LO(d0) & 0xfffffffffff;
+ ADDLO(d1, c); c = SHR(d1, 44); h1 = LO(d1) & 0xfffffffffff;
+ ADDLO(d2, c); c = SHR(d2, 42); h2 = LO(d2) & 0x3ffffffffff;
+ h0 += c * 5; c = (h0 >> 44); h0 = h0 & 0xfffffffffff;
+ h1 += c;
+
+ m += poly1305_block_size;
+ bytes -= poly1305_block_size;
+ }
+
+ st->h[0] = h0;
+ st->h[1] = h1;
+ st->h[2] = h2;
+}
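A note on the limb arithmetic above: r and h are held in radix-2^44 limbs of 44, 44, and 42 bits, so after the clamp on r every cross product fits in 128 bits. Products whose weight reaches 2^130 fold back using 2^130 = 5 (mod 2^130 - 5); a term at weight 2^132 therefore re-enters with a factor of 4*5 = 20, which is why s1 = 20*r1 and s2 = 20*r2 (the r * (5 << 2) above). Before the carry chain:

	d0 = h0*r0 + h1*(20*r2) + h2*(20*r1)   (weight 2^0)
	d1 = h0*r1 + h1*r0      + h2*(20*r2)   (weight 2^44)
	d2 = h0*r2 + h1*r1      + h2*r0        (weight 2^88)

The hibit constant (1 << 40) is 2^128 expressed in the top limb, since 128 - 88 = 40.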
+
+static inline void
+poly1305_finish(poly1305_context *ctx, unsigned char mac[16]) {
+ poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
+ unsigned long long h0,h1,h2,c;
+ unsigned long long g0,g1,g2;
+ unsigned long long t0,t1;
+
+ /* process the remaining block */
+ if (st->leftover) {
+ size_t i = st->leftover;
+ st->buffer[i] = 1;
+ for (i = i + 1; i < poly1305_block_size; i++)
+ st->buffer[i] = 0;
+ st->final = 1;
+ poly1305_blocks(st, st->buffer, poly1305_block_size);
+ }
+
+ /* fully carry h */
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+
+ c = (h1 >> 44); h1 &= 0xfffffffffff;
+ h2 += c; c = (h2 >> 42); h2 &= 0x3ffffffffff;
+ h0 += c * 5; c = (h0 >> 44); h0 &= 0xfffffffffff;
+ h1 += c; c = (h1 >> 44); h1 &= 0xfffffffffff;
+ h2 += c; c = (h2 >> 42); h2 &= 0x3ffffffffff;
+ h0 += c * 5; c = (h0 >> 44); h0 &= 0xfffffffffff;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5; c = (g0 >> 44); g0 &= 0xfffffffffff;
+ g1 = h1 + c; c = (g1 >> 44); g1 &= 0xfffffffffff;
+ g2 = h2 + c - ((unsigned long long)1 << 42);
+
+ /* select h if h < p, or h + -p if h >= p */
+ c = (g2 >> ((sizeof(unsigned long long) * 8) - 1)) - 1;
+ g0 &= c;
+ g1 &= c;
+ g2 &= c;
+ c = ~c;
+ h0 = (h0 & c) | g0;
+ h1 = (h1 & c) | g1;
+ h2 = (h2 & c) | g2;
+
+ /* h = (h + pad) */
+ t0 = st->pad[0];
+ t1 = st->pad[1];
+
+ h0 += (( t0 ) & 0xfffffffffff) ; c = (h0 >> 44); h0 &= 0xfffffffffff;
+ h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff) + c; c = (h1 >> 44); h1 &= 0xfffffffffff;
+ h2 += (((t1 >> 24) ) & 0x3ffffffffff) + c; h2 &= 0x3ffffffffff;
+
+ /* mac = h % (2^128) */
+ h0 = ((h0 ) | (h1 << 44));
+ h1 = ((h1 >> 20) | (h2 << 24));
+
+ U64TO8(&mac[0], h0);
+ U64TO8(&mac[8], h1);
+
+ /* zero out the state */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+ st->r[0] = 0;
+ st->r[1] = 0;
+ st->r[2] = 0;
+ st->pad[0] = 0;
+ st->pad[1] = 0;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+#else
+
+//////////////////////////////////////////////////////////////////////////////
+// More portable 64-bit implementation
#define poly1305_block_size 16
@@ -257,43 +487,6 @@ poly1305_blocks(poly1305_state_internal_t *st, const unsigned char *m, size_t by
}
static inline void
-poly1305_update(poly1305_context *ctx, const unsigned char *m, size_t bytes) {
- poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
- size_t i;
-
- /* handle leftover */
- if (st->leftover) {
- size_t want = (poly1305_block_size - st->leftover);
- if (want > bytes)
- want = bytes;
- for (i = 0; i < want; i++)
- st->buffer[st->leftover + i] = m[i];
- bytes -= want;
- m += want;
- st->leftover += want;
- if (st->leftover < poly1305_block_size)
- return;
- poly1305_blocks(st, st->buffer, poly1305_block_size);
- st->leftover = 0;
- }
-
- /* process full blocks */
- if (bytes >= poly1305_block_size) {
- size_t want = (bytes & ~(poly1305_block_size - 1));
- poly1305_blocks(st, m, want);
- m += want;
- bytes -= want;
- }
-
- /* store leftover */
- if (bytes) {
- for (i = 0; i < bytes; i++)
- st->buffer[st->leftover + i] = m[i];
- st->leftover += bytes;
- }
-}
-
-static inline void
poly1305_finish(poly1305_context *ctx, unsigned char mac[16]) {
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
unsigned long h0,h1,h2,h3,h4,c;
@@ -380,6 +573,47 @@ poly1305_finish(poly1305_context *ctx, unsigned char mac[16]) {
st->pad[3] = 0;
}
+//////////////////////////////////////////////////////////////////////////////
+
+#endif // MSC/GCC or not
+
+static inline void
+poly1305_update(poly1305_context *ctx, const unsigned char *m, size_t bytes) {
+ poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
+ size_t i;
+
+ /* handle leftover */
+ if (st->leftover) {
+ size_t want = (poly1305_block_size - st->leftover);
+ if (want > bytes)
+ want = bytes;
+ for (i = 0; i < want; i++)
+ st->buffer[st->leftover + i] = m[i];
+ bytes -= want;
+ m += want;
+ st->leftover += want;
+ if (st->leftover < poly1305_block_size)
+ return;
+ poly1305_blocks(st, st->buffer, poly1305_block_size);
+ st->leftover = 0;
+ }
+
+ /* process full blocks */
+ if (bytes >= poly1305_block_size) {
+ size_t want = (bytes & ~(poly1305_block_size - 1));
+ poly1305_blocks(st, m, want);
+ m += want;
+ bytes -= want;
+ }
+
+ /* store leftover */
+ if (bytes) {
+ for (i = 0; i < bytes; i++)
+ st->buffer[st->leftover + i] = m[i];
+ st->leftover += bytes;
+ }
+}
+
} // anonymous namespace
void Poly1305::compute(void *auth,const void *data,unsigned int len,const void *key)
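For reference, a minimal usage sketch of the one-shot API above. Key and tag sizes follow the Poly1305 definition (32-byte one-time key, 16-byte tag); the include path is assumed, and key derivation and constant-time tag comparison are out of scope here:

#include <stdint.h>

#include "Poly1305.hpp" // assumed header providing ZeroTier::Poly1305

// Compute a 16-byte authenticator over 'msg' with a 32-byte one-time key.
static void tagExample(const uint8_t oneTimeKey[32],const void *msg,unsigned int msgLen,uint8_t tag[16])
{
	ZeroTier::Poly1305::compute(tag,msg,msgLen,oneTimeKey);
}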
diff --git a/node/RemotePath.hpp b/node/RemotePath.hpp
deleted file mode 100644
index d2f99997..00000000
--- a/node/RemotePath.hpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * ZeroTier One - Network Virtualization Everywhere
- * Copyright (C) 2011-2015 ZeroTier, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * --
- *
- * ZeroTier may be used and distributed under the terms of the GPLv3, which
- * are available at: http://www.gnu.org/licenses/gpl-3.0.html
- *
- * If you would like to embed ZeroTier into a commercial application or
- * redistribute it in a modified binary form, please contact ZeroTier Networks
- * LLC. Start here: http://www.zerotier.com/
- */
-
-#ifndef ZT_REMOTEPATH_HPP
-#define ZT_REMOTEPATH_HPP
-
-#include <stdint.h>
-#include <string.h>
-
-#include <stdexcept>
-#include <algorithm>
-
-#include "Path.hpp"
-#include "Node.hpp"
-#include "AntiRecursion.hpp"
-#include "RuntimeEnvironment.hpp"
-
-#define ZT_REMOTEPATH_FLAG_FIXED 0x0001
-
-namespace ZeroTier {
-
-/**
- * Path to a remote peer
- *
- * This extends Path to include status information about path activity.
- */
-class RemotePath : public Path
-{
-public:
- RemotePath() :
- Path(),
- _lastSend(0),
- _lastReceived(0),
- _localAddress(),
- _flags(0) {}
-
- RemotePath(const InetAddress &localAddress,const InetAddress &addr,bool fixed) :
- Path(addr,0,TRUST_NORMAL),
- _lastSend(0),
- _lastReceived(0),
- _localAddress(localAddress),
- _flags(fixed ? ZT_REMOTEPATH_FLAG_FIXED : 0) {}
-
- inline const InetAddress &localAddress() const throw() { return _localAddress; }
-
- inline uint64_t lastSend() const throw() { return _lastSend; }
- inline uint64_t lastReceived() const throw() { return _lastReceived; }
-
- /**
- * @return Is this a fixed path?
- */
- inline bool fixed() const throw() { return ((_flags & ZT_REMOTEPATH_FLAG_FIXED) != 0); }
-
- /**
- * @param f New value of fixed flag
- */
- inline void setFixed(const bool f)
- throw()
- {
- if (f)
- _flags |= ZT_REMOTEPATH_FLAG_FIXED;
- else _flags &= ~ZT_REMOTEPATH_FLAG_FIXED;
- }
-
- /**
- * Called when a packet is sent to this remote path
- *
- * This is called automatically by RemotePath::send().
- *
- * @param t Time of send
- */
- inline void sent(uint64_t t)
- throw()
- {
- _lastSend = t;
- }
-
- /**
- * Called when a packet is received from this remote path
- *
- * @param t Time of receive
- */
- inline void received(uint64_t t)
- throw()
- {
- _lastReceived = t;
- }
-
- /**
- * @param now Current time
- * @return True if this path is fixed or has received data in last ACTIVITY_TIMEOUT ms
- */
- inline bool active(uint64_t now) const
- throw()
- {
- return ( ((_flags & ZT_REMOTEPATH_FLAG_FIXED) != 0) || ((now - _lastReceived) < ZT_PEER_ACTIVITY_TIMEOUT) );
- }
-
- /**
- * Send a packet via this path
- *
- * @param RR Runtime environment
- * @param data Packet data
- * @param len Packet length
- * @param now Current time
- * @return True if transport reported success
- */
- inline bool send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now)
- {
- if (RR->node->putPacket(_localAddress,address(),data,len)) {
- sent(now);
- RR->antiRec->logOutgoingZT(data,len);
- return true;
- }
- return false;
- }
-
- template<unsigned int C>
- inline void serialize(Buffer<C> &b) const
- {
- b.append((uint8_t)1); // version
- _addr.serialize(b);
- b.append((uint8_t)_trust);
- b.append((uint64_t)_lastSend);
- b.append((uint64_t)_lastReceived);
- _localAddress.serialize(b);
- b.append((uint16_t)_flags);
- }
-
- template<unsigned int C>
- inline unsigned int deserialize(const Buffer<C> &b,unsigned int startAt = 0)
- {
- unsigned int p = startAt;
- if (b[p++] != 1)
- throw std::invalid_argument("invalid serialized RemotePath");
- p += _addr.deserialize(b,p);
- _ipScope = _addr.ipScope();
- _trust = (Path::Trust)b[p++];
- _lastSend = b.template at<uint64_t>(p); p += 8;
- _lastReceived = b.template at<uint64_t>(p); p += 8;
- p += _localAddress.deserialize(b,p);
- _flags = b.template at<uint16_t>(p); p += 2;
- return (p - startAt);
- }
-
-protected:
- uint64_t _lastSend;
- uint64_t _lastReceived;
- InetAddress _localAddress;
- uint16_t _flags;
-};
-
-} // namespace ZeroTier
-
-#endif
diff --git a/node/RuntimeEnvironment.hpp b/node/RuntimeEnvironment.hpp
index e5d1f446..10cc6ec0 100644
--- a/node/RuntimeEnvironment.hpp
+++ b/node/RuntimeEnvironment.hpp
@@ -32,6 +32,7 @@
#include "Constants.hpp"
#include "Identity.hpp"
+#include "Mutex.hpp"
namespace ZeroTier {
@@ -43,6 +44,8 @@ class Multicaster;
class AntiRecursion;
class NetworkController;
class SelfAwareness;
+class Cluster;
+class DeferredPackets;
/**
* Holds global state for an instance of ZeroTier::Node
@@ -51,14 +54,18 @@ class RuntimeEnvironment
{
public:
RuntimeEnvironment(Node *n) :
- node(n),
- identity(),
- localNetworkController((NetworkController *)0),
- sw((Switch *)0),
- mc((Multicaster *)0),
- antiRec((AntiRecursion *)0),
- topology((Topology *)0),
- sa((SelfAwareness *)0)
+ node(n)
+ ,identity()
+ ,localNetworkController((NetworkController *)0)
+ ,sw((Switch *)0)
+ ,mc((Multicaster *)0)
+ ,antiRec((AntiRecursion *)0)
+ ,topology((Topology *)0)
+ ,sa((SelfAwareness *)0)
+ ,dp((DeferredPackets *)0)
+#ifdef ZT_ENABLE_CLUSTER
+ ,cluster((Cluster *)0)
+#endif
{
}
@@ -86,6 +93,15 @@ public:
AntiRecursion *antiRec;
Topology *topology;
SelfAwareness *sa;
+ DeferredPackets *dp;
+
+#ifdef ZT_ENABLE_CLUSTER
+ Cluster *cluster;
+#endif
+
+	// This is set to >0 if background threads are waiting on deferred
+	// packets; otherwise 'dp' should not be used.
+ volatile int dpEnabled;
};
} // namespace ZeroTier
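A rough sketch of how a caller might honor the dpEnabled flag noted above; the surrounding RuntimeEnvironment wiring is assumed and not shown:

// Sketch only: defer work to the deferred-packet threads only when they are
// active. 'RR->dp' must not be touched while dpEnabled is 0.
static bool canDefer(const ZeroTier::RuntimeEnvironment *RR)
{
	return (RR->dpEnabled > 0); // background deferred-packet threads are waiting
}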
diff --git a/node/Salsa20.cpp b/node/Salsa20.cpp
index f8cf8591..3aa19ac6 100644
--- a/node/Salsa20.cpp
+++ b/node/Salsa20.cpp
@@ -66,7 +66,7 @@ static const _s20sseconsts _S20SSECONSTANTS;
namespace ZeroTier {
-void Salsa20::init(const void *key,unsigned int kbits,const void *iv,unsigned int rounds)
+void Salsa20::init(const void *key,unsigned int kbits,const void *iv)
throw()
{
#ifdef ZT_SALSA20_SSE
@@ -121,11 +121,9 @@ void Salsa20::init(const void *key,unsigned int kbits,const void *iv,unsigned in
_state.i[15] = U8TO32_LITTLE(constants + 12);
_state.i[0] = U8TO32_LITTLE(constants + 0);
#endif
-
- _roundsDiv4 = rounds / 4;
}
-void Salsa20::encrypt(const void *in,void *out,unsigned int bytes)
+void Salsa20::encrypt12(const void *in,void *out,unsigned int bytes)
throw()
{
uint8_t tmp[64];
@@ -175,104 +173,169 @@ void Salsa20::encrypt(const void *in,void *out,unsigned int bytes)
__m128i X1 = _mm_loadu_si128((const __m128i *)&(_state.v[1]));
__m128i X2 = _mm_loadu_si128((const __m128i *)&(_state.v[2]));
__m128i X3 = _mm_loadu_si128((const __m128i *)&(_state.v[3]));
+ __m128i T;
__m128i X0s = X0;
__m128i X1s = X1;
__m128i X2s = X2;
__m128i X3s = X3;
- for (i=0;i<_roundsDiv4;++i) {
- __m128i T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7));
- X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
- X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 13));
- X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
- X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
-
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
-
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 7));
- X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
- X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 13));
- X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
- X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
-
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
-
- // --
-
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7));
- X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
- X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 13));
- X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
- X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
-
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
-
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 7));
- X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
- X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 13));
- X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
- X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));
-
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- }
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
X0 = _mm_add_epi32(X0s,X0);
X1 = _mm_add_epi32(X1s,X1);
X2 = _mm_add_epi32(X2s,X2);
X3 = _mm_add_epi32(X3s,X3);
- {
- __m128i k02 = _mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32));
- k02 = _mm_shuffle_epi32(k02, _MM_SHUFFLE(0, 1, 2, 3));
- __m128i k13 = _mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32));
- k13 = _mm_shuffle_epi32(k13, _MM_SHUFFLE(0, 1, 2, 3));
- __m128i k20 = _mm_or_si128(_mm_and_si128(X2, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X1, _S20SSECONSTANTS.maskHi32));
- __m128i k31 = _mm_or_si128(_mm_and_si128(X3, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X2, _S20SSECONSTANTS.maskHi32));
-
- const float *const mv = (const float *)m;
- float *const cv = (float *)c;
-
- _mm_storeu_ps(cv,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k02,k20),_mm_castps_si128(_mm_loadu_ps(mv)))));
- _mm_storeu_ps(cv + 4,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k13,k31),_mm_castps_si128(_mm_loadu_ps(mv + 4)))));
- _mm_storeu_ps(cv + 8,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k20,k02),_mm_castps_si128(_mm_loadu_ps(mv + 8)))));
- _mm_storeu_ps(cv + 12,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k31,k13),_mm_castps_si128(_mm_loadu_ps(mv + 12)))));
- }
+ __m128i k02 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32)), _MM_SHUFFLE(0, 1, 2, 3));
+ __m128i k13 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32)), _MM_SHUFFLE(0, 1, 2, 3));
+ __m128i k20 = _mm_or_si128(_mm_and_si128(X2, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X1, _S20SSECONSTANTS.maskHi32));
+ __m128i k31 = _mm_or_si128(_mm_and_si128(X3, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X2, _S20SSECONSTANTS.maskHi32));
+ _mm_storeu_ps(reinterpret_cast<float *>(c),_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k02,k20),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m))))));
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 4,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k13,k31),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 4)))));
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 8,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k20,k02),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 8)))));
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 12,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k31,k13),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 12)))));
if (!(++_state.i[8])) {
++_state.i[5]; // state reordered for SSE
@@ -296,76 +359,942 @@ void Salsa20::encrypt(const void *in,void *out,unsigned int bytes)
x14 = j14;
x15 = j15;
- for(i=0;i<_roundsDiv4;++i) {
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
-
- // --
-
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ x0 = PLUS(x0,j0);
+ x1 = PLUS(x1,j1);
+ x2 = PLUS(x2,j2);
+ x3 = PLUS(x3,j3);
+ x4 = PLUS(x4,j4);
+ x5 = PLUS(x5,j5);
+ x6 = PLUS(x6,j6);
+ x7 = PLUS(x7,j7);
+ x8 = PLUS(x8,j8);
+ x9 = PLUS(x9,j9);
+ x10 = PLUS(x10,j10);
+ x11 = PLUS(x11,j11);
+ x12 = PLUS(x12,j12);
+ x13 = PLUS(x13,j13);
+ x14 = PLUS(x14,j14);
+ x15 = PLUS(x15,j15);
+
+ U32TO8_LITTLE(c + 0,XOR(x0,U8TO32_LITTLE(m + 0)));
+ U32TO8_LITTLE(c + 4,XOR(x1,U8TO32_LITTLE(m + 4)));
+ U32TO8_LITTLE(c + 8,XOR(x2,U8TO32_LITTLE(m + 8)));
+ U32TO8_LITTLE(c + 12,XOR(x3,U8TO32_LITTLE(m + 12)));
+ U32TO8_LITTLE(c + 16,XOR(x4,U8TO32_LITTLE(m + 16)));
+ U32TO8_LITTLE(c + 20,XOR(x5,U8TO32_LITTLE(m + 20)));
+ U32TO8_LITTLE(c + 24,XOR(x6,U8TO32_LITTLE(m + 24)));
+ U32TO8_LITTLE(c + 28,XOR(x7,U8TO32_LITTLE(m + 28)));
+ U32TO8_LITTLE(c + 32,XOR(x8,U8TO32_LITTLE(m + 32)));
+ U32TO8_LITTLE(c + 36,XOR(x9,U8TO32_LITTLE(m + 36)));
+ U32TO8_LITTLE(c + 40,XOR(x10,U8TO32_LITTLE(m + 40)));
+ U32TO8_LITTLE(c + 44,XOR(x11,U8TO32_LITTLE(m + 44)));
+ U32TO8_LITTLE(c + 48,XOR(x12,U8TO32_LITTLE(m + 48)));
+ U32TO8_LITTLE(c + 52,XOR(x13,U8TO32_LITTLE(m + 52)));
+ U32TO8_LITTLE(c + 56,XOR(x14,U8TO32_LITTLE(m + 56)));
+ U32TO8_LITTLE(c + 60,XOR(x15,U8TO32_LITTLE(m + 60)));
+
+ if (!(++j8)) {
+ ++j9;
+ /* stopping at 2^70 bytes per nonce is user's responsibility */
+ }
+#endif
+
+ if (bytes <= 64) {
+ if (bytes < 64) {
+ for (i = 0;i < bytes;++i)
+ ctarget[i] = c[i];
+ }
+
+#ifndef ZT_SALSA20_SSE
+ _state.i[8] = j8;
+ _state.i[9] = j9;
+#endif
+
+ return;
+ }
+
+ bytes -= 64;
+ c += 64;
+ m += 64;
+ }
+}
+
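A brief usage sketch for the split 12/20-round API, assuming a default constructor plus the init() declared in Salsa20.hpp (256-bit key and 64-bit IV per the Salsa20 spec):

#include <stdint.h>

#include "Salsa20.hpp" // assumed header providing ZeroTier::Salsa20

// Encrypt (or decrypt; the stream cipher is symmetric) 'len' bytes.
static void streamExample(const uint8_t key[32],const uint8_t iv[8],const void *in,void *out,unsigned int len)
{
	ZeroTier::Salsa20 s20;
	s20.init(key,256,iv);        // kbits selects 128- or 256-bit key setup
	s20.encrypt12(in,out,len);   // reduced 12-round variant
	// s20.encrypt20(in,out,len) would run the full 20-round variant instead.
}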
+void Salsa20::encrypt20(const void *in,void *out,unsigned int bytes)
+ throw()
+{
+ uint8_t tmp[64];
+ const uint8_t *m = (const uint8_t *)in;
+ uint8_t *c = (uint8_t *)out;
+ uint8_t *ctarget = c;
+ unsigned int i;
+
+#ifndef ZT_SALSA20_SSE
+ uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
+ uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
+#endif
+
+ if (!bytes)
+ return;
+
+#ifndef ZT_SALSA20_SSE
+ j0 = _state.i[0];
+ j1 = _state.i[1];
+ j2 = _state.i[2];
+ j3 = _state.i[3];
+ j4 = _state.i[4];
+ j5 = _state.i[5];
+ j6 = _state.i[6];
+ j7 = _state.i[7];
+ j8 = _state.i[8];
+ j9 = _state.i[9];
+ j10 = _state.i[10];
+ j11 = _state.i[11];
+ j12 = _state.i[12];
+ j13 = _state.i[13];
+ j14 = _state.i[14];
+ j15 = _state.i[15];
+#endif
+
+ for (;;) {
+ if (bytes < 64) {
+ for (i = 0;i < bytes;++i)
+ tmp[i] = m[i];
+ m = tmp;
+ ctarget = c;
+ c = tmp;
}
+#ifdef ZT_SALSA20_SSE
+ __m128i X0 = _mm_loadu_si128((const __m128i *)&(_state.v[0]));
+ __m128i X1 = _mm_loadu_si128((const __m128i *)&(_state.v[1]));
+ __m128i X2 = _mm_loadu_si128((const __m128i *)&(_state.v[2]));
+ __m128i X3 = _mm_loadu_si128((const __m128i *)&(_state.v[3]));
+ __m128i T;
+ __m128i X0s = X0;
+ __m128i X1s = X1;
+ __m128i X2s = X2;
+ __m128i X3s = X3;
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ // 2X round -------------------------------------------------------------
+ T = _mm_add_epi32(X0, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X1, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X3, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x93);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x39);
+ T = _mm_add_epi32(X0, X1);
+ X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
+ T = _mm_add_epi32(X3, X0);
+ X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
+ T = _mm_add_epi32(X2, X3);
+ X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
+ T = _mm_add_epi32(X1, X2);
+ X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
+ X1 = _mm_shuffle_epi32(X1, 0x39);
+ X2 = _mm_shuffle_epi32(X2, 0x4E);
+ X3 = _mm_shuffle_epi32(X3, 0x93);
+
+ X0 = _mm_add_epi32(X0s,X0);
+ X1 = _mm_add_epi32(X1s,X1);
+ X2 = _mm_add_epi32(X2s,X2);
+ X3 = _mm_add_epi32(X3s,X3);
+
+ __m128i k02 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32)), _MM_SHUFFLE(0, 1, 2, 3));
+ __m128i k13 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32)), _MM_SHUFFLE(0, 1, 2, 3));
+ __m128i k20 = _mm_or_si128(_mm_and_si128(X2, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X1, _S20SSECONSTANTS.maskHi32));
+ __m128i k31 = _mm_or_si128(_mm_and_si128(X3, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X2, _S20SSECONSTANTS.maskHi32));
+ _mm_storeu_ps(reinterpret_cast<float *>(c),_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k02,k20),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m))))));
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 4,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k13,k31),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 4)))));
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 8,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k20,k02),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 8)))));
+ _mm_storeu_ps(reinterpret_cast<float *>(c) + 12,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k31,k13),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 12)))));
+
+ if (!(++_state.i[8])) {
+ ++_state.i[5]; // state reordered for SSE
+ /* stopping at 2^70 bytes per nonce is user's responsibility */
+ }
+#else
+ x0 = j0;
+ x1 = j1;
+ x2 = j2;
+ x3 = j3;
+ x4 = j4;
+ x5 = j5;
+ x6 = j6;
+ x7 = j7;
+ x8 = j8;
+ x9 = j9;
+ x10 = j10;
+ x11 = j11;
+ x12 = j12;
+ x13 = j13;
+ x14 = j14;
+ x15 = j15;
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
+ // 2X round -------------------------------------------------------------
+ x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
+ x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
+ x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
+ x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
+ x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
+ x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
+ x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
+ x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
+ x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
+ x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
+ x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
+ x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
+ x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
+ x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
+ x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
+ x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
+ x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
+ x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
+ x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
+ x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
+ x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
+ x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
+ x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
+ x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
+ x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
+ x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
+ x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
+ x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
+ x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
+ x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
+ x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
+
x0 = PLUS(x0,j0);
x1 = PLUS(x1,j1);
x2 = PLUS(x2,j2);
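
Editor's note: the ten unrolled "2X round" blocks above (in both the SSE and portable paths) are the standard Salsa20 double round repeated. For reference, a compact sketch of one double round is given below, written with the same ROTATE/PLUS/XOR macros the portable path uses; the QR macro name is illustrative and does not appear in the source. Salsa20/20 applies ten of these double rounds per block, Salsa20/12 applies six.

    // Illustrative only: one Salsa20 double round (column round then row round),
    // equivalent to each "2X round" block unrolled above.
    #define QR(a,b,c,d) { \
    	(b) = XOR((b),ROTATE(PLUS((a),(d)), 7)); \
    	(c) = XOR((c),ROTATE(PLUS((b),(a)), 9)); \
    	(d) = XOR((d),ROTATE(PLUS((c),(b)),13)); \
    	(a) = XOR((a),ROTATE(PLUS((d),(c)),18)); }
    // column round
    QR(x0,x4,x8,x12)  QR(x5,x9,x13,x1)  QR(x10,x14,x2,x6)  QR(x15,x3,x7,x11)
    // row round
    QR(x0,x1,x2,x3)   QR(x5,x6,x7,x4)   QR(x10,x11,x8,x9)  QR(x15,x12,x13,x14)
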
diff --git a/node/Salsa20.hpp b/node/Salsa20.hpp
index 84baf3da..7e4c1e53 100644
--- a/node/Salsa20.hpp
+++ b/node/Salsa20.hpp
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include "Constants.hpp"
+#include "Utils.hpp"
#if (!defined(ZT_SALSA20_SSE)) && (defined(__SSE2__) || defined(__WINDOWS__))
#define ZT_SALSA20_SSE 1
@@ -31,16 +32,17 @@ class Salsa20
public:
Salsa20() throw() {}
+ ~Salsa20() { Utils::burn(&_state,sizeof(_state)); }
+
/**
* @param key Key bits
* @param kbits Number of key bits: 128 or 256 (recommended)
* @param iv 64-bit initialization vector
- * @param rounds Number of rounds: 8, 12, or 20
*/
- Salsa20(const void *key,unsigned int kbits,const void *iv,unsigned int rounds)
+ Salsa20(const void *key,unsigned int kbits,const void *iv)
throw()
{
- init(key,kbits,iv,rounds);
+ init(key,kbits,iv);
}
/**
@@ -49,19 +51,28 @@ public:
* @param key Key bits
* @param kbits Number of key bits: 128 or 256 (recommended)
* @param iv 64-bit initialization vector
- * @param rounds Number of rounds: 8, 12, or 20
*/
- void init(const void *key,unsigned int kbits,const void *iv,unsigned int rounds)
+ void init(const void *key,unsigned int kbits,const void *iv)
+ throw();
+
+ /**
+ * Encrypt data using Salsa20/12
+ *
+ * @param in Input data
+ * @param out Output buffer
+ * @param bytes Length of data
+ */
+ void encrypt12(const void *in,void *out,unsigned int bytes)
throw();
/**
- * Encrypt data
+ * Encrypt data using Salsa20/20
*
* @param in Input data
* @param out Output buffer
* @param bytes Length of data
*/
- void encrypt(const void *in,void *out,unsigned int bytes)
+ void encrypt20(const void *in,void *out,unsigned int bytes)
throw();
/**
@@ -71,10 +82,23 @@ public:
* @param out Output buffer
* @param bytes Length of data
*/
- inline void decrypt(const void *in,void *out,unsigned int bytes)
+ inline void decrypt12(const void *in,void *out,unsigned int bytes)
+ throw()
+ {
+ encrypt12(in,out,bytes);
+ }
+
+ /**
+ * Decrypt data
+ *
+ * @param in Input data
+ * @param out Output buffer
+ * @param bytes Length of data
+ */
+ inline void decrypt20(const void *in,void *out,unsigned int bytes)
throw()
{
- encrypt(in,out,bytes);
+ encrypt20(in,out,bytes);
}
private:
@@ -84,7 +108,6 @@ private:
#endif // ZT_SALSA20_SSE
uint32_t i[16];
} _state;
- unsigned int _roundsDiv4;
};
} // namespace ZeroTier
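
Editor's note: with the rounds parameter removed from the constructor and init(), the round count is now chosen per call via encrypt12()/encrypt20() (decrypt12()/decrypt20() are the same operation). A minimal usage sketch; key, iv, plain, cipher and len are assumed caller-provided values:

    ZeroTier::Salsa20 s20(key,256,iv);  // 256-bit key, 64-bit IV; no 'rounds' argument anymore
    s20.encrypt12(plain,cipher,len);    // Salsa20/12
    // s20.encrypt20(plain,cipher,len); // Salsa20/20, selected at the call site instead of at init()
    // The destructor now calls Utils::burn() to scrub the key schedule from memory.
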
diff --git a/node/SelfAwareness.cpp b/node/SelfAwareness.cpp
index 7329322a..ce75eb03 100644
--- a/node/SelfAwareness.cpp
+++ b/node/SelfAwareness.cpp
@@ -36,6 +36,7 @@
#include "Topology.hpp"
#include "Packet.hpp"
#include "Peer.hpp"
+#include "Switch.hpp"
// Entry timeout -- make it fairly long since this is just to prevent stale buildup
#define ZT_SELFAWARENESS_ENTRY_TIMEOUT 3600000
@@ -65,7 +66,8 @@ private:
};
SelfAwareness::SelfAwareness(const RuntimeEnvironment *renv) :
- RR(renv)
+ RR(renv),
+ _phy(32)
{
}
@@ -77,66 +79,62 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi
{
const InetAddress::IpScope scope = myPhysicalAddress.ipScope();
+ // This would be weird, e.g. a public IP talking to 10.0.0.1, so just ignore it.
+ // If your network is this weird it's probably not reliable information.
+ if (scope != reporterPhysicalAddress.ipScope())
+ return;
+
+ // Some scopes we ignore, and global scope IPs are only used for this
+ // mechanism if they come from someone we trust (e.g. a root).
switch(scope) {
case InetAddress::IP_SCOPE_NONE:
case InetAddress::IP_SCOPE_LOOPBACK:
case InetAddress::IP_SCOPE_MULTICAST:
return;
case InetAddress::IP_SCOPE_GLOBAL:
- if ((!trusted)||(scope != reporterPhysicalAddress.ipScope()))
+ if (!trusted)
return;
break;
default:
- if (scope != reporterPhysicalAddress.ipScope())
- return;
break;
}
Mutex::Lock _l(_phy_m);
+ PhySurfaceEntry &entry = _phy[PhySurfaceKey(reporter,reporterPhysicalAddress,scope)];
- PhySurfaceEntry &entry = _phy[PhySurfaceKey(reporter,scope)];
-
- if ((now - entry.ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT) {
+ if ( ((now - entry.ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT) && (!entry.mySurface.ipsEqual(myPhysicalAddress)) ) {
entry.mySurface = myPhysicalAddress;
entry.ts = now;
- TRACE("learned physical address %s for scope %u as seen from %s(%s) (replaced <null>)",myPhysicalAddress.toString().c_str(),(unsigned int)scope,reporter.toString().c_str(),reporterPhysicalAddress.toString().c_str());
- } else if (entry.mySurface != myPhysicalAddress) {
- entry.mySurface = myPhysicalAddress;
- entry.ts = now;
- TRACE("learned physical address %s for scope %u as seen from %s(%s) (replaced %s, resetting all in scope)",myPhysicalAddress.toString().c_str(),(unsigned int)scope,reporter.toString().c_str(),reporterPhysicalAddress.toString().c_str(),entry.mySurface.toString().c_str());
+ TRACE("physical address %s for scope %u as seen from %s(%s) differs from %s, resetting paths in scope",myPhysicalAddress.toString().c_str(),(unsigned int)scope,reporter.toString().c_str(),reporterPhysicalAddress.toString().c_str(),entry.mySurface.toString().c_str());
- // Erase all entries (other than this one) for this scope to prevent thrashing
- // Note: we should probably not use 'entry' after this
+ // Erase all entries in this scope that were not reported from this remote address to prevent 'thrashing'
+ // due to multiple reports of endpoint change.
+ // Don't use 'entry' after this since hash table gets modified.
{
Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
PhySurfaceKey *k = (PhySurfaceKey *)0;
PhySurfaceEntry *e = (PhySurfaceEntry *)0;
while (i.next(k,e)) {
- if ((k->reporter != reporter)&&(k->scope == scope))
+ if ((k->reporterPhysicalAddress != reporterPhysicalAddress)&&(k->scope == scope))
_phy.erase(*k);
}
}
+ // Reset all paths within this scope
_ResetWithinScope rset(RR,now,(InetAddress::IpScope)scope);
RR->topology->eachPeer<_ResetWithinScope &>(rset);
- // For all peers for whom we forgot an address, send a packet indirectly if
- // they are still considered alive so that we will re-establish direct links.
- SharedPtr<Peer> sn(RR->topology->getBestRoot());
- if (sn) {
- RemotePath *snp = sn->getBestPath(now);
- if (snp) {
- for(std::vector< SharedPtr<Peer> >::const_iterator p(rset.peersReset.begin());p!=rset.peersReset.end();++p) {
- if ((*p)->alive(now)) {
- TRACE("sending indirect NOP to %s via %s(%s) to re-establish link",(*p)->address().toString().c_str(),sn->address().toString().c_str(),snp->address().toString().c_str());
- Packet outp((*p)->address(),RR->identity.address(),Packet::VERB_NOP);
- outp.armor((*p)->key(),true);
- snp->send(RR,outp.data(),outp.size(),now);
- }
- }
+ // Send a NOP to all peers for whom we forgot a path. This will cause direct
+ // links to be re-established if possible, possibly using a root server or some
+ // other relay.
+ for(std::vector< SharedPtr<Peer> >::const_iterator p(rset.peersReset.begin());p!=rset.peersReset.end();++p) {
+ if ((*p)->activelyTransferringFrames(now)) {
+ Packet outp((*p)->address(),RR->identity.address(),Packet::VERB_NOP);
+ RR->sw->send(outp,true,0);
}
}
} else {
+ entry.mySurface = myPhysicalAddress;
entry.ts = now;
}
}
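
Editor's note: the reworked iam() above amounts to a scope-gating rule followed by per-(reporter, reporter physical address, scope) surface tracking. A condensed sketch of just the acceptance rule, as a free function with an illustrative name that is not in the source:

    static bool acceptSurfaceReport(InetAddress::IpScope myScope,InetAddress::IpScope reporterScope,bool trusted)
    {
    	if (myScope != reporterScope)
    		return false; // cross-scope reports (e.g. a public IP as seen from 10.0.0.1) are ignored
    	switch(myScope) {
    		case InetAddress::IP_SCOPE_NONE:
    		case InetAddress::IP_SCOPE_LOOPBACK:
    		case InetAddress::IP_SCOPE_MULTICAST:
    			return false;   // never meaningful
    		case InetAddress::IP_SCOPE_GLOBAL:
    			return trusted; // global surface info is only believed from trusted peers (e.g. roots)
    		default:
    			return true;    // other scopes accepted from anyone reporting from the same scope
    	}
    }
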
diff --git a/node/SelfAwareness.hpp b/node/SelfAwareness.hpp
index 3133553e..400b05e6 100644
--- a/node/SelfAwareness.hpp
+++ b/node/SelfAwareness.hpp
@@ -69,14 +69,14 @@ private:
struct PhySurfaceKey
{
Address reporter;
+ InetAddress reporterPhysicalAddress;
InetAddress::IpScope scope;
- inline unsigned long hashCode() const throw() { return ((unsigned long)reporter.toInt() + (unsigned long)scope); }
-
PhySurfaceKey() : reporter(),scope(InetAddress::IP_SCOPE_NONE) {}
- PhySurfaceKey(const Address &r,InetAddress::IpScope s) : reporter(r),scope(s) {}
- inline bool operator<(const PhySurfaceKey &k) const throw() { return ((reporter < k.reporter) ? true : ((reporter == k.reporter) ? ((int)scope < (int)k.scope) : false)); }
- inline bool operator==(const PhySurfaceKey &k) const throw() { return ((reporter == k.reporter)&&(scope == k.scope)); }
+ PhySurfaceKey(const Address &r,const InetAddress &ra,InetAddress::IpScope s) : reporter(r),reporterPhysicalAddress(ra),scope(s) {}
+
+ inline unsigned long hashCode() const throw() { return ((unsigned long)reporter.toInt() + (unsigned long)scope); }
+ inline bool operator==(const PhySurfaceKey &k) const throw() { return ((reporter == k.reporter)&&(reporterPhysicalAddress == k.reporterPhysicalAddress)&&(scope == k.scope)); }
};
struct PhySurfaceEntry
{
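
Editor's note: adding reporterPhysicalAddress to the key means the same reporter seen over two different physical endpoints now tracks two independent surfaces. A tiny illustration, where physA and physB are assumed InetAddress values:

    PhySurfaceKey k1(reporter,physA,InetAddress::IP_SCOPE_GLOBAL);
    PhySurfaceKey k2(reporter,physB,InetAddress::IP_SCOPE_GLOBAL);
    // k1 == k2 only if physA == physB. hashCode() still ignores the physical address,
    // so the two keys may share a bucket, which the Hashtable resolves by comparison.
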
diff --git a/node/SharedPtr.hpp b/node/SharedPtr.hpp
index 4ecfa818..289c499f 100644
--- a/node/SharedPtr.hpp
+++ b/node/SharedPtr.hpp
@@ -64,20 +64,6 @@ public:
++obj->__refCount;
}
- SharedPtr(T *obj,bool runAwayFromZombies)
- throw() :
- _ptr(obj)
- {
- // HACK: this is used in "handlers" to take ownership of naked pointers,
- // an ugly pattern that really ought to be factored out.
- if (runAwayFromZombies) {
- if ((int)(++obj->__refCount) < 2) {
- --obj->__refCount;
- _ptr = (T *)0;
- }
- } else ++obj->__refCount;
- }
-
SharedPtr(const SharedPtr &sp)
throw() :
_ptr(sp._getAndInc())
@@ -105,6 +91,25 @@ public:
return *this;
}
+ /**
+ * Set to a naked pointer and increment its reference count
+ *
+ * This assumes this SharedPtr is NULL and that ptr is not a 'zombie.' No
+ * checks are performed.
+ *
+ * @param ptr Naked pointer to assign
+ */
+ inline void setToUnsafe(T *ptr)
+ {
+ ++ptr->__refCount;
+ _ptr = ptr;
+ }
+
+ /**
+ * Swap with another pointer 'for free' without ref count overhead
+ *
+ * @param with Pointer to swap with
+ */
inline void swap(SharedPtr &with)
throw()
{
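
Editor's note: the removed zombie-checking constructor is replaced by setToUnsafe(), which performs no checks at all; it is only valid when the SharedPtr is currently NULL and the target is known to be alive. A hedged sketch of the intended call pattern, where nakedPeer is an assumed raw pointer whose lifetime is guaranteed elsewhere:

    SharedPtr<Peer> sp;          // must be NULL before the call
    sp.setToUnsafe(nakedPeer);   // increments __refCount and adopts the pointer, no zombie check
    sp.swap(other);              // swap() exchanges pointers without touching reference counts
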
diff --git a/node/Switch.cpp b/node/Switch.cpp
index 9ea8ac49..74e2f4c6 100644
--- a/node/Switch.cpp
+++ b/node/Switch.cpp
@@ -45,6 +45,7 @@
#include "AntiRecursion.hpp"
#include "SelfAwareness.hpp"
#include "Packet.hpp"
+#include "Cluster.hpp"
namespace ZeroTier {
@@ -153,25 +154,84 @@ void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,c
MulticastGroup mg(to,0);
if (to.isBroadcast()) {
- if (
- (etherType == ZT_ETHERTYPE_ARP)&&
- (len >= 28)&&
- (
- (((const unsigned char *)data)[2] == 0x08)&&
- (((const unsigned char *)data)[3] == 0x00)&&
- (((const unsigned char *)data)[4] == 6)&&
- (((const unsigned char *)data)[5] == 4)&&
- (((const unsigned char *)data)[7] == 0x01)
- )
- ) {
- // Cram IPv4 IP into ADI field to make IPv4 ARP broadcast channel specific and scalable
- // Also: enableBroadcast() does not apply to ARP since it's required for IPv4
+ if ( (etherType == ZT_ETHERTYPE_ARP) && (len >= 28) && ((((const uint8_t *)data)[2] == 0x08)&&(((const uint8_t *)data)[3] == 0x00)&&(((const uint8_t *)data)[4] == 6)&&(((const uint8_t *)data)[5] == 4)&&(((const uint8_t *)data)[7] == 0x01)) ) {
+ /* IPv4 ARP is one of the few special cases that we impose upon what is
+ * otherwise a straightforward Ethernet switch emulation. Vanilla ARP
+ * is dumb old broadcast and simply doesn't scale. ZeroTier multicast
+ * groups have an additional field called ADI (additional distinguishing
+ * information) which was added specifically for ARP though it could
+ * be used for other things too. We then take ARP broadcasts and turn
+ * them into multicasts by stuffing the IP address being queried into
+ * the 32-bit ADI field. In practice this uses our multicast pub/sub
+ * system to implement a kind of extended/distributed ARP table. */
mg = MulticastGroup::deriveMulticastGroupForAddressResolution(InetAddress(((const unsigned char *)data) + 24,4,0));
} else if (!nconf->enableBroadcast()) {
// Don't transmit broadcasts if this network doesn't want them
TRACE("%.16llx: dropped broadcast since ff:ff:ff:ff:ff:ff is not enabled",network->id());
return;
}
+ } else if ((etherType == ZT_ETHERTYPE_IPV6)&&(len >= (40 + 8 + 16))) {
+ /* IPv6 NDP emulation on ZeroTier-RFC4193 addressed networks! This allows
+ * for multicast-free operation in IPv6 networks, which both improves
+ * performance and is friendlier to mobile and (especially) IoT devices.
+ * In the future there may be a no-multicast build option for embedded
+ * and IoT use and this will be the preferred addressing mode. Note that
+ * it plays nice with our L2 emulation philosophy and even with bridging.
+ * While "real" devices behind the bridge can't have ZT-RFC4193 addresses
+ * themselves, they can look these addresses up with NDP and it will
+ * work just fine. */
+ if ((reinterpret_cast<const uint8_t *>(data)[6] == 0x3a)&&(reinterpret_cast<const uint8_t *>(data)[40] == 0x87)) { // ICMPv6 neighbor solicitation
+ for(std::vector<InetAddress>::const_iterator sip(nconf->staticIps().begin()),sipend(nconf->staticIps().end());sip!=sipend;++sip) {
+ if ((sip->ss_family == AF_INET6)&&(Utils::ntoh((uint16_t)reinterpret_cast<const struct sockaddr_in6 *>(&(*sip))->sin6_port) == 88)) {
+ const uint8_t *my6 = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&(*sip))->sin6_addr.s6_addr);
+ if ((my6[0] == 0xfd)&&(my6[9] == 0x99)&&(my6[10] == 0x93)) { // ZT-RFC4193 == fd__:____:____:____:__99:93__:____:____ / 88
+ const uint8_t *pkt6 = reinterpret_cast<const uint8_t *>(data) + 40 + 8;
+ unsigned int ptr = 0;
+ while (ptr != 11) {
+ if (pkt6[ptr] != my6[ptr])
+ break;
+ ++ptr;
+ }
+ if (ptr == 11) { // /88 matches an assigned address on this network
+ const Address atPeer(pkt6 + ptr,5);
+ if (atPeer != RR->identity.address()) {
+ const MAC atPeerMac(atPeer,network->id());
+ TRACE("ZT-RFC4193 NDP emulation: %.16llx: forging response for %s/%s",network->id(),atPeer.toString().c_str(),atPeerMac.toString().c_str());
+
+ uint8_t adv[72];
+ adv[0] = 0x60; adv[1] = 0x00; adv[2] = 0x00; adv[3] = 0x00;
+ adv[4] = 0x00; adv[5] = 0x20;
+ adv[6] = 0x3a; adv[7] = 0xff;
+ for(int i=0;i<16;++i) adv[8 + i] = pkt6[i];
+ for(int i=0;i<16;++i) adv[24 + i] = my6[i];
+ adv[40] = 0x88; adv[41] = 0x00;
+ adv[42] = 0x00; adv[43] = 0x00; // future home of checksum
+ adv[44] = 0x60; adv[45] = 0x00; adv[46] = 0x00; adv[47] = 0x00;
+ for(int i=0;i<16;++i) adv[48 + i] = pkt6[i];
+ adv[64] = 0x02; adv[65] = 0x01;
+ adv[66] = atPeerMac[0]; adv[67] = atPeerMac[1]; adv[68] = atPeerMac[2]; adv[69] = atPeerMac[3]; adv[70] = atPeerMac[4]; adv[71] = atPeerMac[5];
+
+ uint16_t pseudo_[36];
+ uint8_t *const pseudo = reinterpret_cast<uint8_t *>(pseudo_);
+ for(int i=0;i<32;++i) pseudo[i] = adv[8 + i];
+ pseudo[32] = 0x00; pseudo[33] = 0x00; pseudo[34] = 0x00; pseudo[35] = 0x20;
+ pseudo[36] = 0x00; pseudo[37] = 0x00; pseudo[38] = 0x00; pseudo[39] = 0x3a;
+ for(int i=0;i<32;++i) pseudo[40 + i] = adv[40 + i];
+ uint32_t checksum = 0;
+ for(int i=0;i<36;++i) checksum += Utils::hton(pseudo_[i]);
+ while ((checksum >> 16)) checksum = (checksum & 0xffff) + (checksum >> 16);
+ checksum = ~checksum;
+ adv[42] = (checksum >> 8) & 0xff;
+ adv[43] = checksum & 0xff;
+
+ RR->node->putFrame(network->id(),atPeerMac,from,ZT_ETHERTYPE_IPV6,0,adv,72);
+ return; // stop processing: we have handled this frame with a spoofed local reply so no need to send it anywhere
+ }
+ }
+ }
+ }
+ }
+ }
}
/* Learn multicast groups for bridged-in hosts.
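
Editor's note: the ZT-RFC4193 layout the NDP emulation above matches against is 0xfd + 8-byte network ID + 0x99 0x93 + 5-byte ZeroTier address (hence the /88 prefix comparison over bytes 0..10). A small illustrative helper, not part of the diff, showing how the embedded 40-bit address in bytes 11..15 would be recovered:

    // Illustrative only: extract the 40-bit ZeroTier address from a ZT-RFC4193 IPv6 address.
    static uint64_t ztAddressFromRfc4193(const uint8_t ip6[16])
    {
    	return ( ((uint64_t)ip6[11] << 32) |
    	         ((uint64_t)ip6[12] << 24) |
    	         ((uint64_t)ip6[13] << 16) |
    	         ((uint64_t)ip6[14] << 8) |
    	         (uint64_t)ip6[15] );
    }
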
@@ -203,7 +263,7 @@ void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,c
Address toZT(to.toAddress(network->id())); // since in-network MACs are derived from addresses and network IDs, we can reverse this
SharedPtr<Peer> toPeer(RR->topology->getPeer(toZT));
- const bool includeCom = ((!toPeer)||(toPeer->needsOurNetworkMembershipCertificate(network->id(),RR->node->now(),true)));;
+ const bool includeCom = ( (nconf->isPrivate()) && (nconf->com()) && ((!toPeer)||(toPeer->needsOurNetworkMembershipCertificate(network->id(),RR->node->now(),true))) );
if ((fromBridged)||(includeCom)) {
Packet outp(toZT,RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(network->id());
@@ -271,7 +331,7 @@ void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,c
SharedPtr<Peer> bridgePeer(RR->topology->getPeer(bridges[b]));
Packet outp(bridges[b],RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(network->id());
- if ((!bridgePeer)||(bridgePeer->needsOurNetworkMembershipCertificate(network->id(),RR->node->now(),true))) {
+ if ( (nconf->isPrivate()) && (nconf->com()) && ((!bridgePeer)||(bridgePeer->needsOurNetworkMembershipCertificate(network->id(),RR->node->now(),true))) ) {
outp.append((unsigned char)0x01); // 0x01 -- COM included
nconf->com().serialize(outp);
} else {
@@ -294,17 +354,18 @@ void Switch::send(const Packet &packet,bool encrypt,uint64_t nwid)
return;
}
+ //TRACE(">> %s to %s (%u bytes, encrypt==%d, nwid==%.16llx)",Packet::verbString(packet.verb()),packet.destination().toString().c_str(),packet.size(),(int)encrypt,nwid);
+
if (!_trySend(packet,encrypt,nwid)) {
Mutex::Lock _l(_txQueue_m);
_txQueue.push_back(TXQueueEntry(packet.destination(),RR->node->now(),packet,encrypt,nwid));
}
}
-bool Switch::unite(const Address &p1,const Address &p2,bool force)
+bool Switch::unite(const Address &p1,const Address &p2)
{
if ((p1 == RR->identity.address())||(p2 == RR->identity.address()))
return false;
-
SharedPtr<Peer> p1p = RR->topology->getPeer(p1);
if (!p1p)
return false;
@@ -314,14 +375,6 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force)
const uint64_t now = RR->node->now();
- {
- Mutex::Lock _l(_lastUniteAttempt_m);
- uint64_t &luts = _lastUniteAttempt[_LastUniteKey(p1,p2)];
- if (((now - luts) < ZT_MIN_UNITE_INTERVAL)&&(!force))
- return false;
- luts = now;
- }
-
std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
if ((!(cg.first))||(cg.first.ipScope() != cg.second.ipScope()))
return false;
@@ -382,7 +435,7 @@ void Switch::rendezvous(const SharedPtr<Peer> &peer,const InetAddress &localAddr
{
TRACE("sending NAT-t message to %s(%s)",peer->address().toString().c_str(),atAddr.toString().c_str());
const uint64_t now = RR->node->now();
- peer->attemptToContactAt(RR,localAddr,atAddr,now);
+ peer->sendHELLO(RR,localAddr,atAddr,now,2); // first attempt: send low-TTL packet to 'open' local NAT
{
Mutex::Lock _l(_contactQueue_m);
_contactQueue.push_back(ContactQueueEntry(peer,now + ZT_NAT_T_TACTICAL_ESCALATION_DELAY,localAddr,atAddr));
@@ -422,7 +475,7 @@ void Switch::doAnythingWaitingForPeer(const SharedPtr<Peer> &peer)
{ // finish processing any packets waiting on peer's public key / identity
Mutex::Lock _l(_rxQueue_m);
for(std::list< SharedPtr<IncomingPacket> >::iterator rxi(_rxQueue.begin());rxi!=_rxQueue.end();) {
- if ((*rxi)->tryDecode(RR))
+ if ((*rxi)->tryDecode(RR,false))
_rxQueue.erase(rxi++);
else ++rxi;
}
@@ -448,21 +501,21 @@ unsigned long Switch::doTimerTasks(uint64_t now)
Mutex::Lock _l(_contactQueue_m);
for(std::list<ContactQueueEntry>::iterator qi(_contactQueue.begin());qi!=_contactQueue.end();) {
if (now >= qi->fireAtTime) {
- if ((!qi->peer->alive(now))||(qi->peer->hasActiveDirectPath(now))) {
- // Cancel attempt if we've already connected or peer is no longer "alive"
+ if (qi->peer->hasActiveDirectPath(now)) {
+ // Cancel if connection has succeeded
_contactQueue.erase(qi++);
continue;
} else {
if (qi->strategyIteration == 0) {
// First strategy: send packet directly to destination
- qi->peer->attemptToContactAt(RR,qi->localAddr,qi->inaddr,now);
- } else if (qi->strategyIteration <= 4) {
- // Strategies 1-4: try escalating ports for symmetric NATs that remap sequentially
+ qi->peer->sendHELLO(RR,qi->localAddr,qi->inaddr,now);
+ } else if (qi->strategyIteration <= 3) {
+ // Strategies 1-3: try escalating ports for symmetric NATs that remap sequentially
InetAddress tmpaddr(qi->inaddr);
int p = (int)qi->inaddr.port() + qi->strategyIteration;
if (p < 0xffff) {
tmpaddr.setPort((unsigned int)p);
- qi->peer->attemptToContactAt(RR,qi->localAddr,tmpaddr,now);
+ qi->peer->sendHELLO(RR,qi->localAddr,tmpaddr,now);
} else qi->strategyIteration = 5;
} else {
// All strategies tried, expire entry
@@ -545,7 +598,7 @@ unsigned long Switch::doTimerTasks(uint64_t now)
_LastUniteKey *k = (_LastUniteKey *)0;
uint64_t *v = (uint64_t *)0;
while (i.next(k,v)) {
- if ((now - *v) >= (ZT_MIN_UNITE_INTERVAL * 16))
+ if ((now - *v) >= (ZT_MIN_UNITE_INTERVAL * 8))
_lastUniteAttempt.erase(*k);
}
}
@@ -567,6 +620,13 @@ void Switch::_handleRemotePacketFragment(const InetAddress &localAddr,const Inet
// It wouldn't hurt anything, just redundant and unnecessary.
SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
if ((!relayTo)||(!relayTo->send(RR,fragment.data(),fragment.size(),RR->node->now()))) {
+#ifdef ZT_ENABLE_CLUSTER
+ if (RR->cluster) {
+ RR->cluster->sendViaCluster(Address(),destination,fragment.data(),fragment.size(),false);
+ return;
+ }
+#endif
+
// Don't know peer or no direct path -- so relay via root server
relayTo = RR->topology->getBestRoot();
if (relayTo)
@@ -614,7 +674,7 @@ void Switch::_handleRemotePacketFragment(const InetAddress &localAddr,const Inet
packet->append(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength());
_defragQueue.erase(pid); // dq no longer valid after this
- if (!packet->tryDecode(RR)) {
+ if (!packet->tryDecode(RR,false)) {
Mutex::Lock _l(_rxQueue_m);
_rxQueue.push_back(packet);
}
@@ -626,11 +686,17 @@ void Switch::_handleRemotePacketFragment(const InetAddress &localAddr,const Inet
void Switch::_handleRemotePacketHead(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len)
{
- SharedPtr<IncomingPacket> packet(new IncomingPacket(data,len,localAddr,fromAddr,RR->node->now()));
+ const uint64_t now = RR->node->now();
+ SharedPtr<IncomingPacket> packet(new IncomingPacket(data,len,localAddr,fromAddr,now));
Address source(packet->source());
Address destination(packet->destination());
+ // Catch this and toss it -- it would never work, but it could happen if we somehow
+ // mistakenly guessed an address we're bound to as a destination for another peer.
+ if (source == RR->identity.address())
+ return;
+
//TRACE("<< %.16llx %s -> %s (size: %u)",(unsigned long long)packet->packetId(),source.toString().c_str(),destination.toString().c_str(),packet->size());
if (destination != RR->identity.address()) {
@@ -639,13 +705,32 @@ void Switch::_handleRemotePacketHead(const InetAddress &localAddr,const InetAddr
packet->incrementHops();
SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
- if ((relayTo)&&((relayTo->send(RR,packet->data(),packet->size(),RR->node->now())))) {
- unite(source,destination,false);
+ if ((relayTo)&&((relayTo->send(RR,packet->data(),packet->size(),now)))) {
+ Mutex::Lock _l(_lastUniteAttempt_m);
+ uint64_t &luts = _lastUniteAttempt[_LastUniteKey(source,destination)];
+ if ((now - luts) >= ZT_MIN_UNITE_INTERVAL) {
+ luts = now;
+ unite(source,destination);
+ }
} else {
- // Don't know peer or no direct path -- so relay via root server
+#ifdef ZT_ENABLE_CLUSTER
+ if (RR->cluster) {
+ bool shouldUnite;
+ {
+ Mutex::Lock _l(_lastUniteAttempt_m);
+ uint64_t &luts = _lastUniteAttempt[_LastUniteKey(source,destination)];
+ shouldUnite = ((now - luts) >= ZT_MIN_UNITE_INTERVAL);
+ if (shouldUnite)
+ luts = now;
+ }
+ RR->cluster->sendViaCluster(source,destination,packet->data(),packet->size(),shouldUnite);
+ return;
+ }
+#endif
+
relayTo = RR->topology->getBestRoot(&source,1,true);
if (relayTo)
- relayTo->send(RR,packet->data(),packet->size(),RR->node->now());
+ relayTo->send(RR,packet->data(),packet->size(),now);
}
} else {
TRACE("dropped relay %s(%s) -> %s, max hops exceeded",packet->source().toString().c_str(),fromAddr.toString().c_str(),destination.toString().c_str());
@@ -660,7 +745,7 @@ void Switch::_handleRemotePacketHead(const InetAddress &localAddr,const InetAddr
if (!dq.creationTime) {
// If we have no other fragments yet, create an entry and save the head
- dq.creationTime = RR->node->now();
+ dq.creationTime = now;
dq.frag0 = packet;
dq.totalFragments = 0; // 0 == unknown, waiting for Packet::Fragment
dq.haveFragments = 1; // head is first bit (left to right)
@@ -677,7 +762,7 @@ void Switch::_handleRemotePacketHead(const InetAddress &localAddr,const InetAddr
packet->append(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength());
_defragQueue.erase(pid); // dq no longer valid after this
- if (!packet->tryDecode(RR)) {
+ if (!packet->tryDecode(RR,false)) {
Mutex::Lock _l(_rxQueue_m);
_rxQueue.push_back(packet);
}
@@ -688,7 +773,7 @@ void Switch::_handleRemotePacketHead(const InetAddress &localAddr,const InetAddr
} // else this is a duplicate head, ignore
} else {
// Packet is unfragmented, so just process it
- if (!packet->tryDecode(RR)) {
+ if (!packet->tryDecode(RR,false)) {
Mutex::Lock _l(_rxQueue_m);
_rxQueue.push_back(packet);
}
@@ -726,17 +811,20 @@ bool Switch::_trySend(const Packet &packet,bool encrypt,uint64_t nwid)
return false; // sanity check: unconfigured network? why are we trying to talk to it?
}
- RemotePath *viaPath = peer->getBestPath(now);
+ Path *viaPath = peer->getBestPath(now);
SharedPtr<Peer> relay;
if (!viaPath) {
// See if this network has a preferred relay (if packet has an associated network)
if (nconf) {
- unsigned int latency = ~((unsigned int)0);
+ unsigned int bestq = ~((unsigned int)0);
for(std::vector< std::pair<Address,InetAddress> >::const_iterator r(nconf->relays().begin());r!=nconf->relays().end();++r) {
if (r->first != peer->address()) {
SharedPtr<Peer> rp(RR->topology->getPeer(r->first));
- if ((rp)&&(rp->hasActiveDirectPath(now))&&(rp->latency() <= latency))
+						const unsigned int q = (rp) ? rp->relayQuality(now) : ~((unsigned int)0);
+						if (q < bestq) { // SUBTLE: '<' means relays of nil quality (unsigned int max) are never chosen; fall through to a root instead

+ bestq = q;
rp.swap(relay);
+ }
}
}
}
diff --git a/node/Switch.hpp b/node/Switch.hpp
index cf8420cf..1964d1ee 100644
--- a/node/Switch.hpp
+++ b/node/Switch.hpp
@@ -117,15 +117,10 @@ public:
* This only works if both peers are known, with known working direct
* links to this peer. The best link for each peer is sent to the other.
*
- * A rate limiter is in effect via the _lastUniteAttempt map. If force
- * is true, a unite attempt is made even if one has been made less than
- * ZT_MIN_UNITE_INTERVAL milliseconds ago.
- *
* @param p1 One of two peers (order doesn't matter)
* @param p2 Second of pair
- * @param force If true, send now regardless of interval
*/
- bool unite(const Address &p1,const Address &p2,bool force);
+ bool unite(const Address &p1,const Address &p2);
/**
* Attempt NAT traversal to peer at a given physical address
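
Editor's note: since unite() no longer takes a force flag, the ZT_MIN_UNITE_INTERVAL rate limit now lives at the call sites (see _handleRemotePacketHead in Switch.cpp above). A condensed, hypothetical wrapper showing that caller-side pattern; the method name is illustrative and not in the source:

    bool Switch::_uniteRateLimited(const Address &source,const Address &destination,uint64_t now)
    {
    	Mutex::Lock _l(_lastUniteAttempt_m);
    	uint64_t &luts = _lastUniteAttempt[_LastUniteKey(source,destination)];
    	if ((now - luts) < ZT_MIN_UNITE_INTERVAL)
    		return false; // attempted too recently
    	luts = now;
    	return unite(source,destination);
    }
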
diff --git a/node/Topology.cpp b/node/Topology.cpp
index 908acbc8..d94975dd 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -28,13 +28,21 @@
#include "Constants.hpp"
#include "Topology.hpp"
#include "RuntimeEnvironment.hpp"
-#include "Defaults.hpp"
-#include "Dictionary.hpp"
#include "Node.hpp"
+#include "Network.hpp"
+#include "NetworkConfig.hpp"
#include "Buffer.hpp"
namespace ZeroTier {
+// 2015-11-16 -- The Fabulous Four (should have named them after Beatles!)
+//#define ZT_DEFAULT_WORLD_LENGTH 494
+//static const unsigned char ZT_DEFAULT_WORLD[ZT_DEFAULT_WORLD_LENGTH] = {0x01,0x00,0x00,0x00,0x00,0x08,0xea,0xc9,0x0a,0x00,0x00,0x01,0x51,0x11,0x70,0xb2,0xfb,0xb8,0xb3,0x88,0xa4,0x69,0x22,0x14,0x91,0xaa,0x9a,0xcd,0x66,0xcc,0x76,0x4c,0xde,0xfd,0x56,0x03,0x9f,0x10,0x67,0xae,0x15,0xe6,0x9c,0x6f,0xb4,0x2d,0x7b,0x55,0x33,0x0e,0x3f,0xda,0xac,0x52,0x9c,0x07,0x92,0xfd,0x73,0x40,0xa6,0xaa,0x21,0xab,0xa8,0xa4,0x89,0xfd,0xae,0xa4,0x4a,0x39,0xbf,0x2d,0x00,0x65,0x9a,0xc9,0xc8,0x18,0xeb,0x80,0x31,0xa4,0x65,0x95,0x45,0x06,0x1c,0xfb,0xc2,0x4e,0x5d,0xe7,0x0a,0x40,0x7a,0x97,0xce,0x36,0xa2,0x3d,0x05,0xca,0x87,0xc7,0x59,0x27,0x5c,0x8b,0x0d,0x4c,0xb4,0xbb,0x26,0x2f,0x77,0x17,0x5e,0xb7,0x4d,0xb8,0xd3,0xb4,0xe9,0x23,0x5d,0xcc,0xa2,0x71,0xa8,0xdf,0xf1,0x23,0xa3,0xb2,0x66,0x74,0xea,0xe5,0xdc,0x8d,0xef,0xd3,0x0a,0xa9,0xac,0xcb,0xda,0x93,0xbd,0x6c,0xcd,0x43,0x1d,0xa7,0x98,0x6a,0xde,0x70,0xc0,0xc6,0x1c,0xaf,0xf0,0xfd,0x7f,0x8a,0xb9,0x76,0x13,0xe1,0xde,0x4f,0xf3,0xd6,0x13,0x04,0x7e,0x19,0x87,0x6a,0xba,0x00,0x2a,0x6e,0x2b,0x23,0x18,0x93,0x0f,0x60,0xeb,0x09,0x7f,0x70,0xd0,0xf4,0xb0,0x28,0xb2,0xcd,0x6d,0x3d,0x0c,0x63,0xc0,0x14,0xb9,0x03,0x9f,0xf3,0x53,0x90,0xe4,0x11,0x81,0xf2,0x16,0xfb,0x2e,0x6f,0xa8,0xd9,0x5c,0x1e,0xe9,0x66,0x71,0x56,0x41,0x19,0x05,0xc3,0xdc,0xcf,0xea,0x78,0xd8,0xc6,0xdf,0xaf,0xba,0x68,0x81,0x70,0xb3,0xfa,0x00,0x01,0x04,0xc6,0xc7,0x61,0xdc,0x27,0x09,0x88,0x41,0x40,0x8a,0x2e,0x00,0xbb,0x1d,0x31,0xf2,0xc3,0x23,0xe2,0x64,0xe9,0xe6,0x41,0x72,0xc1,0xa7,0x4f,0x77,0x89,0x95,0x55,0xed,0x10,0x75,0x1c,0xd5,0x6e,0x86,0x40,0x5c,0xde,0x11,0x8d,0x02,0xdf,0xfe,0x55,0x5d,0x46,0x2c,0xcf,0x6a,0x85,0xb5,0x63,0x1c,0x12,0x35,0x0c,0x8d,0x5d,0xc4,0x09,0xba,0x10,0xb9,0x02,0x5d,0x0f,0x44,0x5c,0xf4,0x49,0xd9,0x2b,0x1c,0x00,0x01,0x04,0x6b,0xbf,0x2e,0xd2,0x27,0x09,0x8a,0xcf,0x05,0x9f,0xe3,0x00,0x48,0x2f,0x6e,0xe5,0xdf,0xe9,0x02,0x31,0x9b,0x41,0x9d,0xe5,0xbd,0xc7,0x65,0x20,0x9c,0x0e,0xcd,0xa3,0x8c,0x4d,0x6e,0x4f,0xcf,0x0d,0x33,0x65,0x83,0x98,0xb4,0x52,0x7d,0xcd,0x22,0xf9,0x31,0x12,0xfb,0x9b,0xef,0xd0,0x2f,0xd7,0x8b,0xf7,0x26,0x1b,0x33,0x3f,0xc1,0x05,0xd1,0x92,0xa6,0x23,0xca,0x9e,0x50,0xfc,0x60,0xb3,0x74,0xa5,0x00,0x01,0x04,0xa2,0xf3,0x4d,0x6f,0x27,0x09,0x9d,0x21,0x90,0x39,0xf3,0x00,0x01,0xf0,0x92,0x2a,0x98,0xe3,0xb3,0x4e,0xbc,0xbf,0xf3,0x33,0x26,0x9d,0xc2,0x65,0xd7,0xa0,0x20,0xaa,0xb6,0x9d,0x72,0xbe,0x4d,0x4a,0xcc,0x9c,0x8c,0x92,0x94,0x78,0x57,0x71,0x25,0x6c,0xd1,0xd9,0x42,0xa9,0x0d,0x1b,0xd1,0xd2,0xdc,0xa3,0xea,0x84,0xef,0x7d,0x85,0xaf,0xe6,0x61,0x1f,0xb4,0x3f,0xf0,0xb7,0x41,0x26,0xd9,0x0a,0x6e,0x00,0x01,0x04,0x80,0xc7,0xc5,0xd9,0x27,0x09};
+
+// 2015-11-20 -- Alice and Bob are live, and we're now IPv6 dual-stack!
+#define ZT_DEFAULT_WORLD_LENGTH 792
+static const unsigned char ZT_DEFAULT_WORLD[ZT_DEFAULT_WORLD_LENGTH] = {0x01,0x00,0x00,0x00,0x00,0x08,0xea,0xc9,0x0a,0x00,0x00,0x01,0x51,0x26,0x6f,0x7c,0x8a,0xb8,0xb3,0x88,0xa4,0x69,0x22,0x14,0x91,0xaa,0x9a,0xcd,0x66,0xcc,0x76,0x4c,0xde,0xfd,0x56,0x03,0x9f,0x10,0x67,0xae,0x15,0xe6,0x9c,0x6f,0xb4,0x2d,0x7b,0x55,0x33,0x0e,0x3f,0xda,0xac,0x52,0x9c,0x07,0x92,0xfd,0x73,0x40,0xa6,0xaa,0x21,0xab,0xa8,0xa4,0x89,0xfd,0xae,0xa4,0x4a,0x39,0xbf,0x2d,0x00,0x65,0x9a,0xc9,0xc8,0x18,0xeb,0xe8,0x0a,0xf5,0xbc,0xf8,0x3d,0x97,0xcd,0xc3,0xf8,0xe2,0x41,0x16,0x42,0x0f,0xc7,0x76,0x8e,0x07,0xf3,0x7e,0x9e,0x7d,0x1b,0xb3,0x23,0x21,0x79,0xce,0xb9,0xd0,0xcb,0xb5,0x94,0x7b,0x89,0x21,0x57,0x72,0xf6,0x70,0xa1,0xdd,0x67,0x38,0xcf,0x45,0x45,0xc2,0x8d,0x46,0xec,0x00,0x2c,0xe0,0x2a,0x63,0x3f,0x63,0x8d,0x33,0x08,0x51,0x07,0x77,0x81,0x5b,0x32,0x49,0xae,0x87,0x89,0xcf,0x31,0xaa,0x41,0xf1,0x52,0x97,0xdc,0xa2,0x55,0xe1,0x4a,0x6e,0x3c,0x04,0xf0,0x4f,0x8a,0x0e,0xe9,0xca,0xec,0x24,0x30,0x04,0x9d,0x21,0x90,0x39,0xf3,0x00,0x01,0xf0,0x92,0x2a,0x98,0xe3,0xb3,0x4e,0xbc,0xbf,0xf3,0x33,0x26,0x9d,0xc2,0x65,0xd7,0xa0,0x20,0xaa,0xb6,0x9d,0x72,0xbe,0x4d,0x4a,0xcc,0x9c,0x8c,0x92,0x94,0x78,0x57,0x71,0x25,0x6c,0xd1,0xd9,0x42,0xa9,0x0d,0x1b,0xd1,0xd2,0xdc,0xa3,0xea,0x84,0xef,0x7d,0x85,0xaf,0xe6,0x61,0x1f,0xb4,0x3f,0xf0,0xb7,0x41,0x26,0xd9,0x0a,0x6e,0x00,0x0c,0x04,0xbc,0xa6,0x5e,0xb1,0x27,0x09,0x06,0x2a,0x03,0xb0,0xc0,0x00,0x02,0x00,0xd0,0x00,0x00,0x00,0x00,0x00,0x7d,0x00,0x01,0x27,0x09,0x04,0x9a,0x42,0xc5,0x21,0x27,0x09,0x06,0x2c,0x0f,0xf8,0x50,0x01,0x54,0x01,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x33,0x27,0x09,0x04,0x9f,0xcb,0x61,0xab,0x27,0x09,0x06,0x26,0x04,0xa8,0x80,0x08,0x00,0x00,0xa1,0x00,0x00,0x00,0x00,0x00,0x54,0x60,0x01,0x27,0x09,0x04,0xa9,0x39,0x8f,0x68,0x27,0x09,0x06,0x26,0x07,0xf0,0xd0,0x1d,0x01,0x00,0x57,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x27,0x09,0x04,0x6b,0xaa,0xc5,0x0e,0x27,0x09,0x06,0x26,0x04,0xa8,0x80,0x00,0x01,0x00,0x20,0x00,0x00,0x00,0x00,0x02,0x00,0xe0,0x01,0x27,0x09,0x04,0x80,0xc7,0xc5,0xd9,0x27,0x09,0x06,0x24,0x00,0x61,0x80,0x00,0x00,0x00,0xd0,0x00,0x00,0x00,0x00,0x00,0xb7,0x40,0x01,0x27,0x09,0x88,0x41,0x40,0x8a,0x2e,0x00,0xbb,0x1d,0x31,0xf2,0xc3,0x23,0xe2,0x64,0xe9,0xe6,0x41,0x72,0xc1,0xa7,0x4f,0x77,0x89,0x95,0x55,0xed,0x10,0x75,0x1c,0xd5,0x6e,0x86,0x40,0x5c,0xde,0x11,0x8d,0x02,0xdf,0xfe,0x55,0x5d,0x46,0x2c,0xcf,0x6a,0x85,0xb5,0x63,0x1c,0x12,0x35,0x0c,0x8d,0x5d,0xc4,0x09,0xba,0x10,0xb9,0x02,0x5d,0x0f,0x44,0x5c,0xf4,0x49,0xd9,0x2b,0x1c,0x00,0x0c,0x04,0x2d,0x20,0xc6,0x82,0x27,0x09,0x06,0x20,0x01,0x19,0xf0,0x64,0x00,0x81,0xc3,0x54,0x00,0x00,0xff,0xfe,0x18,0x1d,0x61,0x27,0x09,0x04,0x2e,0x65,0xa0,0xf9,0x27,0x09,0x06,0x2a,0x03,0xb0,0xc0,0x00,0x03,0x00,0xd0,0x00,0x00,0x00,0x00,0x00,0x6a,0x30,0x01,0x27,0x09,0x04,0x6b,0xbf,0x2e,0xd2,0x27,0x09,0x06,0x20,0x01,0x19,0xf0,0x68,0x00,0x83,0xa4,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x64,0x27,0x09,0x04,0x2d,0x20,0xf6,0xb3,0x27,0x09,0x06,0x20,0x01,0x19,0xf0,0x58,0x00,0x8b,0xf8,0x54,0x00,0x00,0xff,0xfe,0x15,0xb3,0x9a,0x27,0x09,0x04,0x2d,0x20,0xf8,0x57,0x27,0x09,0x06,0x20,0x01,0x19,0xf0,0x70,0x00,0x9b,0xc9,0x54,0x00,0x00,0xff,0xfe,0x15,0xc4,0xf5,0x27,0x09,0x04,0x9f,0xcb,0x02,0x9a,0x27,0x09,0x06,0x26,0x04,0xa8,0x80,0x0c,0xad,0x00,0xd0,0x00,0x00,0x00,0x00,0x00,0x26,0x70,0x01,0x27,0x09,0x7e,0x19,0x87,0x6a,0xba,0x00,0x2a,0x6e,0x2b,0x23,0x18,0x93,0x0f,0x60,0xeb,0x09,0x7f,0x70,0xd0,0xf4,0xb0,0x28,0xb2,0xcd,0x6d,0x3d,0x0c,0x63,0xc0,0x14,0xb9,0x03,0x9f,0xf3,0x53,0x90,0xe4,0x11,0x81,0xf2,0x16,0xfb,0x2e,0x6f,0xa8,0xd9,0x5c,0x1e,0xe9,0x66,0x71,0x56,0x41,0x19,0x05,0xc3,0xdc,0xcf,0xea,0x78,0xd8,0xc6,0x
df,0xaf,0xba,0x68,0x81,0x70,0xb3,0xfa,0x00,0x01,0x04,0xc6,0xc7,0x61,0xdc,0x27,0x09,0x8a,0xcf,0x05,0x9f,0xe3,0x00,0x48,0x2f,0x6e,0xe5,0xdf,0xe9,0x02,0x31,0x9b,0x41,0x9d,0xe5,0xbd,0xc7,0x65,0x20,0x9c,0x0e,0xcd,0xa3,0x8c,0x4d,0x6e,0x4f,0xcf,0x0d,0x33,0x65,0x83,0x98,0xb4,0x52,0x7d,0xcd,0x22,0xf9,0x31,0x12,0xfb,0x9b,0xef,0xd0,0x2f,0xd7,0x8b,0xf7,0x26,0x1b,0x33,0x3f,0xc1,0x05,0xd1,0x92,0xa6,0x23,0xca,0x9e,0x50,0xfc,0x60,0xb3,0x74,0xa5,0x00,0x01,0x04,0xa2,0xf3,0x4d,0x6f,0x27,0x09};
+
Topology::Topology(const RuntimeEnvironment *renv) :
RR(renv),
_amRoot(false)
@@ -43,130 +51,110 @@ Topology::Topology(const RuntimeEnvironment *renv) :
const uint8_t *all = reinterpret_cast<const uint8_t *>(alls.data());
RR->node->dataStoreDelete("peers.save");
+ Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE> *deserializeBuf = new Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE>();
unsigned int ptr = 0;
while ((ptr + 4) < alls.size()) {
- // Each Peer serializes itself prefixed by a record length (not including the size of the length itself)
- unsigned int reclen = (unsigned int)all[ptr] & 0xff;
- reclen <<= 8;
- reclen |= (unsigned int)all[ptr + 1] & 0xff;
- reclen <<= 8;
- reclen |= (unsigned int)all[ptr + 2] & 0xff;
- reclen <<= 8;
- reclen |= (unsigned int)all[ptr + 3] & 0xff;
-
- if (((ptr + reclen) > alls.size())||(reclen > ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE))
- break;
-
try {
+ const unsigned int reclen = ( // each Peer serialized record is prefixed by a record length
+ ((((unsigned int)all[ptr]) & 0xff) << 24) |
+ ((((unsigned int)all[ptr + 1]) & 0xff) << 16) |
+ ((((unsigned int)all[ptr + 2]) & 0xff) << 8) |
+ (((unsigned int)all[ptr + 3]) & 0xff)
+ );
unsigned int pos = 0;
- SharedPtr<Peer> p(Peer::deserializeNew(RR->identity,Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE>(all + ptr,reclen),pos));
- if (pos != reclen)
- break;
+ deserializeBuf->copyFrom(all + ptr,reclen + 4);
+ SharedPtr<Peer> p(Peer::deserializeNew(RR->identity,*deserializeBuf,pos));
ptr += pos;
- if ((p)&&(p->address() != RR->identity.address())) {
- _peers[p->address()] = p;
- } else {
+ if (!p)
break; // stop if invalid records
- }
- } catch (std::exception &exc) {
- break;
+ if (p->address() != RR->identity.address())
+ _peers.set(p->address(),p);
} catch ( ... ) {
break; // stop if invalid records
}
}
+ delete deserializeBuf;
clean(RR->node->now());
-}
-
-Topology::~Topology()
-{
- Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE> pbuf;
- std::string all;
- Address *a = (Address *)0;
- SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
- Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
- while (i.next(a,p)) {
- if (std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end()) {
- pbuf.clear();
- try {
- (*p)->serialize(pbuf);
- try {
- all.append((const char *)pbuf.data(),pbuf.size());
- } catch ( ... ) {
- return; // out of memory? just skip
- }
- } catch ( ... ) {} // peer too big? shouldn't happen, but it so skip
+ std::string dsWorld(RR->node->dataStoreGet("world"));
+ World cachedWorld;
+ if (dsWorld.length() > 0) {
+ try {
+ Buffer<ZT_WORLD_MAX_SERIALIZED_LENGTH> dswtmp(dsWorld.data(),(unsigned int)dsWorld.length());
+ cachedWorld.deserialize(dswtmp,0);
+ } catch ( ... ) {
+ cachedWorld = World(); // clear if cached world is invalid
}
}
-
- RR->node->dataStorePut("peers.save",all,true);
-}
-
-void Topology::setRootServers(const std::map< Identity,std::vector<InetAddress> > &sn)
-{
- Mutex::Lock _l(_lock);
-
- if (_roots == sn)
- return; // no change
-
- _roots = sn;
- _rootAddresses.clear();
- _rootPeers.clear();
- const uint64_t now = RR->node->now();
-
- for(std::map< Identity,std::vector<InetAddress> >::const_iterator i(sn.begin());i!=sn.end();++i) {
- if (i->first != RR->identity) { // do not add self as a peer
- SharedPtr<Peer> &p = _peers[i->first.address()];
- if (!p)
- p = SharedPtr<Peer>(new Peer(RR->identity,i->first));
- for(std::vector<InetAddress>::const_iterator j(i->second.begin());j!=i->second.end();++j)
- p->addPath(RemotePath(InetAddress(),*j,true),now);
- p->use(now);
- _rootPeers.push_back(p);
- }
- _rootAddresses.push_back(i->first.address());
+ World defaultWorld;
+ {
+ Buffer<ZT_DEFAULT_WORLD_LENGTH> wtmp(ZT_DEFAULT_WORLD,ZT_DEFAULT_WORLD_LENGTH);
+ defaultWorld.deserialize(wtmp,0); // throws on error, which would indicate a bad static variable up top
}
-
- std::sort(_rootAddresses.begin(),_rootAddresses.end());
-
- _amRoot = (_roots.find(RR->identity) != _roots.end());
+ if (cachedWorld.shouldBeReplacedBy(defaultWorld,false)) {
+ _setWorld(defaultWorld);
+ if (dsWorld.length() > 0)
+ RR->node->dataStoreDelete("world");
+ } else _setWorld(cachedWorld);
}
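
For reference, the cached "peers.save" blob parsed above is a plain concatenation of records, each prefixed by a 32-bit big-endian length that excludes the length field itself. A minimal standalone sketch of that length decode, assuming only a raw byte buffer (the helper name is illustrative and not part of this tree):

#include <cstdint>
#include <cstddef>

// Decode the 32-bit big-endian record length used by the loop above.
// Returns false if fewer than four bytes remain at 'ptr'.
static bool readRecordLength(const uint8_t *buf,size_t len,size_t ptr,uint32_t &reclen)
{
	if ((ptr + 4) > len)
		return false;
	reclen = ((uint32_t)buf[ptr] << 24) |
	         ((uint32_t)buf[ptr + 1] << 16) |
	         ((uint32_t)buf[ptr + 2] << 8) |
	         (uint32_t)buf[ptr + 3];
	return true;
}
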
-void Topology::setRootServers(const Dictionary &sn)
+Topology::~Topology()
{
- std::map< Identity,std::vector<InetAddress> > m;
- for(Dictionary::const_iterator d(sn.begin());d!=sn.end();++d) {
- if ((d->first.length() == ZT_ADDRESS_LENGTH_HEX)&&(d->second.length() > 0)) {
- try {
- Dictionary snspec(d->second);
- std::vector<InetAddress> &a = m[Identity(snspec.get("id",""))];
- std::string udp(snspec.get("udp",std::string()));
- if (udp.length() > 0)
- a.push_back(InetAddress(udp));
- } catch ( ... ) {
- TRACE("root server list contained invalid entry for: %s",d->first.c_str());
+ Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE> *pbuf = 0;
+ try {
+ pbuf = new Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE>();
+ std::string all;
+
+ Address *a = (Address *)0;
+ SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+ Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
+ while (i.next(a,p)) {
+ if (std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end()) {
+ pbuf->clear();
+ try {
+ (*p)->serialize(*pbuf);
+ try {
+ all.append((const char *)pbuf->data(),pbuf->size());
+ } catch ( ... ) {
+ return; // out of memory? just skip
+ }
+				} catch ( ... ) {} // peer too big? shouldn't happen, but if so, skip it
}
}
+
+ RR->node->dataStorePut("peers.save",all,true);
+
+ delete pbuf;
+ } catch ( ... ) {
+ delete pbuf;
}
- this->setRootServers(m);
}
SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
{
- if (peer->address() == RR->identity.address()) {
- TRACE("BUG: addNewPeer() caught and ignored attempt to add peer for self");
- throw std::logic_error("cannot add peer for self");
+#ifdef ZT_TRACE
+ if ((!peer)||(peer->address() == RR->identity.address())) {
+ if (!peer)
+ fprintf(stderr,"FATAL BUG: addPeer() caught attempt to add NULL peer" ZT_EOL_S);
+ else fprintf(stderr,"FATAL BUG: addPeer() caught attempt to add peer for self" ZT_EOL_S);
+ abort();
+ }
+#endif
+
+ SharedPtr<Peer> np;
+ {
+ Mutex::Lock _l(_lock);
+ SharedPtr<Peer> &hp = _peers[peer->address()];
+ if (!hp)
+ hp = peer;
+ np = hp;
}
- const uint64_t now = RR->node->now();
- Mutex::Lock _l(_lock);
-
- SharedPtr<Peer> &p = _peers.set(peer->address(),peer);
- p->use(now);
- _saveIdentity(p->identity());
+ np->use(RR->node->now());
+ saveIdentity(np->identity());
- return p;
+ return np;
}
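
addPeer() above is insert-if-absent: if a Peer for that address already exists, the existing object wins and is returned, so callers must continue with the returned pointer rather than the one they passed in. A hedged usage sketch ('topology', 'RR' and 'remoteIdentity' are assumed names in scope, not code from this change):

// Hypothetical caller of Topology::addPeer().
SharedPtr<Peer> candidate(new Peer(RR->identity,remoteIdentity));
SharedPtr<Peer> actual(topology.addPeer(candidate));
// 'actual' may refer to a different, pre-existing Peer for the same address;
// use 'actual' from here on, never 'candidate'.
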
SharedPtr<Peer> Topology::getPeer(const Address &zta)
@@ -176,33 +164,58 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
return SharedPtr<Peer>();
}
- const uint64_t now = RR->node->now();
- Mutex::Lock _l(_lock);
+ {
+ Mutex::Lock _l(_lock);
+ const SharedPtr<Peer> *const ap = _peers.get(zta);
+ if (ap) {
+ (*ap)->use(RR->node->now());
+ return *ap;
+ }
+ }
- SharedPtr<Peer> &ap = _peers[zta];
+ try {
+ Identity id(_getIdentity(zta));
+ if (id) {
+ SharedPtr<Peer> np(new Peer(RR->identity,id));
+ {
+ Mutex::Lock _l(_lock);
+ SharedPtr<Peer> &ap = _peers[zta];
+ if (!ap)
+ ap.swap(np);
+ ap->use(RR->node->now());
+ return ap;
+ }
+ }
+ } catch ( ... ) {
+ fprintf(stderr,"EXCEPTION in getPeer() part 2\n");
+ abort();
+ } // invalid identity on disk?
+
+ return SharedPtr<Peer>();
+}
- if (ap) {
- ap->use(now);
- return ap;
+Identity Topology::getIdentity(const Address &zta)
+{
+ {
+ Mutex::Lock _l(_lock);
+ const SharedPtr<Peer> *const ap = _peers.get(zta);
+ if (ap)
+ return (*ap)->identity();
}
+ return _getIdentity(zta);
+}
- Identity id(_getIdentity(zta));
+void Topology::saveIdentity(const Identity &id)
+{
if (id) {
- try {
- ap = SharedPtr<Peer>(new Peer(RR->identity,id));
- ap->use(now);
- return ap;
- } catch ( ... ) {} // invalid identity?
+ char p[128];
+ Utils::snprintf(p,sizeof(p),"iddb.d/%.10llx",(unsigned long long)id.address().toInt());
+ RR->node->dataStorePut(p,id.toString(false),false);
}
-
- _peers.erase(zta);
-
- return SharedPtr<Peer>();
}
SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
{
- SharedPtr<Peer> bestRoot;
const uint64_t now = RR->node->now();
Mutex::Lock _l(_lock);
@@ -212,97 +225,92 @@ SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCou
* causes packets searching for a route to pretty much literally
* circumnavigate the globe rather than bouncing between just two. */
- if (_rootAddresses.size() > 1) { // gotta be one other than me for this to work
- std::vector<Address>::const_iterator sna(std::find(_rootAddresses.begin(),_rootAddresses.end(),RR->identity.address()));
- if (sna != _rootAddresses.end()) { // sanity check -- _amRoot should've been false in this case
- for(;;) {
- if (++sna == _rootAddresses.end())
- sna = _rootAddresses.begin(); // wrap around at end
- if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
- SharedPtr<Peer> *p = _peers.get(*sna);
- if ((p)&&((*p)->hasActiveDirectPath(now))) {
- bestRoot = *p;
- break;
- }
+ for(unsigned long p=0;p<_rootAddresses.size();++p) {
+ if (_rootAddresses[p] == RR->identity.address()) {
+ for(unsigned long q=1;q<_rootAddresses.size();++q) {
+ const SharedPtr<Peer> *const nextsn = _peers.get(_rootAddresses[(p + q) % _rootAddresses.size()]);
+ if ((nextsn)&&((*nextsn)->hasActiveDirectPath(now))) {
+ (*nextsn)->use(now);
+ return *nextsn;
}
}
+ break;
}
}
+
} else {
/* If I am not a root server, the best root server is the active one with
- * the lowest latency. */
+	 * the lowest quality score (lower is better). */
- unsigned int l,bestLatency = 65536;
- uint64_t lds,ldr;
+ unsigned int bestQualityOverall = ~((unsigned int)0);
+ unsigned int bestQualityNotAvoid = ~((unsigned int)0);
+ const SharedPtr<Peer> *bestOverall = (const SharedPtr<Peer> *)0;
+ const SharedPtr<Peer> *bestNotAvoid = (const SharedPtr<Peer> *)0;
- // First look for a best root by comparing latencies, but exclude
- // root servers that have not responded to direct messages in order to
- // try to exclude any that are dead or unreachable.
- for(std::vector< SharedPtr<Peer> >::const_iterator sn(_rootPeers.begin());sn!=_rootPeers.end();) {
- // Skip explicitly avoided relays
+ for(std::vector< SharedPtr<Peer> >::const_iterator r(_rootPeers.begin());r!=_rootPeers.end();++r) {
+ bool avoiding = false;
for(unsigned int i=0;i<avoidCount;++i) {
- if (avoid[i] == (*sn)->address())
- goto keep_searching_for_roots;
- }
-
- // Skip possibly comatose or unreachable relays
- lds = (*sn)->lastDirectSend();
- ldr = (*sn)->lastDirectReceive();
- if ((lds)&&(lds > ldr)&&((lds - ldr) > ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD))
- goto keep_searching_for_roots;
-
- if ((*sn)->hasActiveDirectPath(now)) {
- l = (*sn)->latency();
- if (bestRoot) {
- if ((l)&&(l < bestLatency)) {
- bestLatency = l;
- bestRoot = *sn;
- }
- } else {
- if (l)
- bestLatency = l;
- bestRoot = *sn;
+ if (avoid[i] == (*r)->address()) {
+ avoiding = true;
+ break;
}
}
+ const unsigned int q = (*r)->relayQuality(now);
+ if (q <= bestQualityOverall) {
+ bestQualityOverall = q;
+ bestOverall = &(*r);
+ }
+ if ((!avoiding)&&(q <= bestQualityNotAvoid)) {
+ bestQualityNotAvoid = q;
+ bestNotAvoid = &(*r);
+ }
+ }
-keep_searching_for_roots:
- ++sn;
+ if (bestNotAvoid) {
+ (*bestNotAvoid)->use(now);
+ return *bestNotAvoid;
+ } else if ((!strictAvoid)&&(bestOverall)) {
+ (*bestOverall)->use(now);
+ return *bestOverall;
}
- if (bestRoot) {
- bestRoot->use(now);
- return bestRoot;
- } else if (strictAvoid)
- return SharedPtr<Peer>();
-
- // If we have nothing from above, just pick one without avoidance criteria.
- for(std::vector< SharedPtr<Peer> >::const_iterator sn=_rootPeers.begin();sn!=_rootPeers.end();++sn) {
- if ((*sn)->hasActiveDirectPath(now)) {
- unsigned int l = (*sn)->latency();
- if (bestRoot) {
- if ((l)&&(l < bestLatency)) {
- bestLatency = l;
- bestRoot = *sn;
- }
- } else {
- if (l)
- bestLatency = l;
- bestRoot = *sn;
- }
+ }
+
+ return SharedPtr<Peer>();
+}
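
When this node is itself a root, getBestRoot() deliberately hands traffic to the next root after itself in root-list order, so route searches rotate around the full ring of roots instead of bouncing between two of them. A stripped-down sketch of that ring selection (illustrative only; unlike the real code it ignores whether the chosen root has an active direct path):

#include <vector>
#include <algorithm>
#include <cstdint>

// Pick the entry immediately after 'self' in a ring of root IDs, wrapping at
// the end. Returns 'self' if it is absent or the only member.
static uint64_t nextRootAfterSelf(const std::vector<uint64_t> &ring,uint64_t self)
{
	std::vector<uint64_t>::const_iterator me(std::find(ring.begin(),ring.end(),self));
	if ((me == ring.end())||(ring.size() < 2))
		return self;
	std::vector<uint64_t>::const_iterator next(me + 1);
	return (next == ring.end()) ? ring.front() : *next;
}
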
+
+bool Topology::isUpstream(const Identity &id) const
+{
+ if (isRoot(id))
+ return true;
+ std::vector< SharedPtr<Network> > nws(RR->node->allNetworks());
+ for(std::vector< SharedPtr<Network> >::const_iterator nw(nws.begin());nw!=nws.end();++nw) {
+ SharedPtr<NetworkConfig> nc((*nw)->config2());
+ if (nc) {
+ for(std::vector< std::pair<Address,InetAddress> >::const_iterator r(nc->relays().begin());r!=nc->relays().end();++r) {
+ if (r->first == id.address())
+ return true;
}
}
}
-
- if (bestRoot)
- bestRoot->use(now);
- return bestRoot;
+ return false;
}
-bool Topology::isRoot(const Identity &id) const
- throw()
+bool Topology::worldUpdateIfValid(const World &newWorld)
{
Mutex::Lock _l(_lock);
- return (_roots.count(id) != 0);
+ if (_world.shouldBeReplacedBy(newWorld,true)) {
+ _setWorld(newWorld);
+ try {
+ Buffer<ZT_WORLD_MAX_SERIALIZED_LENGTH> dswtmp;
+ newWorld.serialize(dswtmp,false);
+ RR->node->dataStorePut("world",dswtmp.data(),dswtmp.size(),false);
+ } catch ( ... ) {
+ RR->node->dataStoreDelete("world");
+ }
+ return true;
+ }
+ return false;
}
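
worldUpdateIfValid() is the single entry point for applying a World received over the wire: the update is accepted only if shouldBeReplacedBy() approves it (same ID, newer timestamp, valid signature) and is then persisted to the data store, with the cached copy deleted if persistence fails. A hedged sketch of how a caller might feed it a candidate ('topology', 'pkt' and 'ptr' are assumed names, not code from this tree):

// 'pkt' is assumed to be a Buffer holding a serialized World starting at 'ptr'.
World candidate;
try {
	candidate.deserialize(pkt,ptr);
	if (topology.worldUpdateIfValid(candidate)) {
		// newer, properly signed world accepted; root list has been rebuilt
	}
} catch ( ... ) {
	// malformed World -- ignore
}
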
void Topology::clean(uint64_t now)
@@ -320,24 +328,6 @@ void Topology::clean(uint64_t now)
}
}
-bool Topology::authenticateRootTopology(const Dictionary &rt)
-{
- try {
- std::string signer(rt.signingIdentity());
- if (!signer.length())
- return false;
- Identity signerId(signer);
- std::map< Address,Identity >::const_iterator authority(ZT_DEFAULTS.rootTopologyAuthorities.find(signerId.address()));
- if (authority == ZT_DEFAULTS.rootTopologyAuthorities.end())
- return false;
- if (signerId != authority->second)
- return false;
- return rt.verify(authority->second);
- } catch ( ... ) {
- return false;
- }
-}
-
Identity Topology::_getIdentity(const Address &zta)
{
char p[128];
@@ -351,12 +341,27 @@ Identity Topology::_getIdentity(const Address &zta)
return Identity();
}
-void Topology::_saveIdentity(const Identity &id)
+void Topology::_setWorld(const World &newWorld)
{
- if (id) {
- char p[128];
- Utils::snprintf(p,sizeof(p),"iddb.d/%.10llx",(unsigned long long)id.address().toInt());
- RR->node->dataStorePut(p,id.toString(false),false);
+	// assumes _lock is already held (or that we are in the constructor)
+ _world = newWorld;
+ _amRoot = false;
+ _rootAddresses.clear();
+ _rootPeers.clear();
+ for(std::vector<World::Root>::const_iterator r(_world.roots().begin());r!=_world.roots().end();++r) {
+ _rootAddresses.push_back(r->identity.address());
+ if (r->identity.address() == RR->identity.address()) {
+ _amRoot = true;
+ } else {
+ SharedPtr<Peer> *rp = _peers.get(r->identity.address());
+ if (rp) {
+ _rootPeers.push_back(*rp);
+ } else {
+ SharedPtr<Peer> newrp(new Peer(RR->identity,r->identity));
+ _peers.set(r->identity.address(),newrp);
+ _rootPeers.push_back(newrp);
+ }
+ }
}
}
diff --git a/node/Topology.hpp b/node/Topology.hpp
index 4df545e1..07daa276 100644
--- a/node/Topology.hpp
+++ b/node/Topology.hpp
@@ -31,10 +31,10 @@
#include <stdio.h>
#include <string.h>
-#include <map>
#include <vector>
#include <stdexcept>
#include <algorithm>
+#include <utility>
#include "Constants.hpp"
@@ -43,8 +43,8 @@
#include "Peer.hpp"
#include "Mutex.hpp"
#include "InetAddress.hpp"
-#include "Dictionary.hpp"
#include "Hashtable.hpp"
+#include "World.hpp"
namespace ZeroTier {
@@ -60,21 +60,6 @@ public:
~Topology();
/**
- * @param sn Root server identities and addresses
- */
- void setRootServers(const std::map< Identity,std::vector<InetAddress> > &sn);
-
- /**
- * Set up root servers for this network
- *
- * This performs no signature verification of any kind. The caller must
- * check the signature of the root topology dictionary first.
- *
- * @param sn 'rootservers' key from root-topology Dictionary (deserialized as Dictionary)
- */
- void setRootServers(const Dictionary &sn);
-
- /**
* Add a peer to database
*
* This will not replace existing peers. In that case the existing peer
@@ -94,23 +79,48 @@ public:
SharedPtr<Peer> getPeer(const Address &zta);
/**
- * @return Vector of peers that are root servers
+ * Get a peer only if it is presently in memory (no disk cache)
+ *
+ * This also does not update the lastUsed() time for peers, which means
+ * that it won't prevent them from falling out of RAM. This is currently
+ * used in the Cluster code to update peer info without forcing all peers
+ * across the entire cluster to remain in memory cache.
+ *
+ * @param zta ZeroTier address
*/
- inline std::vector< SharedPtr<Peer> > rootPeers() const
+ inline SharedPtr<Peer> getPeerNoCache(const Address &zta)
{
Mutex::Lock _l(_lock);
- return _rootPeers;
+ const SharedPtr<Peer> *const ap = _peers.get(zta);
+ if (ap)
+ return *ap;
+ return SharedPtr<Peer>();
}
/**
+ * Get the identity of a peer
+ *
+ * @param zta ZeroTier address of peer
+ * @return Identity or NULL Identity if not found
+ */
+ Identity getIdentity(const Address &zta);
+
+ /**
+ * Cache an identity
+ *
+ * This is done automatically on addPeer(), and so is only useful for
+ * cluster identity replication.
+ *
+ * @param id Identity to cache
+ */
+ void saveIdentity(const Identity &id);
+
+ /**
* Get the current favorite root server
*
* @return Root server with lowest latency or NULL if none
*/
- inline SharedPtr<Peer> getBestRoot()
- {
- return getBestRoot((const Address *)0,0,false);
- }
+ inline SharedPtr<Peer> getBestRoot() { return getBestRoot((const Address *)0,0,false); }
/**
* Get the best root server, avoiding root servers listed in an array
@@ -128,10 +138,19 @@ public:
/**
* @param id Identity to check
- * @return True if this is a designated root server
+ * @return True if this is a designated root server in this world
*/
- bool isRoot(const Identity &id) const
- throw();
+ inline bool isRoot(const Identity &id) const
+ {
+ Mutex::Lock _l(_lock);
+ return (std::find(_rootAddresses.begin(),_rootAddresses.end(),id.address()) != _rootAddresses.end());
+ }
+
+ /**
+ * @param id Identity to check
+ * @return True if this is a root server or a network preferred relay from one of our networks
+ */
+ bool isUpstream(const Identity &id) const;
/**
* @return Vector of root server addresses
@@ -143,11 +162,61 @@ public:
}
/**
+ * @return Current World (copy)
+ */
+ inline World world() const
+ {
+ Mutex::Lock _l(_lock);
+ return _world;
+ }
+
+ /**
+ * @return Current world ID
+ */
+ inline uint64_t worldId() const
+ {
+ return _world.id(); // safe to read without lock, and used from within eachPeer() so don't lock
+ }
+
+ /**
+ * @return Current world timestamp
+ */
+ inline uint64_t worldTimestamp() const
+ {
+ return _world.timestamp(); // safe to read without lock, and used from within eachPeer() so don't lock
+ }
+
+ /**
+ * Validate new world and update if newer and signature is okay
+ *
+ * @param newWorld Potential new world definition revision
+ * @return True if an update actually occurred
+ */
+ bool worldUpdateIfValid(const World &newWorld);
+
+ /**
* Clean and flush database
*/
void clean(uint64_t now);
/**
+ * @param now Current time
+ * @return Number of peers with active direct paths
+ */
+ inline unsigned long countActive(uint64_t now) const
+ {
+ unsigned long cnt = 0;
+ Mutex::Lock _l(_lock);
+ Hashtable< Address,SharedPtr<Peer> >::Iterator i(const_cast<Topology *>(this)->_peers);
+ Address *a = (Address *)0;
+ SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+ while (i.next(a,p)) {
+ cnt += (unsigned long)((*p)->hasActiveDirectPath(now));
+ }
+ return cnt;
+ }
+
+ /**
* Apply a function or function object to all peers
*
* Note: explicitly template this by reference if you want the object
@@ -167,12 +236,19 @@ public:
Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
Address *a = (Address *)0;
SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
- while (i.next(a,p))
- f(*this,*p);
+ while (i.next(a,p)) {
+#ifdef ZT_TRACE
+ if (!(*p)) {
+ fprintf(stderr,"FATAL BUG: eachPeer() caught NULL peer for %s -- peer pointers in Topology should NEVER be NULL" ZT_EOL_S,a->toString().c_str());
+ abort();
+ }
+#endif
+ f(*this,*((const SharedPtr<Peer> *)p));
+ }
}
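
As the note in eachPeer()'s doc comment suggests, the functor is passed by value unless it is explicitly templated by reference. A small hedged sketch of a function object that accumulates state across the iteration (assumes the surrounding ZeroTier headers; not part of this change):

// Counts peers with an active direct path, mirroring countActive() above.
struct CountActivePeers
{
	uint64_t now;
	unsigned long count;
	CountActivePeers(uint64_t n) : now(n),count(0) {}
	inline void operator()(Topology &,const SharedPtr<Peer> &p)
	{
		if (p->hasActiveDirectPath(now))
			++count;
	}
};

// Usage -- the explicit reference template parameter keeps 'cap' from being copied:
//   CountActivePeers cap(RR->node->now());
//   RR->topology->eachPeer<CountActivePeers &>(cap);
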
/**
- * @return All currently active peers by address
+ * @return All currently active peers by address (unsorted)
*/
inline std::vector< std::pair< Address,SharedPtr<Peer> > > allPeers() const
{
@@ -181,27 +257,23 @@ public:
}
/**
- * Validate a root topology dictionary against the identities specified in Defaults
- *
- * @param rt Root topology dictionary
- * @return True if dictionary signature is valid
+ * @return True if I am a root server in the current World
*/
- static bool authenticateRootTopology(const Dictionary &rt);
+ inline bool amRoot() const throw() { return _amRoot; }
private:
Identity _getIdentity(const Address &zta);
- void _saveIdentity(const Identity &id);
+ void _setWorld(const World &newWorld);
- const RuntimeEnvironment *RR;
+ const RuntimeEnvironment *const RR;
+ World _world;
Hashtable< Address,SharedPtr<Peer> > _peers;
- std::map< Identity,std::vector<InetAddress> > _roots;
std::vector< Address > _rootAddresses;
std::vector< SharedPtr<Peer> > _rootPeers;
+ bool _amRoot;
Mutex _lock;
-
- bool _amRoot;
};
} // namespace ZeroTier
diff --git a/node/Utils.cpp b/node/Utils.cpp
index 658c397d..10146e6c 100644
--- a/node/Utils.cpp
+++ b/node/Utils.cpp
@@ -168,20 +168,20 @@ void Utils::getSecureRandom(void *buf,unsigned int bytes)
fprintf(stderr,"FATAL ERROR: Utils::getSecureRandom() CryptGenRandom failed!\r\n");
exit(1);
}
- s20.init(s20key,256,s20key,8);
+ s20.init(s20key,256,s20key);
}
if (!CryptGenRandom(cryptProvider,(DWORD)bytes,(BYTE *)buf)) {
fprintf(stderr,"FATAL ERROR: Utils::getSecureRandom() CryptGenRandom failed!\r\n");
exit(1);
}
- s20.encrypt(buf,buf,bytes);
+ s20.encrypt12(buf,buf,bytes);
#else // not __WINDOWS__
#ifdef __UNIX_LIKE__
- static char randomBuf[65536];
+ static char randomBuf[131072];
static unsigned int randomPtr = sizeof(randomBuf);
static int devURandomFd = -1;
static Mutex globalLock;
@@ -191,7 +191,7 @@ void Utils::getSecureRandom(void *buf,unsigned int bytes)
if (devURandomFd <= 0) {
devURandomFd = ::open("/dev/urandom",O_RDONLY);
if (devURandomFd <= 0) {
- fprintf(stderr,"FATAL ERROR: Utils::getSecureRandom() unable to open /dev/urandom\r\n");
+ fprintf(stderr,"FATAL ERROR: Utils::getSecureRandom() unable to open /dev/urandom\n");
exit(1);
return;
}
@@ -199,10 +199,16 @@ void Utils::getSecureRandom(void *buf,unsigned int bytes)
for(unsigned int i=0;i<bytes;++i) {
if (randomPtr >= sizeof(randomBuf)) {
- if ((int)::read(devURandomFd,randomBuf,sizeof(randomBuf)) != (int)sizeof(randomBuf)) {
- fprintf(stderr,"FATAL ERROR: Utils::getSecureRandom() unable to read from /dev/urandom\r\n");
- exit(1);
- return;
+ for(;;) {
+ if ((int)::read(devURandomFd,randomBuf,sizeof(randomBuf)) != (int)sizeof(randomBuf)) {
+ ::close(devURandomFd);
+ devURandomFd = ::open("/dev/urandom",O_RDONLY);
+ if (devURandomFd <= 0) {
+ fprintf(stderr,"FATAL ERROR: Utils::getSecureRandom() unable to open /dev/urandom\n");
+ exit(1);
+ return;
+ }
+ } else break;
}
randomPtr = 0;
}
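
The change above makes the /dev/urandom pool self-healing: random bytes are drawn from a large static buffer, and a short or failed read closes and reopens the descriptor instead of giving up on the first error. A condensed sketch of the same refill-on-exhaust pattern (illustrative only; it retries open() forever, whereas the real function exits on a hard failure):

#include <fcntl.h>
#include <unistd.h>

static unsigned char pool[131072];
static unsigned int poolPtr = sizeof(pool); // force initial fill
static int poolFd = -1;

static unsigned char nextRandomByte()
{
	if (poolPtr >= sizeof(pool)) {
		for(;;) {
			if (poolFd < 0)
				poolFd = ::open("/dev/urandom",O_RDONLY);
			if ((poolFd >= 0)&&((int)::read(poolFd,pool,sizeof(pool)) == (int)sizeof(pool)))
				break;
			if (poolFd >= 0) { ::close(poolFd); poolFd = -1; }
		}
		poolPtr = 0;
	}
	return pool[poolPtr++];
}
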
diff --git a/node/World.hpp b/node/World.hpp
new file mode 100644
index 00000000..c6d20d84
--- /dev/null
+++ b/node/World.hpp
@@ -0,0 +1,241 @@
+/*
+ * ZeroTier One - Network Virtualization Everywhere
+ * Copyright (C) 2011-2015 ZeroTier, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * --
+ *
+ * ZeroTier may be used and distributed under the terms of the GPLv3, which
+ * are available at: http://www.gnu.org/licenses/gpl-3.0.html
+ *
+ * If you would like to embed ZeroTier into a commercial application or
+ * redistribute it in a modified binary form, please contact ZeroTier Networks
+ * LLC. Start here: http://www.zerotier.com/
+ */
+
+#ifndef ZT_WORLD_HPP
+#define ZT_WORLD_HPP
+
+#include <vector>
+#include <string>
+
+#include "Constants.hpp"
+#include "InetAddress.hpp"
+#include "Identity.hpp"
+#include "Buffer.hpp"
+#include "C25519.hpp"
+
+/**
+ * Maximum number of roots (sanity limit, okay to increase)
+ *
+ * A given root can (through multi-homing) be distributed across any number of
+ * physical endpoints, but having more than one root allows the network to
+ * survive the total failure of a single root, or its withdrawal due to
+ * compromise, without going down entirely.
+ */
+#define ZT_WORLD_MAX_ROOTS 4
+
+/**
+ * Maximum number of stable endpoints per root (sanity limit, okay to increase)
+ */
+#define ZT_WORLD_MAX_STABLE_ENDPOINTS_PER_ROOT 32
+
+/**
+ * An upper bound on the length of a serialized World
+ */
+#define ZT_WORLD_MAX_SERIALIZED_LENGTH (((1024 + (32 * ZT_WORLD_MAX_STABLE_ENDPOINTS_PER_ROOT)) * ZT_WORLD_MAX_ROOTS) + ZT_C25519_PUBLIC_KEY_LEN + ZT_C25519_SIGNATURE_LEN + 128)
+
+/**
+ * World ID indicating null / empty World object
+ */
+#define ZT_WORLD_ID_NULL 0
+
+/**
+ * World ID for a test network with ephemeral or temporary roots
+ */
+#define ZT_WORLD_ID_TESTNET 1
+
+/**
+ * World ID for Earth
+ *
+ * This is the ID for the ZeroTier World used on planet Earth. It is unrelated
+ * to the public network 8056c2e21c000001 of the same name. Its value is
+ * Earth's approximate distance from the Sun in kilometers.
+ */
+#define ZT_WORLD_ID_EARTH 149604618
+
+/**
+ * World ID for Mars -- for future use by SpaceX or others
+ */
+#define ZT_WORLD_ID_MARS 227883110
+
+namespace ZeroTier {
+
+/**
+ * A world definition (formerly known as a root topology)
+ *
+ * Think of a World as a single data center. Within this data center, a set
+ * of distributed, fault-tolerant root servers provides stable anchor points
+ * for a peer-to-peer network that provides virtual LAN service. Updates to a
+ * world definition are published by signing them with the previous revision's
+ * signing key, and should be very infrequent.
+ *
+ * The maximum data center size is approximately 2.5 cubic light seconds,
+ * since many protocols have issues with >5s RTT latencies.
+ *
+ * ZeroTier operates a World for Earth capable of encompassing the planet, its
+ * orbits, the Moon (about 1.3 light seconds), and nearby Lagrange points. A
+ * world ID for Mars and nearby space is defined but not yet used, and a
+ * separate world ID is reserved for testing.
+ *
+ * If you absolutely must run your own "unofficial" ZeroTier network, please
+ * define your world IDs above 0xffffffff (4294967295). Code to make a World
+ * is in mkworld.cpp in the parent directory and must be edited to change
+ * settings.
+ */
+class World
+{
+public:
+ struct Root
+ {
+ Identity identity;
+ std::vector<InetAddress> stableEndpoints;
+
+ inline bool operator==(const Root &r) const throw() { return ((identity == r.identity)&&(stableEndpoints == r.stableEndpoints)); }
+ inline bool operator!=(const Root &r) const throw() { return (!(*this == r)); }
+ inline bool operator<(const Root &r) const throw() { return (identity < r.identity); } // for sorting
+ };
+
+ /**
+ * Construct an empty / null World
+ */
+ World() :
+ _id(ZT_WORLD_ID_NULL),
+ _ts(0) {}
+
+ /**
+ * @return Root servers for this world and their stable endpoints
+ */
+ inline const std::vector<World::Root> &roots() const throw() { return _roots; }
+
+ /**
+ * @return World unique identifier
+ */
+ inline uint64_t id() const throw() { return _id; }
+
+ /**
+ * @return World definition timestamp
+ */
+ inline uint64_t timestamp() const throw() { return _ts; }
+
+ /**
+ * Check whether a world update should replace this one
+ *
+ * A new world update is valid if it is for the same world ID, is newer,
+ * and is signed by the current world's signing key. If this world object
+ * is null, it can always be updated.
+ *
+ * @param update Candidate update
+ * @param fullSignatureCheck Perform full cryptographic signature check (true == yes, false == skip)
+ * @return True if update is newer than current and is properly signed
+ */
+ inline bool shouldBeReplacedBy(const World &update,bool fullSignatureCheck)
+ {
+ if (_id == ZT_WORLD_ID_NULL)
+ return true;
+ if ((_id == update._id)&&(_ts < update._ts)) {
+ if (fullSignatureCheck) {
+ Buffer<ZT_WORLD_MAX_SERIALIZED_LENGTH> tmp;
+ update.serialize(tmp,true);
+ return C25519::verify(_updateSigningKey,tmp.data(),tmp.size(),update._signature);
+ } else return true;
+ }
+ return false;
+ }
+
+ /**
+ * @return True if this World is non-empty
+ */
+ inline operator bool() const throw() { return (_id != ZT_WORLD_ID_NULL); }
+
+ template<unsigned int C>
+ inline void serialize(Buffer<C> &b,bool forSign = false) const
+ {
+ if (forSign)
+ b.append((uint64_t)0x7f7f7f7f7f7f7f7fULL);
+ b.append((uint8_t)0x01); // version -- only one valid value for now
+ b.append((uint64_t)_id);
+ b.append((uint64_t)_ts);
+ b.append(_updateSigningKey.data,ZT_C25519_PUBLIC_KEY_LEN);
+ if (!forSign)
+ b.append(_signature.data,ZT_C25519_SIGNATURE_LEN);
+ b.append((uint8_t)_roots.size());
+ for(std::vector<Root>::const_iterator r(_roots.begin());r!=_roots.end();++r) {
+ r->identity.serialize(b);
+ b.append((uint8_t)r->stableEndpoints.size());
+ for(std::vector<InetAddress>::const_iterator ep(r->stableEndpoints.begin());ep!=r->stableEndpoints.end();++ep)
+ ep->serialize(b);
+ }
+ if (forSign)
+ b.append((uint64_t)0xf7f7f7f7f7f7f7f7ULL);
+ }
+
+ template<unsigned int C>
+ inline unsigned int deserialize(const Buffer<C> &b,unsigned int startAt = 0)
+ {
+ unsigned int p = startAt;
+
+ _roots.clear();
+
+ if (b[p++] != 0x01)
+ throw std::invalid_argument("invalid World serialized version");
+
+ _id = b.template at<uint64_t>(p); p += 8;
+ _ts = b.template at<uint64_t>(p); p += 8;
+ memcpy(_updateSigningKey.data,b.field(p,ZT_C25519_PUBLIC_KEY_LEN),ZT_C25519_PUBLIC_KEY_LEN); p += ZT_C25519_PUBLIC_KEY_LEN;
+ memcpy(_signature.data,b.field(p,ZT_C25519_SIGNATURE_LEN),ZT_C25519_SIGNATURE_LEN); p += ZT_C25519_SIGNATURE_LEN;
+ unsigned int numRoots = b[p++];
+ if (numRoots > ZT_WORLD_MAX_ROOTS)
+ throw std::invalid_argument("too many roots in World");
+ for(unsigned int k=0;k<numRoots;++k) {
+ _roots.push_back(Root());
+ Root &r = _roots.back();
+ p += r.identity.deserialize(b,p);
+ unsigned int numStableEndpoints = b[p++];
+ if (numStableEndpoints > ZT_WORLD_MAX_STABLE_ENDPOINTS_PER_ROOT)
+ throw std::invalid_argument("too many stable endpoints in World/Root");
+ for(unsigned int kk=0;kk<numStableEndpoints;++kk) {
+ r.stableEndpoints.push_back(InetAddress());
+ p += r.stableEndpoints.back().deserialize(b,p);
+ }
+ }
+
+ return (p - startAt);
+ }
+
+ inline bool operator==(const World &w) const throw() { return ((_id == w._id)&&(_ts == w._ts)&&(_updateSigningKey == w._updateSigningKey)&&(_signature == w._signature)&&(_roots == w._roots)); }
+ inline bool operator!=(const World &w) const throw() { return (!(*this == w)); }
+
+protected:
+ uint64_t _id;
+ uint64_t _ts;
+ C25519::Public _updateSigningKey;
+ C25519::Signature _signature;
+ std::vector<Root> _roots;
+};
+
+} // namespace ZeroTier
+
+#endif
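
The serialize()/deserialize() pair above fixes the wire format: a version byte, world ID, timestamp, update-signing key, signature, then the root list. A minimal round-trip sketch, assuming 'w' is an already-populated World:

// Round-trip a World through its wire format ('w' assumed populated).
Buffer<ZT_WORLD_MAX_SERIALIZED_LENGTH> tmp;
w.serialize(tmp);
World w2;
const unsigned int consumed = w2.deserialize(tmp,0);
// On success, consumed == tmp.size() and (w2 == w) is true.
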