From 2f20258807f8665bc3f9c527106e61761e01ecc3 Mon Sep 17 00:00:00 2001
From: Adam Ierymenko <adam.ierymenko@gmail.com>
Date: Thu, 6 Jul 2017 10:25:36 -0700
Subject: .

---
 include/ZeroTierOne.h       |  34 +++++-----
 node/Constants.hpp          |  12 ++--
 node/Identity.hpp           |   5 +-
 node/IncomingPacket.cpp     |  12 ----
 node/Network.cpp            |  10 ---
 node/Node.cpp               | 151 +++++++++++---------------------------------
 node/Node.hpp               |   2 +-
 node/Path.hpp               |  13 ----
 node/Peer.cpp               | 113 ++++++++++++++++++++-------------
 node/Peer.hpp               |  16 ++++-
 node/RuntimeEnvironment.hpp |  19 ++++--
 node/Switch.cpp             |  93 ++-------------------------
 node/Topology.cpp           |  59 +++++------------
 node/Topology.hpp           |  27 ++------
 osdep/Binder.hpp            |   4 +-
 service/OneService.cpp      |  94 +++++++++++++--------------
 16 files changed, 240 insertions(+), 424 deletions(-)

diff --git a/include/ZeroTierOne.h b/include/ZeroTierOne.h
index 40cae3b4..133ae340 100644
--- a/include/ZeroTierOne.h
+++ b/include/ZeroTierOne.h
@@ -1072,9 +1072,9 @@ typedef struct
  * identity of a node and its address, the identity (public and secret)
  * must be saved at a minimum.
  *
- * The reference service implementation currently persists identity,
- * peer identities (for a period of time), planet, moons, and network
- * configurations. Other state is treated as ephemeral.
+ * State objects actually have two IDs (uint64_t[2]). If only one is
+ * listed, the second ([1]) should be zero and is ignored in storage
+ * and replication.
  *
  * All state objects should be replicated in cluster mode. The reference
  * clustering implementation uses a rumor mill algorithm in which state
@@ -1118,22 +1118,25 @@ enum ZT_StateObjectType
 	ZT_STATE_OBJECT_PEER_STATE = 3,
 
 	/**
-	 * The identity of a known peer
+	 * Network configuration
 	 *
 	 * Object ID: peer address
-	 * Canonical path: <HOME>/iddb.d/<ADDRESS> (10-digit hex address)
-	 * Persistence: recommended, can be purged at any time, recommended ttl 30-60 days
+	 * Canonical path: <HOME>/networks.d/<NETWORKID>.conf (16-digit hex ID)
+	 * Persistence: required if network memberships should persist
 	 */
-	ZT_STATE_OBJECT_PEER_IDENTITY = 4,
+	ZT_STATE_OBJECT_NETWORK_CONFIG = 4,
 
 	/**
-	 * Network configuration
+	 * Network membership (network X peer intersection)
 	 *
-	 * Object ID: peer address
-	 * Canonical path: <HOME>/networks.d/<NETWORKID>.conf (16-digit hex ID)
-	 * Persistence: required if network memberships should persist
+	 * If these are persisted, they must be restored after peer states and
+	 * network configs. Otherwise they are ignored.
+	 *
+	 * Object ID: [0] network ID, [1] peer address
+	 * Canonical path: <HOME>/networks.d/<NETWORKID>/members.d/<ADDRESS>
+	 * Persistence: optional (not usually needed)
 	 */
-	ZT_STATE_OBJECT_NETWORK_CONFIG = 5,
+	ZT_STATE_OBJECT_NETWORK_MEMBERSHIP = 5,
 
 	/**
 	 * The planet (there is only one per... well... planet!)
@@ -1450,7 +1453,8 @@ void ZT_Node_delete(ZT_Node *node);
  *
  * Unless clustering is being implemented this function doesn't need to be
  * used after startup. It could be called in response to filesystem changes
- * to allow some degree of live configurability by filesystem observation.
+ * to allow some degree of live configurability by filesystem observation,
+ * but this kind of thing is entirely optional.
  *
  * The return value of this function indicates whether the update was accepted
  * as new. A return value of ZT_RESULT_OK indicates that the node gleaned new
@@ -1468,7 +1472,7 @@ void ZT_Node_delete(ZT_Node *node);
  * @param node Node instance
  * @param tptr Thread pointer to pass to functions/callbacks resulting from this call
  * @param type State object type
- * @param id State object ID
+ * @param id State object ID (if object type has only one ID, second should be zero)
  * @param data State object data
  * @param len Length of state object data in bytes
  * @return ZT_RESULT_OK if object was accepted or ZT_RESULT_OK_IGNORED if non-informative, error if object was invalid
@@ -1477,7 +1481,7 @@ enum ZT_ResultCode ZT_Node_processStateUpdate(
 	ZT_Node *node,
 	void *tptr,
 	ZT_StateObjectType type,
-	uint64_t id,
+	const uint64_t id[2],
 	const void *data,
 	unsigned int len);
 
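A minimal sketch of how a host application could hand one of these objects back to the core under the new two-element ID scheme. Only ZT_Node_processStateUpdate() and the [network ID, peer address] layout for ZT_STATE_OBJECT_NETWORK_MEMBERSHIP come from the header above; the helper name and its parameters are hypothetical.

#include "ZeroTierOne.h"

// Hypothetical helper: re-inject a stored network membership object.
// Membership objects use both ID slots ([0] = network ID, [1] = peer address);
// single-ID object types leave id[1] zero. Per the comments above, memberships
// should be restored only after peer states and network configs.
static enum ZT_ResultCode restoreNetworkMembership(ZT_Node *node,void *tptr,uint64_t nwid,uint64_t peerAddress,const void *data,unsigned int len)
{
	uint64_t id[2];
	id[0] = nwid;        // 16-digit hex network ID in the canonical path
	id[1] = peerAddress; // 10-digit hex peer address
	return ZT_Node_processStateUpdate(node,tptr,ZT_STATE_OBJECT_NETWORK_MEMBERSHIP,id,data,len);
}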
diff --git a/node/Constants.hpp b/node/Constants.hpp
index 88549937..274b9564 100644
--- a/node/Constants.hpp
+++ b/node/Constants.hpp
@@ -216,7 +216,12 @@
 /**
  * How often Topology::clean() and Network::clean() and similar are called, in ms
  */
-#define ZT_HOUSEKEEPING_PERIOD 10000
+#define ZT_HOUSEKEEPING_PERIOD 60000
+
+/**
+ * How often in ms to write peer state to storage and/or cluster (approximate)
+ */
+#define ZT_PEER_STATE_WRITE_PERIOD 10000
 
 /**
  * How long to remember peer records in RAM if they haven't been used
@@ -322,11 +327,6 @@
  */
 #define ZT_PEER_PATH_EXPIRATION ((ZT_PEER_PING_PERIOD * 4) + 3000)
 
-/**
- * Send a full HELLO every this often (ms)
- */
-#define ZT_PEER_SEND_FULL_HELLO_EVERY (ZT_PEER_PING_PERIOD * 2)
-
 /**
  * How often to retry expired paths that we're still remembering
  */
diff --git a/node/Identity.hpp b/node/Identity.hpp
index b1c7d6f4..79e17f4d 100644
--- a/node/Identity.hpp
+++ b/node/Identity.hpp
@@ -91,7 +91,10 @@ public:
 
 	~Identity()
 	{
-		delete _privateKey;
+		if (_privateKey) {
+			Utils::burn(_privateKey,sizeof(C25519::Private));
+			delete _privateKey;
+		}
 	}
 
 	inline Identity &operator=(const Identity &id)
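For context on the destructor change above: a plain memset() of memory that is about to be freed can be elided by the optimizer, so key material is wiped through a volatile pointer. A self-contained sketch of that pattern follows (the wipe() name is hypothetical; Utils::burn() in this tree is assumed to do essentially this).

#include <cstddef>

// Zero a buffer through a volatile pointer so the store cannot be treated as
// a dead write and removed before the memory is freed.
static void wipe(void *p,size_t len)
{
	volatile unsigned char *v = reinterpret_cast<volatile unsigned char *>(p);
	while (len--)
		*(v++) = 0;
}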
diff --git a/node/IncomingPacket.cpp b/node/IncomingPacket.cpp
index 4d99e87d..0548387b 100644
--- a/node/IncomingPacket.cpp
+++ b/node/IncomingPacket.cpp
@@ -585,12 +585,6 @@ bool IncomingPacket::_doWHOIS(const RuntimeEnvironment *RR,void *tPtr,const Shar
 			} else {
 				// Request unknown WHOIS from upstream from us (if we have one)
 				RR->sw->requestWhois(tPtr,addr);
-#ifdef ZT_ENABLE_CLUSTER
-				// Distribute WHOIS queries across a cluster if we do not know the ID.
-				// This may result in duplicate OKs to the querying peer, which is fine.
-				if (RR->cluster)
-					RR->cluster->sendDistributedQuery(*this);
-#endif
 			}
 		}
 
@@ -1055,12 +1049,6 @@ bool IncomingPacket::_doMULTICAST_GATHER(const RuntimeEnvironment *RR,void *tPtr
 				outp.armor(peer->key(),true,_path->nextOutgoingCounter());
 				_path->send(RR,tPtr,outp.data(),outp.size(),RR->node->now());
 			}
-
-			// If we are a member of a cluster, distribute this GATHER across it
-#ifdef ZT_ENABLE_CLUSTER
-			if ((RR->cluster)&&(gatheredLocally < gatherLimit))
-				RR->cluster->sendDistributedQuery(*this);
-#endif
 		}
 
 		peer->received(tPtr,_path,hops(),packetId(),Packet::VERB_MULTICAST_GATHER,0,Packet::VERB_NOP,trustEstablished);
diff --git a/node/Network.cpp b/node/Network.cpp
index 0a16ded8..bccc0397 100644
--- a/node/Network.cpp
+++ b/node/Network.cpp
@@ -1067,11 +1067,6 @@ uint64_t Network::handleConfigChunk(void *tPtr,const uint64_t packetId,const Add
 				return 0;
 			}
 
-#ifdef ZT_ENABLE_CLUSTER
-			if ((source)&&(RR->cluster))
-				RR->cluster->broadcastNetworkConfigChunk(chunk.field(start,chunk.size() - start),chunk.size() - start);
-#endif
-
 			// New properly verified chunks can be flooded "virally" through the network
 			if (fastPropagate) {
 				Address *a = (Address *)0;
@@ -1099,11 +1094,6 @@ uint64_t Network::handleConfigChunk(void *tPtr,const uint64_t packetId,const Add
 				if ((!c)||(_incomingConfigChunks[i].ts < c->ts))
 					c = &(_incomingConfigChunks[i]);
 			}
-
-#ifdef ZT_ENABLE_CLUSTER
-			if ((source)&&(RR->cluster))
-				RR->cluster->broadcastNetworkConfigChunk(chunk.field(start,chunk.size() - start),chunk.size() - start);
-#endif
 		} else {
 			TRACE("discarded single-chunk unsigned legacy config: this is only allowed if the sender is the controller itself");
 			return 0;
diff --git a/node/Node.cpp b/node/Node.cpp
index 1112c0f2..4ffe496c 100644
--- a/node/Node.cpp
+++ b/node/Node.cpp
@@ -68,6 +68,7 @@ Node::Node(void *uptr,void *tptr,const struct ZT_Node_Callbacks *callbacks,uint6
 		throw std::runtime_error("callbacks struct version mismatch");
 	memcpy(&_cb,callbacks,sizeof(ZT_Node_Callbacks));
 
+	// Initialize non-cryptographic PRNG from a good random source
 	Utils::getSecureRandom((void *)_prngState,sizeof(_prngState));
 
 	_online = false;
@@ -78,33 +79,34 @@ Node::Node(void *uptr,void *tptr,const struct ZT_Node_Callbacks *callbacks,uint6
 
 	uint64_t idtmp[2];
 	idtmp[0] = 0; idtmp[1] = 0;
-	char tmp[512];
-	std::string tmp2;
+	char tmp[1024];
 	int n = stateObjectGet(tptr,ZT_STATE_OBJECT_IDENTITY_SECRET,idtmp,tmp,sizeof(tmp) - 1);
 	if (n > 0) {
 		tmp[n] = (char)0;
-		if (!RR->identity.fromString(tmp))
+		if (RR->identity.fromString(tmp)) {
+			RR->publicIdentityStr = RR->identity.toString(false);
+			RR->secretIdentityStr = RR->identity.toString(true);
+		} else {
 			n = -1;
+		}
 	}
 
 	idtmp[0] = RR->identity.address().toInt(); idtmp[1] = 0;
 	if (n <= 0) {
 		RR->identity.generate();
-		tmp2 = RR->identity.toString(true);
-		stateObjectPut(tptr,ZT_STATE_OBJECT_IDENTITY_SECRET,idtmp,tmp2.data(),(unsigned int)tmp2.length());
-		tmp2 = RR->identity.toString(false);
-		stateObjectPut(tptr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,tmp2.data(),(unsigned int)tmp2.length());
+		RR->publicIdentityStr = RR->identity.toString(false);
+		RR->secretIdentityStr = RR->identity.toString(true);
+		stateObjectPut(tptr,ZT_STATE_OBJECT_IDENTITY_SECRET,idtmp,RR->secretIdentityStr.data(),(unsigned int)RR->secretIdentityStr.length());
+		stateObjectPut(tptr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,RR->publicIdentityStr.data(),(unsigned int)RR->publicIdentityStr.length());
 	} else {
 		n = stateObjectGet(tptr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,tmp,sizeof(tmp) - 1);
 		if (n > 0) {
 			tmp[n] = (char)0;
-			if (RR->identity.toString(false) != tmp)
+			if (RR->publicIdentityStr != tmp)
 				n = -1;
 		}
-		if (n <= 0) {
-			tmp2 = RR->identity.toString(false);
-			stateObjectPut(tptr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,tmp2.data(),(unsigned int)tmp2.length());
-		}
+		if (n <= 0)
+			stateObjectPut(tptr,ZT_STATE_OBJECT_IDENTITY_PUBLIC,idtmp,RR->publicIdentityStr.data(),(unsigned int)RR->publicIdentityStr.length());
 	}
 
 	try {
@@ -125,24 +127,20 @@ Node::Node(void *uptr,void *tptr,const struct ZT_Node_Callbacks *callbacks,uint6
 
 Node::~Node()
 {
-	Mutex::Lock _l(_networks_m);
-
-	_networks.clear(); // destroy all networks before shutdown
-
+	{
+		Mutex::Lock _l(_networks_m);
+		_networks.clear(); // destroy all networks before shutdown
+	}
 	delete RR->sa;
 	delete RR->topology;
 	delete RR->mc;
 	delete RR->sw;
-
-#ifdef ZT_ENABLE_CLUSTER
-	delete RR->cluster;
-#endif
 }
 
 ZT_ResultCode Node::processStateUpdate(
 	void *tptr,
 	ZT_StateObjectType type,
-	uint64_t id,
+	const uint64_t id[2],
 	const void *data,
 	unsigned int len)
 {
@@ -151,11 +149,12 @@ ZT_ResultCode Node::processStateUpdate(
 
 		case ZT_STATE_OBJECT_PEER_STATE:
 			if (len) {
-			}
-			break;
-
-		case ZT_STATE_OBJECT_PEER_IDENTITY:
-			if (len) {
+				const SharedPtr<Peer> p(RR->topology->getPeer(tptr,Address(id[0])));
+				if (p) {
+					r = (p->applyStateUpdate(data,len)) ? ZT_RESULT_OK : ZT_RESULT_OK_IGNORED;
+				} else {
+					r = (Peer::createFromStateUpdate(RR,tptr,data,len)) ? ZT_RESULT_OK : ZT_RESULT_OK_IGNORED;
+				}
 			}
 			break;
 
@@ -163,9 +162,9 @@ ZT_ResultCode Node::processStateUpdate(
 			if (len <= (ZT_NETWORKCONFIG_DICT_CAPACITY - 1)) {
 				if (len < 2) {
 					Mutex::Lock _l(_networks_m);
-					SharedPtr<Network> &nw = _networks[id];
+					SharedPtr<Network> &nw = _networks[id[0]];
 					if (!nw) {
-						nw = SharedPtr<Network>(new Network(RR,tptr,id,(void *)0,(const NetworkConfig *)0));
+						nw = SharedPtr<Network>(new Network(RR,tptr,id[0],(void *)0,(const NetworkConfig *)0));
 						r = ZT_RESULT_OK;
 					}
 				} else {
@@ -175,7 +174,7 @@ ZT_ResultCode Node::processStateUpdate(
 						try {
 							if (nconf->fromDictionary(*dict)) {
 								Mutex::Lock _l(_networks_m);
-								SharedPtr<Network> &nw = _networks[id];
+								SharedPtr<Network> &nw = _networks[id[0]];
 								if (nw) {
 									switch (nw->setConfiguration(tptr,*nconf,false)) {
 										default:
@@ -189,7 +188,7 @@ ZT_ResultCode Node::processStateUpdate(
 											break;
 									}
 								} else {
-									nw = SharedPtr<Network>(new Network(RR,tptr,id,(void *)0,nconf));
+									nw = SharedPtr<Network>(new Network(RR,tptr,id[0],(void *)0,nconf));
 								}
 							} else {
 								r = ZT_RESULT_ERROR_BAD_PARAMETER;
@@ -208,9 +207,14 @@ ZT_ResultCode Node::processStateUpdate(
 			}
 			break;
 
+		case ZT_STATE_OBJECT_NETWORK_MEMBERSHIP:
+			if (len) {
+			}
+			break;
+
 		case ZT_STATE_OBJECT_PLANET:
 		case ZT_STATE_OBJECT_MOON:
-			if (len <= ZT_WORLD_MAX_SERIALIZED_LENGTH) {
+			if ((len)&&(len <= ZT_WORLD_MAX_SERIALIZED_LENGTH)) {
 				World w;
 				try {
 					w.deserialize(Buffer<ZT_WORLD_MAX_SERIALIZED_LENGTH>(data,len));
@@ -395,18 +399,7 @@ ZT_ResultCode Node::processBackgroundTasks(void *tptr,uint64_t now,volatile uint
 	}
 
 	try {
-#ifdef ZT_ENABLE_CLUSTER
-		// If clustering is enabled we have to call cluster->doPeriodicTasks() very often, so we override normal timer deadline behavior
-		if (RR->cluster) {
-			RR->sw->doTimerTasks(tptr,now);
-			RR->cluster->doPeriodicTasks();
-			*nextBackgroundTaskDeadline = now + ZT_CLUSTER_PERIODIC_TASK_PERIOD; // this is really short so just tick at this rate
-		} else {
-#endif
-			*nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min(timeUntilNextPingCheck,RR->sw->doTimerTasks(tptr,now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
-#ifdef ZT_ENABLE_CLUSTER
-		}
-#endif
+		*nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min(timeUntilNextPingCheck,RR->sw->doTimerTasks(tptr,now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
 	} catch ( ... ) {
 		return ZT_RESULT_FATAL_ERROR_INTERNAL;
 	}
@@ -620,76 +613,6 @@ void Node::setNetconfMaster(void *networkControllerInstance)
 		RR->localNetworkController->init(RR->identity,this);
 }
 
-/*
-ZT_ResultCode Node::clusterInit(
-	unsigned int myId,
-	const struct sockaddr_storage *zeroTierPhysicalEndpoints,
-	unsigned int numZeroTierPhysicalEndpoints,
-	int x,
-	int y,
-	int z,
-	void (*sendFunction)(void *,unsigned int,const void *,unsigned int),
-	void *sendFunctionArg,
-	int (*addressToLocationFunction)(void *,const struct sockaddr_storage *,int *,int *,int *),
-	void *addressToLocationFunctionArg)
-{
-#ifdef ZT_ENABLE_CLUSTER
-	if (RR->cluster)
-		return ZT_RESULT_ERROR_BAD_PARAMETER;
-
-	std::vector<InetAddress> eps;
-	for(unsigned int i=0;i<numZeroTierPhysicalEndpoints;++i)
-		eps.push_back(InetAddress(zeroTierPhysicalEndpoints[i]));
-	std::sort(eps.begin(),eps.end());
-	RR->cluster = new Cluster(RR,myId,eps,x,y,z,sendFunction,sendFunctionArg,addressToLocationFunction,addressToLocationFunctionArg);
-
-	return ZT_RESULT_OK;
-#else
-	return ZT_RESULT_ERROR_UNSUPPORTED_OPERATION;
-#endif
-}
-
-ZT_ResultCode Node::clusterAddMember(unsigned int memberId)
-{
-#ifdef ZT_ENABLE_CLUSTER
-	if (!RR->cluster)
-		return ZT_RESULT_ERROR_BAD_PARAMETER;
-	RR->cluster->addMember((uint16_t)memberId);
-	return ZT_RESULT_OK;
-#else
-	return ZT_RESULT_ERROR_UNSUPPORTED_OPERATION;
-#endif
-}
-
-void Node::clusterRemoveMember(unsigned int memberId)
-{
-#ifdef ZT_ENABLE_CLUSTER
-	if (RR->cluster)
-		RR->cluster->removeMember((uint16_t)memberId);
-#endif
-}
-
-void Node::clusterHandleIncomingMessage(const void *msg,unsigned int len)
-{
-#ifdef ZT_ENABLE_CLUSTER
-	if (RR->cluster)
-		RR->cluster->handleIncomingStateMessage(msg,len);
-#endif
-}
-
-void Node::clusterStatus(ZT_ClusterStatus *cs)
-{
-	if (!cs)
-		return;
-#ifdef ZT_ENABLE_CLUSTER
-	if (RR->cluster)
-		RR->cluster->status(*cs);
-	else
-#endif
-	memset(cs,0,sizeof(ZT_ClusterStatus));
-}
-*/
-
 /****************************************************************************/
 /* Node methods used only within node/                                      */
 /****************************************************************************/
@@ -918,7 +841,7 @@ enum ZT_ResultCode ZT_Node_processStateUpdate(
 	ZT_Node *node,
 	void *tptr,
 	ZT_StateObjectType type,
-	uint64_t id,
+	const uint64_t id[2],
 	const void *data,
 	unsigned int len)
 {
diff --git a/node/Node.hpp b/node/Node.hpp
index f1209d00..17050d24 100644
--- a/node/Node.hpp
+++ b/node/Node.hpp
@@ -85,7 +85,7 @@ public:
 	ZT_ResultCode processStateUpdate(
 		void *tptr,
 		ZT_StateObjectType type,
-		uint64_t id,
+		const uint64_t id[2],
 		const void *data,
 		unsigned int len);
 	ZT_ResultCode processWirePacket(
diff --git a/node/Path.hpp b/node/Path.hpp
index 74b31d8d..a6f56d31 100644
--- a/node/Path.hpp
+++ b/node/Path.hpp
@@ -46,11 +46,6 @@
  */
 #define ZT_PATH_MAX_PREFERENCE_RANK ((ZT_INETADDRESS_MAX_SCOPE << 1) | 1)
 
-/**
- * Maximum distance for a path
- */
-#define ZT_PATH_DISTANCE_MAX 0xffff
-
 namespace ZeroTier {
 
 class RuntimeEnvironment;
@@ -125,7 +120,6 @@ public:
 		_incomingLinkQualitySlowLogCounter(-64), // discard first fast log
 		_incomingLinkQualityPreviousPacketCounter(0),
 		_outgoingPacketCounter(0),
-		_distance(ZT_PATH_DISTANCE_MAX),
 		_addr(),
 		_localAddress(),
 		_ipScope(InetAddress::IP_SCOPE_NONE)
@@ -143,7 +137,6 @@ public:
 		_incomingLinkQualitySlowLogCounter(-64), // discard first fast log
 		_incomingLinkQualityPreviousPacketCounter(0),
 		_outgoingPacketCounter(0),
-		_distance(ZT_PATH_DISTANCE_MAX),
 		_addr(addr),
 		_localAddress(localAddress),
 		_ipScope(addr.ipScope())
@@ -311,11 +304,6 @@ public:
 	 */
 	inline uint64_t lastTrustEstablishedPacketReceived() const { return _lastTrustEstablishedPacketReceived; }
 
-	/**
-	 * @return Distance (higher is further)
-	 */
-	inline unsigned int distance() const { return _distance; }
-
 	/**
 	 * @param lo Last out send
 	 * @param li Last in send
@@ -344,7 +332,6 @@ private:
 	volatile signed int _incomingLinkQualitySlowLogCounter;
 	volatile unsigned int _incomingLinkQualityPreviousPacketCounter;
 	volatile unsigned int _outgoingPacketCounter;
-	volatile unsigned int _distance;
 	InetAddress _addr;
 	InetAddress _localAddress;
 	InetAddress::IpScope _ipScope; // memoize this since it's a computed value checked often
diff --git a/node/Peer.cpp b/node/Peer.cpp
index a7466296..18d05875 100644
--- a/node/Peer.cpp
+++ b/node/Peer.cpp
@@ -146,8 +146,8 @@ void Peer::received(
 		path->updateLinkQuality((unsigned int)(packetId & 7));
 
 	if (hops == 0) {
+		// If this is a direct packet (no hops), update existing paths or learn new ones
 		bool pathAlreadyKnown = false;
-		bool newPathLearned = false;
 
 		{
 			Mutex::Lock _l(_paths_m);
@@ -188,7 +188,7 @@ void Peer::received(
 				if (verb == Packet::VERB_OK) {
 					potentialNewPeerPath->lr = now;
 					potentialNewPeerPath->p = path;
-					newPathLearned = true;
+					_lastWroteState = 0; // force state write now
 				} else {
 					TRACE("got %s via unknown path %s(%s), confirming...",Packet::verbString(verb),_id.address().toString().c_str(),path->address().toString().c_str());
 					attemptToContactAt(tPtr,path->localAddress(),path->address(),now,true,path->nextOutgoingCounter());
@@ -196,9 +196,6 @@ void Peer::received(
 				}
 			}
 		}
-
-		if (newPathLearned)
-			writeState(tPtr,now);
 	} else if (this->trustEstablished(now)) {
 		// Send PUSH_DIRECT_PATHS if hops>0 (relayed) and we have a trust relationship (common network membership)
 		if ((now - _lastDirectPathPushSent) >= ZT_DIRECT_PATH_PUSH_INTERVAL) {
@@ -270,6 +267,9 @@ void Peer::received(
 			}
 		}
 	}
+
+	if ((now - _lastWroteState) > ZT_PEER_STATE_WRITE_PERIOD)
+		writeState(tPtr,now);
 }
 
 bool Peer::sendDirect(void *tPtr,const void *data,unsigned int len,uint64_t now,bool force)
@@ -435,7 +435,7 @@ bool Peer::doPingAndKeepalive(void *tPtr,uint64_t now,int inetAddressFamily)
 void Peer::writeState(void *tPtr,const uint64_t now)
 {
 	try {
-		Buffer<sizeof(Peer) + 32 + (sizeof(Path) * 2)> b;
+		Buffer<ZT_PEER_MAX_SERIALIZED_STATE_SIZE> b;
 
 		b.append((uint8_t)1); // version
 		b.append(now);
@@ -455,7 +455,6 @@ void Peer::writeState(void *tPtr,const uint64_t now)
 				b.append(_v4Path.p->lastOut());
 				b.append(_v4Path.p->lastIn());
 				b.append(_v4Path.p->lastTrustEstablishedPacketReceived());
-				b.append((uint16_t)_v4Path.p->distance());
 				_v4Path.p->address().serialize(b);
 				_v4Path.p->localAddress().serialize(b);
 			}
@@ -464,29 +463,29 @@ void Peer::writeState(void *tPtr,const uint64_t now)
 				b.append(_v6Path.p->lastOut());
 				b.append(_v6Path.p->lastIn());
 				b.append(_v6Path.p->lastTrustEstablishedPacketReceived());
-				b.append((uint16_t)_v6Path.p->distance());
 				_v6Path.p->address().serialize(b);
 				_v6Path.p->localAddress().serialize(b);
 			}
 		}
 
-		b.append(_lastReceive);
-		b.append(_lastNontrivialReceive);
-		b.append(_lastTriedMemorizedPath);
-		b.append(_lastDirectPathPushSent);
-		b.append(_lastDirectPathPushReceive);
-		b.append(_lastCredentialRequestSent);
-		b.append(_lastWhoisRequestReceived);
-		b.append(_lastEchoRequestReceived);
-		b.append(_lastComRequestReceived);
-		b.append(_lastComRequestSent);
-		b.append(_lastCredentialsReceived);
-		b.append(_lastTrustEstablishedPacketReceived);
-
-		b.append(_vProto);
-		b.append(_vMajor);
-		b.append(_vMinor);
-		b.append(_vRevision);
+		// Save space by sending these as time deltas from now at 100ms resolution (clamped to fit in 16 bits)
+		b.append((uint16_t)(std::min(now - _lastReceive,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastNontrivialReceive,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastTriedMemorizedPath,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastDirectPathPushSent,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastDirectPathPushReceive,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastCredentialRequestSent,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastWhoisRequestReceived,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastEchoRequestReceived,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastComRequestReceived,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastComRequestSent,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastCredentialsReceived,(uint64_t)6553500) / 100));
+		b.append((uint16_t)(std::min(now - _lastTrustEstablishedPacketReceived,(uint64_t)6553500) / 100));
+
+		b.append((uint8_t)_vProto);
+		b.append((uint8_t)_vMajor);
+		b.append((uint8_t)_vMinor);
+		b.append((uint16_t)_vRevision);
 
 		b.append((uint16_t)0); // length of additional fields
 
@@ -501,7 +500,7 @@ void Peer::writeState(void *tPtr,const uint64_t now)
 bool Peer::applyStateUpdate(const void *data,unsigned int len)
 {
 	try {
-		Buffer<sizeof(Peer) + 32 + (sizeof(Path) * 2)> b(data,len);
+		Buffer<ZT_PEER_MAX_SERIALIZED_STATE_SIZE> b(data,len);
 		unsigned int ptr = 0;
 
 		if (b[ptr++] != 1)
@@ -510,6 +509,11 @@ bool Peer::applyStateUpdate(const void *data,unsigned int len)
 		if (ts <= _lastReceivedStateTimestamp)
 			return false;
 
+		Identity id;
+		ptr += id.deserialize(b,ptr);
+		if (id != _id) // sanity check
+			return false;
+
 		const unsigned int pathCount = (unsigned int)b[ptr++];
 		{
 			Mutex::Lock _l(_paths_m);
@@ -518,7 +522,6 @@ bool Peer::applyStateUpdate(const void *data,unsigned int len)
 				const uint64_t lastOut = b.at<uint64_t>(ptr); ptr += 8;
 				const uint64_t lastIn = b.at<uint64_t>(ptr); ptr += 8;
 				const uint64_t lastTrustEstablishedPacketReceived = b.at<uint64_t>(ptr); ptr += 8;
-				const unsigned int distance = b.at<uint16_t>(ptr); ptr += 2;
 				InetAddress addr,localAddr;
 				ptr += addr.deserialize(b,ptr);
 				ptr += localAddr.deserialize(b,ptr);
@@ -529,8 +532,9 @@ bool Peer::applyStateUpdate(const void *data,unsigned int len)
 						case AF_INET6: p = &_v6Path; break;
 					}
 					if (p) {
-						if ( ((p->p->address() != addr)||(p->p->localAddress() != localAddr)) && (p->p->distance() > distance) )
+						if ( (!p->p) || ((p->p->address() != addr)||(p->p->localAddress() != localAddr)) ) {
 							p->p = RR->topology->getPath(localAddr,addr);
+						}
 						p->lr = lr;
 						p->p->updateFromRemoteState(lastOut,lastIn,lastTrustEstablishedPacketReceived);
 					}
@@ -538,22 +542,22 @@ bool Peer::applyStateUpdate(const void *data,unsigned int len)
 			}
 		}
 
-		_lastReceive = std::max(_lastReceive,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastNontrivialReceive = std::max(_lastNontrivialReceive,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastTriedMemorizedPath = std::max(_lastTriedMemorizedPath,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastDirectPathPushSent = std::max(_lastDirectPathPushSent,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastDirectPathPushReceive = std::max(_lastDirectPathPushReceive,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastCredentialRequestSent = std::max(_lastCredentialRequestSent,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastWhoisRequestReceived = std::max(_lastWhoisRequestReceived,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastEchoRequestReceived = std::max(_lastEchoRequestReceived,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastComRequestReceived = std::max(_lastComRequestReceived,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastComRequestSent = std::max(_lastComRequestSent,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastCredentialsReceived = std::max(_lastCredentialsReceived,b.at<uint64_t>(ptr)); ptr += 8;
-		_lastTrustEstablishedPacketReceived = std::max(_lastTrustEstablishedPacketReceived,b.at<uint64_t>(ptr)); ptr += 8;
-
-		_vProto = b.at<uint16_t>(ptr); ptr += 2;
-		_vMajor = b.at<uint16_t>(ptr); ptr += 2;
-		_vMinor = b.at<uint16_t>(ptr); ptr += 2;
+		_lastReceive = std::max(_lastReceive,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastNontrivialReceive = std::max(_lastNontrivialReceive,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastTriedMemorizedPath = std::max(_lastTriedMemorizedPath,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastDirectPathPushSent = std::max(_lastDirectPathPushSent,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastDirectPathPushReceive = std::max(_lastDirectPathPushReceive,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastCredentialRequestSent = std::max(_lastCredentialRequestSent,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastWhoisRequestReceived = std::max(_lastWhoisRequestReceived,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastEchoRequestReceived = std::max(_lastEchoRequestReceived,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastComRequestReceived = std::max(_lastComRequestReceived,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastComRequestSent = std::max(_lastComRequestSent,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastCredentialsReceived = std::max(_lastCredentialsReceived,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+		_lastTrustEstablishedPacketReceived = std::max(_lastTrustEstablishedPacketReceived,ts - ((uint64_t)b.at<uint16_t>(ptr) * 100ULL)); ptr += 2;
+
+		_vProto = (uint16_t)b[ptr++];
+		_vMajor = (uint16_t)b[ptr++];
+		_vMinor = (uint16_t)b[ptr++];
 		_vRevision = b.at<uint16_t>(ptr); ptr += 2;
 
 		_lastReceivedStateTimestamp = ts;
@@ -563,4 +567,25 @@ bool Peer::applyStateUpdate(const void *data,unsigned int len)
 	return false;
 }
 
+SharedPtr<Peer> Peer::createFromStateUpdate(const RuntimeEnvironment *renv,void *tPtr,const void *data,unsigned int len)
+{
+	try {
+		Identity id;
+		{
+			Buffer<ZT_PEER_MAX_SERIALIZED_STATE_SIZE> b(data,len);
+			unsigned int ptr = 0;
+			if (b[ptr++] != 1)
+				return SharedPtr<Peer>();
+			ptr += 8; // skip TS, don't care
+			id.deserialize(b,ptr);
+		}
+		if (id) {
+			const SharedPtr<Peer> p(new Peer(renv,renv->identity,id));
+			if (p->applyStateUpdate(data,len))
+				return renv->topology->addPeer(tPtr,p);
+		}
+	} catch ( ... ) {}
+	return SharedPtr<Peer>();
+}
+
 } // namespace ZeroTier
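A standalone sketch of the 100 ms delta encoding used by writeState()/applyStateUpdate() above. The helper names are hypothetical; the clamp constant 6553500 (65535 * 100 ms, roughly 109 minutes, comfortably above ZT_PEER_STATE_WRITE_PERIOD) and the decode rule of never moving a locally known timestamp backward follow the code above.

#include <algorithm>
#include <cstdint>

// Encode: store the age of an event as a 16-bit count of 100 ms intervals,
// clamped to the largest value that fits (65535 * 100 ms).
static uint16_t encodeAge(uint64_t now,uint64_t lastEvent)
{
	const uint64_t age = (now > lastEvent) ? (now - lastEvent) : (uint64_t)0;
	return (uint16_t)(std::min(age,(uint64_t)6553500) / 100);
}

// Decode relative to the state object's own timestamp (ts), keeping whichever
// of the local and remote values is more recent.
static uint64_t decodeAge(uint64_t ts,uint16_t stored,uint64_t localValue)
{
	return std::max(localValue,ts - ((uint64_t)stored * 100ULL));
}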
diff --git a/node/Peer.hpp b/node/Peer.hpp
index d6b7dad9..f0eb3ee8 100644
--- a/node/Peer.hpp
+++ b/node/Peer.hpp
@@ -51,6 +51,8 @@
 #include "Mutex.hpp"
 #include "NonCopyable.hpp"
 
+#define ZT_PEER_MAX_SERIALIZED_STATE_SIZE (sizeof(Peer) + 32 + (sizeof(Path) * 2))
+
 namespace ZeroTier {
 
 /**
@@ -194,9 +196,10 @@ public:
 	bool doPingAndKeepalive(void *tPtr,uint64_t now,int inetAddressFamily);
 
 	/**
-	 * Write current peer state to external storage / cluster network
+	 * Write object state to external storage and/or cluster network
 	 *
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
+	 * @param now Current time
 	 */
 	void writeState(void *tPtr,const uint64_t now);
 
@@ -437,6 +440,17 @@ public:
 		return false;
 	}
 
+	/**
+	 * Create a peer from a remote state update
+	 *
+	 * @param renv Runtime environment
+	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
+	 * @param data State update data
+	 * @param len State update length
+	 * @return Peer or NULL if data was invalid
+	 */
+	static SharedPtr<Peer> createFromStateUpdate(const RuntimeEnvironment *renv,void *tPtr,const void *data,unsigned int len);
+
 private:
 	struct _PeerPath
 	{
diff --git a/node/RuntimeEnvironment.hpp b/node/RuntimeEnvironment.hpp
index d8e1d699..ee0c8c24 100644
--- a/node/RuntimeEnvironment.hpp
+++ b/node/RuntimeEnvironment.hpp
@@ -30,8 +30,8 @@
 #include <string>
 
 #include "Constants.hpp"
+#include "Utils.hpp"
 #include "Identity.hpp"
-#include "Mutex.hpp"
 
 namespace ZeroTier {
 
@@ -58,10 +58,13 @@ public:
 		,mc((Multicaster *)0)
 		,topology((Topology *)0)
 		,sa((SelfAwareness *)0)
-#ifdef ZT_ENABLE_CLUSTER
-		,cluster((Cluster *)0)
-#endif
 	{
+		Utils::getSecureRandom(&instanceId,sizeof(instanceId));
+	}
+
+	~RuntimeEnvironment()
+	{
+		Utils::burn(reinterpret_cast<void *>(const_cast<char *>(secretIdentityStr.data())),(unsigned int)secretIdentityStr.length());
 	}
 
 	// Node instance that owns this RuntimeEnvironment
@@ -87,9 +90,11 @@ public:
 	Multicaster *mc;
 	Topology *topology;
 	SelfAwareness *sa;
-#ifdef ZT_ENABLE_CLUSTER
-	Cluster *cluster;
-#endif
+
+	/**
+	 * A random integer identifying this run of ZeroTier
+	 */
+	uint32_t instanceId;
 };
 
 } // namespace ZeroTier
diff --git a/node/Switch.cpp b/node/Switch.cpp
index 2be54b37..cbd73a83 100644
--- a/node/Switch.cpp
+++ b/node/Switch.cpp
@@ -108,13 +108,7 @@ void Switch::onRemotePacket(void *tPtr,const InetAddress &localAddr,const InetAd
 				const Address destination(fragment.destination());
 
 				if (destination != RR->identity.address()) {
-#ifdef ZT_ENABLE_CLUSTER
-					const bool isClusterFrontplane = ((RR->cluster)&&(RR->cluster->isClusterPeerFrontplane(fromAddr)));
-#else
-					const bool isClusterFrontplane = false;
-#endif
-
-					if ( (!RR->topology->amRoot()) && (!path->trustEstablished(now)) && (!isClusterFrontplane) )
+					if ( (!RR->topology->amRoot()) && (!path->trustEstablished(now)) )
 						return;
 
 					if (fragment.hops() < ZT_RELAY_MAX_HOPS) {
@@ -124,13 +118,6 @@ void Switch::onRemotePacket(void *tPtr,const InetAddress &localAddr,const InetAd
 						// It wouldn't hurt anything, just redundant and unnecessary.
 						SharedPtr<Peer> relayTo = RR->topology->getPeer(tPtr,destination);
 						if ((!relayTo)||(!relayTo->sendDirect(tPtr,fragment.data(),fragment.size(),now,false))) {
-#ifdef ZT_ENABLE_CLUSTER
-							if ((RR->cluster)&&(!isClusterFrontplane)) {
-								RR->cluster->relayViaCluster(Address(),destination,fragment.data(),fragment.size(),false);
-								return;
-							}
-#endif
-
 							// Don't know peer or no direct path -- so relay via someone upstream
 							relayTo = RR->topology->getUpstreamPeer();
 							if (relayTo)
@@ -197,13 +184,8 @@ void Switch::onRemotePacket(void *tPtr,const InetAddress &localAddr,const InetAd
 
 				//TRACE("<< %.16llx %s -> %s (size: %u)",(unsigned long long)packet->packetId(),source.toString().c_str(),destination.toString().c_str(),packet->size());
 
-#ifdef ZT_ENABLE_CLUSTER
-				if ( (source == RR->identity.address()) && ((!RR->cluster)||(!RR->cluster->isClusterPeerFrontplane(fromAddr))) )
-					return;
-#else
 				if (source == RR->identity.address())
 					return;
-#endif
 
 				if (destination != RR->identity.address()) {
 					if ( (!RR->topology->amRoot()) && (!path->trustEstablished(now)) && (source != RR->identity.address()) )
@@ -212,12 +194,7 @@ void Switch::onRemotePacket(void *tPtr,const InetAddress &localAddr,const InetAd
 					Packet packet(data,len);
 
 					if (packet.hops() < ZT_RELAY_MAX_HOPS) {
-#ifdef ZT_ENABLE_CLUSTER
-						if (source != RR->identity.address()) // don't increment hops for cluster frontplane relays
-							packet.incrementHops();
-#else
 						packet.incrementHops();
-#endif
 
 						SharedPtr<Peer> relayTo = RR->topology->getPeer(tPtr,destination);
 						if ((relayTo)&&(relayTo->sendDirect(tPtr,packet.data(),packet.size(),now,false))) {
@@ -277,12 +254,6 @@ void Switch::onRemotePacket(void *tPtr,const InetAddress &localAddr,const InetAd
 								}
 							}
 						} else {
-#ifdef ZT_ENABLE_CLUSTER
-							if ((RR->cluster)&&(source != RR->identity.address())) {
-								RR->cluster->relayViaCluster(source,destination,packet.data(),packet.size(),_shouldUnite(now,source,destination));
-								return;
-							}
-#endif
 							relayTo = RR->topology->getUpstreamPeer(&source,1,true);
 							if (relayTo)
 								relayTo->sendDirect(tPtr,packet.data(),packet.size(),now,true);
@@ -769,14 +740,6 @@ bool Switch::_trySend(void *tPtr,Packet &packet,bool encrypt)
 	const uint64_t now = RR->node->now();
 	const Address destination(packet.destination());
 
-#ifdef ZT_ENABLE_CLUSTER
-	uint64_t clusterMostRecentTs = 0;
-	int clusterMostRecentMemberId = -1;
-	uint8_t clusterPeerSecret[ZT_PEER_SECRET_KEY_LENGTH];
-	if (RR->cluster)
-		clusterMostRecentMemberId = RR->cluster->checkSendViaCluster(destination,clusterMostRecentTs,clusterPeerSecret);
-#endif
-
 	const SharedPtr<Peer> peer(RR->topology->getPeer(tPtr,destination));
 	if (peer) {
 		/* First get the best path, and if it's dead (and this is not a root)
@@ -788,74 +751,37 @@ bool Switch::_trySend(void *tPtr,Packet &packet,bool encrypt)
 
 		viaPath = peer->getBestPath(now,false);
 		if ( (viaPath) && (!viaPath->alive(now)) && (!RR->topology->isUpstream(peer->identity())) ) {
-#ifdef ZT_ENABLE_CLUSTER
-			if ((clusterMostRecentMemberId < 0)||(viaPath->lastIn() > clusterMostRecentTs)) {
-#endif
-				if ((now - viaPath->lastOut()) > std::max((now - viaPath->lastIn()) * 4,(uint64_t)ZT_PATH_MIN_REACTIVATE_INTERVAL)) {
-					peer->attemptToContactAt(tPtr,viaPath->localAddress(),viaPath->address(),now,false,viaPath->nextOutgoingCounter());
-					viaPath->sent(now);
-				}
-#ifdef ZT_ENABLE_CLUSTER
+			if ((now - viaPath->lastOut()) > std::max((now - viaPath->lastIn()) * 4,(uint64_t)ZT_PATH_MIN_REACTIVATE_INTERVAL)) {
+				peer->attemptToContactAt(tPtr,viaPath->localAddress(),viaPath->address(),now,false,viaPath->nextOutgoingCounter());
+				viaPath->sent(now);
 			}
-#endif
 			viaPath.zero();
 		}
 
-#ifdef ZT_ENABLE_CLUSTER
-		if (clusterMostRecentMemberId >= 0) {
-			if ((viaPath)&&(viaPath->lastIn() < clusterMostRecentTs))
-				viaPath.zero();
-		} else if (!viaPath) {
-#else
 		if (!viaPath) {
-#endif
 			peer->tryMemorizedPath(tPtr,now); // periodically attempt memorized or statically defined paths, if any are known
 			const SharedPtr<Peer> relay(RR->topology->getUpstreamPeer());
 			if ( (!relay) || (!(viaPath = relay->getBestPath(now,false))) ) {
 				if (!(viaPath = peer->getBestPath(now,true)))
 					return false;
 			}
-#ifdef ZT_ENABLE_CLUSTER
 		}
-#else
-		}
-#endif
 	} else {
-#ifdef ZT_ENABLE_CLUSTER
-		if (clusterMostRecentMemberId < 0) {
-#else
-			requestWhois(tPtr,destination);
-			return false; // if we are not in cluster mode, there is no way we can send without knowing the peer directly
-#endif
-#ifdef ZT_ENABLE_CLUSTER
-		}
-#endif
+		requestWhois(tPtr,destination);
+		return false; // if we are not in cluster mode, there is no way we can send without knowing the peer directly
 	}
 
 	unsigned int chunkSize = std::min(packet.size(),(unsigned int)ZT_UDP_DEFAULT_PAYLOAD_MTU);
 	packet.setFragmented(chunkSize < packet.size());
 
-#ifdef ZT_ENABLE_CLUSTER
-	const uint64_t trustedPathId = (viaPath) ? RR->topology->getOutboundPathTrust(viaPath->address()) : 0;
-	if (trustedPathId) {
-		packet.setTrusted(trustedPathId);
-	} else {
-		packet.armor((clusterMostRecentMemberId >= 0) ? clusterPeerSecret : peer->key(),encrypt,(viaPath) ? viaPath->nextOutgoingCounter() : 0);
-	}
-#else
 	const uint64_t trustedPathId = RR->topology->getOutboundPathTrust(viaPath->address());
 	if (trustedPathId) {
 		packet.setTrusted(trustedPathId);
 	} else {
 		packet.armor(peer->key(),encrypt,viaPath->nextOutgoingCounter());
 	}
-#endif
 
-#ifdef ZT_ENABLE_CLUSTER
-	if ( ((viaPath)&&(viaPath->send(RR,tPtr,packet.data(),chunkSize,now))) || ((clusterMostRecentMemberId >= 0)&&(RR->cluster->sendViaCluster(clusterMostRecentMemberId,destination,packet.data(),chunkSize))) ) {
-#else
 	if (viaPath->send(RR,tPtr,packet.data(),chunkSize,now)) {
-#endif
 		if (chunkSize < packet.size()) {
 			// Too big for one packet, fragment the rest
 			unsigned int fragStart = chunkSize;
@@ -868,14 +794,7 @@ bool Switch::_trySend(void *tPtr,Packet &packet,bool encrypt)
 			for(unsigned int fno=1;fno<totalFragments;++fno) {
 				chunkSize = std::min(remaining,(unsigned int)(ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
 				Packet::Fragment frag(packet,fragStart,chunkSize,fno,totalFragments);
-#ifdef ZT_ENABLE_CLUSTER
-				if (viaPath)
-					viaPath->send(RR,tPtr,frag.data(),frag.size(),now);
-				else if (clusterMostRecentMemberId >= 0)
-					RR->cluster->sendViaCluster(clusterMostRecentMemberId,destination,frag.data(),frag.size());
-#else
 				viaPath->send(RR,tPtr,frag.data(),frag.size(),now);
-#endif
 				fragStart += chunkSize;
 				remaining -= chunkSize;
 			}
diff --git a/node/Topology.cpp b/node/Topology.cpp
index be116b28..09a1a895 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -108,8 +108,6 @@ SharedPtr<Peer> Topology::addPeer(void *tPtr,const SharedPtr<Peer> &peer)
 		np = hp;
 	}
 
-	saveIdentity(tPtr,np->identity());
-
 	return np;
 }
 
@@ -128,18 +126,20 @@ SharedPtr<Peer> Topology::getPeer(void *tPtr,const Address &zta)
 	}
 
 	try {
-		Identity id(_getIdentity(tPtr,zta));
-		if (id) {
-			SharedPtr<Peer> np(new Peer(RR,RR->identity,id));
-			{
-				Mutex::Lock _l(_peers_m);
-				SharedPtr<Peer> &ap = _peers[zta];
-				if (!ap)
-					ap.swap(np);
+		char buf[ZT_PEER_MAX_SERIALIZED_STATE_SIZE];
+		uint64_t idbuf[2]; idbuf[0] = zta.toInt(); idbuf[1] = 0;
+		int len = RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_PEER_STATE,idbuf,buf,(unsigned int)sizeof(buf));
+		if (len > 0) {
+			Mutex::Lock _l(_peers_m);
+			SharedPtr<Peer> &ap = _peers[zta];
+			if (ap)
 				return ap;
-			}
+			ap = Peer::createFromStateUpdate(RR,tPtr,buf,len);
+			if (!ap)
+				_peers.erase(zta);
+			return ap;
 		}
-	} catch ( ... ) {} // invalid identity on disk?
+	} catch ( ... ) {} // ignore invalid identities or other strange failures
 
 	return SharedPtr<Peer>();
 }
@@ -154,17 +154,7 @@ Identity Topology::getIdentity(void *tPtr,const Address &zta)
 		if (ap)
 			return (*ap)->identity();
 	}
-	return _getIdentity(tPtr,zta);
-}
-
-void Topology::saveIdentity(void *tPtr,const Identity &id)
-{
-	if (id) {
-		const std::string tmp(id.toString(false));
-		uint64_t idtmp[2];
-		idtmp[0] = id.address().toInt(); idtmp[1] = 0;
-		RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_PEER_IDENTITY,idtmp,tmp.data(),(unsigned int)tmp.length());
-	}
+	return Identity();
 }
 
 SharedPtr<Peer> Topology::getUpstreamPeer(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
@@ -423,21 +413,6 @@ void Topology::doPeriodicTasks(void *tPtr,uint64_t now)
 	}
 }
 
-Identity Topology::_getIdentity(void *tPtr,const Address &zta)
-{
-	char tmp[512];
-	uint64_t idtmp[2];
-	idtmp[0] = zta.toInt(); idtmp[1] = 0;
-	int n = RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_PEER_IDENTITY,idtmp,tmp,sizeof(tmp) - 1);
-	if (n > 0) {
-		tmp[n] = (char)0;
-		try {
-			return Identity(tmp);
-		} catch ( ... ) {} // ignore invalid IDs
-	}
-	return Identity();
-}
-
 void Topology::_memoizeUpstreams(void *tPtr)
 {
 	// assumes _upstreams_m and _peers_m are locked
@@ -450,10 +425,8 @@ void Topology::_memoizeUpstreams(void *tPtr)
 		} else if (std::find(_upstreamAddresses.begin(),_upstreamAddresses.end(),i->identity.address()) == _upstreamAddresses.end()) {
 			_upstreamAddresses.push_back(i->identity.address());
 			SharedPtr<Peer> &hp = _peers[i->identity.address()];
-			if (!hp) {
+			if (!hp)
 				hp = new Peer(RR,RR->identity,i->identity);
-				saveIdentity(tPtr,i->identity);
-			}
 		}
 	}
 
@@ -464,10 +437,8 @@ void Topology::_memoizeUpstreams(void *tPtr)
 			} else if (std::find(_upstreamAddresses.begin(),_upstreamAddresses.end(),i->identity.address()) == _upstreamAddresses.end()) {
 				_upstreamAddresses.push_back(i->identity.address());
 				SharedPtr<Peer> &hp = _peers[i->identity.address()];
-				if (!hp) {
+				if (!hp)
 					hp = new Peer(RR,RR->identity,i->identity);
-					saveIdentity(tPtr,i->identity);
-				}
 			}
 		}
 	}
diff --git a/node/Topology.hpp b/node/Topology.hpp
index 9bc7c0d8..32e38dd3 100644
--- a/node/Topology.hpp
+++ b/node/Topology.hpp
@@ -81,6 +81,13 @@ public:
 	 */
 	SharedPtr<Peer> getPeer(void *tPtr,const Address &zta);
 
+	/**
+	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
+	 * @param zta ZeroTier address of peer
+	 * @return Identity or NULL identity if not found
+	 */
+	Identity getIdentity(void *tPtr,const Address &zta);
+
 	/**
 	 * Get a peer only if it is presently in memory (no disk cache)
 	 *
@@ -116,26 +123,6 @@ public:
 		return p;
 	}
 
-	/**
-	 * Get the identity of a peer
-	 *
-	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
-	 * @param zta ZeroTier address of peer
-	 * @return Identity or NULL Identity if not found
-	 */
-	Identity getIdentity(void *tPtr,const Address &zta);
-
-	/**
-	 * Cache an identity
-	 *
-	 * This is done automatically on addPeer(), and so is only useful for
-	 * cluster identity replication.
-	 *
-	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
-	 * @param id Identity to cache
-	 */
-	void saveIdentity(void *tPtr,const Identity &id);
-
 	/**
 	 * Get the current best upstream peer
 	 *
diff --git a/osdep/Binder.hpp b/osdep/Binder.hpp
index a0b47367..b1fe5921 100644
--- a/osdep/Binder.hpp
+++ b/osdep/Binder.hpp
@@ -180,7 +180,7 @@ public:
 		const unsigned long pid = (unsigned long)getpid();
 
 		// Get all device names
-		Utils::snprintf(fn,sizeof(fn),"/proc/%lu/net/dev",pid);
+		Utils::ztsnprintf(fn,sizeof(fn),"/proc/%lu/net/dev",pid);
 		FILE *procf = fopen(fn,"r");
 		if (procf) {
 			while (fgets(tmp,sizeof(tmp),procf)) {
@@ -196,7 +196,7 @@ public:
 		}
 
 		// Get IPv6 addresses (and any device names we don't already know)
-		Utils::snprintf(fn,sizeof(fn),"/proc/%lu/net/if_inet6",pid);
+		Utils::ztsnprintf(fn,sizeof(fn),"/proc/%lu/net/if_inet6",pid);
 		procf = fopen(fn,"r");
 		if (procf) {
 			while (fgets(tmp,sizeof(tmp),procf)) {
diff --git a/service/OneService.cpp b/service/OneService.cpp
index f949f348..b5b11111 100644
--- a/service/OneService.cpp
+++ b/service/OneService.cpp
@@ -154,9 +154,6 @@ namespace ZeroTier { typedef BSDEthernetTap EthernetTap; }
 // How often to check for local interface addresses
 #define ZT_LOCAL_INTERFACE_CHECK_INTERVAL 60000
 
-// Clean files from iddb.d that are older than this (60 days)
-#define ZT_IDDB_CLEANUP_AGE 5184000000ULL
-
 // Maximum write buffer size for outgoing TCP connections (sanity limit)
 #define ZT_TCP_MAX_WRITEQ_SIZE 33554432
 
@@ -414,7 +411,6 @@ public:
 	const std::string _homePath;
 	std::string _authToken;
 	std::string _controllerDbPath;
-	const std::string _iddbPath;
 	const std::string _networksPath;
 	const std::string _moonsPath;
 
@@ -513,7 +509,6 @@ public:
 	OneServiceImpl(const char *hp,unsigned int port) :
 		_homePath((hp) ? hp : ".")
 		,_controllerDbPath(_homePath + ZT_PATH_SEPARATOR_S "controller.d")
-		,_iddbPath(_homePath + ZT_PATH_SEPARATOR_S "iddb.d")
 		,_networksPath(_homePath + ZT_PATH_SEPARATOR_S "networks.d")
 		,_moonsPath(_homePath + ZT_PATH_SEPARATOR_S "moons.d")
 		,_controller((EmbeddedNetworkController *)0)
@@ -732,6 +727,9 @@ public:
 			}
 #endif
 
+			// Delete legacy iddb.d if present (cleanup)
+			OSUtils::rmDashRf((_homePath + ZT_PATH_SEPARATOR_S "iddb.d").c_str());
+
 			// Network controller is now enabled by default for desktop and server
 			_controller = new EmbeddedNetworkController(_node,_controllerDbPath.c_str());
 			_node->setNetconfMaster((void *)_controller);
@@ -781,7 +779,6 @@ public:
 			uint64_t lastBindRefresh = 0;
 			uint64_t lastUpdateCheck = clockShouldBe;
 			uint64_t lastLocalInterfaceAddressCheck = (clockShouldBe - ZT_LOCAL_INTERFACE_CHECK_INTERVAL) + 15000; // do this in 15s to give portmapper time to configure and other things time to settle
-			uint64_t lastCleanedIddb = 0;
 			uint64_t lastTcpCheck = 0;
 			for(;;) {
 				_run_m.lock();
@@ -797,12 +794,6 @@ public:
 
 				const uint64_t now = OSUtils::now();
 
-				// Clean iddb.d on start and every 24 hours
-				if ((now - lastCleanedIddb) > 86400000) {
-					lastCleanedIddb = now;
-					OSUtils::cleanDirectory(_iddbPath.c_str(),now - ZT_IDDB_CLEANUP_AGE);
-				}
-
 				// Attempt to detect sleep/wake events by detecting delay overruns
 				bool restarted = false;
 				if ((now > clockShouldBe)&&((now - clockShouldBe) > 10000)) {
@@ -1027,7 +1018,7 @@ public:
 		return NULL;
 	}
 
-	virtual Node * getNode()
+	virtual Node *getNode()
 	{
 		return _node;
 	}
@@ -1903,27 +1894,16 @@ public:
 
 		char *const outdata = const_cast<char *>(tc->writeq.data()) + startpos;
 		encryptClusterMessage(outdata,mlen);
-	}
-
-	void replicateStateObjectToCluster(const ZT_StateObjectType type,const uint64_t id[2],const void *const data,const unsigned int len,const uint64_t everyoneBut)
-	{
-		std::vector<uint64_t> sentTo;
-		if (everyoneBut)
-			sentTo.push_back(everyoneBut);
-		Mutex::Lock _l(_tcpConnections_m);
-		for(std::vector<TcpConnection *>::const_iterator ci(_tcpConnections.begin());ci!=_tcpConnections.end();++ci) {
-			TcpConnection *const c = *ci;
-			if ((c->type == TcpConnection::TCP_CLUSTER_BACKPLANE)&&(c->clusterMemberId != 0)&&(std::find(sentTo.begin(),sentTo.end(),c->clusterMemberId) == sentTo.end())) {
-				sentTo.push_back(c->clusterMemberId);
-				replicateStateObject(type,id,data,len,c);
-			}
-		}
+		tc->writeq.append(outdata,mlen);
 	}
 
 	void writeStateObject(enum ZT_StateObjectType type,const uint64_t id[2],const void *data,int len)
 	{
-		char p[4096];
+		char buf[65535];
+		char p[1024];
+		FILE *f;
 		bool secure = false;
+
 		switch(type) {
 			case ZT_STATE_OBJECT_IDENTITY_PUBLIC:
 				Utils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "identity.public",_homePath.c_str());
@@ -1932,13 +1912,14 @@ public:
 				Utils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "identity.secret",_homePath.c_str());
 				secure = true;
 				break;
-			case ZT_STATE_OBJECT_PEER_IDENTITY:
-				Utils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "iddb.d/%.10llx",_homePath.c_str(),(unsigned long long)id[0]);
-				break;
+			//case ZT_STATE_OBJECT_PEER_STATE:
+			//	break;
 			case ZT_STATE_OBJECT_NETWORK_CONFIG:
 				Utils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "networks.d/%.16llx.conf",_homePath.c_str(),(unsigned long long)id[0]);
 				secure = true;
 				break;
+			//case ZT_STATE_OBJECT_NETWORK_MEMBERSHIP:
+			//	break;
 			case ZT_STATE_OBJECT_PLANET:
 				Utils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "planet",_homePath.c_str());
 				break;
@@ -1949,17 +1930,30 @@ public:
 				p[0] = (char)0;
 				break;
 		}
+
 		if (p[0]) {
 			if (len >= 0) {
-				FILE *f = fopen(p,"w");
+				// Check to see if we've already written this first. This reduces
+				// redundant writes and I/O overhead on most platforms and has
+				// little effect on others.
+				f = fopen(p,"r");
+				bool redundant = false;
 				if (f) {
-					if (fwrite(data,len,1,f) != 1)
-						fprintf(stderr,"WARNING: unable to write to file: %s (I/O error)" ZT_EOL_S,p);
+					long l = (long)fread(buf,1,sizeof(buf),f);
 					fclose(f);
-					if (secure)
-						OSUtils::lockDownFile(p,false);
-				} else {
-					fprintf(stderr,"WARNING: unable to write to file: %s (unable to open)" ZT_EOL_S,p);
+					redundant = ((l == (long)len)&&(memcmp(data,buf,l) == 0));
+				}
+				if (!redundant) {
+					f = fopen(p,"w");
+					if (f) {
+						if (fwrite(data,len,1,f) != 1)
+							fprintf(stderr,"WARNING: unable to write to file: %s (I/O error)" ZT_EOL_S,p);
+						fclose(f);
+						if (secure)
+							OSUtils::lockDownFile(p,false);
+					} else {
+						fprintf(stderr,"WARNING: unable to write to file: %s (unable to open)" ZT_EOL_S,p);
+					}
 				}
 			} else {
 				OSUtils::rm(p);
@@ -2314,7 +2308,7 @@ public:
 									break;
 
 								case CLUSTER_MESSAGE_STATE_OBJECT:
-									if (mlen >= 42) { // type + object ID + [data]
+									if (mlen > 42) { // type + object ID + [data]
 										uint64_t objId[2];
 										objId[0] = (
 											((uint64_t)data[26] << 56) |
@@ -2336,10 +2330,8 @@ public:
 											((uint64_t)data[40] << 8) |
 											(uint64_t)data[41]
 										);
-										if (_node->processStateUpdate((void *)0,(ZT_StateObjectType)data[25],objId[0],data + 42,(unsigned int)(mlen - 42)) == ZT_RESULT_OK) {
+										if (_node->processStateUpdate((void *)0,(ZT_StateObjectType)data[25],objId,data + 42,(unsigned int)(mlen - 42)) == ZT_RESULT_OK)
 											writeStateObject((ZT_StateObjectType)data[25],objId,data + 42,(unsigned int)(mlen - 42));
-											replicateStateObjectToCluster((ZT_StateObjectType)data[25],objId,data + 42,(unsigned int)(mlen - 42),tc->clusterMemberId);
-										}
 									}
 									break;
 
@@ -2558,7 +2550,18 @@ public:
 	inline void nodeStatePutFunction(enum ZT_StateObjectType type,const uint64_t id[2],const void *data,int len)
 	{
 		writeStateObject(type,id,data,len);
-		replicateStateObjectToCluster(type,id,data,len,0);
+
+		std::vector<uint64_t> sentTo;
+		{
+			Mutex::Lock _l(_tcpConnections_m);
+			for(std::vector<TcpConnection *>::const_iterator ci(_tcpConnections.begin());ci!=_tcpConnections.end();++ci) {
+				TcpConnection *const c = *ci;
+				if ((c->type == TcpConnection::TCP_CLUSTER_BACKPLANE)&&(c->clusterMemberId != 0)&&(std::find(sentTo.begin(),sentTo.end(),c->clusterMemberId) == sentTo.end())) {
+					sentTo.push_back(c->clusterMemberId);
+					replicateStateObject(type,id,data,len,c);
+				}
+			}
+		}
 	}
 
 	inline int nodeStateGetFunction(enum ZT_StateObjectType type,const uint64_t id[2],void *data,unsigned int maxlen)
@@ -2571,9 +2574,6 @@ public:
 			case ZT_STATE_OBJECT_IDENTITY_SECRET:
 				Utils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "identity.secret",_homePath.c_str());
 				break;
-			case ZT_STATE_OBJECT_PEER_IDENTITY:
-				Utils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "iddb.d/%.10llx",_homePath.c_str(),(unsigned long long)id);
-				break;
 			case ZT_STATE_OBJECT_NETWORK_CONFIG:
 				Utils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "networks.d/%.16llx.conf",_homePath.c_str(),(unsigned long long)id);
 				break;
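For reference, the packing complementary to the CLUSTER_MESSAGE_STATE_OBJECT unpacking shown above: byte 25 carries the state object type, bytes 26-41 the two 64-bit object IDs in big-endian order, and the object data begins at byte 42. The framing in bytes 0-24 is not shown in this patch, so this sketch (with a hypothetical helper name) covers only the ID portion.

#include <cstdint>

// Hypothetical helper mirroring the handler above: write the type byte and the
// two big-endian 64-bit object IDs into a cluster state message buffer. The
// object data itself would then be copied starting at msg[42].
static void packStateObjectHeader(uint8_t *msg,uint8_t objType,const uint64_t objId[2])
{
	msg[25] = objType;
	for(int i=0;i<8;++i) {
		msg[26 + i] = (uint8_t)(objId[0] >> (56 - (i * 8)));
		msg[34 + i] = (uint8_t)(objId[1] >> (56 - (i * 8)));
	}
}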
-- 
cgit v1.2.3