From 7711eba297955d4cf3852f36fe7c6d118da38171 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Thu, 22 Oct 2015 16:02:01 -0700 Subject: More cluster wiring... --- node/Cluster.cpp | 4 +++- node/Cluster.hpp | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'node') diff --git a/node/Cluster.cpp b/node/Cluster.cpp index d9514db5..e7aa5a41 100644 --- a/node/Cluster.cpp +++ b/node/Cluster.cpp @@ -504,7 +504,7 @@ void Cluster::doPeriodicTasks() void Cluster::addMember(uint16_t memberId) { - if (memberId >= ZT_CLUSTER_MAX_MEMBERS) + if ((memberId >= ZT_CLUSTER_MAX_MEMBERS)||(memberId == _id)) return; Mutex::Lock _l2(_members[memberId].lock); @@ -622,6 +622,8 @@ bool Cluster::redirectPeer(const Address &peerAddress,const InetAddress &peerPhy void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len) { + if ((len + 3) > (ZT_CLUSTER_MAX_MESSAGE_LENGTH - (24 + 2 + 2))) // sanity check + return; _Member &m = _members[memberId]; // assumes m.lock is locked! if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH) diff --git a/node/Cluster.hpp b/node/Cluster.hpp index 2e60fd6b..6c9a2917 100644 --- a/node/Cluster.hpp +++ b/node/Cluster.hpp @@ -47,7 +47,7 @@ /** * Timeout for cluster members being considered "alive" */ -#define ZT_CLUSTER_TIMEOUT 30000 +#define ZT_CLUSTER_TIMEOUT 10000 /** * How often should we announce that we have a peer? @@ -57,7 +57,7 @@ /** * Desired period between doPeriodicTasks() in milliseconds */ -#define ZT_CLUSTER_PERIODIC_TASK_PERIOD 50 +#define ZT_CLUSTER_PERIODIC_TASK_PERIOD 100 namespace ZeroTier { -- cgit v1.2.3 From 964b30902ad93d2ea8599721611a488dac6adbca Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 23 Oct 2015 11:51:18 -0700 Subject: Cluster fix: was accumulating remote endpoints endlessly. --- node/Cluster.cpp | 379 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 195 insertions(+), 184 deletions(-) (limited to 'node') diff --git a/node/Cluster.cpp b/node/Cluster.cpp index e7aa5a41..c943e62b 100644 --- a/node/Cluster.cpp +++ b/node/Cluster.cpp @@ -143,212 +143,223 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len) return; const uint16_t fromMemberId = dmsg.at(0); unsigned int ptr = 2; - if (fromMemberId == _id) + if (fromMemberId == _id) // sanity check: we don't talk to ourselves return; const uint16_t toMemberId = dmsg.at(ptr); ptr += 2; - if (toMemberId != _id) + if (toMemberId != _id) // sanity check: message not for us? 
return; - _Member &m = _members[fromMemberId]; - Mutex::Lock mlck(m.lock); - - try { - while (ptr < dmsg.size()) { - const unsigned int mlen = dmsg.at(ptr); ptr += 2; - const unsigned int nextPtr = ptr + mlen; - - int mtype = -1; - try { - switch((StateMessageType)(mtype = (int)dmsg[ptr++])) { - default: - break; - - case STATE_MESSAGE_ALIVE: { - ptr += 7; // skip version stuff, not used yet - m.x = dmsg.at(ptr); ptr += 4; - m.y = dmsg.at(ptr); ptr += 4; - m.z = dmsg.at(ptr); ptr += 4; - ptr += 8; // skip local clock, not used - m.load = dmsg.at(ptr); ptr += 8; - ptr += 8; // skip flags, unused + { // make sure sender is actually considered a member + Mutex::Lock _l3(_memberIds_m); + if (std::find(_memberIds.begin(),_memberIds.end(),fromMemberId) == _memberIds.end()) + return; + } + + { + _Member &m = _members[fromMemberId]; + Mutex::Lock mlck(m.lock); + + try { + while (ptr < dmsg.size()) { + const unsigned int mlen = dmsg.at(ptr); ptr += 2; + const unsigned int nextPtr = ptr + mlen; + if (nextPtr > dmsg.size()) + break; + + int mtype = -1; + try { + switch((StateMessageType)(mtype = (int)dmsg[ptr++])) { + default: + break; + + case STATE_MESSAGE_ALIVE: { + ptr += 7; // skip version stuff, not used yet + m.x = dmsg.at(ptr); ptr += 4; + m.y = dmsg.at(ptr); ptr += 4; + m.z = dmsg.at(ptr); ptr += 4; + ptr += 8; // skip local clock, not used + m.load = dmsg.at(ptr); ptr += 8; + ptr += 8; // skip flags, unused #ifdef ZT_TRACE - std::string addrs; + std::string addrs; #endif - unsigned int physicalAddressCount = dmsg[ptr++]; - for(unsigned int i=0;i 0) - addrs.push_back(','); - addrs.append(m.zeroTierPhysicalEndpoints.back().toString()); - } + else { + if (addrs.length() > 0) + addrs.push_back(','); + addrs.append(m.zeroTierPhysicalEndpoints.back().toString()); + } #endif - } - m.lastReceivedAliveAnnouncement = RR->node->now(); + } + m.lastReceivedAliveAnnouncement = RR->node->now(); #ifdef ZT_TRACE - TRACE("[%u] I'm alive! send me peers at %s",(unsigned int)fromMemberId,addrs.c_str()); + TRACE("[%u] I'm alive! 
peers may be redirected to: %s",(unsigned int)fromMemberId,addrs.c_str()); #endif - } break; - - case STATE_MESSAGE_HAVE_PEER: { - try { - Identity id; - ptr += id.deserialize(dmsg,ptr); - if (id) { - RR->topology->saveIdentity(id); - - { // Add or update peer affinity entry - _PeerAffinity pa(id.address(),fromMemberId,RR->node->now()); - Mutex::Lock _l2(_peerAffinities_m); - std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),pa)); // O(log(n)) - if ((i != _peerAffinities.end())&&(i->key == pa.key)) { - i->timestamp = pa.timestamp; - } else { - _peerAffinities.push_back(pa); - std::sort(_peerAffinities.begin(),_peerAffinities.end()); // probably a more efficient way to insert but okay for now - } - } + } break; + + case STATE_MESSAGE_HAVE_PEER: { + try { + Identity id; + ptr += id.deserialize(dmsg,ptr); + if (id) { + RR->topology->saveIdentity(id); + + { // Add or update peer affinity entry + _PeerAffinity pa(id.address(),fromMemberId,RR->node->now()); + Mutex::Lock _l2(_peerAffinities_m); + std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),pa)); // O(log(n)) + if ((i != _peerAffinities.end())&&(i->key == pa.key)) { + i->timestamp = pa.timestamp; + } else { + _peerAffinities.push_back(pa); + std::sort(_peerAffinities.begin(),_peerAffinities.end()); // probably a more efficient way to insert but okay for now + } + } - TRACE("[%u] has %s",(unsigned int)fromMemberId,id.address().toString().c_str()); - } - } catch ( ... ) { - // ignore invalid identities - } - } break; - - case STATE_MESSAGE_MULTICAST_LIKE: { - const uint64_t nwid = dmsg.at(ptr); ptr += 8; - const Address address(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH; - const MAC mac(dmsg.field(ptr,6),6); ptr += 6; - const uint32_t adi = dmsg.at(ptr); ptr += 4; - RR->mc->add(RR->node->now(),nwid,MulticastGroup(mac,adi),address); - TRACE("[%u] %s likes %s/%u on %.16llu",(unsigned int)fromMemberId,address.toString().c_str(),mac.toString().c_str(),(unsigned int)adi,nwid); - } break; - - case STATE_MESSAGE_COM: { - CertificateOfMembership com; - ptr += com.deserialize(dmsg,ptr); - if (com) { - TRACE("[%u] COM for %s on %.16llu rev %llu",(unsigned int)fromMemberId,com.issuedTo().toString().c_str(),com.networkId(),com.revision()); - } - } break; - - case STATE_MESSAGE_RELAY: { - const unsigned int numRemotePeerPaths = dmsg[ptr++]; - InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max - for(unsigned int i=0;i(ptr); ptr += 2; - const void *packet = (const void *)dmsg.field(ptr,packetLen); ptr += packetLen; - - if (packetLen >= ZT_PROTO_MIN_FRAGMENT_LENGTH) { // ignore anything too short to contain a dest address - const Address destinationAddress(reinterpret_cast(packet) + 8,ZT_ADDRESS_LENGTH); - TRACE("[%u] relay %u bytes to %s (%u remote paths included)",(unsigned int)fromMemberId,packetLen,destinationAddress.toString().c_str(),numRemotePeerPaths); - - SharedPtr destinationPeer(RR->topology->getPeer(destinationAddress)); - if (destinationPeer) { - if ( - (destinationPeer->send(RR,packet,packetLen,RR->node->now()))&& - (numRemotePeerPaths > 0)&& - (packetLen >= 18)&& - (reinterpret_cast(packet)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR) - ) { - // If remote peer paths were sent with this relayed packet, we do - // RENDEZVOUS. It's handled here for cluster-relayed packets since - // we don't have both Peer records so this is a different path. 
- - const Address remotePeerAddress(reinterpret_cast(packet) + 13,ZT_ADDRESS_LENGTH); - - InetAddress bestDestV4,bestDestV6; - destinationPeer->getBestActiveAddresses(RR->node->now(),bestDestV4,bestDestV6); - InetAddress bestRemoteV4,bestRemoteV6; - for(unsigned int i=0;i(ptr); ptr += 8; + const Address address(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH; + const MAC mac(dmsg.field(ptr,6),6); ptr += 6; + const uint32_t adi = dmsg.at(ptr); ptr += 4; + RR->mc->add(RR->node->now(),nwid,MulticastGroup(mac,adi),address); + TRACE("[%u] %s likes %s/%u on %.16llu",(unsigned int)fromMemberId,address.toString().c_str(),mac.toString().c_str(),(unsigned int)adi,nwid); + } break; + + case STATE_MESSAGE_COM: { + CertificateOfMembership com; + ptr += com.deserialize(dmsg,ptr); + if (com) { + TRACE("[%u] COM for %s on %.16llu rev %llu",(unsigned int)fromMemberId,com.issuedTo().toString().c_str(),com.networkId(),com.revision()); + } + } break; + + case STATE_MESSAGE_RELAY: { + const unsigned int numRemotePeerPaths = dmsg[ptr++]; + InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max + for(unsigned int i=0;i(ptr); ptr += 2; + const void *packet = (const void *)dmsg.field(ptr,packetLen); ptr += packetLen; + + if (packetLen >= ZT_PROTO_MIN_FRAGMENT_LENGTH) { // ignore anything too short to contain a dest address + const Address destinationAddress(reinterpret_cast(packet) + 8,ZT_ADDRESS_LENGTH); + TRACE("[%u] relay %u bytes to %s (%u remote paths included)",(unsigned int)fromMemberId,packetLen,destinationAddress.toString().c_str(),numRemotePeerPaths); + + SharedPtr destinationPeer(RR->topology->getPeer(destinationAddress)); + if (destinationPeer) { + if ( + (destinationPeer->send(RR,packet,packetLen,RR->node->now()))&& + (numRemotePeerPaths > 0)&& + (packetLen >= 18)&& + (reinterpret_cast(packet)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR) + ) { + // If remote peer paths were sent with this relayed packet, we do + // RENDEZVOUS. It's handled here for cluster-relayed packets since + // we don't have both Peer records so this is a different path. 
+ + const Address remotePeerAddress(reinterpret_cast(packet) + 13,ZT_ADDRESS_LENGTH); + + InetAddress bestDestV4,bestDestV6; + destinationPeer->getBestActiveAddresses(RR->node->now(),bestDestV4,bestDestV6); + InetAddress bestRemoteV4,bestRemoteV6; + for(unsigned int i=0;iidentity.address(),Packet::VERB_RENDEZVOUS); - rendezvousForDest.append((uint8_t)0); - remotePeerAddress.appendTo(rendezvousForDest); - - Buffer<2048> rendezvousForOtherEnd; - remotePeerAddress.appendTo(rendezvousForOtherEnd); - rendezvousForOtherEnd.append((uint8_t)Packet::VERB_RENDEZVOUS); - const unsigned int rendezvousForOtherEndPayloadSizePtr = rendezvousForOtherEnd.size(); - rendezvousForOtherEnd.addSize(2); // space for actual packet payload length - rendezvousForOtherEnd.append((uint8_t)0); // flags == 0 - destinationAddress.appendTo(rendezvousForOtherEnd); - - bool haveMatch = false; - if ((bestDestV6)&&(bestRemoteV6)) { - haveMatch = true; - - rendezvousForDest.append((uint16_t)bestRemoteV6.port()); - rendezvousForDest.append((uint8_t)16); - rendezvousForDest.append(bestRemoteV6.rawIpData(),16); - - rendezvousForOtherEnd.append((uint16_t)bestDestV6.port()); - rendezvousForOtherEnd.append((uint8_t)16); - rendezvousForOtherEnd.append(bestDestV6.rawIpData(),16); - rendezvousForOtherEnd.setAt(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 16)); - } else if ((bestDestV4)&&(bestRemoteV4)) { - haveMatch = true; - - rendezvousForDest.append((uint16_t)bestRemoteV4.port()); - rendezvousForDest.append((uint8_t)4); - rendezvousForDest.append(bestRemoteV4.rawIpData(),4); - - rendezvousForOtherEnd.append((uint16_t)bestDestV4.port()); - rendezvousForOtherEnd.append((uint8_t)4); - rendezvousForOtherEnd.append(bestDestV4.rawIpData(),4); - rendezvousForOtherEnd.setAt(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 4)); - } + Packet rendezvousForDest(destinationAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS); + rendezvousForDest.append((uint8_t)0); + remotePeerAddress.appendTo(rendezvousForDest); + + Buffer<2048> rendezvousForOtherEnd; + remotePeerAddress.appendTo(rendezvousForOtherEnd); + rendezvousForOtherEnd.append((uint8_t)Packet::VERB_RENDEZVOUS); + const unsigned int rendezvousForOtherEndPayloadSizePtr = rendezvousForOtherEnd.size(); + rendezvousForOtherEnd.addSize(2); // space for actual packet payload length + rendezvousForOtherEnd.append((uint8_t)0); // flags == 0 + destinationAddress.appendTo(rendezvousForOtherEnd); + + bool haveMatch = false; + if ((bestDestV6)&&(bestRemoteV6)) { + haveMatch = true; + + rendezvousForDest.append((uint16_t)bestRemoteV6.port()); + rendezvousForDest.append((uint8_t)16); + rendezvousForDest.append(bestRemoteV6.rawIpData(),16); + + rendezvousForOtherEnd.append((uint16_t)bestDestV6.port()); + rendezvousForOtherEnd.append((uint8_t)16); + rendezvousForOtherEnd.append(bestDestV6.rawIpData(),16); + rendezvousForOtherEnd.setAt(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 16)); + } else if ((bestDestV4)&&(bestRemoteV4)) { + haveMatch = true; + + rendezvousForDest.append((uint16_t)bestRemoteV4.port()); + rendezvousForDest.append((uint8_t)4); + rendezvousForDest.append(bestRemoteV4.rawIpData(),4); + + rendezvousForOtherEnd.append((uint16_t)bestDestV4.port()); + rendezvousForOtherEnd.append((uint8_t)4); + rendezvousForOtherEnd.append(bestDestV4.rawIpData(),4); + rendezvousForOtherEnd.setAt(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 4)); + } - if (haveMatch) { - _send(fromMemberId,STATE_MESSAGE_PROXY_SEND,rendezvousForOtherEnd.data(),rendezvousForOtherEnd.size()); - 
RR->sw->send(rendezvousForDest,true,0); + if (haveMatch) { + _send(fromMemberId,STATE_MESSAGE_PROXY_SEND,rendezvousForOtherEnd.data(),rendezvousForOtherEnd.size()); + RR->sw->send(rendezvousForDest,true,0); + } } } } - } - } break; - - case STATE_MESSAGE_PROXY_SEND: { - const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); - const Packet::Verb verb = (Packet::Verb)dmsg[ptr++]; - const unsigned int len = dmsg.at(ptr); ptr += 2; - Packet outp(rcpt,RR->identity.address(),verb); - outp.append(dmsg.field(ptr,len),len); - RR->sw->send(outp,true,0); - TRACE("[%u] proxy send %s to %s length %u",(unsigned int)fromMemberId,Packet::verbString(verb),rcpt.toString().c_str(),len); - } break; + } break; + + case STATE_MESSAGE_PROXY_SEND: { + const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); + const Packet::Verb verb = (Packet::Verb)dmsg[ptr++]; + const unsigned int len = dmsg.at(ptr); ptr += 2; + Packet outp(rcpt,RR->identity.address(),verb); + outp.append(dmsg.field(ptr,len),len); + RR->sw->send(outp,true,0); + TRACE("[%u] proxy send %s to %s length %u",(unsigned int)fromMemberId,Packet::verbString(verb),rcpt.toString().c_str(),len); + } break; + } + } catch ( ... ) { + TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype); + // drop invalids } - } catch ( ... ) { - TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype); - // drop invalids - } - ptr = nextPtr; + ptr = nextPtr; + } + } catch ( ... ) { + TRACE("invalid message (outer loop), discarding"); + // drop invalids } - } catch ( ... ) { - TRACE("invalid message (outer loop), discarding"); - // drop invalids } } -- cgit v1.2.3 From 29b966894cb401e6ce396d2c12d94ee90adb4ef7 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 23 Oct 2015 13:03:34 -0700 Subject: (1) Fix bug in geo-ip service that prevented cache lookup, (2) fix problem in SelfAwareness (will need to test ALL versions in the wild with this), and (3) add more TRACE instrumentation to Cluster. --- node/Cluster.cpp | 15 ++++++++++----- node/Cluster.hpp | 2 +- node/SelfAwareness.cpp | 9 +++++---- node/SelfAwareness.hpp | 10 +++++----- service/ClusterGeoIpService.cpp | 10 ++++++++-- service/OneService.cpp | 2 ++ 6 files changed, 31 insertions(+), 17 deletions(-) (limited to 'node') diff --git a/node/Cluster.cpp b/node/Cluster.cpp index c943e62b..4088c967 100644 --- a/node/Cluster.cpp +++ b/node/Cluster.cpp @@ -202,7 +202,7 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len) } m.lastReceivedAliveAnnouncement = RR->node->now(); #ifdef ZT_TRACE - TRACE("[%u] I'm alive! peers may be redirected to: %s",(unsigned int)fromMemberId,addrs.c_str()); + TRACE("[%u] I'm alive! 
peers close to %d,%d,%d can be redirected to: %s",(unsigned int)fromMemberId,m.x,m.y,m.z,addrs.c_str()); #endif } break; @@ -406,10 +406,12 @@ bool Cluster::sendViaCluster(const Address &fromPeerAddress,const Address &toPee _send(canHasPeer,STATE_MESSAGE_RELAY,buf.data(),buf.size()); } + TRACE("sendViaCluster(): relaying %u bytes from %s to %s by way of %u",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str(),(unsigned int)canHasPeer); return true; + } else { + TRACE("sendViaCluster(): unable to relay %u bytes from %s to %s since no cluster members seem to have it!",len,fromPeerAddress.toString().c_str(),toPeerAddress.toString().c_str()); + return false; } - - return false; } void Cluster::replicateHavePeer(const Identity &peerId) @@ -564,11 +566,12 @@ bool Cluster::redirectPeer(const Address &peerAddress,const InetAddress &peerPhy { if (!peerPhysicalAddress) // sanity check return false; + if (_addressToLocationFunction) { // Pick based on location if it can be determined int px = 0,py = 0,pz = 0; if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast(&peerPhysicalAddress),&px,&py,&pz) == 0) { - // No geo-info so no change + TRACE("no geolocation available for %s",peerPhysicalAddress.toIpString().c_str()); return false; } @@ -578,6 +581,7 @@ bool Cluster::redirectPeer(const Address &peerAddress,const InetAddress &peerPhy const double currentDistance = _dist3d(_x,_y,_z,px,py,pz); double bestDistance = (offload ? 2147483648.0 : currentDistance); unsigned int bestMember = _id; + TRACE("%s is at %d,%d,%d -- looking for anyone closer than %d,%d,%d (%fkm)",peerPhysicalAddress.toString().c_str(),px,py,pz,_x,_y,_z,bestDistance); { Mutex::Lock _l(_memberIds_m); for(std::vector::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) { @@ -588,6 +592,7 @@ bool Cluster::redirectPeer(const Address &peerAddress,const InetAddress &peerPhy if ( ((now - m.lastReceivedAliveAnnouncement) < ZT_CLUSTER_TIMEOUT) && ((m.x != 0)||(m.y != 0)||(m.z != 0)) && (m.zeroTierPhysicalEndpoints.size() > 0) ) { double mdist = _dist3d(m.x,m.y,m.z,px,py,pz); if (mdist < bestDistance) { + bestDistance = mdist; bestMember = *mid; best = m.zeroTierPhysicalEndpoints; } @@ -596,7 +601,7 @@ bool Cluster::redirectPeer(const Address &peerAddress,const InetAddress &peerPhy } if (best.size() > 0) { - TRACE("peer %s is at [%d,%d,%d], distance to us is %f, sending to %u instead for better distance %f",peerAddress.toString().c_str(),px,py,pz,currentDistance,bestMember,bestDistance); + TRACE("%s seems closer to %u at %fkm, suggesting redirect...",peerAddress.toString().c_str(),bestMember,bestDistance); /* if (peer->remoteVersionProtocol() >= 5) { // If it's a newer peer send VERB_PUSH_DIRECT_PATHS which is more idiomatic diff --git a/node/Cluster.hpp b/node/Cluster.hpp index 6c9a2917..daa81185 100644 --- a/node/Cluster.hpp +++ b/node/Cluster.hpp @@ -145,7 +145,7 @@ public: STATE_MESSAGE_RELAY = 5, /** - * Request to send a packet to a locally-known peer: + * Request that a cluster member send a packet to a locally-known peer: * <[5] ZeroTier address of recipient> * <[1] packet verb> * <[2] length of packet payload> diff --git a/node/SelfAwareness.cpp b/node/SelfAwareness.cpp index 7329322a..1b70f17c 100644 --- a/node/SelfAwareness.cpp +++ b/node/SelfAwareness.cpp @@ -94,7 +94,7 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi Mutex::Lock _l(_phy_m); - PhySurfaceEntry &entry = _phy[PhySurfaceKey(reporter,scope)]; + PhySurfaceEntry &entry = 
_phy[PhySurfaceKey(reporter,reporterPhysicalAddress,scope)]; if ((now - entry.ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT) { entry.mySurface = myPhysicalAddress; @@ -105,14 +105,15 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi entry.ts = now; TRACE("learned physical address %s for scope %u as seen from %s(%s) (replaced %s, resetting all in scope)",myPhysicalAddress.toString().c_str(),(unsigned int)scope,reporter.toString().c_str(),reporterPhysicalAddress.toString().c_str(),entry.mySurface.toString().c_str()); - // Erase all entries (other than this one) for this scope to prevent thrashing - // Note: we should probably not use 'entry' after this + // Erase all entries in this scope that were not reported by this remote address to prevent 'thrashing' + // due to multiple reports of endpoint change. + // Don't use 'entry' after this since hash table gets modified. { Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy); PhySurfaceKey *k = (PhySurfaceKey *)0; PhySurfaceEntry *e = (PhySurfaceEntry *)0; while (i.next(k,e)) { - if ((k->reporter != reporter)&&(k->scope == scope)) + if ((k->reporterPhysicalAddress != reporterPhysicalAddress)&&(k->scope == scope)) _phy.erase(*k); } } diff --git a/node/SelfAwareness.hpp b/node/SelfAwareness.hpp index 3133553e..400b05e6 100644 --- a/node/SelfAwareness.hpp +++ b/node/SelfAwareness.hpp @@ -69,14 +69,14 @@ private: struct PhySurfaceKey { Address reporter; + InetAddress reporterPhysicalAddress; InetAddress::IpScope scope; - inline unsigned long hashCode() const throw() { return ((unsigned long)reporter.toInt() + (unsigned long)scope); } - PhySurfaceKey() : reporter(),scope(InetAddress::IP_SCOPE_NONE) {} - PhySurfaceKey(const Address &r,InetAddress::IpScope s) : reporter(r),scope(s) {} - inline bool operator<(const PhySurfaceKey &k) const throw() { return ((reporter < k.reporter) ? true : ((reporter == k.reporter) ? 
((int)scope < (int)k.scope) : false)); } - inline bool operator==(const PhySurfaceKey &k) const throw() { return ((reporter == k.reporter)&&(scope == k.scope)); } + PhySurfaceKey(const Address &r,const InetAddress &ra,InetAddress::IpScope s) : reporter(r),reporterPhysicalAddress(ra),scope(s) {} + + inline unsigned long hashCode() const throw() { return ((unsigned long)reporter.toInt() + (unsigned long)scope); } + inline bool operator==(const PhySurfaceKey &k) const throw() { return ((reporter == k.reporter)&&(reporterPhysicalAddress == k.reporterPhysicalAddress)&&(scope == k.scope)); } }; struct PhySurfaceEntry { diff --git a/service/ClusterGeoIpService.cpp b/service/ClusterGeoIpService.cpp index b47a9b2a..9baa7506 100644 --- a/service/ClusterGeoIpService.cpp +++ b/service/ClusterGeoIpService.cpp @@ -72,11 +72,14 @@ ClusterGeoIpService::~ClusterGeoIpService() bool ClusterGeoIpService::locate(const InetAddress &ip,int &x,int &y,int &z) { + InetAddress ipNoPort(ip); + ipNoPort.setPort(0); // we index cache by IP only const uint64_t now = OSUtils::now(); + bool r = false; { Mutex::Lock _l(_cache_m); - std::map< InetAddress,_CE >::iterator c(_cache.find(ip)); + std::map< InetAddress,_CE >::iterator c(_cache.find(ipNoPort)); if (c != _cache.end()) { x = c->second.x; y = c->second.y; @@ -90,8 +93,9 @@ bool ClusterGeoIpService::locate(const InetAddress &ip,int &x,int &y,int &z) { Mutex::Lock _l(_sOutputLock); if (_sOutputFd >= 0) { - std::string ips(ip.toIpString()); + std::string ips(ipNoPort.toIpString()); ips.push_back('\n'); + //fprintf(stderr,"ClusterGeoIpService: << %s",ips.c_str()); ::write(_sOutputFd,ips.data(),ips.length()); } } @@ -153,6 +157,7 @@ void ClusterGeoIpService::threadMain() if ((buf[i] == '\n')||(buf[i] == '\r')) { linebuf[lineptr] = (char)0; if (lineptr > 0) { + //fprintf(stderr,"ClusterGeoIpService: >> %s\n",linebuf); try { std::vector result(Utils::split(linebuf,",","","")); if ((result.size() >= 7)&&(result[1] == "1")) { @@ -163,6 +168,7 @@ void ClusterGeoIpService::threadMain() ce.x = (int)::strtol(result[4].c_str(),(char **)0,10); ce.y = (int)::strtol(result[5].c_str(),(char **)0,10); ce.z = (int)::strtol(result[6].c_str(),(char **)0,10); + //fprintf(stderr,"ClusterGeoIpService: %s is at %d,%d,%d\n",rip.toIpString().c_str(),ce.x,ce.y,ce.z); { Mutex::Lock _l2(_cache_m); _cache[rip] = ce; diff --git a/service/OneService.cpp b/service/OneService.cpp index a64d680b..729812ed 100644 --- a/service/OneService.cpp +++ b/service/OneService.cpp @@ -1473,6 +1473,7 @@ static int SnodeWirePacketSendFunction(ZT_Node *node,void *uptr,const struct soc static void SnodeVirtualNetworkFrameFunction(ZT_Node *node,void *uptr,uint64_t nwid,uint64_t sourceMac,uint64_t destMac,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len) { reinterpret_cast(uptr)->nodeVirtualNetworkFrameFunction(nwid,sourceMac,destMac,etherType,vlanId,data,len); } +#ifdef ZT_ENABLE_CLUSTER static void SclusterSendFunction(void *uptr,unsigned int toMemberId,const void *data,unsigned int len) { OneServiceImpl *const impl = reinterpret_cast(uptr); @@ -1485,6 +1486,7 @@ static int SclusterGeoIpFunction(void *uptr,const struct sockaddr_storage *addr, OneServiceImpl *const impl = reinterpret_cast(uptr); return (int)(impl->_clusterGeoIpService->locate(*(reinterpret_cast(addr)),*x,*y,*z)); } +#endif static void StapFrameHandler(void *uptr,uint64_t nwid,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len) { 
reinterpret_cast(uptr)->tapFrameHandler(nwid,from,to,etherType,vlanId,data,len); } -- cgit v1.2.3 From e6a63f5547f928f4908b7436210340c9db752284 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 23 Oct 2015 13:57:02 -0700 Subject: Fix bug in setWorld that might have caused a peer entry for myself (which would never be used) --- node/Topology.cpp | 20 +++++++++++--------- node/Topology.hpp | 5 +++++ 2 files changed, 16 insertions(+), 9 deletions(-) (limited to 'node') diff --git a/node/Topology.cpp b/node/Topology.cpp index 88c8856c..adfcd1f9 100644 --- a/node/Topology.cpp +++ b/node/Topology.cpp @@ -358,16 +358,18 @@ void Topology::_setWorld(const World &newWorld) _rootAddresses.clear(); _rootPeers.clear(); for(std::vector::const_iterator r(_world.roots().begin());r!=_world.roots().end();++r) { - if (r->identity == RR->identity) - _amRoot = true; _rootAddresses.push_back(r->identity.address()); - SharedPtr *rp = _peers.get(r->identity.address()); - if (rp) { - _rootPeers.push_back(*rp); - } else if (r->identity.address() != RR->identity.address()) { - SharedPtr newrp(new Peer(RR->identity,r->identity)); - _peers.set(r->identity.address(),newrp); - _rootPeers.push_back(newrp); + if (r->identity.address() == RR->identity.address()) { + _amRoot = true; + } else { + SharedPtr *rp = _peers.get(r->identity.address()); + if (rp) { + _rootPeers.push_back(*rp); + } else { + SharedPtr newrp(new Peer(RR->identity,r->identity)); + _peers.set(r->identity.address(),newrp); + _rootPeers.push_back(newrp); + } } } } diff --git a/node/Topology.hpp b/node/Topology.hpp index 48e264a8..324ec000 100644 --- a/node/Topology.hpp +++ b/node/Topology.hpp @@ -225,6 +225,11 @@ public: return _peers.entries(); } + /** + * @return True if I am a root server in the current World + */ + inline bool amRoot() const throw() { return _amRoot; } + private: Identity _getIdentity(const Address &zta); void _setWorld(const World &newWorld); -- cgit v1.2.3 From e9648a6cdf5bd1d9f9e08c7cfef50265114c09d3 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 23 Oct 2015 14:05:12 -0700 Subject: Clarify logic in pinging, and prevent roots from pinging "down." --- node/Node.cpp | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'node') diff --git a/node/Node.cpp b/node/Node.cpp index 6eea3d3d..c54d783f 100644 --- a/node/Node.cpp +++ b/node/Node.cpp @@ -211,8 +211,15 @@ public: } } - // If this is a network preferred relay, also always ping and if a stable endpoint is specified use that if not alive if (!upstream) { + // If I am a root server, only ping other root servers -- roots don't ping "down" + // since that would just be a waste of bandwidth and could potentially cause route + // flapping in Cluster mode. + if (RR->topology->amRoot()) + return; + + // Check for network preferred relays, also considered 'upstream' and thus always + // pinged to keep links up. If they have stable addresses we will try them there. for(std::vector< std::pair >::const_iterator r(_relays.begin());r!=_relays.end();++r) { if (r->first == p->address()) { if (r->second.ss_family == AF_INET) @@ -229,18 +236,22 @@ public: // "Upstream" devices are roots and relays and get special treatment -- they stay alive // forever and we try to keep (if available) both IPv4 and IPv6 channels open to them. 
bool needToContactIndirect = true; - if (!p->doPingAndKeepalive(RR,_now,AF_INET)) { + if (p->doPingAndKeepalive(RR,_now,AF_INET)) { + needToContactIndirect = false; + } else { if (stableEndpoint4) { needToContactIndirect = false; p->attemptToContactAt(RR,InetAddress(),stableEndpoint4,_now); } - } else needToContactIndirect = false; - if (!p->doPingAndKeepalive(RR,_now,AF_INET6)) { + } + if (p->doPingAndKeepalive(RR,_now,AF_INET6)) { + needToContactIndirect = false; + } else { if (stableEndpoint6) { needToContactIndirect = false; p->attemptToContactAt(RR,InetAddress(),stableEndpoint6,_now); } - } else needToContactIndirect = false; + } if (needToContactIndirect) { // If this is an upstream and we have no stable endpoint for either IPv4 or IPv6, -- cgit v1.2.3 From 35676217e8fea27d271bbc3b976165e1f8436da1 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 23 Oct 2015 14:50:07 -0700 Subject: Refactor multicast group announcement to work directly or indirectly. --- node/Cluster.cpp | 6 ++- node/Network.cpp | 120 ++++++++++++++++++++++++++++++------------------------- node/Network.hpp | 5 ++- 3 files changed, 73 insertions(+), 58 deletions(-) (limited to 'node') diff --git a/node/Cluster.cpp b/node/Cluster.cpp index 4088c967..900804b7 100644 --- a/node/Cluster.cpp +++ b/node/Cluster.cpp @@ -449,11 +449,12 @@ void Cluster::replicateHavePeer(const Identity &peerId) void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,const MulticastGroup &group) { - Buffer<4096> buf; + Buffer<2048> buf; buf.append((uint64_t)nwid); peerAddress.appendTo(buf); group.mac().appendTo(buf); buf.append((uint32_t)group.adi()); + TRACE("replicating %s MULTICAST_LIKE %.16llx/%s/%u to all members",peerAddress.toString().c_str(),nwid,group.mac().toString().c_str(),(unsigned int)group.adi()); { Mutex::Lock _l(_memberIds_m); for(std::vector::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) { @@ -465,8 +466,9 @@ void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,co void Cluster::replicateCertificateOfNetworkMembership(const CertificateOfMembership &com) { - Buffer<4096> buf; + Buffer<2048> buf; com.serialize(buf); + TRACE("replicating %s COM for %.16llx to all members",com.issuedTo().toString().c_str(),com.networkId()); { Mutex::Lock _l(_memberIds_m); for(std::vector::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) { diff --git a/node/Network.cpp b/node/Network.cpp index 46f93241..cd30e386 100644 --- a/node/Network.cpp +++ b/node/Network.cpp @@ -144,7 +144,15 @@ void Network::multicastUnsubscribe(const MulticastGroup &mg) bool Network::tryAnnounceMulticastGroupsTo(const SharedPtr &peer) { Mutex::Lock _l(_lock); - return _tryAnnounceMulticastGroupsTo(RR->topology->rootAddresses(),_allMulticastGroups(),peer,RR->node->now()); + if ( + (_isAllowed(peer)) || + (peer->address() == this->controller()) || + (RR->topology->isRoot(peer->identity())) + ) { + _announceMulticastGroupsTo(peer->address(),_allMulticastGroups()); + return true; + } + return false; } bool Network::applyConfiguration(const SharedPtr &conf) @@ -400,77 +408,80 @@ bool Network::_isAllowed(const SharedPtr &peer) const return false; // default position on any failure } -bool Network::_tryAnnounceMulticastGroupsTo(const std::vector
&alwaysAddresses,const std::vector &allMulticastGroups,const SharedPtr &peer,uint64_t now) const -{ - // assumes _lock is locked - if ( - (_isAllowed(peer)) || - (peer->address() == this->controller()) || - (std::find(alwaysAddresses.begin(),alwaysAddresses.end(),peer->address()) != alwaysAddresses.end()) - ) { - - if ((_config)&&(_config->com())&&(!_config->isPublic())&&(peer->needsOurNetworkMembershipCertificate(_id,now,true))) { - Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE); - _config->com().serialize(outp); - outp.armor(peer->key(),true); - peer->send(RR,outp.data(),outp.size(),now); - } - - { - Packet outp(peer->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE); - - for(std::vector::const_iterator mg(allMulticastGroups.begin());mg!=allMulticastGroups.end();++mg) { - if ((outp.size() + 18) >= ZT_UDP_DEFAULT_PAYLOAD_MTU) { - outp.armor(peer->key(),true); - peer->send(RR,outp.data(),outp.size(),now); - outp.reset(peer->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE); - } - - // network ID, MAC, ADI - outp.append((uint64_t)_id); - mg->mac().appendTo(outp); - outp.append((uint32_t)mg->adi()); - } - - if (outp.size() > ZT_PROTO_MIN_PACKET_LENGTH) { - outp.armor(peer->key(),true); - peer->send(RR,outp.data(),outp.size(),now); - } - } - - return true; - } - return false; -} - -class _AnnounceMulticastGroupsToAll +class _GetPeersThatNeedMulticastAnnouncement { public: - _AnnounceMulticastGroupsToAll(const RuntimeEnvironment *renv,Network *nw) : + _GetPeersThatNeedMulticastAnnouncement(const RuntimeEnvironment *renv,Network *nw) : _now(renv->node->now()), + _controller(nw->controller()), _network(nw), - _rootAddresses(renv->topology->rootAddresses()), - _allMulticastGroups(nw->_allMulticastGroups()) + _rootAddresses(renv->topology->rootAddresses()) {} - - inline void operator()(Topology &t,const SharedPtr &p) { _network->_tryAnnounceMulticastGroupsTo(_rootAddresses,_allMulticastGroups,p,_now); } - + inline void operator()(Topology &t,const SharedPtr &p) + { + if ( + (_network->_isAllowed(p)) || + (p->address() == _controller) || + (std::find(_rootAddresses.begin(),_rootAddresses.end(),p->address()) != _rootAddresses.end()) + ) { + peers.push_back(p->address()); + } + } + std::vector
peers; private: uint64_t _now; + Address _controller; Network *_network; std::vector
_rootAddresses; - std::vector _allMulticastGroups; }; void Network::_announceMulticastGroups() { // Assumes _lock is locked - _AnnounceMulticastGroupsToAll afunc(RR,this); - RR->topology->eachPeer<_AnnounceMulticastGroupsToAll &>(afunc); + + _GetPeersThatNeedMulticastAnnouncement gpfunc(RR,this); + RR->topology->eachPeer<_GetPeersThatNeedMulticastAnnouncement &>(gpfunc); + + std::vector allMulticastGroups(_allMulticastGroups()); + for(std::vector
::const_iterator pa(gpfunc.peers.begin());pa!=gpfunc.peers.end();++pa) + _announceMulticastGroupsTo(*pa,allMulticastGroups); +} + +void Network::_announceMulticastGroupsTo(const Address &peerAddress,const std::vector &allMulticastGroups) const +{ + // Assumes _lock is locked + + // We push COMs ahead of MULTICAST_LIKE since they're used for access control -- a COM is a public + // credential so "over-sharing" isn't really an issue (and we only do so with roots). + if ((_config)&&(_config->com())&&(!_config->isPublic())) { + Packet outp(peerAddress,RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE); + _config->com().serialize(outp); + RR->sw->send(outp,true,0); + } + + { + Packet outp(peerAddress,RR->identity.address(),Packet::VERB_MULTICAST_LIKE); + + for(std::vector::const_iterator mg(allMulticastGroups.begin());mg!=allMulticastGroups.end();++mg) { + if ((outp.size() + 18) >= ZT_UDP_DEFAULT_PAYLOAD_MTU) { + RR->sw->send(outp,true,0); + outp.reset(peerAddress,RR->identity.address(),Packet::VERB_MULTICAST_LIKE); + } + + // network ID, MAC, ADI + outp.append((uint64_t)_id); + mg->mac().appendTo(outp); + outp.append((uint32_t)mg->adi()); + } + + if (outp.size() > ZT_PROTO_MIN_PACKET_LENGTH) + RR->sw->send(outp,true,0); + } } std::vector Network::_allMulticastGroups() const { // Assumes _lock is locked + std::vector mgs; mgs.reserve(_myMulticastGroups.size() + _multicastGroupsBehindMe.size() + 1); mgs.insert(mgs.end(),_myMulticastGroups.begin(),_myMulticastGroups.end()); @@ -479,6 +490,7 @@ std::vector Network::_allMulticastGroups() const mgs.push_back(Network::BROADCAST); std::sort(mgs.begin(),mgs.end()); mgs.erase(std::unique(mgs.begin(),mgs.end()),mgs.end()); + return mgs; } diff --git a/node/Network.hpp b/node/Network.hpp index f7939323..0effa8e2 100644 --- a/node/Network.hpp +++ b/node/Network.hpp @@ -56,7 +56,7 @@ namespace ZeroTier { class RuntimeEnvironment; class Peer; -class _AnnounceMulticastGroupsToAll; // internal function object in Network.cpp +class _GetPeersThatNeedMulticastAnnouncement; /** * A virtual LAN @@ -64,7 +64,7 @@ class _AnnounceMulticastGroupsToAll; // internal function object in Network.cpp class Network : NonCopyable { friend class SharedPtr; - friend class _AnnounceMulticastGroupsToAll; + friend class _GetPeersThatNeedMulticastAnnouncement; // internal function object public: /** @@ -344,6 +344,7 @@ private: bool _isAllowed(const SharedPtr &peer) const; bool _tryAnnounceMulticastGroupsTo(const std::vector
&rootAddresses,const std::vector &allMulticastGroups,const SharedPtr &peer,uint64_t now) const; void _announceMulticastGroups(); + void _announceMulticastGroupsTo(const Address &peerAddress,const std::vector &allMulticastGroups) const; std::vector _allMulticastGroups() const; const RuntimeEnvironment *RR; -- cgit v1.2.3 From d2b1dfe42465680e10e7971ba99279e5cacad207 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Fri, 23 Oct 2015 15:51:50 -0700 Subject: Fully specify new network in alice-test, this will (with different identities) eventually become the World. --- node/Topology.cpp | 5 ----- world/alice-test/mkworld.cpp | 32 +++++++++++++++++++++++++++----- 2 files changed, 27 insertions(+), 10 deletions(-) (limited to 'node') diff --git a/node/Topology.cpp b/node/Topology.cpp index adfcd1f9..624c4987 100644 --- a/node/Topology.cpp +++ b/node/Topology.cpp @@ -35,14 +35,9 @@ namespace ZeroTier { -// Default World #define ZT_DEFAULT_WORLD_LENGTH 494 static const unsigned char ZT_DEFAULT_WORLD[ZT_DEFAULT_WORLD_LENGTH] = {0x01,0x00,0x00,0x00,0x00,0x08,0xea,0xc9,0x0a,0x00,0x00,0x01,0x4f,0xdf,0xbf,0xfc,0xbb,0x6c,0x7e,0x15,0x67,0x85,0x1b,0xb4,0x65,0x04,0x01,0xaf,0x56,0xbf,0xe7,0x63,0x9d,0x77,0xef,0xa4,0x1e,0x61,0x53,0x88,0xcb,0x8d,0x78,0xe5,0x47,0x38,0x98,0x5a,0x6c,0x8a,0xdd,0xe6,0x9c,0x65,0xdf,0x1a,0x80,0x63,0xce,0x2e,0x4d,0x48,0x24,0x3d,0x68,0x87,0x96,0x13,0x89,0xba,0x25,0x6f,0xc9,0xb0,0x9f,0x20,0xc5,0x4c,0x51,0x7b,0x30,0xb7,0x5f,0xba,0xca,0xa4,0xc5,0x48,0xa3,0x15,0xab,0x2f,0x1d,0x64,0xe8,0x04,0x42,0xb3,0x1c,0x51,0x8b,0x2a,0x04,0x01,0xf8,0xe1,0x81,0xaf,0x60,0x2f,0x70,0x3e,0xcd,0x0b,0x21,0x38,0x19,0x62,0x02,0xbd,0x0e,0x33,0x1d,0x0a,0x7b,0xf1,0xec,0xad,0xef,0x54,0xb3,0x7b,0x17,0x84,0xaa,0xda,0x0a,0x85,0x5d,0x0b,0x1c,0x05,0x83,0xb9,0x0e,0x3e,0xe3,0xb4,0xd1,0x8b,0x5b,0x64,0xf7,0xcf,0xe1,0xff,0x5d,0xc2,0x2a,0xcf,0x60,0x7b,0x09,0xb4,0xa3,0x86,0x3c,0x5a,0x7e,0x31,0xa0,0xc7,0xb4,0x86,0xe3,0x41,0x33,0x04,0x7e,0x19,0x87,0x6a,0xba,0x00,0x2a,0x6e,0x2b,0x23,0x18,0x93,0x0f,0x60,0xeb,0x09,0x7f,0x70,0xd0,0xf4,0xb0,0x28,0xb2,0xcd,0x6d,0x3d,0x0c,0x63,0xc0,0x14,0xb9,0x03,0x9f,0xf3,0x53,0x90,0xe4,0x11,0x81,0xf2,0x16,0xfb,0x2e,0x6f,0xa8,0xd9,0x5c,0x1e,0xe9,0x66,0x71,0x56,0x41,0x19,0x05,0xc3,0xdc,0xcf,0xea,0x78,0xd8,0xc6,0xdf,0xaf,0xba,0x68,0x81,0x70,0xb3,0xfa,0x00,0x01,0x04,0xc6,0xc7,0x61,0xdc,0x27,0x09,0x88,0x41,0x40,0x8a,0x2e,0x00,0xbb,0x1d,0x31,0xf2,0xc3,0x23,0xe2,0x64,0xe9,0xe6,0x41,0x72,0xc1,0xa7,0x4f,0x77,0x89,0x95,0x55,0xed,0x10,0x75,0x1c,0xd5,0x6e,0x86,0x40,0x5c,0xde,0x11,0x8d,0x02,0xdf,0xfe,0x55,0x5d,0x46,0x2c,0xcf,0x6a,0x85,0xb5,0x63,0x1c,0x12,0x35,0x0c,0x8d,0x5d,0xc4,0x09,0xba,0x10,0xb9,0x02,0x5d,0x0f,0x44,0x5c,0xf4,0x49,0xd9,0x2b,0x1c,0x00,0x01,0x04,0x6b,0xbf,0x2e,0xd2,0x27,0x09,0x8a,0xcf,0x05,0x9f,0xe3,0x00,0x48,0x2f,0x6e,0xe5,0xdf,0xe9,0x02,0x31,0x9b,0x41,0x9d,0xe5,0xbd,0xc7,0x65,0x20,0x9c,0x0e,0xcd,0xa3,0x8c,0x4d,0x6e,0x4f,0xcf,0x0d,0x33,0x65,0x83,0x98,0xb4,0x52,0x7d,0xcd,0x22,0xf9,0x31,0x12,0xfb,0x9b,0xef,0xd0,0x2f,0xd7,0x8b,0xf7,0x26,0x1b,0x33,0x3f,0xc1,0x05,0xd1,0x92,0xa6,0x23,0xca,0x9e,0x50,0xfc,0x60,0xb3,0x74,0xa5,0x00,0x01,0x04,0xa2,0xf3,0x4d,0x6f,0x27,0x09,0x9d,0x21,0x90,0x39,0xf3,0x00,0x01,0xf0,0x92,0x2a,0x98,0xe3,0xb3,0x4e,0xbc,0xbf,0xf3,0x33,0x26,0x9d,0xc2,0x65,0xd7,0xa0,0x20,0xaa,0xb6,0x9d,0x72,0xbe,0x4d,0x4a,0xcc,0x9c,0x8c,0x92,0x94,0x78,0x57,0x71,0x25,0x6c,0xd1,0xd9,0x42,0xa9,0x0d,0x1b,0xd1,0xd2,0xdc,0xa3,0xea,0x84,0xef,0x7d,0x85,0xaf,0xe6,0x61,0x1f,0xb4,0x3f,0xf0,0xb7,0x41,0x26,0xd9,0x0a,0x6e,0x00,0x01,0x04,0x80,0xc7,0xc5,0xd9,0x27,0x09}; -// ALICE-TEST -//#define 
ZT_DEFAULT_WORLD_LENGTH 257 -//static const unsigned char ZT_DEFAULT_WORLD[ZT_DEFAULT_WORLD_LENGTH] = {0x01,0x00,0x00,0x00,0x00,0x08,0xea,0xc9,0x0a,0x00,0x00,0x01,0x50,0x81,0x2a,0x54,0x6f,0x72,0xb0,0x3b,0xbe,0x73,0xda,0xbd,0xfb,0x85,0x77,0x9f,0xc9,0x2e,0x17,0xc8,0x11,0x6e,0xda,0x61,0x80,0xd1,0x41,0xcb,0x7c,0x2d,0x2b,0xa4,0x34,0x75,0x19,0x64,0x20,0x80,0x0a,0x22,0x32,0xf2,0x01,0x6c,0xfe,0x79,0xa6,0x7d,0xec,0x10,0x7e,0x03,0xf3,0xa2,0xa0,0x19,0xc8,0x7c,0xfd,0x6c,0x56,0x52,0xa8,0xfb,0xdc,0xfb,0x93,0x81,0x3e,0x63,0x8b,0xb3,0xb6,0x72,0x45,0xa9,0x81,0x81,0xcc,0xea,0x7f,0x2f,0xd9,0x59,0xce,0xc8,0x51,0x12,0xc3,0xe3,0x44,0x76,0x54,0xed,0xe7,0x8d,0x34,0x0b,0x5d,0x10,0x3d,0x52,0x04,0x9b,0xe1,0xb2,0x36,0x51,0x75,0x14,0x30,0x53,0xe8,0x4b,0xe4,0x91,0x9a,0xed,0x99,0x56,0xa3,0x8d,0x5e,0x14,0xff,0x66,0xd8,0x4f,0xf7,0x3c,0x23,0xbe,0x02,0xbb,0x1e,0xb6,0x7e,0x07,0xfa,0x7c,0x7e,0x50,0xe8,0x40,0xf9,0x37,0x70,0x1a,0x75,0xcf,0x19,0xe6,0x83,0xe1,0x5c,0x20,0x1d,0x1e,0x5b,0xe5,0x6a,0xbe,0xe7,0xab,0xec,0x01,0xd6,0xdd,0xca,0x6a,0xb5,0x00,0x4e,0x76,0x12,0x07,0xd8,0xb4,0x20,0x0b,0xe4,0x4f,0x47,0x8e,0x3d,0xa1,0x48,0xc1,0x60,0x99,0x11,0x0e,0xe7,0x1b,0x64,0x58,0x6d,0xda,0x11,0x8e,0x40,0x22,0xab,0x63,0x68,0x2c,0xe1,0x37,0xda,0x8b,0xa8,0x17,0xfc,0x7f,0x73,0xaa,0x31,0x63,0xf2,0xe3,0x33,0x93,0x3e,0x29,0x94,0xc4,0x6b,0x4f,0x41,0x19,0x30,0x7b,0xe8,0x85,0x5a,0x72,0x00,0x01,0x04,0xa9,0x39,0x8f,0x68,0x27,0x09}; - Topology::Topology(const RuntimeEnvironment *renv) : RR(renv), _amRoot(false) diff --git a/world/alice-test/mkworld.cpp b/world/alice-test/mkworld.cpp index e680b97e..8940db2c 100644 --- a/world/alice-test/mkworld.cpp +++ b/world/alice-test/mkworld.cpp @@ -133,13 +133,35 @@ int main(int argc,char **argv) std::sort(roots.back().stableEndpoints.begin(),roots.back().stableEndpoints.end()); #endif - // ALICE TEST + // NOTE -- these are temporary test identities -- this is not yet the 'real' network. 
+ // (but these are the real nodes) + + // Alice -- global geo-clustered root #1 roots.push_back(World::Root()); roots.back().identity = Identity("d6ddca6ab5:0:4e761207d8b4200be44f478e3da148c16099110ee71b64586dda118e4022ab63682ce137da8ba817fc7f73aa3163f2e333933e2994c46b4f4119307be8855a72"); - roots.back().stableEndpoints.push_back(InetAddress("169.57.143.104/9993")); - std::sort(roots.back().stableEndpoints.begin(),roots.back().stableEndpoints.end()); - - std::sort(roots.begin(),roots.end()); + roots.back().stableEndpoints.push_back(InetAddress("188.166.94.177/9993")); // Amsterdam IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2a03:b0c0:2:d0::7d:1/9993")); // Amsterdam IPv6 + roots.back().stableEndpoints.push_back(InetAddress("159.203.97.171/9993")); // New York IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2604:a880:800:a1::54:6001/9993 ")); // New York IPv6 + roots.back().stableEndpoints.push_back(InetAddress("169.57.143.104/9993")); // Sao Paolo IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2607:f0d0:1d01:57::2/9993")); // Sao Paolo IPv6 + roots.back().stableEndpoints.push_back(InetAddress("104.238.182.83/9993")); // San Francisco IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2001:19f0:ac00:809:5400:ff:fe15:f3f4/9993")); // San Francisco IPv6 + roots.back().stableEndpoints.push_back(InetAddress("128.199.182.9/9993")); // Singapore IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2400:6180:0:d0::1b:1001/9993")); // Singapore IPv6 + + // Bob -- global geo-clustered root #2 + roots.back().identity = Identity("16ebbd6c5d:0:47d39bca9d0a5cf70148e39f6c45199e17e0e32e4e46cac01ae5bcb21224137b097f40bdd982a921c3aabdcb9ada8b4f2bb0593753bfdb21cf12eac28c8d9042"); + roots.back().stableEndpoints.push_back(InetAddress("45.33.4.67/9993")); // Dallas IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2600:3c00::f03c:91ff:fe67:b704/9993")); // Dallas IPv6 + roots.back().stableEndpoints.push_back(InetAddress("139.162.157.243/9993")); // Frankfurt (Germany) IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2a01:7e01::f03c:91ff:fe67:3ffd/9993")); // Frankfurt (Germany) IPv6 + roots.back().stableEndpoints.push_back(InetAddress("45.32.246.179/9993")); // Sydney IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2001:19f0:5800:8bf8:5400:ff:fe15:b39a/9993")); // Sydney IPv6 + roots.back().stableEndpoints.push_back(InetAddress("45.32.248.87/9993")); // Tokyo IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2001:19f0:7000:9bc9:5400:00ff:fe15:c4f5/9993")); // Tokyo IPv6 + roots.back().stableEndpoints.push_back(InetAddress("159.203.2.154/9993")); // Toronto IPv4 + roots.back().stableEndpoints.push_back(InetAddress("2604:a880:cad:d0::26:7001/9993")); // Toronto IPv6 const uint64_t id = ZT_WORLD_ID_EARTH; const uint64_t ts = OSUtils::now(); -- cgit v1.2.3 From 3ce5ad9e2c1a31e1a3cc34f0ac8d950e4f0cf7b4 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Mon, 26 Oct 2015 10:42:30 -0700 Subject: For forward compatibility, add minimal parse for some future physical address types. 
--- node/InetAddress.hpp | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'node') diff --git a/node/InetAddress.hpp b/node/InetAddress.hpp index 50db272a..ecafcf51 100644 --- a/node/InetAddress.hpp +++ b/node/InetAddress.hpp @@ -409,6 +409,16 @@ struct InetAddress : public sockaddr_storage switch(b[p++]) { case 0: return 1; + case 0x01: + // TODO: Ethernet address (but accept for forward compatibility) + return 7; + case 0x02: + // TODO: Bluetooth address (but accept for forward compatibility) + return 7; + case 0x03: + // TODO: Other address types (but accept for forward compatibility) + // These could be extended/optional things like AF_UNIX, LTE Direct, shared memory, etc. + return (unsigned int)(b.template at(p) + 3); // other addresses begin with 16-bit non-inclusive length case 0x04: ss_family = AF_INET; memcpy(&(reinterpret_cast(this)->sin_addr.s_addr),b.field(p,4),4); p += 4; -- cgit v1.2.3 From 865acfa40f65626f41bbd718b645f57c7d6db9b2 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Mon, 26 Oct 2015 12:41:08 -0700 Subject: Cluster status plumbing. --- include/ZeroTierOne.h | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++ node/Cluster.cpp | 56 +++++++++++++++++++++++++++++++++++++ node/Cluster.hpp | 7 +++++ node/Topology.cpp | 15 ++++++++++ node/Topology.hpp | 5 ++++ 5 files changed, 160 insertions(+) (limited to 'node') diff --git a/include/ZeroTierOne.h b/include/ZeroTierOne.h index 4de4a765..86bffcdf 100644 --- a/include/ZeroTierOne.h +++ b/include/ZeroTierOne.h @@ -133,6 +133,11 @@ extern "C" { */ #define ZT_CLUSTER_MAX_MEMBERS 128 +/** + * Maximum number of physical ZeroTier addresses a cluster member can report + */ +#define ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES 16 + /** * Maximum allowed cluster message length in bytes */ @@ -879,6 +884,78 @@ typedef struct { unsigned int nextHopCount; } ZT_CircuitTestReport; +/** + * A cluster member's status + */ +typedef struct { + /** + * This cluster member's ID (from 0 to 1-ZT_CLUSTER_MAX_MEMBERS) + */ + unsigned int id; + + /** + * Number of milliseconds since last 'alive' heartbeat message received via cluster backplane address + */ + unsigned int msSinceLastHeartbeat; + + /** + * Non-zero if cluster member is alive + */ + int alive; + + /** + * X, Y, and Z coordinates of this member (if specified, otherwise zero) + * + * What these mean depends on the location scheme being used for + * location-aware clustering. At present this is GeoIP and these + * will be the X, Y, and Z coordinates of the location on a spherical + * approximation of Earth where Earth's core is the origin (in km). + * They don't have to be perfect and need only be comparable with others + * to find shortest path via the standard vector distance formula. 
+ */ + int x,y,z; + + /** + * Cluster member's last reported load + */ + uint64_t load; + + /** + * Number of peers this cluster member "has" + */ + uint64_t peers; + + /** + * Physical ZeroTier endpoints for this member (where peers are sent when directed here) + */ + struct sockaddr_storage zeroTierPhysicalEndpoints[ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES]; + + /** + * Number of physical ZeroTier endpoints this member is announcing + */ + unsigned int numZeroTierPhysicalEndpoints; +} ZT_ClusterMemberStatus; + +/** + * ZeroTier cluster status + */ +typedef struct { + /** + * My cluster member ID (a record for 'self' is included in member[]) + */ + unsigned int myId; + + /** + * Number of cluster members + */ + unsigned int clusterSize; + + /** + * Cluster member statuses + */ + ZT_ClusterMemberStatus member[ZT_CLUSTER_MAX_MEMBERS]; +} ZT_ClusterStatus; + /** * An instance of a ZeroTier One node (opaque) */ diff --git a/node/Cluster.cpp b/node/Cluster.cpp index 900804b7..47794214 100644 --- a/node/Cluster.cpp +++ b/node/Cluster.cpp @@ -638,6 +638,62 @@ bool Cluster::redirectPeer(const Address &peerAddress,const InetAddress &peerPhy } } +void Cluster::status(ZT_ClusterStatus &status) const +{ + const uint64_t now = RR->node->now(); + memset(&status,0,sizeof(ZT_ClusterStatus)); + ZT_ClusterMemberStatus *ms[ZT_CLUSTER_MAX_MEMBERS]; + memset(ms,0,sizeof(ms)); + + status.myId = _id; + + ms[_id] = &(status.member[status.clusterSize++]); + ms[_id]->id = _id; + ms[_id]->alive = 1; + ms[_id]->x = _x; + ms[_id]->y = _y; + ms[_id]->z = _z; + ms[_id]->peers = RR->topology->countAlive(); + for(std::vector::const_iterator ep(_zeroTierPhysicalEndpoints.begin());ep!=_zeroTierPhysicalEndpoints.end();++ep) { + if (ms[_id]->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check + break; + memcpy(&(ms[_id]->zeroTierPhysicalEndpoints[ms[_id]->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage)); + } + + { + Mutex::Lock _l1(_memberIds_m); + for(std::vector::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) { + if (status.clusterSize >= ZT_CLUSTER_MAX_MEMBERS) // sanity check + break; + ZT_ClusterMemberStatus *s = ms[*mid] = &(status.member[status.clusterSize++]); + _Member &m = _members[*mid]; + Mutex::Lock ml(m.lock); + + s->id = *mid; + s->msSinceLastHeartbeat = (unsigned int)std::min((uint64_t)(~((unsigned int)0)),(now - m.lastReceivedAliveAnnouncement)); + s->alive = (s->msSinceLastHeartbeat < ZT_CLUSTER_TIMEOUT) ? 
1 : 0; + s->x = m.x; + s->y = m.y; + s->z = m.z; + s->load = m.load; + for(std::vector::const_iterator ep(m.zeroTierPhysicalEndpoints.begin());ep!=m.zeroTierPhysicalEndpoints.end();++ep) { + if (s->numZeroTierPhysicalEndpoints >= ZT_CLUSTER_MAX_ZT_PHYSICAL_ADDRESSES) // sanity check + break; + memcpy(&(s->zeroTierPhysicalEndpoints[s->numZeroTierPhysicalEndpoints++]),&(*ep),sizeof(struct sockaddr_storage)); + } + } + } + + { + Mutex::Lock _l2(_peerAffinities_m); + for(std::vector<_PeerAffinity>::const_iterator pi(_peerAffinities.begin());pi!=_peerAffinities.end();++pi) { + unsigned int mid = pi->clusterMemberId(); + if ((ms[mid])&&(mid != _id)&&((now - pi->timestamp) < ZT_PEER_ACTIVITY_TIMEOUT)) + ++ms[mid]->peers; + } + } +} + void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len) { if ((len + 3) > (ZT_CLUSTER_MAX_MESSAGE_LENGTH - (24 + 2 + 2))) // sanity check diff --git a/node/Cluster.hpp b/node/Cluster.hpp index daa81185..be346659 100644 --- a/node/Cluster.hpp +++ b/node/Cluster.hpp @@ -254,6 +254,13 @@ public: */ bool redirectPeer(const Address &peerAddress,const InetAddress &peerPhysicalAddress,bool offload); + /** + * Fill out ZT_ClusterStatus structure (from core API) + * + * @param status Reference to structure to hold result (anything there is replaced) + */ + void status(ZT_ClusterStatus &status) const; + private: void _send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len); void _flush(uint16_t memberId); diff --git a/node/Topology.cpp b/node/Topology.cpp index 624c4987..e56d1f47 100644 --- a/node/Topology.cpp +++ b/node/Topology.cpp @@ -332,6 +332,21 @@ void Topology::clean(uint64_t now) } } +unsigned long Topology::countAlive() const +{ + const uint64_t now = RR->node->now(); + unsigned long cnt = 0; + Mutex::Lock _l(_lock); + Hashtable< Address,SharedPtr >::Iterator i(const_cast(this)->_peers); + Address *a = (Address *)0; + SharedPtr *p = (SharedPtr *)0; + while (i.next(a,p)) { + if ((*p)->alive(now)) + ++cnt; + } + return cnt; +} + Identity Topology::_getIdentity(const Address &zta) { char p[128]; diff --git a/node/Topology.hpp b/node/Topology.hpp index 324ec000..ee9827b9 100644 --- a/node/Topology.hpp +++ b/node/Topology.hpp @@ -192,6 +192,11 @@ public: */ void clean(uint64_t now); + /** + * @return Number of 'alive' peers + */ + unsigned long countAlive() const; + /** * Apply a function or function object to all peers * -- cgit v1.2.3 From 5ff7733f84302bbd0ce0a578b3040122d1140f0f Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Mon, 26 Oct 2015 12:49:17 -0700 Subject: More plumbing of cluster status. --- include/ZeroTierOne.h | 11 +++++++++++ node/Node.cpp | 48 +++++++++++++++++++----------------------------- node/Node.hpp | 1 + 3 files changed, 31 insertions(+), 29 deletions(-) (limited to 'node') diff --git a/include/ZeroTierOne.h b/include/ZeroTierOne.h index 86bffcdf..68c1e4d7 100644 --- a/include/ZeroTierOne.h +++ b/include/ZeroTierOne.h @@ -1516,6 +1516,17 @@ void ZT_Node_clusterRemoveMember(ZT_Node *node,unsigned int memberId); */ void ZT_Node_clusterHandleIncomingMessage(ZT_Node *node,const void *msg,unsigned int len); +/** + * Get the current status of the cluster from this node's point of view + * + * Calling this without clusterInit() or without cluster support will just + * zero out the structure and show a cluster size of zero. 
+ * + * @param node Node instance + * @param cs Cluster status structure to fill with data + */ +void ZT_Node_clusterStatus(ZT_Node *node,ZT_ClusterStatus *cs); + /** * Get ZeroTier One version * diff --git a/node/Node.cpp b/node/Node.cpp index c54d783f..2b298903 100644 --- a/node/Node.cpp +++ b/node/Node.cpp @@ -636,6 +636,18 @@ void Node::clusterHandleIncomingMessage(const void *msg,unsigned int len) #endif } +void Node::clusterStatus(ZT_ClusterStatus *cs) +{ + if (!cs) + return; +#ifdef ZT_ENABLE_CLUSTER + if (RR->cluster) + RR->cluster->status(*cs); + else +#endif + memset(cs,0,sizeof(ZT_ClusterStatus)); +} + /****************************************************************************/ /* Node methods used only within node/ */ /****************************************************************************/ @@ -947,15 +959,6 @@ enum ZT_ResultCode ZT_Node_clusterInit( } } -/** - * Add a member to this cluster - * - * Calling this without having called clusterInit() will do nothing. - * - * @param node Node instance - * @param memberId Member ID (must be less than or equal to ZT_CLUSTER_MAX_MEMBERS) - * @return OK or error if clustering is disabled, ID invalid, etc. - */ enum ZT_ResultCode ZT_Node_clusterAddMember(ZT_Node *node,unsigned int memberId) { try { @@ -965,14 +968,6 @@ enum ZT_ResultCode ZT_Node_clusterAddMember(ZT_Node *node,unsigned int memberId) } } -/** - * Remove a member from this cluster - * - * Calling this without having called clusterInit() will do nothing. - * - * @param node Node instance - * @param memberId Member ID to remove (nothing happens if not present) - */ void ZT_Node_clusterRemoveMember(ZT_Node *node,unsigned int memberId) { try { @@ -980,18 +975,6 @@ void ZT_Node_clusterRemoveMember(ZT_Node *node,unsigned int memberId) } catch ( ... ) {} } -/** - * Handle an incoming cluster state message - * - * The message itself contains cluster member IDs, and invalid or badly - * addressed messages will be silently discarded. - * - * Calling this without having called clusterInit() will do nothing. - * - * @param node Node instance - * @param msg Cluster message - * @param len Length of cluster message - */ void ZT_Node_clusterHandleIncomingMessage(ZT_Node *node,const void *msg,unsigned int len) { try { @@ -999,6 +982,13 @@ void ZT_Node_clusterHandleIncomingMessage(ZT_Node *node,const void *msg,unsigned } catch ( ... ) {} } +void ZT_Node_clusterStatus(ZT_Node *node,ZT_ClusterStatus *cs) +{ + try { + reinterpret_cast(node)->clusterStatus(cs); + } catch ( ... ) {} +} + void ZT_version(int *major,int *minor,int *revision,unsigned long *featureFlags) { if (major) *major = ZEROTIER_ONE_VERSION_MAJOR; diff --git a/node/Node.hpp b/node/Node.hpp index b8bd4dc5..4094a79e 100644 --- a/node/Node.hpp +++ b/node/Node.hpp @@ -124,6 +124,7 @@ public: ZT_ResultCode clusterAddMember(unsigned int memberId); void clusterRemoveMember(unsigned int memberId); void clusterHandleIncomingMessage(const void *msg,unsigned int len); + void clusterStatus(ZT_ClusterStatus *cs); // Internal functions ------------------------------------------------------ -- cgit v1.2.3 From debed1ac2dce5822df234ce92bf4692ff1a081db Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Mon, 26 Oct 2015 13:06:10 -0700 Subject: Expose cluster status in /status JSON response. 
--- include/ZeroTierOne.h | 2 +- node/Cluster.cpp | 4 ++-- service/ControlPlane.cpp | 34 ++++++++++++++++++++++++++++++++-- 3 files changed, 35 insertions(+), 5 deletions(-) (limited to 'node') diff --git a/include/ZeroTierOne.h b/include/ZeroTierOne.h index 68c1e4d7..7af4f760 100644 --- a/include/ZeroTierOne.h +++ b/include/ZeroTierOne.h @@ -953,7 +953,7 @@ typedef struct { /** * Cluster member statuses */ - ZT_ClusterMemberStatus member[ZT_CLUSTER_MAX_MEMBERS]; + ZT_ClusterMemberStatus members[ZT_CLUSTER_MAX_MEMBERS]; } ZT_ClusterStatus; /** diff --git a/node/Cluster.cpp b/node/Cluster.cpp index 47794214..9d25593a 100644 --- a/node/Cluster.cpp +++ b/node/Cluster.cpp @@ -647,7 +647,7 @@ void Cluster::status(ZT_ClusterStatus &status) const status.myId = _id; - ms[_id] = &(status.member[status.clusterSize++]); + ms[_id] = &(status.members[status.clusterSize++]); ms[_id]->id = _id; ms[_id]->alive = 1; ms[_id]->x = _x; @@ -665,7 +665,7 @@ void Cluster::status(ZT_ClusterStatus &status) const for(std::vector::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) { if (status.clusterSize >= ZT_CLUSTER_MAX_MEMBERS) // sanity check break; - ZT_ClusterMemberStatus *s = ms[*mid] = &(status.member[status.clusterSize++]); + ZT_ClusterMemberStatus *s = ms[*mid] = &(status.members[status.clusterSize++]); _Member &m = _members[*mid]; Mutex::Lock ml(m.lock); diff --git a/service/ControlPlane.cpp b/service/ControlPlane.cpp index 7affb08c..9770db90 100644 --- a/service/ControlPlane.cpp +++ b/service/ControlPlane.cpp @@ -354,8 +354,36 @@ unsigned int ControlPlane::handleRequest( if (ps[0] == "status") { responseContentType = "application/json"; + ZT_NodeStatus status; _node->status(&status); + + std::string clusterJson; +#ifdef ZT_ENABLE_CLUSTER + { + ZT_ClusterStatus cs; + _node->clusterStatus(&cs); + + char t[4096]; + Utils::snprintf(t,sizeof(t),"{\n\t\t\"myId\": %u,\n\t\t\"clusterSize\": %u,\n\t\t\"members: [\n",cs.myId,cs.clusterSize); + clusterJson.append(t); + for(unsigned int i=0;i 0) ? clusterJson.c_str() : "null")); responseBody = json; scode = 200; } else if (ps[0] == "config") { -- cgit v1.2.3