author     Grant Limberg <glimberg@gmail.com>    2015-06-26 17:44:09 -0700
committer  Grant Limberg <glimberg@gmail.com>    2015-06-26 17:44:09 -0700
commit     9c26d10ea645dc1727813ea72a63f526cf0757d0 (patch)
tree       25ae8e785f07f93f187a34abe204c423613a009b /node
parent     bfb152f53f528934583ee76437453a005610a7ea (diff)
parent     3f71afd0fbb2d87a2c9288166299600da51470dc (diff)
Merge branch 'adamierymenko-dev' into android-jni
Diffstat (limited to 'node')
-rw-r--r--  node/BandwidthAccount.hpp  153
-rw-r--r--  node/Constants.hpp           6
-rw-r--r--  node/IncomingPacket.cpp     16
-rw-r--r--  node/Multicaster.cpp        51
-rw-r--r--  node/Network.cpp            50
-rw-r--r--  node/Network.hpp            11
-rw-r--r--  node/NetworkConfig.cpp      31
-rw-r--r--  node/NetworkConfig.hpp      33
-rw-r--r--  node/Node.cpp               42
-rw-r--r--  node/Node.hpp               24
-rw-r--r--  node/Packet.hpp              2
-rw-r--r--  node/Peer.cpp                6
-rw-r--r--  node/SelfAwareness.cpp       2
-rw-r--r--  node/Switch.cpp             32
-rw-r--r--  node/Topology.cpp          109
-rw-r--r--  node/Topology.hpp           73
16 files changed, 180 insertions, 461 deletions
diff --git a/node/BandwidthAccount.hpp b/node/BandwidthAccount.hpp
deleted file mode 100644
index 3a6432c4..00000000
--- a/node/BandwidthAccount.hpp
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * ZeroTier One - Network Virtualization Everywhere
- * Copyright (C) 2011-2015 ZeroTier, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * --
- *
- * ZeroTier may be used and distributed under the terms of the GPLv3, which
- * are available at: http://www.gnu.org/licenses/gpl-3.0.html
- *
- * If you would like to embed ZeroTier into a commercial application or
- * redistribute it in a modified binary form, please contact ZeroTier Networks
- * LLC. Start here: http://www.zerotier.com/
- */
-
-#ifndef ZT_BWACCOUNT_HPP
-#define ZT_BWACCOUNT_HPP
-
-#include "Constants.hpp"
-
-#include <algorithm>
-
-#include <stdint.h>
-#include <math.h>
-
-#include "Utils.hpp"
-
-#ifdef __WINDOWS__
-#define round(x) ((x-floor(x))>0.5 ? ceil(x) : floor(x))
-#endif
-
-namespace ZeroTier {
-
-/**
- * Bandwidth account used for rate limiting multicast groups
- *
- * This is used to apply a bank account model to multicast groups. Each
- * multicast packet counts against a balance, which accrues at a given
- * rate in bytes per second. Debt is possible. These parameters are
- * configurable.
- *
- * A bank account model permits bursting behavior, which correctly models
- * how OSes and apps typically use multicast. It's common for things to
- * spew lots of multicast messages at once, wait a while, then do it
- * again. A consistent bandwidth limit model doesn't fit.
- */
-class BandwidthAccount
-{
-public:
- /**
- * Create an uninitialized account
- *
- * init() must be called before this is used.
- */
- BandwidthAccount() throw() {}
-
- /**
- * Create and initialize
- *
- * @param preload Initial balance to place in account
- * @param maxb Maximum allowed balance (> 0)
- * @param acc Rate of accrual in bytes per second
- * @param now Current time
- */
- BandwidthAccount(uint32_t preload,uint32_t maxb,uint32_t acc,uint64_t now)
- throw()
- {
- init(preload,maxb,acc,now);
- }
-
- /**
- * Initialize or re-initialize account
- *
- * @param preload Initial balance to place in account
- * @param maxb Maximum allowed balance (> 0)
- * @param acc Rate of accrual in bytes per second
- * @param now Current time
- */
- inline void init(uint32_t preload,uint32_t maxb,uint32_t acc,uint64_t now)
- throw()
- {
- _lastTime = ((double)now / 1000.0);
- _balance = preload;
- _maxBalance = maxb;
- _accrual = acc;
- }
-
- /**
- * Update and retrieve balance of this account
- *
- * @param now Current time
- * @return New balance updated from current clock
- */
- inline uint32_t update(uint64_t now)
- throw()
- {
- double lt = _lastTime;
- double nowf = ((double)now / 1000.0);
- _lastTime = nowf;
- return (_balance = std::min(_maxBalance,(uint32_t)round((double)_balance + ((double)_accrual * (nowf - lt)))));
- }
-
- /**
- * Update balance and conditionally deduct
- *
- * If the deduction amount fits, it is deducted after update. Otherwise
- * balance is updated and false is returned.
- *
- * @param amt Amount to deduct
- * @param now Current time
- * @return True if amount fit within balance and was deducted
- */
- inline bool deduct(uint32_t amt,uint64_t now)
- throw()
- {
- if (update(now) >= amt) {
- _balance -= amt;
- return true;
- }
- return false;
- }
-
- /**
- * @return Most recent balance without update
- */
- inline uint32_t balance() const
- throw()
- {
- return _balance;
- }
-
-private:
- double _lastTime;
- uint32_t _balance;
- uint32_t _maxBalance;
- uint32_t _accrual;
-};
-
-} // namespace ZeroTier
-
-#endif
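
Note: for reference, a minimal standalone sketch (not part of this commit) of the bank-account accrual model that the deleted BandwidthAccount.hpp implemented. Field names mirror the removed class; any rate values a caller would plug in correspond to the old preload/maxBalance/accrual tuple that this merge also removes from NetworkConfig.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    struct BwAccount {
        double lastTime;      // seconds at last update
        uint32_t balance;     // current balance, bytes
        uint32_t maxBalance;  // ceiling, bytes
        uint32_t accrual;     // accrual rate, bytes per second
    };

    // Bring the balance up to date for elapsed wall time, then try to spend amt bytes.
    // Returns true if the multicast packet fit within the budget.
    static bool deduct(BwAccount &a, uint32_t amt, uint64_t nowMs)
    {
        const double nowS = (double)nowMs / 1000.0;
        a.balance = std::min<uint32_t>(a.maxBalance,
            (uint32_t)std::round((double)a.balance + (double)a.accrual * (nowS - a.lastTime)));
        a.lastTime = nowS;
        if (a.balance < amt)
            return false; // over budget -- the caller dropped the multicast
        a.balance -= amt;
        return true;
    }
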
diff --git a/node/Constants.hpp b/node/Constants.hpp
index aced6fe7..ac9dbc99 100644
--- a/node/Constants.hpp
+++ b/node/Constants.hpp
@@ -254,7 +254,7 @@
/**
* Delay between scans of the topology active peer DB for peers that need ping
*
- * This is also how often pings will be retried to upstream peers (rootservers)
+ * This is also how often pings will be retried to upstream peers (relays, roots)
* constantly until something is heard.
*/
#define ZT_PING_CHECK_INVERVAL 6250
@@ -279,9 +279,9 @@
*
* When we send something (including frames), we generally expect a response.
* Switching relays if no response in a short period of time causes more
- * rapid failover if a rootserver goes down or becomes unreachable. In the
+ * rapid failover if a root server goes down or becomes unreachable. In the
* mistaken case, little harm is done as it'll pick the next-fastest
- * rootserver and will switch back eventually.
+ * root server and will switch back eventually.
*/
#define ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD 10000
diff --git a/node/IncomingPacket.cpp b/node/IncomingPacket.cpp
index 8f95b9ba..7e2bcdaa 100644
--- a/node/IncomingPacket.cpp
+++ b/node/IncomingPacket.cpp
@@ -110,7 +110,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
case Packet::ERROR_OBJ_NOT_FOUND:
if (inReVerb == Packet::VERB_WHOIS) {
- if (RR->topology->isRootserver(peer->address()))
+ if (RR->topology->isRoot(peer->identity()))
RR->sw->cancelWhoisRequest(Address(field(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH));
} else if (inReVerb == Packet::VERB_NETWORK_CONFIG_REQUEST) {
SharedPtr<Network> network(RR->node->network(at<uint64_t>(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD)));
@@ -128,7 +128,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
break;
case Packet::ERROR_IDENTITY_COLLISION:
- if (RR->topology->isRootserver(peer->address()))
+ if (RR->topology->isRoot(peer->identity()))
RR->node->postEvent(ZT1_EVENT_FATAL_ERROR_IDENTITY_COLLISION);
break;
@@ -268,7 +268,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
peer->setRemoteVersion(protoVersion,vMajor,vMinor,vRevision);
bool trusted = false;
- if (RR->topology->isRootserver(id.address())) {
+ if (RR->topology->isRoot(id)) {
RR->node->postNewerVersionIfNewer(vMajor,vMinor,vRevision);
trusted = true;
}
@@ -353,7 +353,7 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
peer->setRemoteVersion(vProto,vMajor,vMinor,vRevision);
bool trusted = false;
- if (RR->topology->isRootserver(peer->address())) {
+ if (RR->topology->isRoot(peer->identity())) {
RR->node->postNewerVersionIfNewer(vMajor,vMinor,vRevision);
trusted = true;
}
@@ -362,10 +362,10 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
} break;
case Packet::VERB_WHOIS: {
- // Right now only rootservers are allowed to send OK(WHOIS) to prevent
- // poisoning attacks. Further decentralization will require some other
- // kind of trust mechanism.
- if (RR->topology->isRootserver(peer->address())) {
+ /* Right now only root servers are allowed to send OK(WHOIS) to prevent
+ * poisoning attacks. Further decentralization will require some other
+ * kind of trust mechanism. */
+ if (RR->topology->isRoot(peer->identity())) {
const Identity id(*this,ZT_PROTO_VERB_WHOIS__OK__IDX_IDENTITY);
if (id.locallyValidate())
RR->sw->doAnythingWaitingForPeer(RR->topology->addPeer(SharedPtr<Peer>(new Peer(RR->identity,id))));
diff --git a/node/Multicaster.cpp b/node/Multicaster.cpp
index 0cc4fb87..77ea2e66 100644
--- a/node/Multicaster.cpp
+++ b/node/Multicaster.cpp
@@ -216,7 +216,7 @@ void Multicaster::send(
if ((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY) {
gs.lastExplicitGather = now;
- SharedPtr<Peer> sn(RR->topology->getBestRootserver());
+ SharedPtr<Peer> sn(RR->topology->getBestRoot());
if (sn) {
TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
@@ -269,51 +269,6 @@ void Multicaster::send(
// Free allocated memory buffer if any
if (indexes != idxbuf)
delete [] indexes;
-
-#ifdef ZT_SUPPORT_LEGACY_MULTICAST
- // This sends a P5 multicast up to our rootserver, who then
- // redistributes it manually down to all <1.0.0 peers for
- // legacy support. These peers don't support the new multicast
- // frame type, so even if they receive it they will ignore it.
- {
- SharedPtr<Peer> sn(RR->topology->getBestRootserver());
- if (sn) {
- uint32_t rn = RR->prng->next32();
- Packet outp(sn->address(),RR->identity.address(),Packet::VERB_P5_MULTICAST_FRAME);
-
- outp.append((uint16_t)0xffff); // do not forward
- outp.append((unsigned char)0,320 + 1024); // empty queue and bloom filter
-
- outp.append((unsigned char)((com) ? ZT_PROTO_VERB_P5_MULTICAST_FRAME_FLAGS_HAS_MEMBERSHIP_CERTIFICATE : 0));
- outp.append((uint64_t)nwid);
- outp.append((uint16_t)0);
- outp.append((unsigned char)0);
- outp.append((unsigned char)0);
- RR->identity.address().appendTo(outp);
- outp.append((const void *)&rn,3); // random multicast ID
- if (src)
- src.appendTo(outp);
- else MAC(RR->identity.address(),nwid).appendTo(outp);
- mg.mac().appendTo(outp);
- outp.append((uint32_t)mg.adi());
- outp.append((uint16_t)etherType);
- outp.append((uint16_t)len);
- outp.append(data,len);
- unsigned int signedPortionLen = outp.size() - ZT_PROTO_VERB_P5_MULTICAST_FRAME_IDX__START_OF_SIGNED_PORTION;
-
- C25519::Signature sig(RR->identity.sign(outp.field(ZT_PROTO_VERB_P5_MULTICAST_FRAME_IDX__START_OF_SIGNED_PORTION,signedPortionLen),signedPortionLen));
-
- outp.append((uint16_t)sig.size());
- outp.append(sig.data,(unsigned int)sig.size());
-
- if (com) com->serialize(outp);
-
- outp.compress();
- outp.armor(sn->key(),true);
- sn->send(RR,outp.data(),outp.size(),now);
- }
- }
-#endif // ZT_SUPPORT_LEGACY_MULTICAST
}
void Multicaster::clean(uint64_t now)
@@ -372,9 +327,9 @@ void Multicaster::_add(uint64_t now,uint64_t nwid,const MulticastGroup &mg,Multi
//TRACE("..MC %s joined multicast group %.16llx/%s via %s",member.toString().c_str(),nwid,mg.toString().c_str(),((learnedFrom) ? learnedFrom.toString().c_str() : "(direct)"));
for(std::list<OutboundMulticast>::iterator tx(gs.txQueue.begin());tx!=gs.txQueue.end();) {
- if (tx->atLimit()) {
+ if (tx->atLimit())
gs.txQueue.erase(tx++);
- } else {
+ else {
tx->sendIfNew(RR,member);
if (tx->atLimit())
gs.txQueue.erase(tx++);
diff --git a/node/Network.cpp b/node/Network.cpp
index 60262cd5..c072e978 100644
--- a/node/Network.cpp
+++ b/node/Network.cpp
@@ -286,18 +286,28 @@ void Network::addMembershipCertificate(const CertificateOfMembership &cert,bool
return;
}
- SharedPtr<Peer> signer(RR->topology->getPeer(cert.signedBy()));
+ if (cert.signedBy() == RR->identity.address()) {
+ // We are the controller: RR->identity.address() == controller() == cert.signedBy()
+ // So, verify that we signed the cert ourselves
+ if (!cert.verify(RR->identity)) {
+ TRACE("rejected network membership certificate for %.16llx self signed by %s: signature check failed",(unsigned long long)_id,cert.signedBy().toString().c_str());
+ return;
+ }
+ } else {
- if (!signer) {
- // This would be rather odd, since this is our controller... could happen
- // if we get packets before we've gotten config.
- RR->sw->requestWhois(cert.signedBy());
- return;
- }
+ SharedPtr<Peer> signer(RR->topology->getPeer(cert.signedBy()));
- if (!cert.verify(signer->identity())) {
- TRACE("rejected network membership certificate for %.16llx signed by %s: signature check failed",(unsigned long long)_id,cert.signedBy().toString().c_str());
- return;
+ if (!signer) {
+ // This would be rather odd, since this is our controller... could happen
+ // if we get packets before we've gotten config.
+ RR->sw->requestWhois(cert.signedBy());
+ return;
+ }
+
+ if (!cert.verify(signer->identity())) {
+ TRACE("rejected network membership certificate for %.16llx signed by %s: signature check failed",(unsigned long long)_id,cert.signedBy().toString().c_str());
+ return;
+ }
}
}
@@ -357,20 +367,6 @@ void Network::clean()
}
}
-bool Network::updateAndCheckMulticastBalance(const MulticastGroup &mg,unsigned int bytes)
-{
- const uint64_t now = RR->node->now();
- Mutex::Lock _l(_lock);
- if (!_config)
- return false;
- std::map< MulticastGroup,BandwidthAccount >::iterator bal(_multicastRateAccounts.find(mg));
- if (bal == _multicastRateAccounts.end()) {
- NetworkConfig::MulticastRate r(_config->multicastRate(mg));
- bal = _multicastRateAccounts.insert(std::pair< MulticastGroup,BandwidthAccount >(mg,BandwidthAccount(r.preload,r.maxBalance,r.accrual,now))).first;
- }
- return bal->second.deduct(bytes,now);
-}
-
void Network::learnBridgeRoute(const MAC &mac,const Address &addr)
{
Mutex::Lock _l(_lock);
@@ -518,13 +514,13 @@ public:
RR(renv),
_now(renv->node->now()),
_network(nw),
- _rootserverAddresses(renv->topology->rootserverAddresses()),
+ _rootAddresses(renv->topology->rootAddresses()),
_allMulticastGroups(nw->_allMulticastGroups())
{}
inline void operator()(Topology &t,const SharedPtr<Peer> &p)
{
- if ( ( (p->hasActiveDirectPath(_now)) && (_network->_isAllowed(p->address())) ) || (std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),p->address()) != _rootserverAddresses.end()) ) {
+ if ( ( (p->hasActiveDirectPath(_now)) && (_network->_isAllowed(p->address())) ) || (std::find(_rootAddresses.begin(),_rootAddresses.end(),p->address()) != _rootAddresses.end()) ) {
Packet outp(p->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
for(std::vector<MulticastGroup>::iterator mg(_allMulticastGroups.begin());mg!=_allMulticastGroups.end();++mg) {
@@ -551,7 +547,7 @@ private:
const RuntimeEnvironment *RR;
uint64_t _now;
Network *_network;
- std::vector<Address> _rootserverAddresses;
+ std::vector<Address> _rootAddresses;
std::vector<MulticastGroup> _allMulticastGroups;
};
diff --git a/node/Network.hpp b/node/Network.hpp
index 7976d901..daa4554e 100644
--- a/node/Network.hpp
+++ b/node/Network.hpp
@@ -47,7 +47,6 @@
#include "MulticastGroup.hpp"
#include "MAC.hpp"
#include "Dictionary.hpp"
-#include "BandwidthAccount.hpp"
#include "Multicaster.hpp"
#include "NetworkConfig.hpp"
#include "CertificateOfMembership.hpp"
@@ -238,15 +237,6 @@ public:
}
/**
- * Update and check multicast rate balance for a multicast group
- *
- * @param mg Multicast group
- * @param bytes Size of packet
- * @return True if packet is within budget
- */
- bool updateAndCheckMulticastBalance(const MulticastGroup &mg,unsigned int bytes);
-
- /**
* Get current network config or throw exception
*
* This version never returns null. Instead it throws a runtime error if
@@ -370,7 +360,6 @@ private:
std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to including those behind us (updated periodically)
std::map< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups bridged to us and when we last saw activity on each
- std::map< MulticastGroup,BandwidthAccount > _multicastRateAccounts;
std::map<MAC,Address> _remoteBridgeRoutes; // remote addresses where given MACs are reachable
diff --git a/node/NetworkConfig.cpp b/node/NetworkConfig.cpp
index 5ed1dd3f..8d682947 100644
--- a/node/NetworkConfig.cpp
+++ b/node/NetworkConfig.cpp
@@ -32,10 +32,6 @@
namespace ZeroTier {
-// This is fast enough for things like Apple's mDNS spam, so it should serve
-// as a good default for your average network.
-const NetworkConfig::MulticastRate NetworkConfig::DEFAULT_MULTICAST_RATE(40000,60000,80);
-
SharedPtr<NetworkConfig> NetworkConfig::createTestNetworkConfig(const Address &self)
{
SharedPtr<NetworkConfig> nc(new NetworkConfig());
@@ -85,18 +81,6 @@ std::vector<unsigned int> NetworkConfig::allowedEtherTypes() const
return ets;
}
-const NetworkConfig::MulticastRate &NetworkConfig::multicastRate(const MulticastGroup &mg) const
- throw()
-{
- std::map<MulticastGroup,MulticastRate>::const_iterator r(_multicastRates.find(mg));
- if (r == _multicastRates.end()) {
- r = _multicastRates.find(MulticastGroup()); // zero MG signifies network's default rate
- if (r == _multicastRates.end())
- return DEFAULT_MULTICAST_RATE; // neither specific nor default found in network config
- }
- return r->second;
-}
-
void NetworkConfig::_fromDictionary(const Dictionary &d)
{
static const std::string zero("0");
@@ -181,13 +165,6 @@ void NetworkConfig::_fromDictionary(const Dictionary &d)
std::sort(_activeBridges.begin(),_activeBridges.end());
std::unique(_activeBridges.begin(),_activeBridges.end());
- Dictionary multicastRateEntries(d.get(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_RATES,std::string()));
- for(Dictionary::const_iterator i(multicastRateEntries.begin());i!=multicastRateEntries.end();++i) {
- std::vector<std::string> params(Utils::split(i->second.c_str(),",","",""));
- if (params.size() >= 3)
- _multicastRates[MulticastGroup(i->first)] = MulticastRate(Utils::hexStrToUInt(params[0].c_str()),Utils::hexStrToUInt(params[1].c_str()),Utils::hexStrToUInt(params[2].c_str()));
- }
-
std::vector<std::string> relaysSplit(Utils::split(d.get(ZT_NETWORKCONFIG_DICT_KEY_RELAYS,"").c_str(),",","",""));
for(std::vector<std::string>::const_iterator r(relaysSplit.begin());r!=relaysSplit.end();++r) {
std::size_t semi(r->find(';')); // address;ip/port,...
@@ -221,14 +198,6 @@ bool NetworkConfig::operator==(const NetworkConfig &nc) const
if (_gateways != nc._gateways) return false;
if (_activeBridges != nc._activeBridges) return false;
if (_relays != nc._relays) return false;
- if (_multicastRates.size() == nc._multicastRates.size()) {
- // uclibc++ doesn't seem to implement map<> != map<> correctly, so do
- // it ourselves. Note that this depends on the maps being sorted.
- for(std::map<MulticastGroup,MulticastRate>::const_iterator a(_multicastRates.begin()),b(nc._multicastRates.begin());a!=_multicastRates.end();++a,++b) {
- if ((a->first != b->first)||(a->second != b->second))
- return false;
- }
- } else return false;
if (_com != nc._com) return false;
return true;
}
diff --git a/node/NetworkConfig.hpp b/node/NetworkConfig.hpp
index 2fb56d6a..75395fd5 100644
--- a/node/NetworkConfig.hpp
+++ b/node/NetworkConfig.hpp
@@ -68,9 +68,6 @@ namespace ZeroTier {
// integer(hex)
#define ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT "ml"
-// dictionary of one or more of: MAC/ADI=preload,maxbalance,accrual
-#define ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_RATES "mr"
-
// 0/1
#define ZT_NETWORKCONFIG_DICT_KEY_PRIVATE "p"
@@ -115,27 +112,6 @@ class NetworkConfig
public:
/**
- * Tuple of multicast rate parameters
- */
- struct MulticastRate
- {
- MulticastRate() throw() {}
- MulticastRate(uint32_t pl,uint32_t maxb,uint32_t acc) throw() : preload(pl),maxBalance(maxb),accrual(acc) {}
-
- uint32_t preload;
- uint32_t maxBalance;
- uint32_t accrual;
-
- inline bool operator==(const MulticastRate &mr) const { return ((preload == mr.preload)&&(maxBalance == mr.maxBalance)&&(accrual == mr.accrual)); }
- inline bool operator!=(const MulticastRate &mr) const { return (!(*this == mr)); }
- };
-
- /**
- * A hard-coded default multicast rate for networks that don't specify
- */
- static const MulticastRate DEFAULT_MULTICAST_RATE;
-
- /**
* Create an instance of a NetworkConfig for the test network ID
*
* The test network ID is defined as ZT_TEST_NETWORK_ID. This is a
@@ -176,7 +152,6 @@ public:
inline uint64_t revision() const throw() { return _revision; }
inline const Address &issuedTo() const throw() { return _issuedTo; }
inline unsigned int multicastLimit() const throw() { return _multicastLimit; }
- inline const std::map<MulticastGroup,MulticastRate> &multicastRates() const throw() { return _multicastRates; }
inline bool allowPassiveBridging() const throw() { return _allowPassiveBridging; }
inline bool isPublic() const throw() { return (!_private); }
inline bool isPrivate() const throw() { return _private; }
@@ -198,13 +173,6 @@ public:
return ( (_allowPassiveBridging) || (std::find(_activeBridges.begin(),_activeBridges.end(),fromPeer) != _activeBridges.end()) );
}
- /**
- * @param mg Multicast group
- * @return Multicast rate or DEFAULT_MULTICAST_RATE if not set
- */
- const MulticastRate &multicastRate(const MulticastGroup &mg) const
- throw();
-
bool operator==(const NetworkConfig &nc) const;
inline bool operator!=(const NetworkConfig &nc) const { return (!(*this == nc)); }
@@ -229,7 +197,6 @@ private:
std::vector<InetAddress> _gateways;
std::vector<Address> _activeBridges;
std::vector< std::pair<Address,InetAddress> > _relays;
- std::map<MulticastGroup,MulticastRate> _multicastRates;
CertificateOfMembership _com;
AtomicCounter __refCount;
diff --git a/node/Node.cpp b/node/Node.cpp
index 1f6d474c..85011434 100644
--- a/node/Node.cpp
+++ b/node/Node.cpp
@@ -133,9 +133,7 @@ Node::Node(
if (!rt.size())
rt.fromString(ZT_DEFAULTS.defaultRootTopology);
}
- Dictionary rootservers(rt.get("rootservers",""));
- rootservers.update(rt.get("supernodes",""));
- RR->topology->setRootservers(rootservers);
+ RR->topology->setRootServers(Dictionary(rt.get("rootservers","")));
postEvent(ZT1_EVENT_UP);
}
@@ -143,7 +141,7 @@ Node::Node(
Node::~Node()
{
Mutex::Lock _l(_networks_m);
- _networks.clear();
+ _networks.clear(); // ensure that networks are destroyed before shutdown
delete RR->sa;
delete RR->topology;
delete RR->antiRec;
@@ -191,7 +189,7 @@ public:
RR(renv),
_now(now),
_relays(relays),
- _rootservers(RR->topology->rootserverAddresses())
+ _rootAddresses(RR->topology->rootAddresses())
{
}
@@ -207,7 +205,7 @@ public:
}
}
- if ((isRelay)||(std::find(_rootservers.begin(),_rootservers.end(),p->address()) != _rootservers.end())) {
+ if ((isRelay)||(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->address()) != _rootAddresses.end())) {
p->doPingAndKeepalive(RR,_now);
if (p->lastReceive() > lastReceiveFromUpstream)
lastReceiveFromUpstream = p->lastReceive();
@@ -221,7 +219,7 @@ private:
const RuntimeEnvironment *RR;
uint64_t _now;
const std::vector< std::pair<Address,InetAddress> > &_relays;
- std::vector<Address> _rootservers;
+ std::vector<Address> _rootAddresses;
};
ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
@@ -238,7 +236,7 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
std::vector< SharedPtr<Network> > needConfig;
{
Mutex::Lock _l(_networks_m);
- for(std::map< uint64_t,SharedPtr<Network> >::const_iterator n(_networks.begin());n!=_networks.end();++n) {
+ for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n) {
SharedPtr<NetworkConfig> nc(n->second->config2());
if (((now - n->second->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)||(!nc))
needConfig.push_back(n->second);
@@ -262,7 +260,7 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
}
}
- // Ping living or rootserver/relay peers
+ // Ping living or root server/relay peers
_PingPeersThatNeedPing pfunc(RR,now,networkRelays);
RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);
@@ -312,20 +310,22 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
ZT1_ResultCode Node::join(uint64_t nwid)
{
Mutex::Lock _l(_networks_m);
- SharedPtr<Network> &nwe = _networks[nwid];
- if (!nwe)
- nwe = SharedPtr<Network>(new Network(RR,nwid));
+ SharedPtr<Network> nw = _network(nwid);
+ if(!nw)
+ _networks.push_back(std::pair< uint64_t,SharedPtr<Network> >(nwid,SharedPtr<Network>(new Network(RR,nwid))));
+ std::sort(_networks.begin(),_networks.end()); // will sort by nwid since it's the first in a pair<>
return ZT1_RESULT_OK;
}
ZT1_ResultCode Node::leave(uint64_t nwid)
{
+ std::vector< std::pair< uint64_t,SharedPtr<Network> > > newn;
Mutex::Lock _l(_networks_m);
- std::map< uint64_t,SharedPtr<Network> >::iterator nw(_networks.find(nwid));
- if (nw != _networks.end()) {
- nw->second->destroy();
- _networks.erase(nw);
+ for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n) {
+ if (n->first != nwid)
+ newn.push_back(*n);
}
+ _networks.swap(newn);
return ZT1_RESULT_OK;
}
@@ -386,7 +386,7 @@ ZT1_PeerList *Node::peers() const
p->versionRev = -1;
}
p->latency = pi->second->latency();
- p->role = RR->topology->isRootserver(pi->second->address()) ? ZT1_PEER_ROLE_ROOTSERVER : ZT1_PEER_ROLE_LEAF;
+ p->role = RR->topology->isRoot(pi->second->identity()) ? ZT1_PEER_ROLE_ROOT : ZT1_PEER_ROLE_LEAF;
std::vector<Path> paths(pi->second->paths());
Path *bestPath = pi->second->getBestPath(_now);
@@ -408,10 +408,10 @@ ZT1_PeerList *Node::peers() const
ZT1_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
Mutex::Lock _l(_networks_m);
- std::map< uint64_t,SharedPtr<Network> >::const_iterator nw(_networks.find(nwid));
- if (nw != _networks.end()) {
+ SharedPtr<Network> nw = _network(nwid);
+ if(nw) {
ZT1_VirtualNetworkConfig *nc = (ZT1_VirtualNetworkConfig *)::malloc(sizeof(ZT1_VirtualNetworkConfig));
- nw->second->externalConfig(nc);
+ nw->externalConfig(nc);
return nc;
}
return (ZT1_VirtualNetworkConfig *)0;
@@ -428,7 +428,7 @@ ZT1_VirtualNetworkList *Node::networks() const
nl->networks = (ZT1_VirtualNetworkConfig *)(buf + sizeof(ZT1_VirtualNetworkList));
nl->networkCount = 0;
- for(std::map< uint64_t,SharedPtr<Network> >::const_iterator n(_networks.begin());n!=_networks.end();++n)
+ for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n)
n->second->externalConfig(&(nl->networks[nl->networkCount++]));
return nl;
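
Note: this commit also replaces Node's std::map of networks with a std::vector of (nwid, network) pairs kept sorted by network ID. A minimal sketch of the same pattern with hypothetical names (std::shared_ptr standing in for ZeroTier's SharedPtr), showing why join() re-sorts after push_back and why lookup can stay a plain scan:

    #include <algorithm>
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct Network { uint64_t id; };
    typedef std::pair<uint64_t, std::shared_ptr<Network> > Entry;

    static std::vector<Entry> networks; // kept sorted by nwid (the pair's first member)

    static void join(uint64_t nwid)
    {
        for (std::vector<Entry>::const_iterator i(networks.begin()); i != networks.end(); ++i)
            if (i->first == nwid) return; // already joined
        networks.push_back(Entry(nwid, std::shared_ptr<Network>(new Network{nwid})));
        // std::pair compares by first then second, so this orders entries by nwid
        std::sort(networks.begin(), networks.end());
    }

    static std::shared_ptr<Network> findNetwork(uint64_t nwid)
    {
        // A node joins only a handful of networks, so a linear scan is as cheap
        // as a map lookup and keeps iteration over all networks contiguous.
        for (std::vector<Entry>::const_iterator i(networks.begin()); i != networks.end(); ++i)
            if (i->first == nwid) return i->second;
        return std::shared_ptr<Network>();
    }
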
diff --git a/node/Node.hpp b/node/Node.hpp
index 1d9372e4..2d2898b5 100644
--- a/node/Node.hpp
+++ b/node/Node.hpp
@@ -155,19 +155,19 @@ public:
len);
}
- inline SharedPtr<Network> network(uint64_t nwid)
+ inline SharedPtr<Network> network(uint64_t nwid) const
{
Mutex::Lock _l(_networks_m);
- std::map< uint64_t,SharedPtr<Network> >::iterator nw(_networks.find(nwid));
- return ((nw == _networks.end()) ? SharedPtr<Network>() : nw->second);
+ return _network(nwid);
}
inline std::vector< SharedPtr<Network> > allNetworks() const
{
- Mutex::Lock _l(_networks_m);
std::vector< SharedPtr<Network> > nw;
- for(std::map< uint64_t,SharedPtr<Network> >::const_iterator n(_networks.begin());n!=_networks.end();++n)
- nw.push_back(n->second);
+ Mutex::Lock _l(_networks_m);
+ nw.reserve(_networks.size());
+ for(std::vector< std::pair< uint64_t, SharedPtr<Network> > >::const_iterator i=_networks.begin();i!=_networks.end();++i)
+ nw.push_back(i->second);
return nw;
}
@@ -208,6 +208,16 @@ public:
#endif
private:
+ inline SharedPtr<Network> _network(uint64_t nwid) const
+ {
+ // assumes _networks_m is locked
+ for(std::vector< std::pair< uint64_t, SharedPtr<Network> > >::const_iterator i=_networks.begin();i!=_networks.end();++i) {
+ if (i->first == nwid)
+ return i->second;
+ }
+ return SharedPtr<Network>();
+ }
+
RuntimeEnvironment _RR;
RuntimeEnvironment *RR;
@@ -223,7 +233,7 @@ private:
//Dictionary _localConfig; // persisted as local.conf
//Mutex _localConfig_m;
- std::map< uint64_t,SharedPtr<Network> > _networks;
+ std::vector< std::pair< uint64_t, SharedPtr<Network> > > _networks;
Mutex _networks_m;
Mutex _backgroundTasksLock;
diff --git a/node/Packet.hpp b/node/Packet.hpp
index 21f8ca57..49201b71 100644
--- a/node/Packet.hpp
+++ b/node/Packet.hpp
@@ -626,7 +626,7 @@ public:
* [... additional tuples of network/address/adi ...]
*
* LIKEs are sent to peers with whom you have a direct peer to peer
- * connection, and always including rootservers.
+ * connection, and always including root servers.
*
* OK/ERROR are not generated.
*/
diff --git a/node/Peer.cpp b/node/Peer.cpp
index 3093ef41..96caa72c 100644
--- a/node/Peer.cpp
+++ b/node/Peer.cpp
@@ -122,16 +122,16 @@ void Peer::received(
/* Announce multicast groups of interest to direct peers if they are
* considered authorized members of a given network. Also announce to
- * rootservers and network controllers. */
+ * root servers and network controllers. */
if ((pathIsConfirmed)&&((now - _lastAnnouncedTo) >= ((ZT_MULTICAST_LIKE_EXPIRE / 2) - 1000))) {
_lastAnnouncedTo = now;
- const bool isRootserver = RR->topology->isRootserver(_id.address());
+ const bool isRoot = RR->topology->isRoot(_id);
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
const std::vector< SharedPtr<Network> > networks(RR->node->allNetworks());
for(std::vector< SharedPtr<Network> >::const_iterator n(networks.begin());n!=networks.end();++n) {
- if ( (isRootserver) || ((*n)->isAllowed(_id.address())) ) {
+ if ( (isRoot) || ((*n)->isAllowed(_id.address())) ) {
const std::vector<MulticastGroup> mgs((*n)->allMulticastGroups());
for(std::vector<MulticastGroup>::const_iterator mg(mgs.begin());mg!=mgs.end();++mg) {
if ((outp.size() + 18) > ZT_UDP_DEFAULT_PAYLOAD_MTU) {
diff --git a/node/SelfAwareness.cpp b/node/SelfAwareness.cpp
index 5fc8be2a..9f7c41d7 100644
--- a/node/SelfAwareness.cpp
+++ b/node/SelfAwareness.cpp
@@ -118,7 +118,7 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi
// For all peers for whom we forgot an address, send a packet indirectly if
// they are still considered alive so that we will re-establish direct links.
- SharedPtr<Peer> sn(RR->topology->getBestRootserver());
+ SharedPtr<Peer> sn(RR->topology->getBestRoot());
if (sn) {
Path *snp = sn->getBestPath(now);
if (snp) {
diff --git a/node/Switch.cpp b/node/Switch.cpp
index 3ac0b920..236c1e66 100644
--- a/node/Switch.cpp
+++ b/node/Switch.cpp
@@ -145,12 +145,6 @@ void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,c
if (fromBridged)
network->learnBridgedMulticastGroup(mg,RR->node->now());
- // Check multicast/broadcast bandwidth quotas and reject if quota exceeded
- if (!network->updateAndCheckMulticastBalance(mg,len)) {
- TRACE("%.16llx: didn't multicast %u bytes, quota exceeded for multicast group %s",network->id(),len,mg.toString().c_str());
- return;
- }
-
//TRACE("%.16llx: MULTICAST %s -> %s %s %u",network->id(),from.toString().c_str(),mg.toString().c_str(),etherTypeName(etherType),len);
RR->mc->send(
@@ -320,8 +314,8 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force)
* P2 in randomized order in terms of which gets sent first. This is done
* since in a few cases NAT-t can be sensitive to slight timing differences
* in terms of when the two peers initiate. Normally this is accounted for
- * by the nearly-simultaneous RENDEZVOUS kickoff from the rootserver, but
- * given that rootservers are hosted on cloud providers this can in some
+ * by the nearly-simultaneous RENDEZVOUS kickoff from the relay, but
+ * given that relays are hosted on cloud providers this can in some
* cases have a few ms of latency between packet departures. By randomizing
* the order we make each attempted NAT-t favor one or the other going
* first, meaning if it doesn't succeed the first time it might the second
@@ -565,8 +559,8 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void
// It wouldn't hurt anything, just redundant and unnecessary.
SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
if ((!relayTo)||(!relayTo->send(RR,fragment.data(),fragment.size(),RR->node->now()))) {
- // Don't know peer or no direct path -- so relay via rootserver
- relayTo = RR->topology->getBestRootserver();
+ // Don't know peer or no direct path -- so relay via root server
+ relayTo = RR->topology->getBestRoot();
if (relayTo)
relayTo->send(RR,fragment.data(),fragment.size(),RR->node->now());
}
@@ -641,8 +635,8 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat
if ((relayTo)&&((relayTo->send(RR,packet->data(),packet->size(),RR->node->now())))) {
unite(source,destination,false);
} else {
- // Don't know peer or no direct path -- so relay via rootserver
- relayTo = RR->topology->getBestRootserver(&source,1,true);
+ // Don't know peer or no direct path -- so relay via root server
+ relayTo = RR->topology->getBestRoot(&source,1,true);
if (relayTo)
relayTo->send(RR,packet->data(),packet->size(),RR->node->now());
}
@@ -712,13 +706,13 @@ void Switch::_handleBeacon(const InetAddress &fromAddr,const Buffer<ZT_PROTO_BEA
Address Switch::_sendWhoisRequest(const Address &addr,const Address *peersAlreadyConsulted,unsigned int numPeersAlreadyConsulted)
{
- SharedPtr<Peer> rootserver(RR->topology->getBestRootserver(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
- if (rootserver) {
- Packet outp(rootserver->address(),RR->identity.address(),Packet::VERB_WHOIS);
+ SharedPtr<Peer> root(RR->topology->getBestRoot(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
+ if (root) {
+ Packet outp(root->address(),RR->identity.address(),Packet::VERB_WHOIS);
addr.appendTo(outp);
- outp.armor(rootserver->key(),true);
- if (rootserver->send(RR,outp.data(),outp.size(),RR->node->now()))
- return rootserver->address();
+ outp.armor(root->key(),true);
+ if (root->send(RR,outp.data(),outp.size(),RR->node->now()))
+ return root->address();
}
return Address();
}
@@ -752,7 +746,7 @@ bool Switch::_trySend(const Packet &packet,bool encrypt,uint64_t nwid)
}
if (!relay)
- relay = RR->topology->getBestRootserver();
+ relay = RR->topology->getBestRoot();
if (!(relay)||(!(viaPath = relay->getBestPath(now))))
return false;
diff --git a/node/Topology.cpp b/node/Topology.cpp
index cfa6749c..2b1cc31f 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -36,7 +36,7 @@ namespace ZeroTier {
Topology::Topology(const RuntimeEnvironment *renv) :
RR(renv),
- _amRootserver(false)
+ _amRoot(false)
{
}
@@ -44,16 +44,16 @@ Topology::~Topology()
{
}
-void Topology::setRootservers(const std::map< Identity,std::vector<InetAddress> > &sn)
+void Topology::setRootServers(const std::map< Identity,std::vector<InetAddress> > &sn)
{
Mutex::Lock _l(_lock);
- if (_rootservers == sn)
+ if (_roots == sn)
return; // no change
- _rootservers = sn;
- _rootserverAddresses.clear();
- _rootserverPeers.clear();
+ _roots = sn;
+ _rootAddresses.clear();
+ _rootPeers.clear();
const uint64_t now = RR->node->now();
for(std::map< Identity,std::vector<InetAddress> >::const_iterator i(sn.begin());i!=sn.end();++i) {
@@ -64,17 +64,17 @@ void Topology::setRootservers(const std::map< Identity,std::vector<InetAddress>
for(std::vector<InetAddress>::const_iterator j(i->second.begin());j!=i->second.end();++j)
p->addPath(Path(*j,true));
p->use(now);
- _rootserverPeers.push_back(p);
+ _rootPeers.push_back(p);
}
- _rootserverAddresses.push_back(i->first.address());
+ _rootAddresses.push_back(i->first.address());
}
- std::sort(_rootserverAddresses.begin(),_rootserverAddresses.end());
+ std::sort(_rootAddresses.begin(),_rootAddresses.end());
- _amRootserver = (_rootservers.find(RR->identity) != _rootservers.end());
+ _amRoot = (_roots.find(RR->identity) != _roots.end());
}
-void Topology::setRootservers(const Dictionary &sn)
+void Topology::setRootServers(const Dictionary &sn)
{
std::map< Identity,std::vector<InetAddress> > m;
for(Dictionary::const_iterator d(sn.begin());d!=sn.end();++d) {
@@ -86,11 +86,11 @@ void Topology::setRootservers(const Dictionary &sn)
if (udp.length() > 0)
a.push_back(InetAddress(udp));
} catch ( ... ) {
- TRACE("rootserver list contained invalid entry for: %s",d->first.c_str());
+ TRACE("root server list contained invalid entry for: %s",d->first.c_str());
}
}
}
- this->setRootservers(m);
+ this->setRootServers(m);
}
SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
@@ -141,28 +141,28 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
return SharedPtr<Peer>();
}
-SharedPtr<Peer> Topology::getBestRootserver(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
+SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
{
- SharedPtr<Peer> bestRootserver;
+ SharedPtr<Peer> bestRoot;
const uint64_t now = RR->node->now();
Mutex::Lock _l(_lock);
- if (_amRootserver) {
- /* If I am a rootserver, the "best" rootserver is the one whose address
+ if (_amRoot) {
+ /* If I am a root server, the "best" root server is the one whose address
* is numerically greater than mine (with wrap at top of list). This
* causes packets searching for a route to pretty much literally
* circumnavigate the globe rather than bouncing between just two. */
- if (_rootserverAddresses.size() > 1) { // gotta be one other than me for this to work
- std::vector<Address>::const_iterator sna(std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),RR->identity.address()));
- if (sna != _rootserverAddresses.end()) { // sanity check -- _amRootserver should've been false in this case
+ if (_rootAddresses.size() > 1) { // gotta be one other than me for this to work
+ std::vector<Address>::const_iterator sna(std::find(_rootAddresses.begin(),_rootAddresses.end(),RR->identity.address()));
+ if (sna != _rootAddresses.end()) { // sanity check -- _amRoot should've been false in this case
for(;;) {
- if (++sna == _rootserverAddresses.end())
- sna = _rootserverAddresses.begin(); // wrap around at end
+ if (++sna == _rootAddresses.end())
+ sna = _rootAddresses.begin(); // wrap around at end
if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.find(*sna));
if ((p != _activePeers.end())&&(p->second->hasActiveDirectPath(now))) {
- bestRootserver = p->second;
+ bestRoot = p->second;
break;
}
}
@@ -170,80 +170,87 @@ SharedPtr<Peer> Topology::getBestRootserver(const Address *avoid,unsigned int av
}
}
} else {
- /* If I am not a rootserver, the best rootserver is the active one with
+ /* If I am not a root server, the best root server is the active one with
* the lowest latency. */
- unsigned int l,bestRootserverLatency = 65536;
+ unsigned int l,bestLatency = 65536;
uint64_t lds,ldr;
- // First look for a best rootserver by comparing latencies, but exclude
- // rootservers that have not responded to direct messages in order to
+ // First look for a best root by comparing latencies, but exclude
+ // root servers that have not responded to direct messages in order to
// try to exclude any that are dead or unreachable.
- for(std::vector< SharedPtr<Peer> >::const_iterator sn(_rootserverPeers.begin());sn!=_rootserverPeers.end();) {
+ for(std::vector< SharedPtr<Peer> >::const_iterator sn(_rootPeers.begin());sn!=_rootPeers.end();) {
// Skip explicitly avoided relays
for(unsigned int i=0;i<avoidCount;++i) {
if (avoid[i] == (*sn)->address())
- goto keep_searching_for_rootservers;
+ goto keep_searching_for_roots;
}
// Skip possibly comatose or unreachable relays
lds = (*sn)->lastDirectSend();
ldr = (*sn)->lastDirectReceive();
if ((lds)&&(lds > ldr)&&((lds - ldr) > ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD))
- goto keep_searching_for_rootservers;
+ goto keep_searching_for_roots;
if ((*sn)->hasActiveDirectPath(now)) {
l = (*sn)->latency();
- if (bestRootserver) {
- if ((l)&&(l < bestRootserverLatency)) {
- bestRootserverLatency = l;
- bestRootserver = *sn;
+ if (bestRoot) {
+ if ((l)&&(l < bestLatency)) {
+ bestLatency = l;
+ bestRoot = *sn;
}
} else {
if (l)
- bestRootserverLatency = l;
- bestRootserver = *sn;
+ bestLatency = l;
+ bestRoot = *sn;
}
}
-keep_searching_for_rootservers:
+keep_searching_for_roots:
++sn;
}
- if (bestRootserver) {
- bestRootserver->use(now);
- return bestRootserver;
+ if (bestRoot) {
+ bestRoot->use(now);
+ return bestRoot;
} else if (strictAvoid)
return SharedPtr<Peer>();
// If we have nothing from above, just pick one without avoidance criteria.
- for(std::vector< SharedPtr<Peer> >::const_iterator sn=_rootserverPeers.begin();sn!=_rootserverPeers.end();++sn) {
+ for(std::vector< SharedPtr<Peer> >::const_iterator sn=_rootPeers.begin();sn!=_rootPeers.end();++sn) {
if ((*sn)->hasActiveDirectPath(now)) {
unsigned int l = (*sn)->latency();
- if (bestRootserver) {
- if ((l)&&(l < bestRootserverLatency)) {
- bestRootserverLatency = l;
- bestRootserver = *sn;
+ if (bestRoot) {
+ if ((l)&&(l < bestLatency)) {
+ bestLatency = l;
+ bestRoot = *sn;
}
} else {
if (l)
- bestRootserverLatency = l;
- bestRootserver = *sn;
+ bestLatency = l;
+ bestRoot = *sn;
}
}
}
}
- if (bestRootserver)
- bestRootserver->use(now);
- return bestRootserver;
+ if (bestRoot)
+ bestRoot->use(now);
+ return bestRoot;
+}
+
+bool Topology::isRoot(const Identity &id) const
+ throw()
+{
+ Mutex::Lock _l(_lock);
+ return (_roots.count(id) != 0);
}
void Topology::clean(uint64_t now)
{
Mutex::Lock _l(_lock);
for(std::map< Address,SharedPtr<Peer> >::iterator p(_activePeers.begin());p!=_activePeers.end();) {
- if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),p->first) == _rootserverAddresses.end())) {
+ if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->first) == _rootAddresses.end())) {
_activePeers.erase(p++);
} else ++p;
}
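
Note: when the local node is itself a root, getBestRoot() picks the root whose address is numerically next after its own, wrapping at the end of the sorted list, so relayed traffic walks around the ring of roots rather than bouncing between the same two. A minimal sketch of just that selection step (simplified; the real code also skips roots without an active direct path):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // rootAddrs is sorted ascending and should contain self (we are a root).
    // Returns the next root address after self, wrapping to the front at the end,
    // or self itself if there is no other root to pick.
    static uint64_t nextRootAfter(const std::vector<uint64_t> &rootAddrs, uint64_t self)
    {
        if (rootAddrs.size() < 2)
            return self;
        std::vector<uint64_t>::const_iterator sna =
            std::find(rootAddrs.begin(), rootAddrs.end(), self);
        if (sna == rootAddrs.end())
            return self; // sanity check -- we should have been in the list
        if (++sna == rootAddrs.end())
            sna = rootAddrs.begin(); // wrap around at end of sorted list
        return *sna;
    }
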
diff --git a/node/Topology.hpp b/node/Topology.hpp
index 8aeae784..c878bcc6 100644
--- a/node/Topology.hpp
+++ b/node/Topology.hpp
@@ -59,21 +59,19 @@ public:
~Topology();
/**
- * Set up rootservers for this network
- *
- * @param sn Rootservers for this network
+ * @param sn Root server identities and addresses
*/
- void setRootservers(const std::map< Identity,std::vector<InetAddress> > &sn);
+ void setRootServers(const std::map< Identity,std::vector<InetAddress> > &sn);
/**
- * Set up rootservers for this network
+ * Set up root servers for this network
*
* This performs no signature verification of any kind. The caller must
* check the signature of the root topology dictionary first.
*
- * @param sn Rootservers dictionary from root-topology
+ * @param sn 'rootservers' key from root-topology Dictionary (deserialized as Dictionary)
*/
- void setRootservers(const Dictionary &sn);
+ void setRootServers(const Dictionary &sn);
/**
* Add a peer to database
@@ -95,65 +93,52 @@ public:
SharedPtr<Peer> getPeer(const Address &zta);
/**
- * @return Vector of peers that are rootservers
- */
- inline std::vector< SharedPtr<Peer> > rootserverPeers() const
- {
- Mutex::Lock _l(_lock);
- return _rootserverPeers;
- }
-
- /**
- * @return Number of rootservers
+ * @return Vector of peers that are root servers
*/
- inline unsigned int numRootservers() const
+ inline std::vector< SharedPtr<Peer> > rootPeers() const
{
Mutex::Lock _l(_lock);
- return (unsigned int)_rootserverPeers.size();
+ return _rootPeers;
}
/**
- * Get the current favorite rootserver
+ * Get the current favorite root server
*
- * @return Rootserver with lowest latency or NULL if none
+ * @return Root server with lowest latency or NULL if none
*/
- inline SharedPtr<Peer> getBestRootserver()
+ inline SharedPtr<Peer> getBestRoot()
{
- return getBestRootserver((const Address *)0,0,false);
+ return getBestRoot((const Address *)0,0,false);
}
/**
- * Get the best rootserver, avoiding rootservers listed in an array
+ * Get the best root server, avoiding root servers listed in an array
*
- * This will get the best rootserver (lowest latency, etc.) but will
- * try to avoid the listed rootservers, only using them if no others
+ * This will get the best root server (lowest latency, etc.) but will
+ * try to avoid the listed root servers, only using them if no others
* are available.
*
* @param avoid Nodes to avoid
* @param avoidCount Number of nodes to avoid
- * @param strictAvoid If false, consider avoided rootservers anyway if no non-avoid rootservers are available
- * @return Rootserver or NULL if none
+ * @param strictAvoid If false, consider avoided root servers anyway if no non-avoid root servers are available
+ * @return Root server or NULL if none available
*/
- SharedPtr<Peer> getBestRootserver(const Address *avoid,unsigned int avoidCount,bool strictAvoid);
+ SharedPtr<Peer> getBestRoot(const Address *avoid,unsigned int avoidCount,bool strictAvoid);
/**
- * @param zta ZeroTier address
- * @return True if this is a designated rootserver
+ * @param id Identity to check
+ * @return True if this is a designated root server
*/
- inline bool isRootserver(const Address &zta) const
- throw()
- {
- Mutex::Lock _l(_lock);
- return (std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),zta) != _rootserverAddresses.end());
- }
+ bool isRoot(const Identity &id) const
+ throw();
/**
- * @return Vector of rootserver addresses
+ * @return Vector of root server addresses
*/
- inline std::vector<Address> rootserverAddresses() const
+ inline std::vector<Address> rootAddresses() const
{
Mutex::Lock _l(_lock);
- return _rootserverAddresses;
+ return _rootAddresses;
}
/**
@@ -206,13 +191,13 @@ private:
const RuntimeEnvironment *RR;
std::map< Address,SharedPtr<Peer> > _activePeers;
- std::map< Identity,std::vector<InetAddress> > _rootservers;
- std::vector< Address > _rootserverAddresses;
- std::vector< SharedPtr<Peer> > _rootserverPeers;
+ std::map< Identity,std::vector<InetAddress> > _roots;
+ std::vector< Address > _rootAddresses;
+ std::vector< SharedPtr<Peer> > _rootPeers;
Mutex _lock;
- bool _amRootserver;
+ bool _amRoot;
};
} // namespace ZeroTier