author     Adam Ierymenko <adam.ierymenko@gmail.com>  2016-09-02 12:34:02 -0700
committer  Adam Ierymenko <adam.ierymenko@gmail.com>  2016-09-02 12:34:02 -0700
commit     4931e449989f74b9518d15bc69521fdbefb313e7 (patch)
tree       ae97532c0318b76bcc708f711a145c62bdd96323 /node
parent     d1101441b3d43ee69c1b661cc5f777a09fd10fca (diff)
Implement "weak pointer" behavior on Topology Path canonicalization hash table.
Diffstat (limited to 'node')
-rw-r--r--  node/AtomicCounter.hpp | 51
-rw-r--r--  node/SharedPtr.hpp     | 28
-rw-r--r--  node/Topology.cpp      | 27
3 files changed, 49 insertions(+), 57 deletions(-)
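In one sentence: Topology keeps a canonicalization Hashtable of SharedPtr<Path> entries, and after this commit an entry is reclaimed as soon as the table itself is the only thing still referencing it, i.e. the table behaves like a collection of weak references. As a loose analogy only (the commit uses ZeroTier's own intrusive SharedPtr, not the standard library), the same sweep pattern looks like this with std::weak_ptr; Key and Object are illustrative stand-ins, not names from the commit:

#include <memory>
#include <string>
#include <unordered_map>

struct Object {};
using Key = std::string;

std::unordered_map<Key, std::weak_ptr<Object>> canonical;

void sweep()
{
    for (auto it = canonical.begin(); it != canonical.end();) {
        if (it->second.expired())      // no shared_ptr outside the table remains
            it = canonical.erase(it);  // reclaim the slot
        else
            ++it;
    }
}

The three files below get the same effect without a separate weak count: SharedPtr grows a probe of its one intrusive reference count, and Topology::clean() uses it to sweep the path table.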
diff --git a/node/AtomicCounter.hpp b/node/AtomicCounter.hpp
index b4993771..a0f29baa 100644
--- a/node/AtomicCounter.hpp
+++ b/node/AtomicCounter.hpp
@@ -20,11 +20,9 @@
#define ZT_ATOMICCOUNTER_HPP
#include "Constants.hpp"
-#include "Mutex.hpp"
#include "NonCopyable.hpp"
-#ifdef __WINDOWS__
-// <atomic> will replace this whole class eventually once it's ubiquitous
+#ifndef __GNUC__
#include <atomic>
#endif
@@ -36,75 +34,34 @@ namespace ZeroTier {
class AtomicCounter : NonCopyable
{
public:
- /**
- * Initialize counter at zero
- */
AtomicCounter()
- throw()
{
_v = 0;
}
- inline operator int() const
- throw()
- {
-#ifdef __GNUC__
- return __sync_or_and_fetch(const_cast <volatile int *>(&_v),0);
-#else
-#ifdef __WINDOWS__
- return (int)_v;
-#else
- _l.lock();
- int v = _v;
- _l.unlock();
- return v;
-#endif
-#endif
- }
-
inline int operator++()
- throw()
{
#ifdef __GNUC__
return __sync_add_and_fetch(&_v,1);
#else
-#ifdef __WINDOWS__
return ++_v;
-#else
- _l.lock();
- int v = ++_v;
- _l.unlock();
- return v;
-#endif
#endif
}
inline int operator--()
- throw()
{
#ifdef __GNUC__
return __sync_sub_and_fetch(&_v,1);
#else
-#ifdef __WINDOWS__
return --_v;
-#else
- _l.lock();
- int v = --_v;
- _l.unlock();
- return v;
-#endif
#endif
}
private:
-#ifdef __WINDOWS__
- std::atomic_int _v;
-#else
+#ifdef __GNUC__
int _v;
-#ifndef __GNUC__
-#warning Neither __WINDOWS__ nor __GNUC__ so AtomicCounter using Mutex
- Mutex _l;
-#endif
+#else
+ std::atomic_int _v;
#endif
};
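After this simplification AtomicCounter has exactly two implementations: the __sync builtins for GCC-compatible compilers and std::atomic_int for everything else; the Mutex fallback and the separate operator int() load path are gone. A minimal standalone sketch of what the non-GCC branch now amounts to (CounterSketch is an illustrative name, not from the file):

#include <atomic>

class CounterSketch {
public:
    CounterSketch() { _v = 0; }

    // Pre-increment/decrement on std::atomic_int are single atomic
    // read-modify-write operations returning the new value, the same
    // contract as __sync_add_and_fetch / __sync_sub_and_fetch.
    int operator++() { return ++_v; }
    int operator--() { return --_v; }

private:
    std::atomic_int _v;
};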
diff --git a/node/SharedPtr.hpp b/node/SharedPtr.hpp
index 3ff5ed18..1dd3b43d 100644
--- a/node/SharedPtr.hpp
+++ b/node/SharedPtr.hpp
@@ -119,15 +119,39 @@ public:
inline T *ptr() const throw() { return _ptr; }
/**
- * Set this pointer to null
+ * Set this pointer to NULL
*/
inline void zero()
{
if (_ptr) {
if (--_ptr->__refCount <= 0)
delete _ptr;
+ _ptr = (T *)0;
+ }
+ }
+
+ /**
+ * Set this pointer to NULL if this is the only pointer holding the object
+ *
+ * @return True if object was deleted and SharedPtr is now NULL (or was already NULL)
+ */
+ inline bool reclaimIfWeak()
+ {
+ if (_ptr) {
+ if (++_ptr->__refCount <= 2) {
+ if (--_ptr->__refCount <= 1) {
+ delete _ptr;
+ _ptr = (T *)0;
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ } else {
+ return true;
}
- _ptr = (T *)0;
}
inline bool operator==(const SharedPtr &sp) const throw() { return (_ptr == sp._ptr); }
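The new reclaimIfWeak() turns the intrusive count itself into the weak test. The increment is a probe: if it lands at 2 or below, the count was at most 1 beforehand, so this SharedPtr is the only remaining holder; the decrement then brings the count back to at most 1 and the object is deleted. A short trace of both outcomes (the counts assume no concurrent mutation):

// Table entry is the sole holder: reclaimed.
//   __refCount == 1
//   probe: 1 -> 2 (<= 2), undo: 2 -> 1 (<= 1)  => delete, return true
//
// Another SharedPtr still holds the object: kept.
//   __refCount == 2 (table entry + one user)
//   probe: 2 -> 3 (> 2)  => return false
//   (as written, the failed probe returns without undoing its increment)

Because the test is expressed entirely as increments and decrements, SharedPtr no longer needs a plain atomic load of the count, which is presumably why operator int() could be dropped from AtomicCounter above.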
diff --git a/node/Topology.cpp b/node/Topology.cpp
index 6e0fe90c..c6d46dc5 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -251,14 +251,25 @@ bool Topology::worldUpdateIfValid(const World &newWorld)
void Topology::clean(uint64_t now)
{
Mutex::Lock _l(_lock);
- Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
- Address *a = (Address *)0;
- SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
- while (i.next(a,p)) {
- if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) {
- _peers.erase(*a);
- } else {
- (*p)->clean(now);
+ {
+ Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
+ Address *a = (Address *)0;
+ SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+ while (i.next(a,p)) {
+ if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) {
+ _peers.erase(*a);
+ } else {
+ (*p)->clean(now);
+ }
+ }
+ }
+ {
+ Hashtable< Path::HashKey,SharedPtr<Path> >::Iterator i(_paths);
+ Path::HashKey *k = (Path::HashKey *)0;
+ SharedPtr<Path> *p = (SharedPtr<Path> *)0;
+ while (i.next(k,p)) {
+ if (p->reclaimIfWeak())
+ _paths.erase(*k);
}
}
}
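clean() now makes two passes under the same lock: the existing peer expiry, followed by a sweep of the path canonicalization table that erases any entry whose only remaining holder is the table itself. A sketch of that sweep over a standard container (Hashtable's iterator API is ZeroTier-specific; std::unordered_map stands in here, and SharedPtrLike is any type exposing reclaimIfWeak() as added above):

#include <unordered_map>

template <typename Key, typename SharedPtrLike>
static void sweepPaths(std::unordered_map<Key, SharedPtrLike> &paths)
{
    for (auto it = paths.begin(); it != paths.end();) {
        if (it->second.reclaimIfWeak())   // the table was the last holder
            it = paths.erase(it);         // drop the canonical entry
        else
            ++it;                         // still referenced elsewhere
    }
}

In the hunk itself p is a SharedPtr<Path> *, so the call is p->reclaimIfWeak() on the pointed-to SharedPtr, and erasure during iteration goes through Hashtable's own erase(), exactly as the peer loop above it already does.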