summary refs log tree commit diff
path: root/node/Switch.hpp
diff options
context:
space:
mode:
author	Adam Ierymenko <adam.ierymenko@gmail.com>	2015-09-04 14:44:22 -0700
committer	Adam Ierymenko <adam.ierymenko@gmail.com>	2015-09-04 14:44:22 -0700
commit	0ab3e49be91ed7a8723c8b58750aef77c01e8d08 (patch)
tree	c8fe1e6b1b2e7c8a41414c6cf1a91af1e638f19d /node/Switch.hpp
parent	f116c4b9c05e88130cb7f2970b616f7930c30c6c (diff)
download	infinitytier-0ab3e49be91ed7a8723c8b58750aef77c01e8d08.tar.gz
download	infinitytier-0ab3e49be91ed7a8723c8b58750aef77c01e8d08.zip
Starting in on Switch... kill map in defrag queue, which will probably improve performance pretty decently under high load with lots of peers.
Diffstat (limited to 'node/Switch.hpp')
-rw-r--r--	node/Switch.hpp	4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/node/Switch.hpp b/node/Switch.hpp
index ac85606e..a1b36014 100644
--- a/node/Switch.hpp
+++ b/node/Switch.hpp
@@ -45,6 +45,7 @@
#include "Network.hpp"
#include "SharedPtr.hpp"
#include "IncomingPacket.hpp"
+#include "Hashtable.hpp"
/* Ethernet frame types that might be relevant to us */
#define ZT_ETHERTYPE_IPV4 0x0800
@@ -199,13 +200,14 @@ private:
// Packet defragmentation queue -- comes before RX queue in path
struct DefragQueueEntry
{
+ DefragQueueEntry() : creationTime(0),totalFragments(0),haveFragments(0) {}
uint64_t creationTime;
SharedPtr<IncomingPacket> frag0;
Packet::Fragment frags[ZT_MAX_PACKET_FRAGMENTS - 1];
unsigned int totalFragments; // 0 if only frag0 received, waiting for frags
uint32_t haveFragments; // bit mask, LSB to MSB
};
- std::map< uint64_t,DefragQueueEntry > _defragQueue;
+ Hashtable< uint64_t,DefragQueueEntry > _defragQueue;
Mutex _defragQueue_m;
// ZeroTier-layer RX queue of incoming packets in the process of being decoded