author    Grant Limberg <glimberg@gmail.com>  2015-11-02 18:30:54 -0800
committer Grant Limberg <glimberg@gmail.com>  2015-11-02 18:30:54 -0800
commit    a19e82fcbc2203f0d84a0e744d344e0796bc0c33 (patch)
tree      2f8cfc56a03cf6e614991c83a309b5fce5a48e48 /tests
parent    0ffcfa307e537347f181e7b22047f252d0cdc414 (diff)
parent    4e9d4304761f93a1764d3ec2d2b0c38140decad8 (diff)
download  infinitytier-a19e82fcbc2203f0d84a0e744d344e0796bc0c33.tar.gz
          infinitytier-a19e82fcbc2203f0d84a0e744d344e0796bc0c33.zip
Merge branch 'edge' into windows-ui
Diffstat (limited to 'tests')
-rw-r--r--  tests/http/Dockerfile         |  24
-rw-r--r--  tests/http/README.md          |  12
-rw-r--r--  tests/http/agent.js           | 277
-rw-r--r--  tests/http/big-test-hosts     |   2
-rwxr-xr-x  tests/http/big-test-kill.sh   |  18
-rwxr-xr-x  tests/http/big-test-ready.sh  |  30
-rwxr-xr-x  tests/http/big-test-start.sh  |  30
-rwxr-xr-x  tests/http/docker-main.sh     |  14
-rw-r--r--  tests/http/nodesource-el.repo |   6
-rw-r--r--  tests/http/package.json       |  16
-rw-r--r--  tests/http/server.js          |  44
11 files changed, 473 insertions, 0 deletions
diff --git a/tests/http/Dockerfile b/tests/http/Dockerfile
new file mode 100644
index 00000000..e19b3fee
--- /dev/null
+++ b/tests/http/Dockerfile
@@ -0,0 +1,24 @@
+FROM centos:latest
+
+MAINTAINER https://www.zerotier.com/
+
+EXPOSE 9993/udp
+
+ADD nodesource-el.repo /etc/yum.repos.d/nodesource-el.repo
+RUN yum -y update && yum install -y nodejs && yum clean all
+
+RUN mkdir -p /var/lib/zerotier-one
+RUN mkdir -p /var/lib/zerotier-one/networks.d
+RUN touch /var/lib/zerotier-one/networks.d/ffffffffffffffff.conf
+
+ADD package.json /
+RUN npm install
+
+ADD zerotier-one /
+RUN chmod a+x /zerotier-one
+
+ADD agent.js /
+ADD docker-main.sh /
+RUN chmod a+x /docker-main.sh
+
+CMD ["./docker-main.sh"]
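The Dockerfile above expects its build context (tests/http) to contain a locally built zerotier-one binary alongside agent.js, package.json, docker-main.sh and nodesource-el.repo, and it pre-creates networks.d/ffffffffffffffff.conf so the node joins that placeholder network ID at startup (change the filename to your own network ID, as the README below notes). A minimal build sketch, assuming the binary has already been built at the top of the source tree and using the zerotier/http-test tag expected by the test scripts:

    cp zerotier-one tests/http/        # assumed: binary built at the top of the source tree
    cd tests/http
    docker build -t zerotier/http-test .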
diff --git a/tests/http/README.md b/tests/http/README.md
new file mode 100644
index 00000000..23a95605
--- /dev/null
+++ b/tests/http/README.md
@@ -0,0 +1,12 @@
+HTTP one-to-all test
+======
+
+*This is really internal-use code. You're free to test it out, but expect to do some editing/tweaking to make it work. We used this to run some massive-scale tests of our new geo-cluster-based root server infrastructure prior to taking it live.*
+
+Before using this code, edit agent.js and change SERVER_HOST to the IP address of the machine where you will run server.js. This should typically be an open Internet IP, so that result reporting does not depend on the network being tested. Also note that this code implements no security of any kind; it's designed for one-off tests run over a short period of time, not for anything that runs permanently. If you want to build containers, you will also want to edit the Dockerfile and change the network ID to the network you want to run tests over.
+
+This code can be deployed across a large number of VMs or containers to test and benchmark HTTP traffic within a virtual network at scale. The agent acts as a server and can query other agents, while the server collects agent data and tells agents about each other. It's designed to use RFC4193-based ZeroTier IPv6 addresses within the cluster, which allows the easy provisioning of a large cluster without IP conflicts.
+
+The Dockerfile builds an image that launches the agent. The image must be run with "docker run --device=/dev/net/tun --privileged" to permit it to open a tun/tap device within the container. (Unfortunately CAP_NET_ADMIN may not work due to a bug in Docker and/or Linux.) You can run a bunch of containers with a command like:
+
+ for ((n=0;n<10;n++)); do docker run --device=/dev/net/tun --privileged -d zerotier/http-test; done
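The RFC4193 addressing described above is what lets each agent derive a stable 32-hex-digit ID from its own ZeroTier IPv6 address. A small sketch of that derivation in the same style as agent.js; the checks on bytes 0, 9 and 10 mirror the code, while the network ID / node ID split shown in the comment follows ZeroTier's usual RFC4193 layout and is an assumption here:

    // Assumed layout: 0xfd | 8-byte network ID | 0x99 0x93 | 5-byte node ID (16 bytes total).
    // agent.js itself only verifies bytes 0, 9 and 10.
    var ipaddr = require('ipaddr.js');

    function toAgentId(ipv6) {
      var b = ipaddr.parse(ipv6).toByteArray();
      if (b.length !== 16 || b[0] !== 0xfd || b[9] !== 0x99 || b[10] !== 0x93)
        return null; // not a ZeroTier RFC4193 address
      // Hex-encode all 16 bytes, exactly as agent.js does when building thisAgentId
      return b.map(function(x) { return ('0' + x.toString(16)).slice(-2); }).join('');
    }

    // Hypothetical address; prints the 32-hex-digit agent ID used in URLs like /<agentId>
    console.log(toAgentId('fd80:56c2:e21c:0000:0199:9300:0000:0001'));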
diff --git a/tests/http/agent.js b/tests/http/agent.js
new file mode 100644
index 00000000..bc7c475e
--- /dev/null
+++ b/tests/http/agent.js
@@ -0,0 +1,277 @@
+// ZeroTier distributed HTTP test agent
+
+// ---------------------------------------------------------------------------
+// Customizable parameters:
+
+// Maximum interval between test attempts
+var TEST_INTERVAL_MAX = 60000;
+
+// Test timeout in ms
+var TEST_TIMEOUT = 30000;
+
+// Which server should I contact to register and to query the list of other test agents?
+var SERVER_HOST = '104.238.141.145';
+var SERVER_PORT = 18080;
+
+// Which port should agents use for their HTTP?
+var AGENT_PORT = 18888;
+
+// Payload size in bytes
+var PAYLOAD_SIZE = 10000;
+
+// ---------------------------------------------------------------------------
+
+var ipaddr = require('ipaddr.js');
+var os = require('os');
+var http = require('http');
+var async = require('async');
+
+var express = require('express');
+var app = express();
+
+// Find our ZeroTier-assigned RFC4193 IPv6 address
+var thisAgentId = null;
+var interfaces = os.networkInterfaces();
+if (!interfaces) {
+ console.error('FATAL: os.networkInterfaces() failed.');
+ process.exit(1);
+}
+for(var ifname in interfaces) {
+ var ifaddrs = interfaces[ifname];
+ if (Array.isArray(ifaddrs)) {
+ for(var i=0;i<ifaddrs.length;++i) {
+ if (ifaddrs[i].family == 'IPv6') {
+ try {
+ var ipbytes = ipaddr.parse(ifaddrs[i].address).toByteArray();
+ if ((ipbytes.length === 16)&&(ipbytes[0] == 0xfd)&&(ipbytes[9] == 0x99)&&(ipbytes[10] == 0x93)) {
+ thisAgentId = '';
+ for(var j=0;j<16;++j) {
+ var tmp = ipbytes[j].toString(16);
+ if (tmp.length === 1)
+ thisAgentId += '0';
+ thisAgentId += tmp;
+ }
+ }
+ } catch (e) {
+ console.error(e);
+ }
+ }
+ }
+ }
+}
+if (thisAgentId === null) {
+ console.error('FATAL: no ZeroTier-assigned RFC4193 IPv6 addresses found on any local interface!');
+ process.exit(1);
+}
+
+//console.log(thisAgentId);
+
+// Create a random (and therefore not very compressible) payload
+var payload = new Buffer(PAYLOAD_SIZE);
+for(var xx=0;xx<PAYLOAD_SIZE;++xx) {
+ payload.writeUInt8(Math.round(Math.random() * 255.0),xx);
+}
+
+function agentIdToIp(agentId)
+{
+ var ip = '';
+ ip += agentId.substr(0,4);
+ ip += ':';
+ ip += agentId.substr(4,4);
+ ip += ':';
+ ip += agentId.substr(8,4);
+ ip += ':';
+ ip += agentId.substr(12,4);
+ ip += ':';
+ ip += agentId.substr(16,4);
+ ip += ':';
+ ip += agentId.substr(20,4);
+ ip += ':';
+ ip += agentId.substr(24,4);
+ ip += ':';
+ ip += agentId.substr(28,4);
+ return ip;
+};
+
+var lastTestResult = null;
+var allOtherAgents = [];
+
+function doTest()
+{
+ var submit = http.request({
+ host: SERVER_HOST,
+ port: SERVER_PORT,
+ path: '/'+thisAgentId,
+ method: 'POST'
+ },function(res) {
+ var body = '';
+ res.on('data',function(chunk) { body += chunk.toString(); });
+ res.on('end',function() {
+
+ if (body) {
+ try {
+ var peers = JSON.parse(body);
+ if (Array.isArray(peers))
+ allOtherAgents = peers;
+ } catch (e) {}
+ }
+
+ if (allOtherAgents.length > 1) {
+
+ var target = allOtherAgents[Math.floor(Math.random() * allOtherAgents.length)];
+ while (target === thisAgentId)
+ target = allOtherAgents[Math.floor(Math.random() * allOtherAgents.length)];
+
+ var testRequest = null;
+ var timeoutId = null;
+ timeoutId = setTimeout(function() {
+ if (testRequest !== null)
+ testRequest.abort();
+ timeoutId = null;
+ },TEST_TIMEOUT);
+ var startTime = Date.now();
+
+ testRequest = http.get({
+ host: agentIdToIp(target),
+ port: AGENT_PORT,
+ path: '/'
+ },function(res) {
+ var bytes = 0;
+ res.on('data',function(chunk) { bytes += chunk.length; });
+ res.on('end',function() {
+ lastTestResult = {
+ source: thisAgentId,
+ target: target,
+ time: (Date.now() - startTime),
+ bytes: bytes,
+ timedOut: (timeoutId === null),
+ error: null
+ };
+ if (timeoutId !== null)
+ clearTimeout(timeoutId);
+ return setTimeout(doTest,Math.round(Math.random() * TEST_INTERVAL_MAX) + 1);
+ });
+ }).on('error',function(e) {
+ lastTestResult = {
+ source: thisAgentId,
+ target: target,
+ time: (Date.now() - startTime),
+ bytes: 0,
+ timedOut: (timeoutId === null),
+ error: e.toString()
+ };
+ if (timeoutId !== null)
+ clearTimeout(timeoutId);
+ return setTimeout(doTest,Math.round(Math.random() * TEST_INTERVAL_MAX) + 1);
+ });
+
+ } else {
+ return setTimeout(doTest,1000);
+ }
+
+ });
+ }).on('error',function(e) {
+ console.log('POST failed: '+e.toString());
+ return setTimeout(doTest,1000);
+ });
+ if (lastTestResult !== null) {
+ submit.write(JSON.stringify(lastTestResult));
+ lastTestResult = null;
+ }
+ submit.end();
+};
+
+/*
+function performTestOnAllPeers(peers,callback)
+{
+ var allResults = {};
+ var allRequests = [];
+ var timedOut = false;
+ var endOfTestTimer = setTimeout(function() {
+ timedOut = true;
+ for(var x=0;x<allRequests.length;++x)
+ allRequests[x].abort();
+ },TEST_DURATION);
+
+ async.each(peers,function(peer,next) {
+ if (timedOut)
+ return next(null);
+ if (peer.length !== 32)
+ return next(null);
+
+ var connectionStartTime = Date.now();
+ allResults[peer] = {
+ start: connectionStartTime,
+ end: 0,
+ error: null,
+ timedOut: false,
+ bytes: 0
+ };
+
+ allRequests.push(http.get({
+ host: agentIdToIp(peer),
+ port: AGENT_PORT,
+ path: '/'
+ },function(res) {
+ var bytes = 0;
+ res.on('data',function(chunk) {
+ bytes += chunk.length;
+ });
+ res.on('end',function() {
+ allResults[peer] = {
+ start: connectionStartTime,
+ end: Date.now(),
+ error: null,
+ timedOut: timedOut,
+ bytes: bytes
+ };
+ return next(null);
+ });
+ }).on('error',function(e) {
+ allResults[peer] = {
+ start: connectionStartTime,
+ end: Date.now(),
+ error: e.toString(),
+ timedOut: timedOut,
+ bytes: 0
+ };
+ return next(null);
+ }));
+ },function(err) {
+ if (!timedOut)
+ clearTimeout(endOfTestTimer);
+ return callback(allResults);
+ });
+};
+
+function doTestsAndReport()
+{
+ registerAndGetPeers(function(err,peers) {
+ if (err) {
+ console.error('WARNING: skipping test: unable to contact or query server: '+err.toString());
+ } else {
+ performTestOnAllPeers(peers,function(results) {
+ var submit = http.request({
+ host: SERVER_HOST,
+ port: SERVER_PORT,
+ path: '/'+thisAgentId,
+ method: 'POST'
+ },function(res) {
+ }).on('error',function(e) {
+      console.error('WARNING: unable to submit results to server: '+e.toString());
+ });
+ submit.write(JSON.stringify(results));
+ submit.end();
+ });
+ }
+ });
+};
+*/
+
+// Agents just serve up a test payload
+app.get('/',function(req,res) { return res.status(200).send(payload); });
+
+var expressServer = app.listen(AGENT_PORT,function () {
+ // Start timeout-based loop
+ doTest();
+});
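Each completed probe fills in lastTestResult as built above, and the agent POSTs it to the server at the start of its next cycle. A representative record, with hypothetical values and field meanings taken from the code:

    {
      source:   'fd8056c2e21c00000199930000000001', // hypothetical reporting agent ID
      target:   'fd8056c2e21c00000199930000000002', // hypothetical agent whose payload was fetched
      time:     842,    // ms from request start to response end
      bytes:    10000,  // equals PAYLOAD_SIZE on a successful fetch
      timedOut: false,
      error:    null
    }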
diff --git a/tests/http/big-test-hosts b/tests/http/big-test-hosts
new file mode 100644
index 00000000..93b6f23f
--- /dev/null
+++ b/tests/http/big-test-hosts
@@ -0,0 +1,2 @@
+root@104.156.246.48
+root@104.156.252.136
diff --git a/tests/http/big-test-kill.sh b/tests/http/big-test-kill.sh
new file mode 100755
index 00000000..59f36788
--- /dev/null
+++ b/tests/http/big-test-kill.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Edit as needed -- note that >1000 per host is likely problematic due to Linux kernel limits
+NUM_CONTAINERS=100
+CONTAINER_IMAGE=zerotier/http-test
+
+#
+# This script uses pssh to kill and remove all containers on every Docker
+# host listed in big-test-hosts -- note that this removes ALL containers
+# on those hosts, not just test containers. Use it to clean up after a
+# very large scale test.
+#
+
+export PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin:/sbin
+
+pssh -h big-test-hosts -i -t 0 -p 256 "docker ps -aq | xargs -r docker rm -f"
+
+exit 0
diff --git a/tests/http/big-test-ready.sh b/tests/http/big-test-ready.sh
new file mode 100755
index 00000000..aa540bba
--- /dev/null
+++ b/tests/http/big-test-ready.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Edit as needed -- note that >1000 per host is likely problematic due to Linux kernel limits
+NUM_CONTAINERS=100
+CONTAINER_IMAGE=zerotier/http-test
+
+#
+# This script uses pssh to pull the latest CONTAINER_IMAGE on every Docker
+# host listed in big-test-hosts, preparing those hosts for a very large
+# scale test. Run it before big-test-start.sh so every host has the
+# current image.
+#
+
+export PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin:/sbin
+
+# Kill and clean up old test containers if any -- note that this kills all containers on the system!
+#docker ps -q | xargs -n 1 docker kill
+#docker ps -aq | xargs -n 1 docker rm
+
+# Pull latest if needed -- change this to your image name and/or where to pull it from
+#docker pull $CONTAINER_IMAGE
+
+# Run NUM_CONTAINERS
+#for ((n=0;n<$NUM_CONTAINERS;n++)); do
+# docker run --device=/dev/net/tun --privileged -d $CONTAINER_IMAGE
+#done
+
+pssh -h big-test-hosts -i -t 0 -p 256 "docker pull $CONTAINER_IMAGE"
+
+exit 0
diff --git a/tests/http/big-test-start.sh b/tests/http/big-test-start.sh
new file mode 100755
index 00000000..f300ac61
--- /dev/null
+++ b/tests/http/big-test-start.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Edit as needed -- note that >1000 per host is likely problematic due to Linux kernel limits
+NUM_CONTAINERS=50
+CONTAINER_IMAGE=zerotier/http-test
+
+#
+# This script uses pssh to launch NUM_CONTAINERS instances of
+# CONTAINER_IMAGE on every Docker host listed in big-test-hosts, which
+# makes it possible to run very large scale tests across many hosts
+# at once.
+#
+
+export PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin:/sbin
+
+# Kill and clean up old test containers if any -- note that this kills all containers on the system!
+#docker ps -q | xargs -n 1 docker kill
+#docker ps -aq | xargs -n 1 docker rm
+
+# Pull latest if needed -- change this to your image name and/or where to pull it from
+#docker pull $CONTAINER_IMAGE
+
+# Run NUM_CONTAINERS
+#for ((n=0;n<$NUM_CONTAINERS;n++)); do
+# docker run --device=/dev/net/tun --privileged -d $CONTAINER_IMAGE
+#done
+
+pssh -h big-test-hosts -o big-test-out -t 0 -p 256 "for ((n=0;n<$NUM_CONTAINERS;n++)); do docker run --device=/dev/net/tun --privileged -d $CONTAINER_IMAGE; sleep 0.25; done"
+
+exit 0
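Taken together, the three big-test-* scripts drive a multi-host run from a single control machine: big-test-ready.sh pre-pulls the image on every host listed in big-test-hosts, big-test-start.sh launches NUM_CONTAINERS containers per host, and big-test-kill.sh removes them afterwards. A typical sequence, assuming pssh is installed and passwordless SSH to the hosts in big-test-hosts works:

    ./big-test-ready.sh    # pull zerotier/http-test on every host
    ./big-test-start.sh    # start NUM_CONTAINERS containers on each host
    # ...let the test run while server.js collects results...
    ./big-test-kill.sh     # remove all containers on every host when done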
diff --git a/tests/http/docker-main.sh b/tests/http/docker-main.sh
new file mode 100755
index 00000000..f9e11de5
--- /dev/null
+++ b/tests/http/docker-main.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+export PATH=/bin:/usr/bin:/usr/local/bin:/sbin:/usr/sbin
+
+/zerotier-one -d >>zerotier-one.out 2>&1
+
+while [ ! -d "/proc/sys/net/ipv6/conf/zt0" ]; do
+ sleep 0.25
+done
+
+sleep 2
+
+exec node --harmony /agent.js >>agent.out 2>&1
+#exec node --harmony /agent.js
diff --git a/tests/http/nodesource-el.repo b/tests/http/nodesource-el.repo
new file mode 100644
index 00000000..b785d3d0
--- /dev/null
+++ b/tests/http/nodesource-el.repo
@@ -0,0 +1,6 @@
+[nodesource]
+name=Node.js Packages for Enterprise Linux 7 - $basearch
+baseurl=https://rpm.nodesource.com/pub_4.x/el/7/$basearch
+failovermethod=priority
+enabled=1
+gpgcheck=0
diff --git a/tests/http/package.json b/tests/http/package.json
new file mode 100644
index 00000000..173a6f99
--- /dev/null
+++ b/tests/http/package.json
@@ -0,0 +1,16 @@
+{
+ "name": "zerotier-test-http",
+ "version": "1.0.0",
+ "description": "ZeroTier in-network HTTP test",
+ "main": "agent.js",
+ "scripts": {
+ "test": "echo \"Error: no test specified\" && exit 1"
+ },
+ "author": "ZeroTier, Inc.",
+ "license": "GPL-3.0",
+ "dependencies": {
+ "async": "^1.5.0",
+ "express": "^4.13.3",
+ "ipaddr.js": "^1.0.3"
+ }
+}
diff --git a/tests/http/server.js b/tests/http/server.js
new file mode 100644
index 00000000..57109392
--- /dev/null
+++ b/tests/http/server.js
@@ -0,0 +1,44 @@
+// ZeroTier distributed HTTP test coordinator and result-reporting server
+
+// ---------------------------------------------------------------------------
+// Customizable parameters:
+
+var SERVER_PORT = 18080;
+
+// ---------------------------------------------------------------------------
+
+var fs = require('fs');
+
+var express = require('express');
+var app = express();
+
+app.use(function(req,res,next) {
+ req.rawBody = '';
+ req.on('data', function(chunk) { req.rawBody += chunk.toString(); });
+ req.on('end', function() { return next(); });
+});
+
+var knownAgents = {};
+
+app.post('/:agentId',function(req,res) {
+ var agentId = req.params.agentId;
+ if ((!agentId)||(agentId.length !== 32))
+ return res.status(404).send('');
+
+ if (req.rawBody) {
+ var receiveTime = Date.now();
+ var resultData = null;
+ try {
+ resultData = JSON.parse(req.rawBody);
+ console.log(resultData.source+','+resultData.target+','+resultData.time+','+resultData.bytes+','+resultData.timedOut+',"'+((resultData.error) ? resultData.error : '')+'"');
+ } catch (e) {}
+ }
+
+ knownAgents[agentId] = Date.now();
+ return res.status(200).send(JSON.stringify(Object.keys(knownAgents)));
+});
+
+var expressServer = app.listen(SERVER_PORT,function () {
+ console.log('LISTENING ON '+SERVER_PORT);
+ console.log('');
+});
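server.js keeps only an in-memory map of known agents and prints one CSV-style line per reported result (source, target, time in ms, bytes, timedOut, error) to stdout, so capturing results is just a matter of redirecting its output. A minimal way to run it on the reporting host, assuming Node.js and the package.json dependencies are installed:

    npm install                        # installs express and the other dependencies
    node server.js | tee results.csv   # the LISTENING banner also goes to stdout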