summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-x.gitignore2
-rw-r--r--make-linux.mk10
-rw-r--r--netcon/README.md53
-rwxr-xr-xnetcon/misc/httpdbin0 -> 24426 bytes
-rw-r--r--netcon/misc/httpd.c490
5 files changed, 522 insertions, 33 deletions
diff --git a/.gitignore b/.gitignore
index 12d6e0d5..22e5a81d 100755
--- a/.gitignore
+++ b/.gitignore
@@ -33,6 +33,8 @@ Thumbs.db
/examples/docker/test-*.env
/world/mkworld
/world/*.c25519
+/tiny-httpd
+/netcon/tiny-httpd
# Miscellaneous temporaries, build files, etc.
*.log
diff --git a/make-linux.mk b/make-linux.mk
index 47a860bc..5e3658a0 100644
--- a/make-linux.mk
+++ b/make-linux.mk
@@ -95,13 +95,15 @@ one: $(OBJS) service/OneService.o one.o osdep/LinuxEthernetTap.o
ln -sf zerotier-one zerotier-idtool
ln -sf zerotier-one zerotier-cli
-netcon: $(OBJS) one.o
+netcon: $(OBJS)
# Need to selectively rebuild one.cpp and OneService.cpp with ZT_SERVICE_NETCON and ZT_ONE_NO_ROOT_CHECK defined, and also NetconEthernetTap
- $(CXX) $(CXXFLAGS) $(LDFLAGS) -DZT_SERVICE_NETCON -DZT_ONE_NO_ROOT_CHECK -o zerotier-netcon-service $(OBJS) one.o $(LDLIBS) -ldl
+ $(CXX) $(CXXFLAGS) $(LDFLAGS) -DZT_SERVICE_NETCON -DZT_ONE_NO_ROOT_CHECK -Iext/lwip/src/include -Iext/lwip/src/include/ipv4 -Iext/lwip/src/include/ipv6 -o zerotier-netcon-service $(OBJS) service/OneService.cpp netcon/NetconEthernetTap.cpp one.cpp $(LDLIBS) -ldl
# Build netcon/liblwip.so which must be placed in ZT home for zerotier-netcon-service to work
cd netcon ; make -f make-liblwip.mk
# Use gcc not clang to build standalone intercept library since gcc is typically used for libc and we want to ensure maximal ABI compatibility
- cd netcon ; gcc -g -O2 -Wall -std=c99 -fPIC -DVERBOSE -DDEBUG_RPC -DCHECKS -D_GNU_SOURCE -DNETCON_INTERCEPT -I. -nostdlib -shared -o ../libzerotierintercept.so Intercept.c
+ cd netcon ; gcc -g -O2 -Wall -std=c99 -fPIC -DVERBOSE -DDEBUG_RPC -DCHECKS -D_GNU_SOURCE -DNETCON_INTERCEPT -I. -nostdlib -shared -o ../libzerotierintercept.so Intercept.c -ldl
+ ln -sf zerotier-netcon-service zerotier-cli
+ ln -sf zerotier-netcon-service zerotier-idtool
install-intercept:
cp libzerotierintercept.so /lib/libzerotierintercept.so
@@ -121,7 +123,7 @@ installer: one FORCE
./ext/installfiles/linux/buildinstaller.sh
clean:
- rm -rf *.so *.o node/*.o controller/*.o osdep/*.o service/*.o ext/http-parser/*.o ext/lz4/*.o ext/json-parser/*.o $(OBJS) zerotier-one zerotier-idtool zerotier-cli zerotier-selftest build-* ZeroTierOneInstaller-* *.deb *.rpm
+ rm -rf *.so *.o node/*.o controller/*.o osdep/*.o service/*.o ext/http-parser/*.o ext/lz4/*.o ext/json-parser/*.o $(OBJS) zerotier-one zerotier-idtool zerotier-cli zerotier-selftest zerotier-netcon-service build-* ZeroTierOneInstaller-* *.deb *.rpm
# Remove files from all the funny places we put them for tests
find netcon -type f \( -name '*.o' -o -name '*.so' -o -name '*.1.0' -o -name 'zerotier-one' -o -name 'zerotier-cli' -o -name 'zerotier-netcon-service' \) -delete
find netcon/docker-test -name "zerotier-intercept" -type f -delete
diff --git a/netcon/README.md b/netcon/README.md
index e20d8a92..47288a07 100644
--- a/netcon/README.md
+++ b/netcon/README.md
@@ -21,7 +21,7 @@ The current version of Network Containers **only supports TCP over IPv4**. There
The virtual TCP/IP stack will respond to *incoming* ICMP ECHO requests, which means that you can ping it from another host on the same ZeroTier virtual network. This is useful for testing.
-**Network Containers are currently all or nothing.** If engaged, the intercept library intercepts all network I/O calls and redirects them through the new path. A network-containerized application cannot communicate over the regular network connection of its host or container or with anything else except other hosts on its ZeroTier virtual LAN. Support for optional "fall-through" to the host IP stack for outgoing connections outside the virtual network and for gateway routes within the virtual network is also planned for the near future.
+**Network Containers are currently all or nothing.** If engaged, the intercept library intercepts all network I/O calls and redirects them through the new path. A network-containerized application cannot communicate over the regular network connection of its host or container or with anything else except other hosts on its ZeroTier virtual LAN. Support for optional "fall-through" to the host IP stack for outgoing connections outside the virtual network and for gateway routes within the virtual network is planned. (It will be optional since in some cases this isolation might be considered a nice security feature.)
#### Compatibility Test Results
@@ -38,26 +38,25 @@ It is *likely* to work with other things but there are no guarantees. UDP, ICMP/
Network Containers are currently only for Linux. To build the network container host, IP stack, and intercept library, from the base of the ZeroTier One tree run:
make netcon
- make install-intercept
This will build a binary called *zerotier-netcon-service* and a library called *libzerotierintercept.so*. It will also build the IP stack as *netcon/liblwip.so*.
-The *zerotier-netcon-service* binary is almost the same as a regular ZeroTier One build except instead of creating virtual network ports using Linux's */dev/net/tun* interface, it creates instances of a user-space TCP/IP stack for each virtual network and provides RPC access to this stack via a Unix domain socket called */tmp/.ztnc_##NETWORK_ID##*. The latter is a library that can be loaded with the Linux *LD\_PRELOAD* environment variable or by placement into */etc/ld.so.preload* on a Linux system or container.
+The *zerotier-netcon-service* binary is almost the same as a regular ZeroTier One build except instead of creating virtual network ports using Linux's */dev/net/tun* interface, it creates instances of a user-space TCP/IP stack for each virtual network and provides RPC access to this stack via a Unix domain socket called */tmp/.ztnc_##NETWORK_ID##*. The latter is a library that can be loaded with the Linux *LD\_PRELOAD* environment variable or by placement into */etc/ld.so.preload* on a Linux system or container. Additional magic involving nameless Unix domain socket pairs and interprocess socket handoff is used to emulate TCP sockets with extremely low overhead and in a way that's compatible with select, poll, epoll, and other I/O event mechanisms.
The intercept library does nothing unless the *ZT\_NC\_NWID* environment variable is set. If on program launch (or fork) it detects the presence of this environment variable, it will attempt to connect to a running *zerotier-netcon-service* at the aforementioned Unix domain socket location and will intercept calls to the Posix sockets API and redirect network traffic through the virtual network.
-Unlike *zerotier-one*, *zerotier-netcon-service* does not need to be run with root privileges and will not modify the host's network configuration in any way.
+Unlike *zerotier-one*, *zerotier-netcon-service* does not need to be run with root privileges and will not modify the host's network configuration in any way. It can be run alongside *zerotier-one* on the same host with no ill effect, though this can be confusing since you'll have to remember the difference between "real host interfaces" and network containerized endpoints.
# Starting the Network Containers Service
You don't need Docker or any other container engine to try Network Containers. A simple test can be performed in user space in your own home directory.
-First, build the netcon service and intercept library as describe above. Then create a directory to act as a temporary ZeroTier home for your test netcon service instance. You'll need to move the liblwip.so binary that was built with *make netcon* into there, since the service must be able to find it there and load it.
+First, build the netcon service and intercept library as described above. Then create a directory to act as a temporary ZeroTier home for your test netcon service instance. You'll need to move the liblwip.so binary that was built with *make netcon* into there, since the service must be able to find it there and load it.
mkdir /tmp/netcon-test-home
cp -f ./netcon/liblwip.so /tmp/netcon-test-home
-Now you can run the service (no sudo needed):
+Now you can run the service (no sudo needed, and *-d* tells it to run in the background and can be omitted if you want it not to daemonize):
./zerotier-netcon-service -d /tmp/netcon-test-home
@@ -65,9 +64,7 @@ As with ZeroTier One in its normal incarnation, you'll need to join a network:
./zerotier-cli -D/tmp/netcon-test-home join 8056c2e21c000001
-(If you don't want to use [Earth](https://www.zerotier.com/public.shtml) for this test, replace its network ID with one of your own.)
-
-Note the *-D* option. This tells *zerotier-cli* not to look in /var/lib/zerotier-one for information about a running instance of the ZeroTier system service but instead to look in /tmp/netcon-test-home. That's because *even if you do happen to be running ZeroTier on your local machine, what you are doing now has no impact on it and does not involve it in any way.* So if you have *zerotier-one* running, forget about it. It doesn't matter for this test.
+If you don't want to use [Earth](https://www.zerotier.com/public.shtml) for this test, replace 8056c2e21c000001 with a different network ID. The *-D* option tells *zerotier-cli* not to look in /var/lib/zerotier-one for information about a running instance of the ZeroTier system service but instead to look in /tmp/netcon-test-home. That's because *even if you do happen to be running ZeroTier on your local machine, what you are doing now has no impact on it and does not involve it in any way.* So if you have *zerotier-one* running, forget about it. It doesn't matter for this test.
Now type:
@@ -75,39 +72,37 @@ Now type:
Try it a few times until you see that you've successfully joined the network and have an IP address.
-You'll also want to have ZeroTier One (the normal build, not network containers) running somewhere else, such as on another Linux system or VM. Technically you could run it on the *same* Linux system and it wouldn't matter at all, but many people find this intensely confusing until they grasp just what exactly is happening here.
+Now you will want to have ZeroTier One (the normal *zerotier-one* build, not network containers) running somewhere else, such as on another Linux system or VM. Technically you could run it on the *same* Linux system and it wouldn't matter at all, but many people find this intensely confusing until they grasp just what exactly is happening here.
On the other Linux system, join the same network if you haven't already (8056c2e21c000001 if you're using Earth) and wait until you have an IP address. Then try pinging the IP address your netcon instance received. You should see ping replies.
-Back on the host that's running *zerotier-netcon-service*, type *ip list all* or *ifconfig* (ifconfig is technically deprecated so some Linux systems might not have it). Notice that the IP address of the network containers endpoint is not listed and no network device is listed for it either. That's because as far as the Linux kernel is concerned it doesn't exist.
+Back on the host that's running *zerotier-netcon-service*, type *ip addr list* or *ifconfig* (ifconfig is technically deprecated so some Linux systems might not have it). Notice that the IP address of the network containers endpoint is not listed and no network device is listed for it either. That's because as far as the Linux kernel is concerned it doesn't exist.
What are you pinging? What is happening here?
The *zerotier-netcon-service* binary has joined a *virtual* network and is running a *virtual* TCP/IP stack entirely in user space. As far as your system is concerned it's just another program exchanging UDP packets with a few other hosts on the Internet and nothing out of the ordinary is happening at all. That's why you never had to type *sudo*. It didn't change anything on the host.
-Now you can run a containerized application. Open another terminal window (since you might not want these environment variables to stick elsewhere) on the same machine the netcon service is running on and install something like *httpd* (a simple http server) to act as a test app:
-
-On Debian and Ubuntu:
-
- sudo apt-get install apache2
-
-Or for CentOS/EPEL or Fedora:
+Now you can run an application inside your network container. For testing we've included in the *misc/* subfolder a [tiny single-C-file HTTP server](https://github.com/elly/1k/blob/master/httpd.c). To build it run (from *ZeroTierOne/netcon*):
- sudo yum install httpd
+    gcc -o tiny-httpd misc/httpd.c
-Now try:
+That builds a very tiny HTTP server that serves static pages. Now you can run it network-containerized:
export LD_PRELOAD=/path/to/ZeroTierOne/libzerotierintercept.so
- export ZT_NC_NWID=8056c2e21c000001
- zerotier-intercept httpd -X
+ export ZT_NC_NWID=8056c2e21c000001
+ ./tiny-httpd -p 80 .
+Note the lack of sudo, even to bind to port 80. That's because you're not binding to port 80, at least not as far as the Linux kernel is concerned. If all went well the HTTP server is now listening, but only inside the network container. Going to port 80 on your machine won't work. To reach it, go to the other system where you joined the same network with a conventional ZeroTier instance and try:
-Going to port 80 on your machine won't work. Httpd is listening, but only inside the network container. To reach it, go to the other system where you joined the same network with a conventional ZeroTier instance and try:
-
- curl http://NETCON.INSTANCE.IP:80/
+ curl http://NETCON.INSTANCE.IP/
Replace *NETCON.INSTANCE.IP* with the IP address that *zerotier-netcon-service* was assigned on the virtual network. (This is the same IP you pinged in your first test.) If everything works, you should get back a copy of ZeroTier One's main README.md file.
+In the original shell where you ran *tiny-httpd* you can type CTRL+C to kill it. To turn off network containers you can clear the environment variables:
+
+ unset LD_PRELOAD
+ unset ZT_NC_NWID
+
# Installing in a Docker Container (or any other container engine)
If it's not immediately obvious, installation into a Docker container is easy. Just install *zerotier-netcon-service*, *libzerotierintercept.so*, and *liblwip.so* into the container at an appropriate location. We suggest putting it all in */var/lib/zerotier-one* since this is the default ZeroTier home and will eliminate the need to supply a path to any of ZeroTier's services or utilities. Then, in your Docker container entry point script launch the service with *-d* to run it in the background, set the appropriate environment variables as described above, and launch your container's main application.
@@ -117,11 +112,11 @@ The only bit of complexity is configuring which virtual network to join. ZeroTie
mkdir -p /var/lib/zerotier-one/networks.d
touch /var/lib/zerotier-one/networks.d/8056c2e21c000001.conf
-Replace 8056c2e21c000001 with the network ID of the network you want your container to automaticlaly join.
+Replace 8056c2e21c000001 with the network ID of the network you want your container to join.
Now your container will automatically join the specified network on startup. Authorizing the container on a private network still requires a manual authorization step either via the ZeroTier Central web UI or the API. We're working on some ideas to automate this via bearer token auth or similar since doing this manually or with scripts for large deployments is tedious. We'll have something in this area by the time Network Containers itself is ready to be pronounced no-longer-beta.
-# Unit Tests
+# Docker-based Unit Tests
Each unit test will temporarily copy all required ZeroTier binaries into its local directory, then build the *netcon_dockerfile* and *monitor_dockerfile*. Once built, each container will be run and perform tests and monitoring specified in *netcon_entrypoint.sh* and *monitor_entrypoint.sh*.
@@ -136,7 +131,7 @@ To run unit tests:
After you've created your network and placed its blank config file in *netcon/docker-test* run the following to perform unit tests for httpd:
- ./build.sh httpd
- ./test.sh httpd
+ ./build.sh httpd
+ ./test.sh httpd
It's useful to note that the keyword *httpd* in this example is merely a substring for a test name, this means that if we replaced it with *x86_64* or *fc23*, it would run all unit tests for *x86_64* systems or *Fedora 23* respectively.
diff --git a/netcon/misc/httpd b/netcon/misc/httpd
new file mode 100755
index 00000000..93a6ff6e
--- /dev/null
+++ b/netcon/misc/httpd
Binary files differ
diff --git a/netcon/misc/httpd.c b/netcon/misc/httpd.c
new file mode 100644
index 00000000..3a58e2dc
--- /dev/null
+++ b/netcon/misc/httpd.c
@@ -0,0 +1,490 @@
+/* httpd.c - multi-client httpd, with cgi and dirindex support, in <500 LOC.
+ * Run as: httpd [-p port] <root>
+ * u+x or g+x files are considered cgi programs.
+ */
+
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <netinet/in.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <unistd.h>
+
+#define LINEBUFMAX 4096
+#define REQBUFMAX 4096
+#define FILEBUFMAX 4096
+
+static const char *docroot;
+static int printreqs = 0;
+
/* Event-loop state: one epoll instance that every socket registers with. */
struct reactor {
	int epfd; /* epoll instance fd from epoll_create1() */
};
+
/* One registered file descriptor plus its event callbacks.  A NULL
 * callback means the matching epoll event is not requested (the
 * interest mask is rebuilt from these pointers by reactor_refresh). */
struct socket {
	int fd;                         /* underlying OS file descriptor */
	struct sockaddr_in sa;          /* peer address (filled in by accept) */
	struct reactor *r;              /* owning reactor */
	void (*read)(struct socket *);  /* invoked on EPOLLIN */
	void (*write)(struct socket *); /* invoked on EPOLLOUT */
	void (*close)(struct socket *); /* invoked on hangup/error */
	void *priv;                     /* user data (struct client for accepted conns) */
};
+
/* Per-connection HTTP state layered on top of a struct socket. */
struct client {
	struct socket *s;                      /* transport */
	void (*line)(struct client *, char *); /* parser state: reqline, then reqhdr */
	void (*writedone)(struct client *);    /* continuation run when wbuf fully flushed */

	char *rbuf;       /* read buffer (REQBUFMAX bytes, kept NUL-padded) */
	size_t rbufsize;
	size_t rbuffill;  /* bytes currently buffered in rbuf */
	char *wbuf;       /* write buffer, grown on demand by client_writeb */
	size_t wbufsize;
	size_t wbuffill;

	char *reqmethod;  /* strdup'd request method, e.g. "GET" */
	char *requrl;     /* strdup'd request URL */

	int fillfd;       /* file/directory fd currently being streamed out */
};
+
/* Report a fatal system-call failure (perror-style, using errno) and
 * abort the process.  Used for errors the server cannot recover from. */
static void udie(const char *prefix)
{
	perror(prefix);
	abort();
}
+
/* Allocate sz zero-initialized bytes; abort on allocation failure.
 * (The server's policy is abort-on-OOM, so callers never see NULL.)
 * Uses calloc instead of the original malloc+memset pair. */
static void *xmalloc(size_t sz) {
	void *p = calloc(1, sz);
	if (!p)
		abort();
	return p;
}
+
/* Duplicate the NUL-terminated string s on the heap; abort on OOM. */
static char *xstrdup(const char *s) {
	char *copy = strdup(s);
	if (copy == NULL)
		abort();
	return copy;
}
+
/* Bounded string copy: copy src into dest (total capacity n), always
 * NUL-terminating and truncating as needed.  Now a safe no-op when
 * n == 0 (the original indexed dest[n-1], i.e. dest[SIZE_MAX] — UB).
 * NOTE(review): this shadows the libc strlcpy shipped by the BSDs and
 * glibc >= 2.38 (which returns size_t) — consider renaming. */
static void strlcpy(char *dest, const char *src, size_t n) {
	if (n == 0)
		return;
	snprintf(dest, n, "%s", src);
}
+
/* Bounded string append: append src to dest, where n is the TOTAL
 * capacity of dest.  Fixes a buffer overflow in the original, which
 * passed n-1 to strncat(); strncat's count limits the number of
 * characters APPENDED (plus a NUL), not the destination capacity, so
 * a long dest+src combination wrote past the end of dest.
 * NOTE(review): shadows libc strlcat on the BSDs / glibc >= 2.38. */
static void strlcat(char *dest, const char *src, size_t n) {
	size_t used = strlen(dest);
	if (used >= n)
		return; /* dest already (over)full; nothing safe to do */
	snprintf(dest + used, n - used, "%s", src);
}
+
+static struct reactor *reactor_new(void) {
+ struct reactor *r = xmalloc(sizeof *r);
+ r->epfd = epoll_create1(0);
+ if (r->epfd < 0)
+ udie("epoll_create1()");
+ return r;
+}
+
+static struct socket *reactor_add(struct reactor *r, int fd) {
+ struct socket *s = xmalloc(sizeof *s);
+ struct epoll_event evt;
+
+ s->fd = fd;
+ s->r = r;
+ evt.events = 0;
+ evt.data.ptr = s;
+ if (epoll_ctl(r->epfd, EPOLL_CTL_ADD, fd, &evt) < 0)
+ udie("epoll_ctl()");
+ return s;
+};
+
+static void reactor_refresh(struct reactor *r, struct socket *s) {
+ struct epoll_event evt;
+ evt.events = 0;
+ evt.data.ptr = s;
+ if (s->read)
+ evt.events |= EPOLLIN;
+ if (s->write)
+ evt.events |= EPOLLOUT;
+ if (s->close)
+ evt.events |= EPOLLRDHUP;
+ if (epoll_ctl(r->epfd, EPOLL_CTL_MOD, s->fd, &evt) < 0)
+ udie("epoll_ctl()");
+}
+
+static void reactor_del(struct reactor *r, struct socket *s) {
+ if (epoll_ctl(r->epfd, EPOLL_CTL_DEL, s->fd, NULL) < 0)
+ udie("epoll_ctl()");
+ free(s);
+}
+
+static void reactor_run(struct reactor *r) {
+ struct epoll_event evts[16];
+ int n;
+ int i;
+ struct socket *s;
+
+ n = epoll_wait(r->epfd, evts, sizeof(evts) / sizeof(evts[0]), -1);
+ if (n < 0)
+ udie("epoll_wait()");
+ for (i = 0; i < n; i++) {
+ s = evts[i].data.ptr;
+ if (evts[i].events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) {
+ if (s->close)
+ s->close(s);
+ reactor_del(r, s);
+ } else if ((evts[i].events & EPOLLIN) && s->read) {
+ s->read(s);
+ } else if ((evts[i].events & EPOLLOUT) && s->write) {
+ s->write(s);
+ }
+ }
+}
+
+static void reqline(struct client *, char *);
+
+static struct client *client_new(struct socket *s) {
+ struct client *c = xmalloc(sizeof *c);
+ c->s = s;
+ c->rbuf = xmalloc(REQBUFMAX);
+ c->rbufsize = REQBUFMAX;
+ c->rbuffill = 0;
+ c->line = reqline;
+ c->wbufsize = 0;
+ c->wbuffill = 0;
+ c->writedone = NULL;
+ return c;
+}
+
+static void client_read(struct socket *s) {
+ struct client *c = s->priv;
+ char *p;
+ ssize_t len;
+
+ len = read(s->fd, c->rbuf + c->rbuffill, c->rbufsize - c->rbuffill);
+ if (len < 0)
+ udie("read()");
+ c->rbuffill += len;
+ while ((p = strstr(c->rbuf, "\n"))) {
+ *p = '\0';
+ if (p > c->rbuf && p[-1] == '\r')
+ p[-1] = '\0';
+ p++;
+ c->line(c, c->rbuf);
+ memmove(c->rbuf, p, c->rbufsize - (p - c->rbuf));
+ c->rbuffill -= (p - c->rbuf);
+ memset(c->rbuf + c->rbuffill, 0, c->rbufsize - c->rbuffill);
+ }
+}
+
+static void client_write(struct socket *s) {
+ struct client *c = s->priv;
+ ssize_t len;
+
+ len = write(s->fd, c->wbuf, c->wbuffill);
+ if (len < 0)
+ udie("write()");
+ if ((size_t)len < c->wbuffill)
+ memmove(c->wbuf, c->wbuf + len, c->wbuffill - len);
+ c->wbuffill -= len;
+ if (c->wbuffill)
+ return;
+ free(c->wbuf);
+ c->wbuf = NULL;
+ c->wbufsize = 0;
+ s->write = NULL;
+ c->writedone(c);
+}
+
+static void client_writeb(struct client *c, const char *buf, size_t len) {
+ if (!c->wbufsize || c->wbufsize - c->wbuffill < len) {
+ size_t growby = len - (c->wbufsize - c->wbuffill);
+ c->wbuf = realloc(c->wbuf, c->wbufsize + growby);
+ c->wbufsize += growby;
+ }
+ memcpy(c->wbuf + c->wbuffill, buf, len);
+ c->wbuffill += len;
+ if (!c->s->write) {
+ c->s->write = client_write;
+ reactor_refresh(c->s->r, c->s);
+ }
+}
+
+static void client_writeln(struct client *c, const char *fmt, ...) {
+ char buf[LINEBUFMAX];
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, LINEBUFMAX, fmt, ap);
+ va_end(ap);
+
+ p = buf + strlen(buf);
+ if (p > buf + LINEBUFMAX - 2)
+ p = buf + LINEBUFMAX - 2;
+ *p++ = '\r';
+ *p++ = '\n';
+ client_writeb(c, buf, p - buf);
+}
+
/* writedone continuation for finished responses: close the connection.
 * NOTE(review): the struct socket registered with the reactor and the
 * struct client on its priv pointer are not freed here — removal from
 * epoll happens implicitly via close().  Looks like a small
 * per-request leak; compare with the client_close() teardown path. */
static void client_writedone(struct client *c) {
	close(c->s->fd);
}
+
+static void client_refillbuf(struct client *c) {
+ char buf[FILEBUFMAX];
+ ssize_t len;
+
+ len = read(c->fillfd, buf, sizeof(buf));
+ if (len < 0)
+ udie("read()");
+ if (len == 0) {
+ c->writedone = client_writedone;
+ close(c->fillfd);
+ } else {
+ c->writedone = client_refillbuf;
+ }
+ client_writeb(c, buf, len);
+}
+
/* close-callback: free per-connection state when the reactor tears a
 * socket down (reactor_del frees the struct socket itself; the caller
 * closes the fd).
 * NOTE(review): if the peer hangs up mid-transfer, an fillfd opened by
 * get()/genindex() appears to leak here — fillfd is zero-initialized,
 * so "no file" is indistinguishable from fd 0; confirm. */
static void client_close(struct socket *s) {
	struct client *c = s->priv;
	free(c->reqmethod);
	free(c->requrl);
	free(c->rbuf);
	free(c->wbuf);
	free(c);
	/* ... */
}
+
+static void listener_read(struct socket *s) {
+ struct sockaddr_in sa;
+ socklen_t salen = sizeof(sa);
+ int nfd = accept(s->fd, (struct sockaddr *)&sa, &salen);
+ struct socket *n;
+ if (nfd == -1)
+ udie("accept()");
+ if (fcntl(nfd, F_SETFD, FD_CLOEXEC) < 0)
+ udie("fcntl()");
+ n = reactor_add(s->r, nfd);
+ memcpy(&n->sa, &sa, sizeof(n->sa));
+ n->read = client_read;
+ n->close = client_close;
+ reactor_refresh(s->r, n);
+ n->priv = client_new(n);
+}
+
/* Queue a bare "HTTP/1.1 <code> Error" status line with no body and
 * mark the response finished so the connection closes after flushing.
 * (The %u format relies on code being non-negative.) */
static void error(struct client *c, int code) {
	client_writeln(c, "HTTP/1.1 %u Error", code);
	client_writeln(c, "");
	c->writedone = client_writedone;
}
+
+static void iptobuf(struct client *c, char *buf) {
+ unsigned int ip = ntohl(c->s->sa.sin_addr.s_addr);
+ sprintf(buf, "%u.%u.%u.%u", (ip >> 24) & 0xFF,
+ (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF);
+}
+
+static void runcgi(struct client *c, const char *prog, const char *args) {
+ char buf[] = "REMOTE_ADDR=255.255.255.255";
+ iptobuf(c, buf + strlen("REMOTE_ADDR="));
+ putenv(buf);
+ dup2(c->s->fd, 0);
+ dup2(c->s->fd, 1);
+ dup2(c->s->fd, 2);
+ execl(prog, prog, args, NULL);
+}
+
+static void cgi(struct client *c, const char *prog, const char *args) {
+ int p;
+ p = fork();
+ if (!p)
+ runcgi(c, prog, args);
+ else if (p < 0)
+ error(c, 500);
+ else
+ c->writedone = client_writedone;
+}
+
+static void genindex(struct client *c, const char *url) {
+ DIR *d = fdopendir(c->fillfd);
+ struct dirent *e;
+
+ client_writeln(c, "Content-Type: text/html");
+ client_writeln(c, "");
+
+ client_writeln(c, "<html>");
+ client_writeln(c, " <head>");
+ client_writeln(c, " <title>Index of %s</title>", url);
+ client_writeln(c, " </head>");
+ client_writeln(c, " <body>");
+ client_writeln(c, " <h1>Index of %s</h1>", url);
+ client_writeln(c, " <ul>");
+ while ((e = readdir(d))) {
+ client_writeln(c, " <li>");
+ client_writeln(c, " <a href=\"%s%s%s\">%s</a>", url,
+ url[strlen(url) - 1] == '/' ? "" : "/", e->d_name,
+ e->d_name);
+ client_writeln(c, " </li>");
+ }
+ client_writeln(c, " </ul>");
+ client_writeln(c, " </body>");
+ client_writeln(c, "</html>");
+ closedir(d);
+ c->writedone = client_writedone;
+}
+
+static void get(struct client *c, char *url) {
+ char rp[PATH_MAX];
+ char *rpcanon;
+ char *rest;
+ struct stat st;
+
+ strlcpy(rp, docroot, sizeof(rp));
+ if ((rest = strchr(url, '?')))
+ *rest++ = '\0';
+ strlcat(rp, url, sizeof(rp));
+ rpcanon = realpath(rp, NULL);
+ if (!rpcanon) {
+ error(c, 404);
+ return;
+ }
+
+ if (strstr(rpcanon, docroot) != rpcanon) {
+ error(c, 403);
+ free(rpcanon);
+ return;
+ }
+
+ c->fillfd = open(rpcanon, O_RDONLY);
+ if (c->fillfd == -1) {
+ free(rpcanon);
+ error(c, 403); /* XXX: not all open() failures are 403s */
+ return;
+ }
+
+ if (fstat(c->fillfd, &st) == -1)
+ udie("fstat()");
+
+ client_writeln(c, "HTTP/1.1 200 OK");
+
+ if (S_ISDIR(st.st_mode)) {
+ genindex(c, url);
+ } else if (st.st_mode & (S_IXUSR | S_IXGRP)) {
+ cgi(c, rpcanon, rest);
+ } else {
+ client_writeln(c, "");
+ client_refillbuf(c);
+ }
+ free(rpcanon);
+}
+
+static void reqdone(struct client *c) {
+ if (printreqs) {
+ char buf[32];
+ iptobuf(c, buf);
+ printf("%s %s %s\n", buf, c->reqmethod, c->requrl);
+ }
+ if (!strcasecmp(c->reqmethod, "GET"))
+ get(c, c->requrl);
+ else
+ error(c, 405);
+}
+
/* Header-line parser state.  Header fields are currently discarded; a
 * blank line ends the header section and triggers dispatch. */
static void reqhdr(struct client *c, char *line) {
	if (*line == '\0') {
		reqdone(c);
		return;
	}
	/* XXX: header fields are ignored. */
}
+
+static void reqline(struct client *c, char *line) {
+ char *method, *url, *version;
+
+ method = strtok(line, " ");
+ url = strtok(NULL, " ");
+ version = strtok(NULL, " ");
+
+ if (!method || !url) {
+ error(c, 400);
+ return;
+ }
+
+ c->reqmethod = xstrdup(method);
+ c->requrl = xstrdup(url);
+ c->line = reqhdr;
+}
+
/* Create, bind, and listen on a TCP socket on INADDR_ANY:port and
 * return its fd.  SO_REUSEADDR is set so the server can be restarted
 * immediately without waiting out TIME_WAIT — the original lacked it
 * and bind() failed with EADDRINUSE after a restart.  Aborts on any
 * failure. */
static int serve(int port) {
	int sfd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in sa;
	int one = 1;

	if (sfd == -1)
		udie("socket()");
	if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0)
		udie("setsockopt()");
	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_addr.s_addr = htonl(INADDR_ANY);
	sa.sin_port = htons(port);
	if (bind(sfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		udie("bind()");
	if (listen(sfd, 20) < 0)
		udie("listen()");
	if (fcntl(sfd, F_SETFD, FD_CLOEXEC) < 0)
		udie("fcntl()");
	return sfd;
}
+
/* Print command-line usage for the program invoked as progn. */
static void usage(const char *progn)
{
	printf("Usage: %s [-p port] [-v] <root>\n", progn);
}
+
+int main(int argc, char *argv[]) {
+ struct reactor *r = reactor_new();
+ struct socket *listener;
+ int opt;
+ int port = 80;
+
+ while ((opt = getopt(argc, argv, "p:v")) != -1) {
+ switch (opt) {
+ case 'p':
+ port = atoi(optarg);
+ break;
+ case 'v':
+ printreqs = 1;
+ break;
+ default:
+ usage(argv[0]);
+ exit(1);
+ }
+ }
+
+ if (optind >= argc) {
+ usage(argv[0]);
+ exit(1);
+ }
+
+ docroot = argv[optind];
+
+ listener = reactor_add(r, serve(port));
+ listener->read = listener_read;
+ reactor_refresh(r, listener);
+
+ signal(SIGCHLD, SIG_IGN);
+
+ while (1) {
+ reactor_run(r);
+ }
+}