Diffstat (limited to 'include')
-rw-r--r--   include/Makefile.am      |    5
-rw-r--r--   include/alarm.h          |   13
-rw-r--r--   include/buffer.h         |   32
-rw-r--r--   include/cache.h          |   92
-rw-r--r--   include/conntrack.h      |  160
-rw-r--r--   include/conntrackd.h     |  174
-rw-r--r--   include/debug.h          |   53
-rw-r--r--   include/hash.h           |   47
-rw-r--r--   include/ignore.h         |   12
-rw-r--r--   include/jhash.h          |  146
-rw-r--r--   include/linux_list.h     |  725
-rw-r--r--   include/local.h          |   29
-rw-r--r--   include/log.h            |   10
-rw-r--r--   include/mcast.h          |   48
-rw-r--r--   include/network.h        |   34
-rw-r--r--   include/slist.h          |   41
-rw-r--r--   include/state_helper.h   |   20
-rw-r--r--   include/sync.h           |   23
-rw-r--r--   include/us-conntrack.h   |   13
19 files changed, 1677 insertions, 0 deletions
diff --git a/include/Makefile.am b/include/Makefile.am
new file mode 100644
index 0000000..e669d73
--- /dev/null
+++ b/include/Makefile.am
@@ -0,0 +1,5 @@
+
+noinst_HEADERS = alarm.h jhash.h slist.h cache.h linux_list.h \
+ sync.h conntrackd.h local.h us-conntrack.h \
+ debug.h log.h hash.h mcast.h buffer.h
+
diff --git a/include/alarm.h b/include/alarm.h
new file mode 100644
index 0000000..93e6482
--- /dev/null
+++ b/include/alarm.h
@@ -0,0 +1,13 @@
+#ifndef _TIMER_H_
+#define _TIMER_H_
+
+#include "linux_list.h"
+
+struct alarm_list {
+ struct list_head head;
+ unsigned long expires;
+ void *data;
+ void (*function)(struct alarm_list *a, void *data);
+};
+
+#endif
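
A minimal usage sketch (not part of this commit) of how an alarm_list entry might be filled in and queued with the linux_list.h primitives; the queue name and the callback are hypothetical:

    #include "alarm.h"

    static LIST_HEAD(alarm_queue);          /* hypothetical queue of pending alarms */

    static void example_timeout(struct alarm_list *a, void *data)
    {
            /* called when the alarm fires; a->data == data */
    }

    static void queue_alarm(struct alarm_list *t, unsigned long when, void *data)
    {
            t->expires  = when;
            t->data     = data;
            t->function = example_timeout;
            list_add(&t->head, &alarm_queue);   /* enqueue at the head */
    }
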
diff --git a/include/buffer.h b/include/buffer.h
new file mode 100644
index 0000000..8d72dfb
--- /dev/null
+++ b/include/buffer.h
@@ -0,0 +1,32 @@
+#ifndef _BUFFER_H_
+#define _BUFFER_H_
+
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+#include "linux_list.h"
+
+struct buffer {
+ pthread_mutex_t lock;
+ size_t max_size;
+ size_t cur_size;
+ struct list_head head;
+};
+
+struct buffer_node {
+ struct list_head head;
+ size_t size;
+ char data[0];
+};
+
+struct buffer *buffer_create(size_t max_size);
+void buffer_destroy(struct buffer *b);
+int buffer_add(struct buffer *b, const void *data, size_t size);
+void buffer_del(struct buffer *b, void *data);
+void __buffer_del(struct buffer *b, void *data);
+void buffer_iterate(struct buffer *b,
+ void *data,
+ int (*iterate)(void *data1, void *data2));
+
+#endif
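
A rough illustration of the intended call pattern, assumed from the declarations above rather than taken from this commit; the return-value semantics of buffer_add() are an assumption:

    #include "buffer.h"

    /* iterate callback; which argument is the stored payload and which is the
     * caller cookie is an assumption based on the prototype only */
    static int dump_node(void *data1, void *data2)
    {
            return 0;
    }

    int buffer_example(void)
    {
            struct buffer *b = buffer_create(65536);   /* 64 KB cap */
            const char msg[] = "hello";

            if (b == NULL)
                    return -1;
            if (buffer_add(b, msg, sizeof(msg)) < 0)
                    return -1;                         /* presumably full or out of memory */
            buffer_iterate(b, NULL, dump_node);
            buffer_destroy(b);
            return 0;
    }
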
diff --git a/include/cache.h b/include/cache.h
new file mode 100644
index 0000000..7d9559a
--- /dev/null
+++ b/include/cache.h
@@ -0,0 +1,92 @@
+#ifndef _CACHE_H_
+#define _CACHE_H_
+
+#include <sys/types.h>
+#include <time.h>
+
+/* cache features */
+enum {
+ NO_FEATURES = 0,
+
+ TIMER_FEATURE = 0,
+ TIMER = (1 << TIMER_FEATURE),
+
+ LIFETIME_FEATURE = 2,
+ LIFETIME = (1 << LIFETIME_FEATURE),
+
+ __CACHE_MAX_FEATURE
+};
+#define CACHE_MAX_FEATURE __CACHE_MAX_FEATURE
+
+struct cache;
+struct us_conntrack;
+
+struct cache_feature {
+ size_t size;
+ void (*add)(struct us_conntrack *u, void *data);
+ void (*update)(struct us_conntrack *u, void *data);
+ void (*destroy)(struct us_conntrack *u, void *data);
+ int (*dump)(struct us_conntrack *u, void *data, char *buf, int type);
+};
+
+extern struct cache_feature lifetime_feature;
+extern struct cache_feature timer_feature;
+
+#define CACHE_MAX_NAMELEN 32
+
+struct cache {
+ char name[CACHE_MAX_NAMELEN];
+ struct hashtable *h;
+
+ unsigned int num_features;
+ struct cache_feature **features;
+ unsigned int feature_type[CACHE_MAX_FEATURE];
+ unsigned int *feature_offset;
+ struct cache_extra *extra;
+ unsigned int extra_offset;
+
+ /* statistics */
+ unsigned int add_ok;
+ unsigned int del_ok;
+ unsigned int upd_ok;
+
+ unsigned int add_fail;
+ unsigned int del_fail;
+ unsigned int upd_fail;
+
+ unsigned int commit_ok;
+ unsigned int commit_exist;
+ unsigned int commit_fail;
+
+ unsigned int flush;
+};
+
+struct cache_extra {
+ unsigned int size;
+
+ void (*add)(struct us_conntrack *u, void *data);
+ void (*update)(struct us_conntrack *u, void *data);
+ void (*destroy)(struct us_conntrack *u, void *data);
+};
+
+struct nf_conntrack;
+
+struct cache *cache_create(char *name, unsigned int features, u_int8_t proto, struct cache_extra *extra);
+void cache_destroy(struct cache *e);
+
+struct us_conntrack *cache_add(struct cache *c, struct nf_conntrack *ct);
+struct us_conntrack *cache_update(struct cache *c, struct nf_conntrack *ct);
+struct us_conntrack *cache_update_force(struct cache *c, struct nf_conntrack *ct);
+int cache_del(struct cache *c, struct nf_conntrack *ct);
+int cache_test(struct cache *c, struct nf_conntrack *ct);
+void cache_stats(struct cache *c, int fd);
+struct us_conntrack *cache_get_conntrack(struct cache *, void *);
+void *cache_get_extra(struct cache *, void *);
+
+/* iterators */
+void cache_dump(struct cache *c, int fd, int type);
+void cache_commit(struct cache *c);
+void cache_flush(struct cache *c);
+void cache_bulk(struct cache *c);
+
+#endif
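
A hedged sketch of how a caller might drive the cache API, assuming conntrack objects come from libnetfilter_conntrack's nfct_new()/nfct_set_attr_* interface; the cache name, the feature flag and the use of the L3 family as the third argument are illustrative only:

    #include <sys/socket.h>
    #include "cache.h"
    #include "us-conntrack.h"

    int cache_example(void)
    {
            struct cache *c;
            struct nf_conntrack *ct;
            struct us_conntrack *u;

            c = cache_create("example", TIMER, AF_INET, NULL);
            if (c == NULL)
                    return -1;

            ct = nfct_new();
            if (ct == NULL) {
                    cache_destroy(c);
                    return -1;
            }
            nfct_set_attr_u8(ct, ATTR_ORIG_L3PROTO, AF_INET);

            u = cache_add(c, ct);           /* NULL if the entry could not be added */
            if (u != NULL)
                    cache_del(c, ct);

            nfct_destroy(ct);
            cache_destroy(c);
            return 0;
    }
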
diff --git a/include/conntrack.h b/include/conntrack.h
new file mode 100644
index 0000000..fb3b9b6
--- /dev/null
+++ b/include/conntrack.h
@@ -0,0 +1,160 @@
+#ifndef _CONNTRACK_H
+#define _CONNTRACK_H
+
+#ifdef HAVE_CONFIG_H
+#include "../config.h"
+#endif
+
+#include "linux_list.h"
+#include <getopt.h>
+#include <libnetfilter_conntrack/libnetfilter_conntrack.h>
+
+#define PROGNAME "conntrack"
+
+#include <netinet/in.h>
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+enum action {
+ CT_NONE = 0,
+
+ CT_LIST_BIT = 0,
+ CT_LIST = (1 << CT_LIST_BIT),
+
+ CT_CREATE_BIT = 1,
+ CT_CREATE = (1 << CT_CREATE_BIT),
+
+ CT_UPDATE_BIT = 2,
+ CT_UPDATE = (1 << CT_UPDATE_BIT),
+
+ CT_DELETE_BIT = 3,
+ CT_DELETE = (1 << CT_DELETE_BIT),
+
+ CT_GET_BIT = 4,
+ CT_GET = (1 << CT_GET_BIT),
+
+ CT_FLUSH_BIT = 5,
+ CT_FLUSH = (1 << CT_FLUSH_BIT),
+
+ CT_EVENT_BIT = 6,
+ CT_EVENT = (1 << CT_EVENT_BIT),
+
+ CT_VERSION_BIT = 7,
+ CT_VERSION = (1 << CT_VERSION_BIT),
+
+ CT_HELP_BIT = 8,
+ CT_HELP = (1 << CT_HELP_BIT),
+
+ EXP_LIST_BIT = 9,
+ EXP_LIST = (1 << EXP_LIST_BIT),
+
+ EXP_CREATE_BIT = 10,
+ EXP_CREATE = (1 << EXP_CREATE_BIT),
+
+ EXP_DELETE_BIT = 11,
+ EXP_DELETE = (1 << EXP_DELETE_BIT),
+
+ EXP_GET_BIT = 12,
+ EXP_GET = (1 << EXP_GET_BIT),
+
+ EXP_FLUSH_BIT = 13,
+ EXP_FLUSH = (1 << EXP_FLUSH_BIT),
+
+ EXP_EVENT_BIT = 14,
+ EXP_EVENT = (1 << EXP_EVENT_BIT),
+};
+#define NUMBER_OF_CMD 15
+
+enum options {
+ CT_OPT_ORIG_SRC_BIT = 0,
+ CT_OPT_ORIG_SRC = (1 << CT_OPT_ORIG_SRC_BIT),
+
+ CT_OPT_ORIG_DST_BIT = 1,
+ CT_OPT_ORIG_DST = (1 << CT_OPT_ORIG_DST_BIT),
+
+ CT_OPT_ORIG = (CT_OPT_ORIG_SRC | CT_OPT_ORIG_DST),
+
+ CT_OPT_REPL_SRC_BIT = 2,
+ CT_OPT_REPL_SRC = (1 << CT_OPT_REPL_SRC_BIT),
+
+ CT_OPT_REPL_DST_BIT = 3,
+ CT_OPT_REPL_DST = (1 << CT_OPT_REPL_DST_BIT),
+
+ CT_OPT_REPL = (CT_OPT_REPL_SRC | CT_OPT_REPL_DST),
+
+ CT_OPT_PROTO_BIT = 4,
+ CT_OPT_PROTO = (1 << CT_OPT_PROTO_BIT),
+
+ CT_OPT_TIMEOUT_BIT = 5,
+ CT_OPT_TIMEOUT = (1 << CT_OPT_TIMEOUT_BIT),
+
+ CT_OPT_STATUS_BIT = 6,
+ CT_OPT_STATUS = (1 << CT_OPT_STATUS_BIT),
+
+ CT_OPT_ZERO_BIT = 7,
+ CT_OPT_ZERO = (1 << CT_OPT_ZERO_BIT),
+
+ CT_OPT_EVENT_MASK_BIT = 8,
+ CT_OPT_EVENT_MASK = (1 << CT_OPT_EVENT_MASK_BIT),
+
+ CT_OPT_EXP_SRC_BIT = 9,
+ CT_OPT_EXP_SRC = (1 << CT_OPT_EXP_SRC_BIT),
+
+ CT_OPT_EXP_DST_BIT = 10,
+ CT_OPT_EXP_DST = (1 << CT_OPT_EXP_DST_BIT),
+
+ CT_OPT_MASK_SRC_BIT = 11,
+ CT_OPT_MASK_SRC = (1 << CT_OPT_MASK_SRC_BIT),
+
+ CT_OPT_MASK_DST_BIT = 12,
+ CT_OPT_MASK_DST = (1 << CT_OPT_MASK_DST_BIT),
+
+ CT_OPT_NATRANGE_BIT = 13,
+ CT_OPT_NATRANGE = (1 << CT_OPT_NATRANGE_BIT),
+
+ CT_OPT_MARK_BIT = 14,
+ CT_OPT_MARK = (1 << CT_OPT_MARK_BIT),
+
+ CT_OPT_ID_BIT = 15,
+ CT_OPT_ID = (1 << CT_OPT_ID_BIT),
+
+ CT_OPT_FAMILY_BIT = 16,
+ CT_OPT_FAMILY = (1 << CT_OPT_FAMILY_BIT),
+
+ CT_OPT_MAX_BIT = CT_OPT_FAMILY_BIT
+};
+#define NUMBER_OF_OPT CT_OPT_MAX_BIT+1
+
+struct ctproto_handler {
+ struct list_head head;
+
+ char *name;
+ u_int16_t protonum;
+ char *version;
+
+ enum ctattr_protoinfo protoinfo_attr;
+
+ int (*parse_opts)(char c, char *argv[],
+ struct nfct_tuple *orig,
+ struct nfct_tuple *reply,
+ struct nfct_tuple *exptuple,
+ struct nfct_tuple *mask,
+ union nfct_protoinfo *proto,
+ unsigned int *flags);
+
+ int (*final_check)(unsigned int flags,
+ unsigned int command,
+ struct nfct_tuple *orig,
+ struct nfct_tuple *reply);
+
+ void (*help)();
+
+ struct option *opts;
+
+ unsigned int option_offset;
+};
+
+extern void register_proto(struct ctproto_handler *h);
+
+#endif
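
For orientation only, a skeleton of what a protocol extension built on ctproto_handler might look like; it assumes the pre-1.0 libnetfilter_conntrack types used by the header above, and the handler name, option table and return-value conventions are hypothetical, not part of this commit:

    #include "conntrack.h"

    static struct option generic_opts[] = {
            { 0, 0, 0, 0 }
    };

    static int generic_parse(char c, char *argv[],
                             struct nfct_tuple *orig, struct nfct_tuple *reply,
                             struct nfct_tuple *exptuple, struct nfct_tuple *mask,
                             union nfct_protoinfo *proto, unsigned int *flags)
    {
            return 0;       /* assumption: 0 means "option not handled" */
    }

    static int generic_check(unsigned int flags, unsigned int command,
                             struct nfct_tuple *orig, struct nfct_tuple *reply)
    {
            return 1;       /* assumption: non-zero means "arguments consistent" */
    }

    static void generic_help(void) { }

    static struct ctproto_handler generic = {
            .name        = "generic",
            .protonum    = 0,
            .parse_opts  = generic_parse,
            .final_check = generic_check,
            .help        = generic_help,
            .opts        = generic_opts,
    };

    /* a real module would call register_proto(&generic) at startup */
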
diff --git a/include/conntrackd.h b/include/conntrackd.h
new file mode 100644
index 0000000..a5f7a3a
--- /dev/null
+++ b/include/conntrackd.h
@@ -0,0 +1,174 @@
+#ifndef _CONNTRACKD_H_
+#define _CONNTRACKD_H_
+
+#include "mcast.h"
+#include "local.h"
+
+#include <stdio.h>
+#include <libnetfilter_conntrack/libnetfilter_conntrack.h>
+#include "cache.h"
+#include "debug.h"
+#include <signal.h>
+#include "state_helper.h"
+#include <libnetfilter_conntrack/libnetfilter_conntrack_tcp.h>
+
+/* UNIX facilities */
+#define FLUSH_MASTER 0 /* flush kernel conntrack table */
+#define RESYNC_MASTER 1 /* resync with kernel conntrack table */
+#define DUMP_INTERNAL 16 /* dump internal cache */
+#define DUMP_EXTERNAL 17 /* dump external cache */
+#define COMMIT 18 /* commit external cache */
+#define FLUSH_CACHE 19 /* flush cache */
+#define KILL 20 /* kill conntrackd */
+#define STATS 21 /* dump statistics */
+#define SEND_BULK 22 /* send a bulk */
+#define REQUEST_DUMP 23 /* request dump */
+#define DUMP_INT_XML 24 /* dump internal cache in XML */
+#define DUMP_EXT_XML 25 /* dump external cache in XML */
+
+#define DEFAULT_CONFIGFILE "/etc/conntrackd/conntrackd.conf"
+#define DEFAULT_LOCKFILE "/var/lock/conntrackd.lock"
+
+enum {
+ STRIP_NAT_BIT = 0,
+ STRIP_NAT = (1 << STRIP_NAT_BIT),
+
+ DELAY_DESTROY_MSG_BIT = 1,
+ DELAY_DESTROY_MSG = (1 << DELAY_DESTROY_MSG_BIT),
+
+ RELAX_TRANSITIONS_BIT = 2,
+ RELAX_TRANSITIONS = (1 << RELAX_TRANSITIONS_BIT),
+
+ SYNC_MODE_PERSISTENT_BIT = 3,
+ SYNC_MODE_PERSISTENT = (1 << SYNC_MODE_PERSISTENT_BIT),
+
+ SYNC_MODE_NACK_BIT = 4,
+ SYNC_MODE_NACK = (1 << SYNC_MODE_NACK_BIT),
+
+ DONT_CHECKSUM_BIT = 5,
+ DONT_CHECKSUM = (1 << DONT_CHECKSUM_BIT),
+};
+
+/* daemon/request modes */
+#define NOT_SET 0
+#define DAEMON 1
+#define REQUEST 2
+
+/* conntrackd modes */
+#define SYNC_MODE 0
+#define STATS_MODE 1
+
+/* FILENAME_MAX is 4096 on my system, perhaps too much? */
+#ifndef FILENAME_MAXLEN
+#define FILENAME_MAXLEN 256
+#endif
+
+union inet_address {
+ u_int32_t ipv4;
+ u_int32_t ipv6[4];
+ u_int32_t all[4];
+};
+
+#define CONFIG(x) conf.x
+
+struct ct_conf {
+ char logfile[FILENAME_MAXLEN];
+ char lockfile[FILENAME_MAXLEN];
+ int hashsize; /* hashtable size */
+ struct mcast_conf mcast; /* multicast settings */
+ struct local_conf local; /* unix socket facilities */
+ int limit;
+ int refresh;
+ int cache_timeout; /* cache entries timeout */
+ int commit_timeout; /* committed entries timeout */
+ unsigned int netlink_buffer_size;
+ unsigned int netlink_buffer_size_max_grown;
+ unsigned char ignore_protocol[IPPROTO_MAX];
+ union inet_address *listen_to;
+ unsigned int listen_to_len;
+ unsigned int flags;
+ int family; /* protocol family */
+ unsigned int resend_buffer_size;/* NACK protocol */
+ unsigned int window_size;
+};
+
+#define STATE(x) st.x
+
+struct ct_general_state {
+ sigset_t block;
+ FILE *log;
+ int local;
+ struct ct_mode *mode;
+ struct ignore_pool *ignore_pool;
+
+ struct nfnl_handle *event; /* event handler */
+ struct nfnl_handle *sync; /* sync handler */
+ struct nfnl_handle *dump; /* dump handler */
+
+ struct nfnl_subsys_handle *subsys_event; /* events */
+ struct nfnl_subsys_handle *subsys_sync; /* resync */
+ struct nfnl_subsys_handle *subsys_dump; /* dump */
+
+ /* statistics */
+ u_int64_t malformed;
+ u_int64_t bytes[NFCT_DIR_MAX];
+ u_int64_t packets[NFCT_DIR_MAX];
+};
+
+#define STATE_SYNC(x) state.sync->x
+
+struct ct_sync_state {
+ struct cache *internal; /* internal events cache (netlink) */
+ struct cache *external; /* external events cache (mcast) */
+
+ struct mcast_sock *mcast_server; /* multicast socket: incoming */
+ struct mcast_sock *mcast_client; /* multicast socket: outgoing */
+
+ struct sync_mode *mcast_sync;
+ struct buffer *buffer;
+
+ u_int32_t last_seq_sent; /* last sequence number sent */
+ u_int32_t last_seq_recv; /* last sequence number recv */
+ u_int64_t packets_replayed; /* number of replayed packets */
+ u_int64_t packets_lost; /* lost packets: sequence tracking */
+};
+
+#define STATE_STATS(x) state.stats->x
+
+struct ct_stats_state {
+ struct cache *cache; /* internal events cache (netlink) */
+};
+
+union ct_state {
+ struct ct_sync_state *sync;
+ struct ct_stats_state *stats;
+};
+
+extern struct ct_conf conf;
+extern union ct_state state;
+extern struct ct_general_state st;
+
+#ifndef IPPROTO_VRRP
+#define IPPROTO_VRRP 112
+#endif
+
+struct ct_mode {
+ int (*init)(void);
+ int (*add_fds_to_set)(fd_set *readfds);
+ void (*step)(fd_set *readfds);
+ int (*local)(int fd, int type, void *data);
+ void (*kill)(void);
+ void (*dump)(struct nf_conntrack *ct, struct nlmsghdr *nlh);
+ void (*overrun)(struct nf_conntrack *ct, struct nlmsghdr *nlh);
+ void (*event_new)(struct nf_conntrack *ct, struct nlmsghdr *nlh);
+ void (*event_upd)(struct nf_conntrack *ct, struct nlmsghdr *nlh);
+ int (*event_dst)(struct nf_conntrack *ct, struct nlmsghdr *nlh);
+};
+
+/* conntrackd modes */
+extern struct ct_mode sync_mode;
+extern struct ct_mode stats_mode;
+
+#define MAX(x, y) x > y ? x : y
+
+#endif
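
The CONFIG(), STATE(), STATE_SYNC() and STATE_STATS() helpers are plain field accessors over the globals declared above; a small illustrative expansion (the surrounding function is hypothetical):

    #include "conntrackd.h"

    /* CONFIG(x) expands to conf.x, STATE(x) to st.x, and
     * STATE_SYNC(x)/STATE_STATS(x) dereference the mode-specific state union */
    static void config_example(void)
    {
            int buckets = CONFIG(hashsize);          /* == conf.hashsize */

            if (STATE(mode) == &sync_mode)           /* == st.mode */
                    STATE_SYNC(last_seq_sent)++;     /* == state.sync->last_seq_sent++ */
            (void)buckets;
    }
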
diff --git a/include/debug.h b/include/debug.h
new file mode 100644
index 0000000..67f2c71
--- /dev/null
+++ b/include/debug.h
@@ -0,0 +1,53 @@
+#ifndef _DEBUG_H
+#define _DEBUG_H
+
+#if 0
+#define debug printf
+#else
+#define debug
+#endif
+
+#include <string.h>
+#include <netinet/in.h>
+#include <libnetfilter_conntrack/libnetfilter_conntrack.h>
+
+static inline void debug_ct(struct nf_conntrack *ct, char *msg)
+{
+ struct in_addr addr, addr2, addr3, addr4;
+
+ debug("----%s (%p) ----\n", msg, ct);
+ memcpy(&addr,
+ nfct_get_attr(ct, ATTR_ORIG_IPV4_SRC),
+ sizeof(u_int32_t));
+ memcpy(&addr2,
+ nfct_get_attr(ct, ATTR_ORIG_IPV4_DST),
+ sizeof(u_int32_t));
+ memcpy(&addr3,
+ nfct_get_attr(ct, ATTR_REPL_IPV4_SRC),
+ sizeof(u_int32_t));
+ memcpy(&addr4,
+ nfct_get_attr(ct, ATTR_REPL_IPV4_DST),
+ sizeof(u_int32_t));
+
+ debug("status: %x\n", nfct_get_attr_u32(ct, ATTR_STATUS));
+ debug("l3:%d l4:%d ",
+ nfct_get_attr_u8(ct, ATTR_ORIG_L3PROTO),
+ nfct_get_attr_u8(ct, ATTR_ORIG_L4PROTO));
+ debug("%s:%hu ->", inet_ntoa(addr),
+ ntohs(nfct_get_attr_u16(ct, ATTR_ORIG_PORT_SRC)));
+ debug("%s:%hu\n",
+ inet_ntoa(addr2),
+ ntohs(nfct_get_attr_u16(ct, ATTR_ORIG_PORT_DST)));
+ debug("l3:%d l4:%d ",
+ nfct_get_attr_u8(ct, ATTR_REPL_L3PROTO),
+ nfct_get_attr_u8(ct, ATTR_REPL_L4PROTO));
+ debug("%s:%hu ->",
+ inet_ntoa(addr3),
+ ntohs(nfct_get_attr_u16(ct, ATTR_REPL_PORT_SRC)));
+ debug("%s:%hu\n",
+ inet_ntoa(addr4),
+ ntohs(nfct_get_attr_u16(ct, ATTR_REPL_PORT_DST)));
+ debug("-------------------------\n");
+}
+
+#endif
diff --git a/include/hash.h b/include/hash.h
new file mode 100644
index 0000000..fd971e7
--- /dev/null
+++ b/include/hash.h
@@ -0,0 +1,47 @@
+#ifndef _NF_SET_HASH_H_
+#define _NF_SET_HASH_H_
+
+#include <unistd.h>
+#include <sys/types.h>
+#include "slist.h"
+#include "linux_list.h"
+
+struct hashtable;
+struct hashtable_node;
+
+struct hashtable {
+ u_int32_t hashsize;
+ u_int32_t limit;
+ u_int32_t count;
+ u_int32_t initval;
+ u_int32_t datasize;
+
+ u_int32_t (*hash)(const void *data, struct hashtable *table);
+ int (*compare)(const void *data1, const void *data2);
+
+ struct slist_head members[0];
+};
+
+struct hashtable_node {
+ struct slist_head head;
+ char data[0];
+};
+
+struct hashtable_node *hashtable_alloc_node(int datasize, void *data);
+void hashtable_destroy_node(struct hashtable_node *h);
+
+struct hashtable *
+hashtable_create(int hashsize, int limit, int datasize,
+ u_int32_t (*hash)(const void *data, struct hashtable *table),
+ int (*compare)(const void *data1, const void *data2));
+void hashtable_destroy(struct hashtable *h);
+
+void *hashtable_add(struct hashtable *table, void *data);
+void *hashtable_test(struct hashtable *table, const void *data);
+int hashtable_del(struct hashtable *table, void *data);
+int hashtable_flush(struct hashtable *table);
+int hashtable_iterate(struct hashtable *table, void *data,
+ int (*iterate)(void *data1, void *data2));
+unsigned int hashtable_counter(struct hashtable *table);
+
+#endif
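
A hedged sketch combining hashtable_create() with the Jenkins hash from jhash.h; the key type, table sizes and the compare semantics (non-zero meaning "equal") are assumptions made for the example:

    #include <string.h>
    #include "hash.h"
    #include "jhash.h"

    struct key { u_int32_t src, dst; };

    static u_int32_t key_hash(const void *data, struct hashtable *table)
    {
            const struct key *k = data;
            /* reduce the 32-bit hash to a bucket index */
            return jhash_2words(k->src, k->dst, table->initval) % table->hashsize;
    }

    static int key_compare(const void *data1, const void *data2)
    {
            return memcmp(data1, data2, sizeof(struct key)) == 0;
    }

    int hash_example(void)
    {
            struct key k = { .src = 1, .dst = 2 };
            struct hashtable *h = hashtable_create(1024, 65535, sizeof(struct key),
                                                   key_hash, key_compare);
            if (h == NULL)
                    return -1;
            hashtable_add(h, &k);
            hashtable_del(h, &k);
            hashtable_destroy(h);
            return 0;
    }
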
diff --git a/include/ignore.h b/include/ignore.h
new file mode 100644
index 0000000..40cb02d
--- /dev/null
+++ b/include/ignore.h
@@ -0,0 +1,12 @@
+#ifndef _IGNORE_H_
+#define _IGNORE_H_
+
+struct ignore_pool {
+ struct hashtable *h;
+};
+
+struct ignore_pool *ignore_pool_create(u_int8_t family);
+void ignore_pool_destroy(struct ignore_pool *ip);
+int ignore_pool_add(struct ignore_pool *ip, void *data);
+
+#endif
diff --git a/include/jhash.h b/include/jhash.h
new file mode 100644
index 0000000..38b8780
--- /dev/null
+++ b/include/jhash.h
@@ -0,0 +1,146 @@
+#ifndef _LINUX_JHASH_H
+#define _LINUX_JHASH_H
+
+#define u32 unsigned int
+#define u8 char
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose. It has no warranty.
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are surely my fault. -DaveM
+ */
+
+/* NOTE: Arguments are modified. */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= b; a -= c; a ^= (c>>13); \
+ b -= c; b -= a; b ^= (a<<8); \
+ c -= a; c -= b; c ^= (b>>13); \
+ a -= b; a -= c; a ^= (c>>12); \
+ b -= c; b -= a; b ^= (a<<16); \
+ c -= a; c -= b; c ^= (b>>5); \
+ a -= b; a -= c; a ^= (c>>3); \
+ b -= c; b -= a; b ^= (a<<10); \
+ c -= a; c -= b; c ^= (b>>15); \
+}
+
+/* The golden ratio: an arbitrary value */
+#define JHASH_GOLDEN_RATIO 0x9e3779b9
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
+ * the input key.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c, len;
+ const u8 *k = key;
+
+ len = length;
+ a = b = JHASH_GOLDEN_RATIO;
+ c = initval;
+
+ while (len >= 12) {
+ a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
+ b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
+ c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+
+ __jhash_mix(a,b,c);
+
+ k += 12;
+ len -= 12;
+ }
+
+ c += length;
+ switch (len) {
+ case 11: c += ((u32)k[10]<<24);
+ case 10: c += ((u32)k[9]<<16);
+ case 9 : c += ((u32)k[8]<<8);
+ case 8 : b += ((u32)k[7]<<24);
+ case 7 : b += ((u32)k[6]<<16);
+ case 6 : b += ((u32)k[5]<<8);
+ case 5 : b += k[4];
+ case 4 : a += ((u32)k[3]<<24);
+ case 3 : a += ((u32)k[2]<<16);
+ case 2 : a += ((u32)k[1]<<8);
+ case 1 : a += k[0];
+ };
+
+ __jhash_mix(a,b,c);
+
+ return c;
+}
+
+/* A special optimized version that handles 1 or more of u32s.
+ * The length parameter here is the number of u32s in the key.
+ */
+static inline u32 jhash2(u32 *k, u32 length, u32 initval)
+{
+ u32 a, b, c, len;
+
+ a = b = JHASH_GOLDEN_RATIO;
+ c = initval;
+ len = length;
+
+ while (len >= 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
+ k += 3; len -= 3;
+ }
+
+ c += length * 4;
+
+ switch (len) {
+ case 2 : b += k[1];
+ case 1 : a += k[0];
+ };
+
+ __jhash_mix(a,b,c);
+
+ return c;
+}
+
+
+/* A special ultra-optimized version that knows it is hashing exactly
+ * 3, 2 or 1 word(s).
+ *
+ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
+ * done at the end is not done here.
+ */
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += JHASH_GOLDEN_RATIO;
+ b += JHASH_GOLDEN_RATIO;
+ c += initval;
+
+ __jhash_mix(a, b, c);
+
+ return c;
+}
+
+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+ return jhash_3words(a, b, 0, initval);
+}
+
+static inline u32 jhash_1word(u32 a, u32 initval)
+{
+ return jhash_3words(a, 0, 0, initval);
+}
+
+#endif /* _LINUX_JHASH_H */
diff --git a/include/linux_list.h b/include/linux_list.h
new file mode 100644
index 0000000..57b56d7
--- /dev/null
+++ b/include/linux_list.h
@@ -0,0 +1,725 @@
+#ifndef _LINUX_LIST_H
+#define _LINUX_LIST_H
+
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ *
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+
+/*
+ * Check at compile time that something is of a particular type.
+ * Always evaluates to 1 so you may use it easily in comparisons.
+ */
+#define typecheck(type,x) \
+({ type __dummy; \
+ typeof(x) __dummy2; \
+ (void)(&__dummy == &__dummy2); \
+ 1; \
+})
+
+#define prefetch(x) 1
+
+/* empty define to make this work in userspace -HW */
+#ifndef smp_wmb
+#define smp_wmb()
+#endif
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *) 0x00100100)
+#define LIST_POISON2 ((void *) 0x00200200)
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+#define INIT_LIST_HEAD(ptr) do { \
+ (ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add_rcu(struct list_head * new,
+ struct list_head * prev, struct list_head * next)
+{
+ new->next = next;
+ new->prev = prev;
+ smp_wmb();
+ next->prev = new;
+ prev->next = new;
+}
+
+/**
+ * list_add_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_rcu(struct list_head *new, struct list_head *head)
+{
+ __list_add_rcu(new, head, head->next);
+}
+
+/**
+ * list_add_tail_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_tail_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_tail_rcu(struct list_head *new,
+ struct list_head *head)
+{
+ __list_add_rcu(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = LIST_POISON1;
+ entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+ *
+ * Note: list_empty on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_del_rcu()
+ * or list_add_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ *
+ * Note that the caller is not permitted to immediately free
+ * the newly deleted entry. Instead, either synchronize_kernel()
+ * or call_rcu() must be used to defer freeing until an RCU
+ * grace period has elapsed.
+ */
+static inline void list_del_rcu(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add_tail(list, head);
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * list_empty_careful - tests whether a list is
+ * empty _and_ checks that no other CPU might be
+ * in the process of still modifying either member
+ *
+ * NOTE: using list_empty_careful() without synchronization
+ * can only be safe if the only activity that can happen
+ * to the list entry is list_del_init(). Eg. it cannot be used
+ * if another CPU could re-list_add() it.
+ *
+ * @head: the list to test.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+ struct list_head *next = head->next;
+ return (next == head) && (next == head->prev);
+}
+
+static inline void __list_splice(struct list_head *list,
+ struct list_head *head)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ first->prev = head;
+ head->next = first;
+
+ last->next = at;
+ at->prev = last;
+}
+
+/**
+ * list_splice - join two lists
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(struct list_head *list, struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head);
+}
+
+/**
+ * list_splice_init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head);
+ INIT_LIST_HEAD(list);
+ }
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next, prefetch(pos->next); pos != (head); \
+ pos = pos->next, prefetch(pos->next))
+
+/**
+ * __list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ *
+ * This variant differs from list_for_each() in that it's the
+ * simplest possible list iteration code, no prefetching is done.
+ * Use this for code that knows the list to be very short (empty
+ * or 1 entry) most of the time.
+ */
+#define __list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_prev - iterate over a list backwards
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+ for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
+ pos = pos->prev, prefetch(pos->prev))
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop counter.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+/**
+ * list_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ prefetch(pos->member.next); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member), \
+ prefetch(pos->member.next))
+
+/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member), \
+ prefetch(pos->member.prev); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.prev, typeof(*pos), member), \
+ prefetch(pos->member.prev))
+
+/**
+ * list_prepare_entry - prepare a pos entry for use as a start point in
+ * list_for_each_entry_continue
+ * @pos: the type * to use as a start point
+ * @head: the head of the list
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_prepare_entry(pos, head, member) \
+ ((pos) ? : list_entry(head, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue - iterate over list of given type
+ * continuing after existing point
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_continue(pos, head, member) \
+ for (pos = list_entry(pos->member.next, typeof(*pos), member), \
+ prefetch(pos->member.next); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member), \
+ prefetch(pos->member.next))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_rcu - iterate over an rcu-protected list
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_rcu(pos, head) \
+ for (pos = (head)->next, prefetch(pos->next); pos != (head); \
+ pos = pos->next, ({ smp_read_barrier_depends(); 0;}), prefetch(pos->next))
+
+#define __list_for_each_rcu(pos, head) \
+ for (pos = (head)->next; pos != (head); \
+ pos = pos->next, ({ smp_read_barrier_depends(); 0;}))
+
+/**
+ * list_for_each_safe_rcu - iterate over an rcu-protected list safe
+ * against removal of list entry
+ * @pos: the &struct list_head to use as a loop counter.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_safe_rcu(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, ({ smp_read_barrier_depends(); 0;}), n = pos->next)
+
+/**
+ * list_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_entry_rcu(pos, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ prefetch(pos->member.next); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member), \
+ ({ smp_read_barrier_depends(); 0;}), \
+ prefetch(pos->member.next))
+
+
+/**
+ * list_for_each_continue_rcu - iterate over an rcu-protected list
+ * continuing after existing point.
+ * @pos: the &struct list_head to use as a loop counter.
+ * @head: the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_continue_rcu(pos, head) \
+ for ((pos) = (pos)->next, prefetch((pos)->next); (pos) != (head); \
+ (pos) = (pos)->next, ({ smp_read_barrier_depends(); 0;}), prefetch((pos)->next))
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: list_unhashed() on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry().
+ */
+static inline void hlist_del_rcu(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+ if (n->pprev) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
+
+#define hlist_del_rcu_init hlist_del_init
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+
+/**
+ * hlist_add_head_rcu - adds the specified element to the specified hlist,
+ * while permitting racing traversals.
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry(), but only if smp_read_barrier_depends()
+ * is used to prevent memory-consistency problems on Alpha CPUs.
+ * Regardless of the type of CPU, the list-traversal primitive
+ * must be guarded by rcu_read_lock().
+ *
+ * OK, so why don't we have an hlist_for_each_entry_rcu()???
+ */
+static inline void hlist_add_head_rcu(struct hlist_node *n,
+ struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ n->pprev = &h->first;
+ smp_wmb();
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+static inline void hlist_add_after(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ next->next = n->next;
+ n->next = next;
+ next->pprev = &n->next;
+
+ if(next->next)
+ next->next->pprev = &next->next;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each(pos, head) \
+ for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
+ pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+ pos = n)
+
+/**
+ * hlist_for_each_entry - iterate over list of given type
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+/**
+ * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(tpos, pos, member) \
+ for (pos = (pos)->next; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+/**
+ * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(tpos, pos, member) \
+ for (; pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+/**
+ * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @n: another &struct hlist_node to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ n = pos->next; 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = n)
+
+/**
+ * hlist_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )
+
+#endif
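
A small, self-contained example of the doubly linked list API above; the struct and the list name are illustrative:

    #include "linux_list.h"

    struct item {
            struct list_head head;
            int value;
    };

    static LIST_HEAD(items);

    static int sum_and_clear(void)
    {
            struct item *i, *tmp;
            int sum = 0;

            list_for_each_entry(i, &items, head)
                    sum += i->value;

            /* the _safe variant allows unlinking while walking */
            list_for_each_entry_safe(i, tmp, &items, head)
                    list_del(&i->head);

            return sum;
    }
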
diff --git a/include/local.h b/include/local.h
new file mode 100644
index 0000000..350b8bf
--- /dev/null
+++ b/include/local.h
@@ -0,0 +1,29 @@
+#ifndef _LOCAL_SOCKET_H_
+#define _LOCAL_SOCKET_H_
+
+#include <sys/un.h>
+
+#ifndef UNIX_PATH_MAX
+#define UNIX_PATH_MAX 108
+#endif
+
+struct local_conf {
+ int backlog;
+ int reuseaddr;
+ char path[UNIX_PATH_MAX];
+};
+
+/* local server */
+int local_server_create(struct local_conf *conf);
+void local_server_destroy(int fd);
+int do_local_server_step(int fd, void *data,
+ void (*process)(int fd, void *data));
+
+/* local client */
+int local_client_create(struct local_conf *conf);
+void local_client_destroy(int fd);
+int do_local_client_step(int fd, void (*process)(char *buf));
+int do_local_request(int, struct local_conf *,void (*step)(char *buf));
+void local_step(char *buf);
+
+#endif
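
A hedged sketch of a server loop built on the declarations above; the socket path, backlog and the process callback are hypothetical:

    #include <string.h>
    #include "local.h"

    static void process_cmd(int fd, void *data)
    {
            /* handle one request read from the UNIX socket */
    }

    int local_example(void)
    {
            struct local_conf conf;
            int fd;

            memset(&conf, 0, sizeof(conf));
            conf.backlog   = 20;
            conf.reuseaddr = 1;
            strncpy(conf.path, "/tmp/example.sock", UNIX_PATH_MAX - 1);

            fd = local_server_create(&conf);
            if (fd < 0)
                    return -1;

            for (;;)
                    do_local_server_step(fd, NULL, process_cmd);
    }
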
diff --git a/include/log.h b/include/log.h
new file mode 100644
index 0000000..9ecff30
--- /dev/null
+++ b/include/log.h
@@ -0,0 +1,10 @@
+#ifndef _LOG_H_
+#define _LOG_H_
+
+#include <stdio.h>
+
+FILE *init_log(char *filename);
+void dlog(FILE *fd, char *format, ...);
+void close_log(FILE *fd);
+
+#endif
diff --git a/include/mcast.h b/include/mcast.h
new file mode 100644
index 0000000..0f3e3cd
--- /dev/null
+++ b/include/mcast.h
@@ -0,0 +1,48 @@
+#ifndef _MCAST_H_
+#define _MCAST_H_
+
+#include <netinet/in.h>
+
+struct mcast_conf {
+ int ipproto;
+ int backlog;
+ int reuseaddr;
+ unsigned short port;
+ union {
+ struct in_addr inet_addr;
+ struct in6_addr inet_addr6;
+ } in;
+ union {
+ struct in_addr interface_addr;
+ struct in6_addr interface_addr6;
+ } ifa;
+};
+
+struct mcast_stats {
+ u_int64_t bytes;
+ u_int64_t messages;
+ u_int64_t error;
+};
+
+struct mcast_sock {
+ int fd;
+ union {
+ struct sockaddr_in ipv4;
+ struct sockaddr_in6 ipv6;
+ } addr;
+ struct mcast_stats stats;
+};
+
+struct mcast_sock *mcast_server_create(struct mcast_conf *conf);
+void mcast_server_destroy(struct mcast_sock *m);
+
+struct mcast_sock *mcast_client_create(struct mcast_conf *conf);
+void mcast_client_destroy(struct mcast_sock *m);
+
+int mcast_send(struct mcast_sock *m, void *data, int size);
+int mcast_recv(struct mcast_sock *m, void *data, int size);
+
+struct mcast_stats *mcast_get_stats(struct mcast_sock *m);
+void mcast_dump_stats(int fd, struct mcast_sock *s, struct mcast_sock *r);
+
+#endif
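
A sketch of filling in a mcast_conf for IPv4 and opening the server (receive) and client (send) sockets; the group address, port and the meaning of ipproto as a family selector are assumptions for the example:

    #include <string.h>
    #include <arpa/inet.h>
    #include "mcast.h"

    int mcast_example(void)
    {
            struct mcast_conf conf;
            struct mcast_sock *rx, *tx;

            memset(&conf, 0, sizeof(conf));
            conf.ipproto = AF_INET;
            conf.port    = 3780;
            inet_aton("225.0.0.50", &conf.in.inet_addr);          /* multicast group */
            inet_aton("192.168.0.1", &conf.ifa.interface_addr);   /* local interface */

            rx = mcast_server_create(&conf);
            tx = mcast_client_create(&conf);
            if (rx == NULL || tx == NULL)
                    return -1;

            mcast_send(tx, "ping", 5);
            mcast_client_destroy(tx);
            mcast_server_destroy(rx);
            return 0;
    }
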
diff --git a/include/network.h b/include/network.h
new file mode 100644
index 0000000..dab50db
--- /dev/null
+++ b/include/network.h
@@ -0,0 +1,34 @@
+#ifndef _NETWORK_H_
+#define _NETWORK_H_
+
+#include <sys/types.h>
+
+struct nlnetwork {
+ u_int16_t flags;
+ u_int16_t checksum;
+ u_int32_t seq;
+};
+
+struct nlnetwork_ack {
+ u_int16_t flags;
+ u_int16_t checksum;
+ u_int32_t seq;
+ u_int32_t from;
+ u_int32_t to;
+};
+
+enum {
+ NET_HELLO_BIT = 0,
+ NET_HELLO = (1 << NET_HELLO_BIT),
+
+ NET_RESYNC_BIT = 1,
+ NET_RESYNC = (1 << NET_RESYNC_BIT),
+
+ NET_NACK_BIT = 2,
+ NET_NACK = (1 << NET_NACK_BIT),
+
+ NET_ACK_BIT = 3,
+ NET_ACK = (1 << NET_ACK_BIT),
+};
+
+#endif
diff --git a/include/slist.h b/include/slist.h
new file mode 100644
index 0000000..ab7fa34
--- /dev/null
+++ b/include/slist.h
@@ -0,0 +1,41 @@
+#ifndef _SLIST_H_
+#define _SLIST_H_
+
+#include "linux_list.h"
+
+#define INIT_SLIST_HEAD(ptr) ((ptr).next = NULL)
+
+struct slist_head {
+ struct slist_head *next;
+};
+
+static inline int slist_empty(const struct slist_head *h)
+{
+ return !h->next;
+}
+
+static inline void slist_del(struct slist_head *t, struct slist_head *prev)
+{
+ prev->next = t->next;
+ t->next = LIST_POISON1;
+}
+
+static inline void slist_add(struct slist_head *head, struct slist_head *t)
+{
+ struct slist_head *tmp = head->next;
+ head->next = t;
+ t->next = tmp;
+}
+
+#define slist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define slist_for_each(pos, head) \
+ for (pos = (head)->next; pos && ({ prefetch(pos.next); 1; }); \
+ pos = pos->next)
+
+#define slist_for_each_safe(pos, prev, next, head) \
+ for (pos = (head)->next, prev = (head); \
+ pos && ({ next = pos->next; 1; }); \
+ ({ prev = (prev->next != next) ? prev->next : prev; }), pos = next)
+
+#endif
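
A short example of the singly linked list; deletion needs the predecessor, which is why slist_for_each_safe carries a prev cursor. The node type and bucket are illustrative:

    #include "slist.h"

    struct node {
            struct slist_head head;
            int value;
    };

    static struct slist_head bucket;    /* or INIT_SLIST_HEAD(bucket) before use */

    static void drop_all(void)
    {
            struct slist_head *pos, *prev, *next;

            slist_for_each_safe(pos, prev, next, &bucket) {
                    struct node *n = slist_entry(pos, struct node, head);
                    slist_del(pos, prev);   /* unlink n from its predecessor */
                    (void)n;                /* a real caller would free(n) here */
            }
    }
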
diff --git a/include/state_helper.h b/include/state_helper.h
new file mode 100644
index 0000000..1ed0b79
--- /dev/null
+++ b/include/state_helper.h
@@ -0,0 +1,20 @@
+#ifndef _STATE_HELPER_H_
+#define _STATE_HELPER_H_
+
+enum {
+ ST_H_SKIP,
+ ST_H_REPLICATE
+};
+
+struct state_replication_helper {
+ u_int8_t proto;
+ unsigned int state;
+
+ int (*verdict)(const struct state_replication_helper *h,
+ const struct nf_conntrack *ct);
+};
+
+int state_helper_verdict(int type, struct nf_conntrack *ct);
+void state_helper_register(struct state_replication_helper *h, int state);
+
+#endif
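
A hedged example of a replication helper for TCP; it assumes the TCP state constants and ATTR_TCP_STATE attribute from libnetfilter_conntrack, and the state bitmask/verdict logic is illustrative, not taken from this commit:

    #include <netinet/in.h>
    #include <libnetfilter_conntrack/libnetfilter_conntrack.h>
    #include <libnetfilter_conntrack/libnetfilter_conntrack_tcp.h>
    #include "state_helper.h"

    static int established_only(const struct state_replication_helper *h,
                                 const struct nf_conntrack *ct)
    {
            u_int8_t st = nfct_get_attr_u8(ct, ATTR_TCP_STATE);
            return (h->state & (1 << st)) ? ST_H_REPLICATE : ST_H_SKIP;
    }

    static struct state_replication_helper tcp_helper = {
            .proto   = IPPROTO_TCP,
            .state   = (1 << TCP_CONNTRACK_ESTABLISHED),
            .verdict = established_only,
    };

    /* a caller would then invoke state_helper_register(&tcp_helper, ...) */
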
diff --git a/include/sync.h b/include/sync.h
new file mode 100644
index 0000000..7756c87
--- /dev/null
+++ b/include/sync.h
@@ -0,0 +1,23 @@
+#ifndef _SYNC_HOOKS_H_
+#define _SYNC_HOOKS_H_
+
+struct nlnetwork;
+struct us_conntrack;
+
+struct sync_mode {
+ int internal_cache_flags;
+ int external_cache_flags;
+ struct cache_extra *internal_cache_extra;
+ struct cache_extra *external_cache_extra;
+
+ int (*init)(void);
+ void (*kill)(void);
+ int (*local)(int fd, int type, void *data);
+ int (*pre_recv)(const struct nlnetwork *net);
+ void (*post_send)(const struct nlnetwork *net, struct us_conntrack *u);
+};
+
+extern struct sync_mode notrack;
+extern struct sync_mode nack;
+
+#endif
diff --git a/include/us-conntrack.h b/include/us-conntrack.h
new file mode 100644
index 0000000..3d71e22
--- /dev/null
+++ b/include/us-conntrack.h
@@ -0,0 +1,13 @@
+#ifndef _US_CONNTRACK_H_
+#define _US_CONNTRACK_H_
+
+#include <libnetfilter_conntrack/libnetfilter_conntrack.h>
+
+/* be careful, do not modify the layout */
+struct us_conntrack {
+ struct nf_conntrack *ct;
+ struct cache *cache; /* add new attributes here */
+ char data[0];
+};
+
+#endif