Diffstat (limited to 'src/internal_cache.c')
-rw-r--r-- | src/internal_cache.c | 287 |
1 file changed, 231 insertions, 56 deletions
diff --git a/src/internal_cache.c b/src/internal_cache.c
index e50e1db..ba2d74b 100644
--- a/src/internal_cache.c
+++ b/src/internal_cache.c
@@ -1,6 +1,7 @@
 /*
- * (C) 2009 by Pablo Neira Ayuso <pablo@netfilter.org>
- *
+ * (C) 2006-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2011 by Vyatta Inc. <http://www.vyatta.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -19,46 +20,60 @@ static inline void sync_send(struct cache_object *obj, int query)
 	STATE_SYNC(sync)->enqueue(obj, query);
 }
 
-static int _init(void)
+static int internal_cache_init(void)
 {
-	STATE(mode)->internal->data =
-		cache_create("internal",
+	STATE(mode)->internal->ct.data =
+		cache_create("internal", CACHE_T_CT,
 			     STATE_SYNC(sync)->internal_cache_flags,
-			     STATE_SYNC(sync)->internal_cache_extra);
+			     STATE_SYNC(sync)->internal_cache_extra,
+			     &cache_sync_internal_ct_ops);
 
-	if (!STATE(mode)->internal->data) {
+	if (!STATE(mode)->internal->ct.data) {
 		dlog(LOG_ERR, "can't allocate memory for the internal cache");
 		return -1;
 	}
+
+	STATE(mode)->internal->exp.data =
+		cache_create("internal", CACHE_T_EXP,
+			     STATE_SYNC(sync)->internal_cache_flags,
+			     STATE_SYNC(sync)->internal_cache_extra,
+			     &cache_sync_internal_exp_ops);
+
+	if (!STATE(mode)->internal->exp.data) {
+		dlog(LOG_ERR, "can't allocate memory for the internal cache");
+		return -1;
+	}
+
 	return 0;
 }
 
-static void _close(void)
+static void internal_cache_close(void)
 {
-	cache_destroy(STATE(mode)->internal->data);
+	cache_destroy(STATE(mode)->internal->ct.data);
+	cache_destroy(STATE(mode)->internal->exp.data);
 }
 
-static void dump(int fd, int type)
+static void internal_cache_ct_dump(int fd, int type)
 {
-	cache_dump(STATE(mode)->internal->data, fd, type);
+	cache_dump(STATE(mode)->internal->ct.data, fd, type);
 }
 
-static void flush(void)
+static void internal_cache_ct_flush(void)
 {
-	cache_flush(STATE(mode)->internal->data);
+	cache_flush(STATE(mode)->internal->ct.data);
 }
 
-static void stats(int fd)
+static void internal_cache_ct_stats(int fd)
 {
-	cache_stats(STATE(mode)->internal->data, fd);
+	cache_stats(STATE(mode)->internal->ct.data, fd);
 }
 
-static void stats_ext(int fd)
+static void internal_cache_ct_stats_ext(int fd)
 {
-	cache_stats_extended(STATE(mode)->internal->data, fd);
+	cache_stats_extended(STATE(mode)->internal->ct.data, fd);
 }
 
-static void populate(struct nf_conntrack *ct)
+static void internal_cache_ct_populate(struct nf_conntrack *ct)
 {
 	/* This is required by kernels < 2.6.20 */
 	nfct_attr_unset(ct, ATTR_ORIG_COUNTER_BYTES);
@@ -67,19 +82,19 @@ static void populate(struct nf_conntrack *ct)
 	nfct_attr_unset(ct, ATTR_REPL_COUNTER_PACKETS);
 	nfct_attr_unset(ct, ATTR_USE);
 
-	cache_update_force(STATE(mode)->internal->data, ct);
+	cache_update_force(STATE(mode)->internal->ct.data, ct);
 }
 
-static int purge_step(void *data1, void *data2)
+static int internal_cache_ct_purge_step(void *data1, void *data2)
 {
 	struct cache_object *obj = data2;
 
 	STATE(get_retval) = 0;
-	nl_get_conntrack(STATE(get), obj->ct);	/* modifies STATE(get_reval) */
+	nl_get_conntrack(STATE(get), obj->ptr);	/* modifies STATE(get_reval) */
 	if (!STATE(get_retval)) {
 		if (obj->status != C_OBJ_DEAD) {
 			cache_object_set_status(obj, C_OBJ_DEAD);
-			sync_send(obj, NET_T_STATE_DEL);
+			sync_send(obj, NET_T_STATE_CT_DEL);
 			cache_object_put(obj);
 		}
 	}
@@ -87,14 +102,15 @@ static int purge_step(void *data1, void *data2)
 	return 0;
 }
 
-static void purge(void)
+static void internal_cache_ct_purge(void)
 {
-	cache_iterate(STATE(mode)->internal->data, NULL, purge_step);
+	cache_iterate(STATE(mode)->internal->ct.data, NULL,
+		      internal_cache_ct_purge_step);
 }
 
-static int resync(enum nf_conntrack_msg_type type,
-		  struct nf_conntrack *ct,
-		  void *data)
+static int
+internal_cache_ct_resync(enum nf_conntrack_msg_type type,
+			 struct nf_conntrack *ct, void *data)
 {
 	struct cache_object *obj;
 
@@ -108,23 +124,22 @@ static int resync(enum nf_conntrack_msg_type type,
 	nfct_attr_unset(ct, ATTR_REPL_COUNTER_PACKETS);
 	nfct_attr_unset(ct, ATTR_USE);
 
-	obj = cache_update_force(STATE(mode)->internal->data, ct);
+	obj = cache_update_force(STATE(mode)->internal->ct.data, ct);
 	if (obj == NULL)
 		return NFCT_CB_CONTINUE;
 
 	switch (obj->status) {
 	case C_OBJ_NEW:
-		sync_send(obj, NET_T_STATE_NEW);
+		sync_send(obj, NET_T_STATE_CT_NEW);
 		break;
 	case C_OBJ_ALIVE:
-		sync_send(obj, NET_T_STATE_UPD);
+		sync_send(obj, NET_T_STATE_CT_UPD);
 		break;
 	}
 	return NFCT_CB_CONTINUE;
 }
 
-static void
-event_new_sync(struct nf_conntrack *ct, int origin)
+static void internal_cache_ct_event_new(struct nf_conntrack *ct, int origin)
 {
 	struct cache_object *obj;
 	int id;
@@ -139,13 +154,13 @@ event_new_sync(struct nf_conntrack *ct, int origin)
 	nfct_attr_unset(ct, ATTR_REPL_COUNTER_BYTES);
 	nfct_attr_unset(ct, ATTR_REPL_COUNTER_PACKETS);
 
-	obj = cache_find(STATE(mode)->internal->data, ct, &id);
+	obj = cache_find(STATE(mode)->internal->ct.data, ct, &id);
 	if (obj == NULL) {
 retry:
-		obj = cache_object_new(STATE(mode)->internal->data, ct);
+		obj = cache_object_new(STATE(mode)->internal->ct.data, ct);
 		if (obj == NULL)
 			return;
-		if (cache_add(STATE(mode)->internal->data, obj, id) == -1) {
+		if (cache_add(STATE(mode)->internal->ct.data, obj, id) == -1) {
 			cache_object_free(obj);
 			return;
 		}
@@ -153,16 +168,15 @@ retry:
 		 * processes or the kernel, but don't propagate events that
 		 * have been triggered by conntrackd itself, eg. commits.
 		 */
 		if (origin == CTD_ORIGIN_NOT_ME)
-			sync_send(obj, NET_T_STATE_NEW);
+			sync_send(obj, NET_T_STATE_CT_NEW);
 	} else {
-		cache_del(STATE(mode)->internal->data, obj);
+		cache_del(STATE(mode)->internal->ct.data, obj);
 		cache_object_free(obj);
 		goto retry;
 	}
 }
 
-static void
-event_update_sync(struct nf_conntrack *ct, int origin)
+static void internal_cache_ct_event_upd(struct nf_conntrack *ct, int origin)
 {
 	struct cache_object *obj;
 
@@ -170,16 +184,163 @@ event_update_sync(struct nf_conntrack *ct, int origin)
 	if (origin == CTD_ORIGIN_INJECT)
 		return;
 
-	obj = cache_update_force(STATE(mode)->internal->data, ct);
+	obj = cache_update_force(STATE(mode)->internal->ct.data, ct);
 	if (obj == NULL)
 		return;
 
 	if (origin == CTD_ORIGIN_NOT_ME)
-		sync_send(obj, NET_T_STATE_UPD);
+		sync_send(obj, NET_T_STATE_CT_UPD);
+}
+
+static int internal_cache_ct_event_del(struct nf_conntrack *ct, int origin)
+{
+	struct cache_object *obj;
+	int id;
+
+	/* this event has been triggered by a direct inject, skip */
+	if (origin == CTD_ORIGIN_INJECT)
+		return 0;
+
+	/* we don't synchronize events for objects that are not in the cache */
+	obj = cache_find(STATE(mode)->internal->ct.data, ct, &id);
+	if (obj == NULL)
+		return 0;
+
+	if (obj->status != C_OBJ_DEAD) {
+		cache_object_set_status(obj, C_OBJ_DEAD);
+		if (origin == CTD_ORIGIN_NOT_ME) {
+			sync_send(obj, NET_T_STATE_CT_DEL);
+		}
+		cache_object_put(obj);
+	}
+	return 1;
+}
+
+static void internal_cache_exp_dump(int fd, int type)
+{
+	cache_dump(STATE(mode)->internal->exp.data, fd, type);
+}
+
+static void internal_cache_exp_flush(void)
+{
+	cache_flush(STATE(mode)->internal->exp.data);
+}
+
+static void internal_cache_exp_stats(int fd)
+{
+	cache_stats(STATE(mode)->internal->exp.data, fd);
+}
+
+static void internal_cache_exp_stats_ext(int fd)
+{
+	cache_stats_extended(STATE(mode)->internal->exp.data, fd);
+}
+
+static void internal_cache_exp_populate(struct nf_expect *exp)
+{
+	cache_update_force(STATE(mode)->internal->exp.data, exp);
+}
+
+static int internal_cache_exp_purge_step(void *data1, void *data2)
+{
+	struct cache_object *obj = data2;
+
+	STATE(get_retval) = 0;
+	nl_get_expect(STATE(get), obj->ptr);	/* modifies STATE(get_reval) */
+	if (!STATE(get_retval)) {
+		if (obj->status != C_OBJ_DEAD) {
+			cache_object_set_status(obj, C_OBJ_DEAD);
+			sync_send(obj, NET_T_STATE_EXP_DEL);
+			cache_object_put(obj);
+		}
+	}
+
+	return 0;
+}
+
+static void internal_cache_exp_purge(void)
+{
+	cache_iterate(STATE(mode)->internal->exp.data, NULL,
+		      internal_cache_exp_purge_step);
 }
 
 static int
-event_destroy_sync(struct nf_conntrack *ct, int origin)
+internal_cache_exp_resync(enum nf_conntrack_msg_type type,
+			  struct nf_expect *exp, void *data)
+{
+	struct cache_object *obj;
+	const struct nf_conntrack *master =
+		nfexp_get_attr(exp, ATTR_EXP_MASTER);
+
+	if (!exp_filter_find(STATE(exp_filter), exp))
+		return NFCT_CB_CONTINUE;
+
+	if (ct_filter_conntrack(master, 1))
+		return NFCT_CB_CONTINUE;
+
+	obj = cache_update_force(STATE(mode)->internal->exp.data, exp);
+	if (obj == NULL)
+		return NFCT_CB_CONTINUE;
+
+	switch (obj->status) {
+	case C_OBJ_NEW:
+		sync_send(obj, NET_T_STATE_EXP_NEW);
+		break;
+	case C_OBJ_ALIVE:
+		sync_send(obj, NET_T_STATE_EXP_UPD);
+		break;
+	}
+	return NFCT_CB_CONTINUE;
+}
+
+static void internal_cache_exp_event_new(struct nf_expect *exp, int origin)
+{
+	struct cache_object *obj;
+	int id;
+
+	/* this event has been triggered by a direct inject, skip */
+	if (origin == CTD_ORIGIN_INJECT)
+		return;
+
+	obj = cache_find(STATE(mode)->internal->exp.data, exp, &id);
+	if (obj == NULL) {
+retry:
+		obj = cache_object_new(STATE(mode)->internal->exp.data, exp);
+		if (obj == NULL)
+			return;
+		if (cache_add(STATE(mode)->internal->exp.data, obj, id) == -1) {
+			cache_object_free(obj);
+			return;
+		}
+		/* only synchronize events that have been triggered by other
+		 * processes or the kernel, but don't propagate events that
+		 * have been triggered by conntrackd itself, eg. commits. */
+		if (origin == CTD_ORIGIN_NOT_ME)
+			sync_send(obj, NET_T_STATE_EXP_NEW);
+	} else {
+		cache_del(STATE(mode)->internal->exp.data, obj);
+		cache_object_free(obj);
+		goto retry;
+	}
+}
+
+static void internal_cache_exp_event_upd(struct nf_expect *exp, int origin)
+{
+	struct cache_object *obj;
+
+	/* this event has been triggered by a direct inject, skip */
+	if (origin == CTD_ORIGIN_INJECT)
+		return;
+
+	obj = cache_update_force(STATE(mode)->internal->exp.data, exp);
+	if (obj == NULL)
+		return;
+
+	if (origin == CTD_ORIGIN_NOT_ME)
+		sync_send(obj, NET_T_STATE_EXP_UPD);
+}
+
+static int internal_cache_exp_event_del(struct nf_expect *exp, int origin)
 {
 	struct cache_object *obj;
 	int id;
@@ -189,14 +350,14 @@ event_destroy_sync(struct nf_conntrack *ct, int origin)
 		return 0;
 
 	/* we don't synchronize events for objects that are not in the cache */
-	obj = cache_find(STATE(mode)->internal->data, ct, &id);
+	obj = cache_find(STATE(mode)->internal->exp.data, exp, &id);
 	if (obj == NULL)
 		return 0;
 
 	if (obj->status != C_OBJ_DEAD) {
 		cache_object_set_status(obj, C_OBJ_DEAD);
 		if (origin == CTD_ORIGIN_NOT_ME) {
-			sync_send(obj, NET_T_STATE_DEL);
+			sync_send(obj, NET_T_STATE_EXP_DEL);
 		}
 		cache_object_put(obj);
 	}
@@ -205,16 +366,30 @@
 
 struct internal_handler internal_cache = {
 	.flags = INTERNAL_F_POPULATE | INTERNAL_F_RESYNC,
-	.init = _init,
-	.close = _close,
-	.dump = dump,
-	.flush = flush,
-	.stats = stats,
-	.stats_ext = stats_ext,
-	.populate = populate,
-	.purge = purge,
-	.resync = resync,
-	.new = event_new_sync,
-	.update = event_update_sync,
-	.destroy = event_destroy_sync,
+	.init = internal_cache_init,
+	.close = internal_cache_close,
+	.ct = {
+		.dump = internal_cache_ct_dump,
+		.flush = internal_cache_ct_flush,
+		.stats = internal_cache_ct_stats,
+		.stats_ext = internal_cache_ct_stats_ext,
+		.populate = internal_cache_ct_populate,
+		.purge = internal_cache_ct_purge,
+		.resync = internal_cache_ct_resync,
+		.new = internal_cache_ct_event_new,
+		.upd = internal_cache_ct_event_upd,
+		.del = internal_cache_ct_event_del,
+	},
+	.exp = {
+		.dump = internal_cache_exp_dump,
+		.flush = internal_cache_exp_flush,
+		.stats = internal_cache_exp_stats,
+		.stats_ext = internal_cache_exp_stats_ext,
+		.populate = internal_cache_exp_populate,
+		.purge = internal_cache_exp_purge,
+		.resync = internal_cache_exp_resync,
+		.new = internal_cache_exp_event_new,
+		.upd = internal_cache_exp_event_upd,
+		.del = internal_cache_exp_event_del,
+	},
 };
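
The structural point of this patch is that struct internal_handler no longer carries one cache and a flat callback set; it now carries two sub-handler blocks, .ct for conntrack entries and .exp for expectations, each with its own cache and its own dump/flush/purge/resync/event callbacks. The snippet below is a minimal, self-contained sketch of that layout, not the real conntrackd headers: the names (sketch_cache_ops, sketch_internal_handler, ct_storage, and so on) are illustrative assumptions, and only the .ct/.exp split and the designated-initializer style mirror what the patch does.

/* Minimal sketch (not the real include/internal.h): per-type sub-handlers. */
#include <stdio.h>

struct sketch_cache_ops {
	void *data;                          /* per-type cache instance */
	void (*dump)(void *data, int fd);
	void (*flush)(void *data);
};

struct sketch_internal_handler {
	struct sketch_cache_ops ct;          /* conntrack entries */
	struct sketch_cache_ops exp;         /* expectations */
};

static void ct_dump(void *data, int fd)  { printf("dump ct cache %p to fd %d\n", data, fd); }
static void ct_flush(void *data)         { printf("flush ct cache %p\n", data); }
static void exp_dump(void *data, int fd) { printf("dump exp cache %p to fd %d\n", data, fd); }
static void exp_flush(void *data)        { printf("flush exp cache %p\n", data); }

static int ct_storage, exp_storage;          /* stand-ins for the two caches */

/* Designated initializers mirror the .ct = { ... }, .exp = { ... } block
 * at the end of the patch. */
static struct sketch_internal_handler internal = {
	.ct  = { .data = &ct_storage,  .dump = ct_dump,  .flush = ct_flush  },
	.exp = { .data = &exp_storage, .dump = exp_dump, .flush = exp_flush },
};

int main(void)
{
	/* Callers pick the sub-handler that matches the object type they hold. */
	internal.ct.dump(internal.ct.data, 1);
	internal.exp.dump(internal.exp.data, 1);
	internal.ct.flush(internal.ct.data);
	internal.exp.flush(internal.exp.data);
	return 0;
}

Keeping one callback table per object type lets the sync code dispatch on conntrack versus expectation objects by picking a sub-handler once, instead of branching on the object type inside every handler.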