diff options
111 files changed, 13676 insertions, 1302 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index 78ff7ad0..8bcaf1af 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -33,6 +33,9 @@ if (NOT BUILD_DRIVER_ONLY) endif (NOT BUILD_DRIVER_ONLY) if (BUILD_DRIVER OR BUILD_DRIVER_ONLY) - add_subdirectory(driver) + add_subdirectory(drivers/pptp) endif (BUILD_DRIVER OR BUILD_DRIVER_ONLY) +if (BUILD_IPOE_DRIVER) + add_subdirectory(drivers/ipoe) +endif (BUILD_IPOE_DRIVER) diff --git a/accel-pppd/CMakeLists.txt b/accel-pppd/CMakeLists.txt index 97c60349..92d7cfba 100644 --- a/accel-pppd/CMakeLists.txt +++ b/accel-pppd/CMakeLists.txt @@ -24,6 +24,11 @@ IF (MEMDEBUG) ENDIF (VALGRIND) ENDIF (MEMDEBUG) +IF (BACKUP) + ADD_DEFINITIONS(-DUSE_BACKUP) + ADD_SUBDIRECTORY(backup) +ENDIF (BACKUP) + IF (NOT DEFINED RADIUS) SET(RADIUS TRUE) ENDIF (NOT DEFINED RADIUS) @@ -42,8 +47,13 @@ ADD_SUBDIRECTORY(ipv6) ADD_SUBDIRECTORY(shaper) ADD_EXECUTABLE(accel-pppd + session.c + session_backup.c + ifcfg.c + + backup/backup.c + ppp/ppp.c - ppp/ppp_ifcfg.c ppp/ppp_fsm.c ppp/ppp_lcp.c ppp/lcp_opt_mru.c @@ -64,6 +74,10 @@ ADD_EXECUTABLE(accel-pppd cli/telnet.c cli/tcp.c cli/cli.c + + libnetlink/libnetlink.c + libnetlink/iplink.c + libnetlink/genl.c pwdb.c ipdb.c diff --git a/accel-pppd/auth/auth_chap_md5.c b/accel-pppd/auth/auth_chap_md5.c index 4b8206b4..a43081d8 100644 --- a/accel-pppd/auth/auth_chap_md5.c +++ b/accel-pppd/auth/auth_chap_md5.c @@ -163,7 +163,7 @@ static void chap_timeout_timer(struct triton_timer_t *t) if (++d->failure == conf_max_failure) { if (d->started) - ppp_terminate(d->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&d->ppp->ses, TERM_USER_ERROR, 0); else ppp_auth_failed(d->ppp, NULL); } else { @@ -248,7 +248,7 @@ static void chap_send_challenge(struct chap_auth_data_t *ad, int new) ppp_chan_send(ad->ppp, &msg, ntohs(msg.hdr.len) + 2); if (conf_timeout && !ad->timeout.tpd) - triton_timer_add(ad->ppp->ctrl->ctx, &ad->timeout, 0); + triton_timer_add(ad->ppp->ses.ctrl->ctx, &ad->timeout, 0); } static void 
chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *hdr) @@ -281,7 +281,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h log_ppp_error("chap-md5: incorrect value-size (%i)\n", msg->val_size); chap_send_failure(ad); if (ad->started) - ppp_terminate(ad->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_USER_ERROR, 0); else ppp_auth_failed(ad->ppp, NULL); return; @@ -292,7 +292,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (conf_any_login) { if (ppp_auth_successed(ad->ppp, name)) { chap_send_failure(ad); - ppp_terminate(ad->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_AUTH_ERROR, 0); _free(name); return; } @@ -301,10 +301,10 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h return; } - r = pwdb_check(ad->ppp, name, PPP_CHAP, CHAP_MD5, ad->id, ad->val, VALUE_SIZE, msg->val); + r = pwdb_check(&ad->ppp->ses, name, PPP_CHAP, CHAP_MD5, ad->id, ad->val, VALUE_SIZE, msg->val); if (r == PWDB_NO_IMPL) { - passwd = pwdb_get_passwd(ad->ppp,name); + passwd = pwdb_get_passwd(&ad->ppp->ses, name); if (!passwd) { _free(name); @@ -326,7 +326,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h log_ppp_warn("chap-md5: challenge response mismatch\n"); chap_send_failure(ad); if (ad->started) - ppp_terminate(ad->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_USER_ERROR, 0); else ppp_auth_failed(ad->ppp, name); _free(name); @@ -334,13 +334,13 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (!ad->started) { if (ppp_auth_successed(ad->ppp, name)) { chap_send_failure(ad); - ppp_terminate(ad->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_AUTH_ERROR, 0); _free(name); } else { chap_send_success(ad); ad->started = 1; if (conf_interval) - triton_timer_add(ad->ppp->ctrl->ctx, &ad->interval, 0); + 
triton_timer_add(ad->ppp->ses.ctrl->ctx, &ad->interval, 0); } } else _free(name); @@ -349,7 +349,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h } else if (r == PWDB_DENIED) { chap_send_failure(ad); if (ad->started) - ppp_terminate(ad->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_USER_ERROR, 0); else ppp_auth_failed(ad->ppp, name); _free(name); @@ -357,13 +357,13 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (!ad->started) { if (ppp_auth_successed(ad->ppp, name)) { chap_send_failure(ad); - ppp_terminate(ad->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_AUTH_ERROR, 0); _free(name); } else { chap_send_success(ad); ad->started = 1; if (conf_interval) - triton_timer_add(ad->ppp->ctrl->ctx, &ad->interval, 0); + triton_timer_add(ad->ppp->ses.ctrl->ctx, &ad->interval, 0); } } else { chap_send_success(ad); diff --git a/accel-pppd/auth/auth_mschap_v1.c b/accel-pppd/auth/auth_mschap_v1.c index f0b58f45..fd1a60a8 100644 --- a/accel-pppd/auth/auth_mschap_v1.c +++ b/accel-pppd/auth/auth_mschap_v1.c @@ -164,7 +164,7 @@ static void chap_timeout_timer(struct triton_timer_t *t) if (++d->failure == conf_max_failure) { if (d->started) - ppp_terminate(d->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&d->ppp->ses, TERM_USER_ERROR, 0); else ppp_auth_failed(d->ppp, NULL); } else { @@ -251,7 +251,7 @@ static void chap_send_challenge(struct chap_auth_data_t *ad, int new) ppp_chan_send(ad->ppp, &msg, ntohs(msg.hdr.len) + 2); if (conf_timeout && !ad->timeout.tpd) - triton_timer_add(ad->ppp->ctrl->ctx, &ad->timeout, 0); + triton_timer_add(ad->ppp->ses.ctrl->ctx, &ad->timeout, 0); } static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *hdr) @@ -283,7 +283,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (msg->val_size != RESPONSE_VALUE_SIZE) { log_ppp_error("mschap-v1: incorrect value-size (%i)\n", 
msg->val_size); if (ad->started) - ppp_terminate(ad->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_AUTH_ERROR, 0); else ppp_auth_failed(ad->ppp, NULL); return; @@ -293,7 +293,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (!name) { log_emerg("mschap-v1: out of memory\n"); if (ad->started) - ppp_terminate(ad->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_NAS_ERROR, 0); else ppp_auth_failed(ad->ppp, NULL); return; @@ -302,7 +302,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (conf_any_login) { if (ppp_auth_successed(ad->ppp, name)) { chap_send_failure(ad, mschap_error); - ppp_terminate(ad->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_AUTH_ERROR, 0); _free(name); return; } @@ -311,7 +311,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h return; } - r = pwdb_check(ad->ppp, name, PPP_CHAP, MSCHAP_V1, ad->id, ad->val, VALUE_SIZE, msg->lm_hash, msg->nt_hash, msg->flags, &mschap_error); + r = pwdb_check(&ad->ppp->ses, name, PPP_CHAP, MSCHAP_V1, ad->id, ad->val, VALUE_SIZE, msg->lm_hash, msg->nt_hash, msg->flags, &mschap_error); if (r == PWDB_NO_IMPL) if (chap_check_response(ad, msg, name)) r = PWDB_DENIED; @@ -319,7 +319,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (r == PWDB_DENIED) { chap_send_failure(ad, mschap_error); if (ad->started) - ppp_terminate(ad->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_AUTH_ERROR, 0); else ppp_auth_failed(ad->ppp, name); _free(name); @@ -327,13 +327,13 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (!ad->started) { if (ppp_auth_successed(ad->ppp, name)) { chap_send_failure(ad, mschap_error); - ppp_terminate(ad->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_AUTH_ERROR, 0); _free(name); } else { chap_send_success(ad); 
ad->started = 1; if (conf_interval) - triton_timer_add(ad->ppp->ctrl->ctx, &ad->interval, 0); + triton_timer_add(ad->ppp->ses.ctrl->ctx, &ad->interval, 0); } } else { chap_send_success(ad); @@ -380,7 +380,7 @@ static int chap_check_response(struct chap_auth_data_t *ad, struct chap_response char *u_passwd; int i; - passwd = pwdb_get_passwd(ad->ppp,name); + passwd = pwdb_get_passwd(&ad->ppp->ses, name); if (!passwd) { if (conf_ppp_verbose) log_ppp_warn("mschap-v1: user not found\n"); diff --git a/accel-pppd/auth/auth_mschap_v2.c b/accel-pppd/auth/auth_mschap_v2.c index aeb49077..ffb6fbf4 100644 --- a/accel-pppd/auth/auth_mschap_v2.c +++ b/accel-pppd/auth/auth_mschap_v2.c @@ -165,7 +165,7 @@ static void chap_timeout_timer(struct triton_timer_t *t) if (++d->failure == conf_max_failure) { if (d->started) - ppp_terminate(d->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&d->ppp->ses, TERM_USER_ERROR, 0); else ppp_auth_failed(d->ppp, NULL); } else { @@ -254,7 +254,7 @@ static int generate_response(struct chap_auth_data_t *ad, struct chap_response_t 0x6E}; - passwd = pwdb_get_passwd(ad->ppp,name); + passwd = pwdb_get_passwd(&ad->ppp->ses, name); if (!passwd) return -1; @@ -324,7 +324,7 @@ static void chap_send_challenge(struct chap_auth_data_t *ad, int new) ppp_chan_send(ad->ppp, &msg, ntohs(msg.hdr.len) + 2); if (conf_timeout && !ad->timeout.tpd) - triton_timer_add(ad->ppp->ctrl->ctx, &ad->timeout, 0); + triton_timer_add(ad->ppp->ses.ctrl->ctx, &ad->timeout, 0); } static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *hdr) @@ -361,7 +361,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h log_ppp_error("mschap-v2: incorrect value-size (%i)\n", msg->val_size); chap_send_failure(ad, mschap_error, reply_msg); if (ad->started) - ppp_terminate(ad->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_USER_ERROR, 0); else ppp_auth_failed(ad->ppp, NULL); return; @@ -371,13 +371,13 @@ static void 
chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (!name) { log_emerg("mschap-v2: out of memory\n"); if (ad->started) - ppp_terminate(ad->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_NAS_ERROR, 0); else ppp_auth_failed(ad->ppp, NULL); return; } - r = pwdb_check(ad->ppp, name, PPP_CHAP, MSCHAP_V2, ad->id, ad->val, msg->peer_challenge, msg->reserved, msg->nt_hash, msg->flags, authenticator, &mschap_error, &reply_msg); + r = pwdb_check(&ad->ppp->ses, name, PPP_CHAP, MSCHAP_V2, ad->id, ad->val, msg->peer_challenge, msg->reserved, msg->nt_hash, msg->flags, authenticator, &mschap_error, &reply_msg); if (r == PWDB_NO_IMPL) { r = chap_check_response(ad, msg, name); @@ -390,7 +390,7 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (r == PWDB_DENIED) { chap_send_failure(ad, mschap_error, reply_msg); if (ad->started) - ppp_terminate(ad->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_AUTH_ERROR, 0); else ppp_auth_failed(ad->ppp, name); _free(name); @@ -398,13 +398,13 @@ static void chap_recv_response(struct chap_auth_data_t *ad, struct chap_hdr_t *h if (!ad->started) { if (ppp_auth_successed(ad->ppp, name)) { chap_send_failure(ad, mschap_error, reply_msg); - ppp_terminate(ad->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ad->ppp->ses, TERM_AUTH_ERROR, 0); _free(name); } else { chap_send_success(ad, msg, authenticator); ad->started = 1; if (conf_interval) - triton_timer_add(ad->ppp->ctrl->ctx, &ad->interval, 0); + triton_timer_add(ad->ppp->ses.ctrl->ctx, &ad->interval, 0); } } else { chap_send_success(ad, msg, authenticator); @@ -453,7 +453,7 @@ static int chap_check_response(struct chap_auth_data_t *ad, struct chap_response char *u_passwd; int i; - passwd = pwdb_get_passwd(ad->ppp, name); + passwd = pwdb_get_passwd(&ad->ppp->ses, name); if (!passwd) { if (conf_ppp_verbose) log_ppp_warn("mschap-v2: user not found\n"); diff --git a/accel-pppd/auth/auth_pap.c 
b/accel-pppd/auth/auth_pap.c index 4ab9bbbf..206ecde6 100644 --- a/accel-pppd/auth/auth_pap.c +++ b/accel-pppd/auth/auth_pap.c @@ -95,7 +95,7 @@ static int pap_start(struct ppp_t *ppp, struct auth_data_t *auth) d->timeout.expire = pap_timeout; d->timeout.period = conf_timeout * 1000; - triton_timer_add(ppp->ctrl->ctx, &d->timeout, 0); + triton_timer_add(ppp->ses.ctrl->ctx, &d->timeout, 0); ppp_register_chan_handler(ppp, &d->h); @@ -201,7 +201,7 @@ static int pap_recv_req(struct pap_auth_data_t *p, struct pap_hdr_t *hdr) if (conf_any_login) { if (ppp_auth_successed(p->ppp, peer_id)) { pap_send_nak(p, hdr->id); - ppp_terminate(p->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&p->ppp->ses, TERM_AUTH_ERROR, 0); _free(peer_id); return -1; } @@ -212,9 +212,9 @@ static int pap_recv_req(struct pap_auth_data_t *p, struct pap_hdr_t *hdr) passwd = _strndup((const char*)ptr, passwd_len); - r = pwdb_check(p->ppp, peer_id, PPP_PAP, passwd); + r = pwdb_check(&p->ppp->ses, peer_id, PPP_PAP, passwd); if (r == PWDB_NO_IMPL) { - passwd2 = pwdb_get_passwd(p->ppp, peer_id); + passwd2 = pwdb_get_passwd(&p->ppp->ses, peer_id); if (!passwd2 || strcmp(passwd2, passwd)) r = PWDB_DENIED; else @@ -226,7 +226,7 @@ static int pap_recv_req(struct pap_auth_data_t *p, struct pap_hdr_t *hdr) log_ppp_warn("PAP: authentication error\n"); pap_send_nak(p, hdr->id); if (p->started) - ppp_terminate(p->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&p->ppp->ses, TERM_AUTH_ERROR, 0); else ppp_auth_failed(p->ppp, peer_id); ret = -1; @@ -234,7 +234,7 @@ static int pap_recv_req(struct pap_auth_data_t *p, struct pap_hdr_t *hdr) } else { if (ppp_auth_successed(p->ppp, peer_id)) { pap_send_nak(p, hdr->id); - ppp_terminate(p->ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&p->ppp->ses, TERM_AUTH_ERROR, 0); _free(peer_id); ret = -1; } else { diff --git a/accel-pppd/backup/CMakeLists.txt b/accel-pppd/backup/CMakeLists.txt new file mode 100644 index 00000000..77fefabf --- /dev/null +++ 
b/accel-pppd/backup/CMakeLists.txt @@ -0,0 +1,4 @@ +ADD_LIBRARY(backup_file SHARED backup_file.c) + +INSTALL(TARGETS backup_file LIBRARY DESTINATION lib/accel-ppp) + diff --git a/accel-pppd/backup/backup.c b/accel-pppd/backup/backup.c new file mode 100644 index 00000000..16349fc7 --- /dev/null +++ b/accel-pppd/backup/backup.c @@ -0,0 +1,254 @@ +#include <stdlib.h> +#include <string.h> + +#include "triton.h" +#include "log.h" +#include "events.h" +#include "ap_session.h" +#include "backup.h" + +#ifdef USE_BACKUP + +static LIST_HEAD(storage_list); +static LIST_HEAD(module_list); + +struct backup_tag __export *backup_add_tag(struct backup_mod *m, uint8_t id, int internal, const void *data, size_t size) +{ + struct backup_tag *t; + + t = m->data->storage->alloc_tag(m->data, size); + if (!t) + return NULL; + + t->id = id; + t->internal = internal; + t->size = size; + memcpy(t->data, data, size); + + list_add_tail(&t->entry, &m->tag_list); + + return t; +} + +void backup_add_fd(struct backup_mod *m, int fd) +{ + if (m->data->storage->add_fd) + m->data->storage->add_fd(m->data, fd); +} + +struct backup_mod __export *backup_find_mod(struct backup_data *d, uint8_t mod_id) +{ + struct backup_mod *m; + + list_for_each_entry(m, &d->mod_list, entry) { + if (m->id == mod_id) + return m; + } + + return NULL; +} + +struct backup_tag __export *backup_find_tag(struct backup_data *d, uint8_t mod_id, uint8_t tag_id, int internal) +{ + struct backup_mod *m = backup_find_mod(d, mod_id); + struct backup_tag *t; + + if (!m) + return NULL; + + list_for_each_entry(t, &m->tag_list, entry) { + if (t->id == tag_id && t->internal == internal) + return t; + } + + return NULL; +} + +void __export backup_free(struct backup_data *data) +{ + struct backup_mod *m; + struct backup_tag *t; + + while (!list_empty(&data->mod_list)) { + m = list_entry(data->mod_list.next, typeof(*m), entry); + while (!list_empty(&m->tag_list)) { + t = list_entry(m->tag_list.next, typeof(*t), entry); + list_del(&t->entry); 
+ data->storage->free_tag(data, t); + } + list_del(&m->entry); + data->storage->free_mod(m); + } + data->storage->free(data); +} + +int __export backup_save_session(struct ap_session *ses) +{ + struct backup_storage *storage; + struct backup_module *module; + struct backup_data *d; + struct backup_mod *m; + int r, f1 = 0, f2; + + list_for_each_entry(storage, &storage_list, entry) { + d = storage->create(ses); + if (!d) + continue; + + //d->ses = ses; + + f2 = 0; + + list_for_each_entry(module, &module_list, entry) { + if (!module->save) + continue; + + m = storage->alloc_mod(d); + if (!m) { + f2 = 1; + break; + } + + m->data = d; + m->id = module->id; + r = module->save(ses, m); + if (r == -2) { + storage->free_mod(m); + continue; + } + + list_add_tail(&m->entry, &d->mod_list); + + if (r == -1) { + f2 = 1; + break; + } + } + + if (f2) + backup_free(d); + else { + f1 = 1; + if (storage->commit) + storage->commit(d); + ses->backup = d; + } + } + + return !f1; +} + +/*int backup_restore_internal(void) +{ + struct backup_storage *storage; + + list_for_each_entry(storage, &storage_list, entry) { + if (storage->restore_internal) { + if (storage->check_integrity()) + continue; + storage->restore_internal(); + return 0; + } + } + + return -1; +} + +void backup_restore_external(void) +{ + struct backup_storage *storage; + + list_for_each_entry(storage, &storage_list, entry) { + if (storage->restore_external) { + if (storage->check_integrity()) + continue; + storage->restore_external(); + return; + } + } +}*/ + +static void __restore_session(struct ap_session *ses) +{ + struct backup_module *module; + struct backup_mod *m; + struct backup_module *ctrl = NULL; + + list_for_each_entry(module, &module_list, entry) { + if (module->ctrl_start) + ctrl = module; + if (module->restore) { + m = backup_find_mod(ses->backup, module->id); + if (!m) + continue; + module->restore(ses, m); + } + } + + log_ppp_info1("session restored\n"); + + if (ctrl) + ctrl->ctrl_start(ses); + else { + 
triton_event_fire(EV_CTRL_STARTING, ses); + triton_event_fire(EV_CTRL_STARTED, ses); + + ap_session_starting(ses); + ap_session_activate(ses); + } +} + +void __export backup_restore_session(struct backup_data *d) +{ + struct backup_module *module; + struct backup_mod *m; + struct ap_session *ses; + + list_for_each_entry(module, &module_list, entry) { + if (module->ctrl_restore) { + m = backup_find_mod(d, module->id); + if (!m) + continue; + ses = module->ctrl_restore(m); + ses->backup = d; + d->ses = ses; + ses->state = AP_STATE_RESTORE; + triton_context_call(ses->ctrl->ctx, (triton_event_func)__restore_session, ses); + break; + } + } +} + + +void __export backup_register_module(struct backup_module *m) +{ + list_add_tail(&m->entry, &module_list); +} + +void __export backup_register_storage(struct backup_storage *s) +{ + list_add_tail(&s->entry, &storage_list); +} + +void backup_restore_fd() +{ + +} + +void backup_restore(int internal) +{ + struct backup_storage *storage; + struct backup_module *module; + + list_for_each_entry(storage, &storage_list, entry) { + if (storage->restore) + storage->restore(internal); + } + + list_for_each_entry(module, &module_list, entry) { + if (module->restore_complete) + module->restore_complete(); + } +} + +#endif diff --git a/accel-pppd/backup/backup.h b/accel-pppd/backup/backup.h new file mode 100644 index 00000000..0037596a --- /dev/null +++ b/accel-pppd/backup/backup.h @@ -0,0 +1,95 @@ +#ifndef __BACKUP_H +#define __BACKUP_H + +#include <stdint.h> +#include "list.h" + +#define MODID_COMMON 1 +#define MODID_RADIUS 2 +#define MODID_PPPOE 3 +#define MODID_IPOE 4 +#define MODID_PPTP 5 +#define MODID_L2TP 6 +#define MODID_IPPOOL 7 + + +struct ap_session; +struct backup_storage; +struct backup_data; + +struct backup_tag +{ + struct list_head entry; + uint16_t internal:1; + uint8_t id; + uint16_t size; + uint8_t *data; +}; + +struct backup_mod +{ + struct backup_data *data; + struct list_head entry; + int id; + struct list_head 
tag_list; +}; + +struct backup_data +{ + struct ap_session *ses; + struct backup_storage *storage; + struct list_head mod_list; + int internal:1; +}; + +struct backup_module +{ + struct list_head entry; + int id; + + int (*save)(struct ap_session *, struct backup_mod *); + int (*restore)(struct ap_session *, struct backup_mod *); + + struct ap_session *(*ctrl_restore)(struct backup_mod *); + void (*ctrl_start)(struct ap_session *ses); + void (*restore_complete)(void); +}; + +struct backup_storage +{ + struct list_head entry; + + /*int (*check_integrity)(void); + int (*restore)(int internal);*/ + + void (*restore)(int internal); + + struct backup_data *(*create)(struct ap_session *); + int (*commit)(struct backup_data *); + void (*free)(struct backup_data *); + + struct backup_mod *(*alloc_mod)(struct backup_data *); + void (*free_mod)(struct backup_mod *); + + void (*add_fd)(struct backup_data *, int fd); + + struct backup_tag *(*alloc_tag)(struct backup_data *, int size); + void (*free_tag)(struct backup_data *, struct backup_tag *); +}; + +void backup_register_module(struct backup_module *); +void backup_register_storage(struct backup_storage *); + +int backup_save_session(struct ap_session *ses); +void backup_restore_session(struct backup_data *d); + +struct backup_mod *backup_find_mod(struct backup_data *d, uint8_t mod_id); +struct backup_tag *backup_find_tag(struct backup_data *d, uint8_t mod_id, uint8_t tag_id, int internal); +struct backup_tag *backup_add_tag(struct backup_mod *m, uint8_t id, int internal, const void *data, size_t size); +void backup_add_fd(struct backup_mod *m, int fd); + +void backup_restore(int internal); +void backup_restore_fd(); + +#endif + diff --git a/accel-pppd/backup/backup_file.c b/accel-pppd/backup/backup_file.c new file mode 100644 index 00000000..b9d26466 --- /dev/null +++ b/accel-pppd/backup/backup_file.c @@ -0,0 +1,351 @@ +#include <unistd.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <errno.h> 
+#include <fcntl.h> +#include <limits.h> +#include <dirent.h> +#include <sys/stat.h> +#include <sys/uio.h> +#include <sys/mman.h> + +#include "triton.h" +#include "log.h" +#include "ap_session.h" +#include "backup.h" +#include "crypto.h" +#include "memdebug.h" + +#define VERSION 1 + +struct fs_backup_data +{ + struct list_head fd_list; + int fd; + void *map_addr; + int map_len; + char sessionid[AP_SESSIONID_LEN]; + struct backup_data data; +}; + +static char *conf_path; + +static struct backup_storage file_storage; + +static struct backup_data *fs_create(struct ap_session *ses) +{ + struct fs_backup_data *d = _malloc(sizeof(*d)); + + if (!d) + return NULL; + + memset(d, 0, sizeof(*d)); + d->fd = -1; + INIT_LIST_HEAD(&d->fd_list); + INIT_LIST_HEAD(&d->data.mod_list); + d->data.ses = ses; + d->data.storage = &file_storage; + + return &d->data; +} + +static int fs_commit(struct backup_data *d) +{ + char fname[PATH_MAX]; + int fd; + struct backup_mod *mod; + struct backup_tag *tag; + struct iovec iov[IOV_MAX]; + int i, len, n; + MD5_CTX md5; + unsigned char md5_buf[16]; + uint8_t end[4] = {0, 0, 0, 0}; + uint8_t version = VERSION; + uint8_t *ptr; + + if (!conf_path) + return -1; + + sprintf(fname, "%s/%s", conf_path, d->ses->sessionid); + + fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, S_IREAD | S_IWRITE); + + if (fd < 0) { + log_error("backup: can not create file '%s': %s\n", fname, strerror(errno)); + return -1; + } + + MD5_Init(&md5); + MD5_Update(&md5, &version, 1); + + iov[0].iov_base = &version; + iov[0].iov_len = 1; + i = 1; + len = 1; + + list_for_each_entry(mod, &d->mod_list, entry) { + MD5_Update(&md5, &mod->id, 1); + iov[i].iov_base = &mod->id; + iov[i].iov_len = 1; + i++; + len++; + + list_for_each_entry(tag, &mod->tag_list, entry) { + ptr = (uint8_t *)(tag + 1); + *ptr = tag->id; ptr++; + *ptr = tag->internal ? 
1 : 0; ptr++; + *(uint16_t *)ptr = tag->size; + MD5_Update(&md5, tag + 1, 4 + tag->size); + iov[i].iov_base = tag + 1; + iov[i].iov_len = 4 + tag->size; + i++; + len += 4 + tag->size; + if (i == IOV_MAX - 2) { + n = writev(fd, iov, i); + if (n < len) { + log_error("backup: short write %i/%i\n", n, len); + goto out_err; + } + i = 0; + len = 0; + } + } + + MD5_Update(&md5, end, 4); + iov[i].iov_base = end; + iov[i].iov_len = 4; + i++; + len += 4; + } + + MD5_Final(md5_buf, &md5); + + iov[i].iov_base = md5_buf; + iov[i].iov_len = 16; + len += 16; + + n = writev(fd, iov, i + 1); + if (n < len) { + log_error("backup: short write %i/%i\n", n, len); + goto out_err; + } + + close(fd); + + while (!list_empty(&d->mod_list)) { + mod = list_entry(d->mod_list.next, typeof(*mod), entry); + list_del(&mod->entry); + while (!list_empty(&mod->tag_list)) { + tag = list_entry(mod->tag_list.next, typeof(*tag), entry); + list_del(&tag->entry); + _free(tag); + } + _free(mod); + } + + return 0; + +out_err: + close(fd); + unlink(fname); + return -1; +} + +static void fs_free(struct backup_data *d) +{ + struct fs_backup_data *fsd = container_of(d, typeof(*fsd), data); + char fname[PATH_MAX]; + + if (fsd->map_addr) + munmap(fsd->map_addr, fsd->map_len); + + if (fsd->fd != -1) + close(fsd->fd); + + sprintf(fname, "%s/%s", conf_path, d->ses->sessionid); + unlink(fname); + + _free(fsd); +} + +static struct backup_mod *fs_alloc_mod(struct backup_data *d) +{ + struct backup_mod *m = _malloc(sizeof(struct backup_mod)); + + if (!m) + return NULL; + + memset(m, 0, sizeof(*m)); + INIT_LIST_HEAD(&m->tag_list); + + return m; +} + +static void fs_free_mod(struct backup_mod *mod) +{ + _free(mod); +} + +static struct backup_tag *fs_alloc_tag(struct backup_data *d, int size) +{ + struct backup_tag *t = _malloc(sizeof(struct backup_tag) + 4 + size); + + if (!t) + return NULL; + + memset(t, 0, sizeof(*t)); + + t->data = (uint8_t *)(t + 1) + 4; + + return t; +} + +static void fs_free_tag(struct backup_data 
*d, struct backup_tag *tag) +{ + _free(tag); +} + +static void fs_add_fd(struct backup_data *d, int fd) +{ + +} + +static void restore_session(const char *fn, int internal) +{ + char fname[PATH_MAX]; + int fd; + struct stat st; + uint8_t *ptr, *endptr; + MD5_CTX md5; + unsigned char md5_buf[16]; + struct backup_data *d; + struct fs_backup_data *fsd; + struct backup_mod *mod; + struct backup_tag *tag; + + sprintf(fname, "%s/%s", conf_path, fn); + + fd = open(fname, O_RDONLY); + if (fd < 0) { + log_emerg("backup_file: open '%s': %s\n", fname, strerror(errno)); + return; + } + + fstat(fd, &st); + + ptr = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0); + if (ptr == MAP_FAILED) { + log_emerg("backup_file: mmap '%s': %s\n", fname, strerror(errno)); + close(fd); + return; + } + + if (*ptr != VERSION) + goto out; + + MD5_Init(&md5); + MD5_Update(&md5, ptr, st.st_size - 16); + MD5_Final(md5_buf, &md5); + + if (memcmp(md5_buf, ptr + st.st_size - 16, 16)) + goto out; + + d = fs_create(NULL); + if (!d) + goto out; + + d->internal = internal; + + fsd = container_of(d, typeof(*fsd), data); + fsd->fd = fd; + fsd->map_addr = ptr; + fsd->map_len = st.st_size; + + endptr = ptr + st.st_size - 16; + ptr++; + + while (ptr < endptr) { + mod = fs_alloc_mod(d); + list_add_tail(&mod->entry, &d->mod_list); + mod->data = d; + mod->id = *ptr; ptr++; + while (ptr < endptr) { + if (*(uint8_t *)ptr == 0) { + ptr += 4; + break; + } + + if (!internal && ptr[1]) { + ptr += 4 + *(uint16_t *)(ptr + 2); + continue; + } + + tag = fs_alloc_tag(d, 0); + tag->id = *ptr; ptr++; + tag->internal = (*ptr & 0x01) ? 
1 : 0; ptr ++; + tag->size = *(uint16_t *)ptr; ptr += 2; + tag->data = ptr; ptr += tag->size; + + list_add_tail(&tag->entry, &mod->tag_list); + } + } + + backup_restore_session(d); + + return; + +out: + munmap(ptr, st.st_size); + close(fd); +} + +static void fs_restore(int internal) +{ + DIR *dirp; + struct dirent ent, *res; + + if (!conf_path) + return; + + dirp = opendir(conf_path); + if (!dirp) { + log_emerg("backup_file: opendir: %s\n", strerror(errno)); + return; + } + + while (1) { + if (readdir_r(dirp, &ent, &res)) { + log_emerg("backup_file: readdir: %s\n", strerror(errno)); + break; + } + if (!res) + break; + if (strcmp(ent.d_name, ".") == 0 || strcmp(ent.d_name, "..") == 0) + continue; + restore_session(ent.d_name, internal); + } + + closedir(dirp); +} + +static struct backup_storage file_storage = { + .create = fs_create, + .commit = fs_commit, + .free = fs_free, + .alloc_mod = fs_alloc_mod, + .free_mod = fs_free_mod, + .add_fd = fs_add_fd, + .alloc_tag = fs_alloc_tag, + .free_tag = fs_free_tag, + .restore = fs_restore, +}; + +static void init(void) +{ + conf_path = conf_get_opt("backup", "path"); + + backup_register_storage(&file_storage); +} + +DEFINE_INIT(1000, init); diff --git a/accel-pppd/cli/cli.h b/accel-pppd/cli/cli.h index cdceb2fa..3e31c5d8 100644 --- a/accel-pppd/cli/cli.h +++ b/accel-pppd/cli/cli.h @@ -29,7 +29,7 @@ struct cli_regexp_cmd_t int (*help)(char * const *fields, int field_cnt, void *client); }; -struct ppp_t; +struct ap_session; void cli_register_simple_cmd(struct cli_simple_cmd_t *cmd); void cli_register_simple_cmd2( @@ -39,7 +39,7 @@ void cli_register_simple_cmd2( ... 
); void cli_register_regexp_cmd(struct cli_regexp_cmd_t *cmd); -void cli_show_ses_register(const char *name, const char *desc, void (*print)(const struct ppp_t *ppp, char *buf)); +void cli_show_ses_register(const char *name, const char *desc, void (*print)(const struct ap_session *ses, char *buf)); int cli_send(void *client, const char *data); int cli_sendv(void *client, const char *fmt, ...); diff --git a/accel-pppd/cli/show_sessions.c b/accel-pppd/cli/show_sessions.c index c0fec580..746ce583 100644 --- a/accel-pppd/cli/show_sessions.c +++ b/accel-pppd/cli/show_sessions.c @@ -22,7 +22,7 @@ struct column_t struct list_head entry; const char *name; const char *desc; - void (*print)(const struct ppp_t *ppp, char *buf); + void (*print)(const struct ap_session *ses, char *buf); }; struct col_t @@ -50,7 +50,7 @@ struct cell_t static LIST_HEAD(col_list); -void __export cli_show_ses_register(const char *name, const char *desc, void (*print)(const struct ppp_t *ppp, char *buf)) +void __export cli_show_ses_register(const char *name, const char *desc, void (*print)(const struct ap_session *ses, char *buf)) { struct column_t *c = malloc(sizeof(*c)); c->name = name; @@ -131,7 +131,7 @@ static int show_ses_exec(const char *cmd, char * const *f, int f_cnt, void *cli) struct cell_t *cell; char *ptr1, *ptr2; int i, n, total_width, def_columns = 0; - struct ppp_t *ppp; + struct ap_session *ses; char *buf = NULL; int match_key_f = 0, order_key_f = 0; LIST_HEAD(c_list); @@ -221,8 +221,8 @@ static int show_ses_exec(const char *cmd, char * const *f, int f_cnt, void *cli) list_add_tail(&col->entry, &c_list); } - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { row = _malloc(sizeof(*row)); if (!row) goto oom; @@ -238,7 +238,7 @@ static int show_ses_exec(const char *cmd, char * const *f, int f_cnt, void *cli) goto oom; cell->col = col; list_add_tail(&cell->entry, 
&row->cell_list); - col->column->print(ppp, cell->buf); + col->column->print(ses, cell->buf); n = strlen(cell->buf); if (n > col->width) col->width = n; @@ -248,7 +248,7 @@ static int show_ses_exec(const char *cmd, char * const *f, int f_cnt, void *cli) row->match_key = cell->buf; } } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); if (order_key || match_key) { while(!list_empty(&t_list)) { @@ -368,40 +368,40 @@ oom: goto out; } -static void print_ifname(const struct ppp_t *ppp, char *buf) +static void print_ifname(const struct ap_session *ses, char *buf) { - snprintf(buf, CELL_SIZE, "%s", ppp->ifname); + snprintf(buf, CELL_SIZE, "%s", ses->ifname); } -static void print_username(const struct ppp_t *ppp, char *buf) +static void print_username(const struct ap_session *ses, char *buf) { - if (ppp->username) - snprintf(buf, CELL_SIZE, "%s", ppp->username); + if (ses->username) + snprintf(buf, CELL_SIZE, "%s", ses->username); else *buf = 0; } -static void print_ip(const struct ppp_t *ppp, char *buf) +static void print_ip(const struct ap_session *ses, char *buf) { - u_inet_ntoa(ppp->ipv4 ? ppp->ipv4->peer_addr : 0, buf); + u_inet_ntoa(ses->ipv4 ? 
ses->ipv4->peer_addr : 0, buf); } -static void print_type(const struct ppp_t *ppp, char *buf) +static void print_type(const struct ap_session *ses, char *buf) { - snprintf(buf, CELL_SIZE, "%s", ppp->ctrl->name); + snprintf(buf, CELL_SIZE, "%s", ses->ctrl->name); } -static void print_state(const struct ppp_t *ppp, char *buf) +static void print_state(const struct ap_session *ses, char *buf) { char *state; - switch (ppp->state) { - case PPP_STATE_STARTING: + switch (ses->state) { + case AP_STATE_STARTING: state = "start"; break; - case PPP_STATE_ACTIVE: + case AP_STATE_ACTIVE: state = "active"; break; - case PPP_STATE_FINISHING: + case AP_STATE_FINISHING: state = "finish"; break; default: @@ -410,17 +410,17 @@ static void print_state(const struct ppp_t *ppp, char *buf) sprintf(buf, "%s", state); } -static void print_uptime(const struct ppp_t *ppp, char *buf) +static void print_uptime(const struct ap_session *ses, char *buf) { time_t uptime; int day,hour,min,sec; char time_str[14]; - if (ppp->stop_time) - uptime = ppp->stop_time - ppp->start_time; + if (ses->stop_time) + uptime = ses->stop_time - ses->start_time; else { time(&uptime); - uptime -= ppp->start_time; + uptime -= ses->start_time; } day = uptime/ (24*60*60); uptime %= (24*60*60); @@ -435,24 +435,32 @@ static void print_uptime(const struct ppp_t *ppp, char *buf) sprintf(buf, "%s", time_str); } -static void print_calling_sid(const struct ppp_t *ppp, char *buf) +static void print_calling_sid(const struct ap_session *ses, char *buf) { - snprintf(buf, CELL_SIZE, "%s", ppp->ctrl->calling_station_id); + snprintf(buf, CELL_SIZE, "%s", ses->ctrl->calling_station_id); } -static void print_called_sid(const struct ppp_t *ppp, char *buf) +static void print_called_sid(const struct ap_session *ses, char *buf) { - snprintf(buf, CELL_SIZE, "%s", ppp->ctrl->called_station_id); + snprintf(buf, CELL_SIZE, "%s", ses->ctrl->called_station_id); } -static void print_sid(const struct ppp_t *ppp, char *buf) +static void 
print_sid(const struct ap_session *ses, char *buf) { - snprintf(buf, CELL_SIZE, "%s", ppp->sessionid); + snprintf(buf, CELL_SIZE, "%s", ses->sessionid); } -static void print_comp(const struct ppp_t *ppp, char *buf) +static void print_comp(const struct ap_session *ses, char *buf) { - snprintf(buf, CELL_SIZE, "%s", ppp->comp ? ppp->comp : ""); + struct ppp_t *ppp; + + *buf = 0; + + if (ses->ctrl->type != CTRL_TYPE_IPOE) { + ppp = container_of(ses, typeof(*ppp), ses); + if (ppp->comp) + snprintf(buf, CELL_SIZE, "%s", ppp->comp); + } } static void init(void) diff --git a/accel-pppd/cli/std_cmd.c b/accel-pppd/cli/std_cmd.c index 24e52283..01e01571 100644 --- a/accel-pppd/cli/std_cmd.c +++ b/accel-pppd/cli/std_cmd.c @@ -15,6 +15,8 @@ #include "log.h" #include "memdebug.h" +void core_restart(int); + static int show_stat_exec(const char *cmd, char * const *fields, int fields_cnt, void *client) { struct timespec ts; @@ -68,10 +70,10 @@ static int show_stat_exec(const char *cmd, char * const *fields, int fields_cnt, cli_sendv(client, " timer_pending: %u\r\n", triton_stat.timer_pending); //=========== - cli_send(client, "ppp:\r\n"); - cli_sendv(client, " starting: %u\r\n", ppp_stat.starting); - cli_sendv(client, " active: %u\r\n", ppp_stat.active); - cli_sendv(client, " finishing: %u\r\n", ppp_stat.finishing); + cli_send(client, "sessions:\r\n"); + cli_sendv(client, " starting: %u\r\n", ap_session_stat.starting); + cli_sendv(client, " active: %u\r\n", ap_session_stat.active); + cli_sendv(client, " finishing: %u\r\n", ap_session_stat.finishing); return CLI_CMD_OK; } @@ -94,19 +96,19 @@ static void exit_help(char * const *fields, int fields_cnt, void *client) //============================= -static void ppp_terminate_soft(struct ppp_t *ppp) +static void __terminate_soft(struct ap_session *ses) { - ppp_terminate(ppp, TERM_NAS_REQUEST, 0); + ap_session_terminate(ses, TERM_NAS_REQUEST, 0); } -static void ppp_terminate_hard(struct ppp_t *ppp) +static void __terminate_hard(struct 
ap_session *ses) { - ppp_terminate(ppp, TERM_NAS_REQUEST, 1); + ap_session_terminate(ses, TERM_NAS_REQUEST, 1); } static int terminate_exec1(char * const *f, int f_cnt, void *cli) { - struct ppp_t *ppp; + struct ap_session *ses; int hard = 0; pcre *re; const char *pcre_err; @@ -126,16 +128,16 @@ static int terminate_exec1(char * const *f, int f_cnt, void *cli) return CLI_CMD_OK; } - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { - if (pcre_exec(re, NULL, ppp->username, strlen(ppp->username), 0, 0, NULL, 0) < 0) + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + if (pcre_exec(re, NULL, ses->username, strlen(ses->username), 0, 0, NULL, 0) < 0) continue; if (hard) - triton_context_call(ppp->ctrl->ctx, (triton_event_func)ppp_terminate_hard, ppp); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate_hard, ses); else - triton_context_call(ppp->ctrl->ctx, (triton_event_func)ppp_terminate_soft, ppp); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate_soft, ses); } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); pcre_free(re); @@ -144,7 +146,7 @@ static int terminate_exec1(char * const *f, int f_cnt, void *cli) static int terminate_exec2(int key, char * const *f, int f_cnt, void *cli) { - struct ppp_t *ppp; + struct ap_session *ses; int hard = 0; in_addr_t ipaddr = 0; @@ -159,44 +161,44 @@ static int terminate_exec2(int key, char * const *f, int f_cnt, void *cli) if (key == 1) ipaddr = inet_addr(f[2]); - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { switch (key) { case 0: - if (!ppp->username || strcmp(ppp->username, f[2])) + if (!ses->username || strcmp(ses->username, f[2])) continue; break; case 1: - if (ppp->ipv4 && ppp->ipv4->peer_addr != ipaddr) + if (ses->ipv4 && ses->ipv4->peer_addr != ipaddr) continue; break; case 2: - if 
(strcmp(ppp->ctrl->calling_station_id, f[2])) + if (strcmp(ses->ctrl->calling_station_id, f[2])) continue; break; case 3: - if (strcmp(ppp->sessionid, f[2])) + if (strcmp(ses->sessionid, f[2])) continue; break; case 4: - if (strcmp(ppp->ifname, f[2])) + if (strcmp(ses->ifname, f[2])) continue; break; } if (hard) - triton_context_call(ppp->ctrl->ctx, (triton_event_func)ppp_terminate_hard, ppp); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate_hard, ses); else - triton_context_call(ppp->ctrl->ctx, (triton_event_func)ppp_terminate_soft, ppp); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate_soft, ses); break; } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); return CLI_CMD_OK; } static int terminate_exec(const char *cmd, char * const *fields, int fields_cnt, void *client) { - struct ppp_t *ppp; + struct ap_session *ses; int hard = 0; if (fields_cnt == 1) @@ -225,14 +227,14 @@ static int terminate_exec(const char *cmd, char * const *fields, int fields_cnt, } else if (fields_cnt != 2) return CLI_CMD_SYNTAX; - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { if (hard) - triton_context_call(ppp->ctrl->ctx, (triton_event_func)ppp_terminate_hard, ppp); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate_hard, ses); else - triton_context_call(ppp->ctrl->ctx, (triton_event_func)ppp_terminate_soft, ppp); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate_soft, ses); } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); return CLI_CMD_OK; } @@ -258,44 +260,50 @@ static void shutdown_help(char * const *fields, int fields_cnt, void *client) cli_send(client, "\t\tcancel - cancel 'shutdown soft' and return to normal operation\r\n"); } -static void ppp_terminate_soft2(struct ppp_t *ppp) +static void __terminate_soft2(struct ap_session *ses) { - 
ppp_terminate(ppp, TERM_NAS_REBOOT, 0); + ap_session_terminate(ses, TERM_NAS_REBOOT, 0); } -static void ppp_terminate_hard2(struct ppp_t *ppp) +static void __terminate_hard2(struct ap_session *ses) { - ppp_terminate(ppp, TERM_NAS_REBOOT, 1); + ap_session_terminate(ses, TERM_NAS_REBOOT, 1); +} + +static void terminate_all_sessions(int hard) +{ + struct ap_session *ses; + + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + if (hard) + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate_hard2, ses); + else + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate_soft2, ses); + } + pthread_rwlock_unlock(&ses_lock); } static int shutdown_exec(const char *cmd, char * const *f, int f_cnt, void *cli) { int hard = 0; - struct ppp_t *ppp; if (f_cnt == 2) { if (!strcmp(f[1], "soft")) { - ppp_shutdown_soft(); + ap_shutdown_soft(NULL); return CLI_CMD_OK; } else if (!strcmp(f[1], "hard")) hard = 1; else if (!strcmp(f[1], "cancel")) { - ppp_shutdown = 0; + ap_shutdown = 0; return CLI_CMD_OK; } else return CLI_CMD_SYNTAX; } - ppp_shutdown_soft(); + ap_shutdown_soft(NULL); - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { - if (hard) - triton_context_call(ppp->ctrl->ctx, (triton_event_func)ppp_terminate_hard2, ppp); - else - triton_context_call(ppp->ctrl->ctx, (triton_event_func)ppp_terminate_soft2, ppp); - } - pthread_rwlock_unlock(&ppp_lock); + terminate_all_sessions(hard); return CLI_CMD_OK; } @@ -328,11 +336,63 @@ static void reload_help(char * const *fields, int fields_cnt, void *client) cli_send(client, "reload - reload config file\r\n"); } + +//========================== + +static void __do_restart(void) +{ + core_restart(0); + _exit(0); +} + +static int restart_exec(const char *cmd, char * const *f, int f_cnt, void *cli) +{ + int hard; + + if (f_cnt == 2) { + if (strcmp(f[1], "soft") == 0) + hard = 0; + else if (strcmp(f[1], "gracefully") == 0) + hard = 1; + else if (strcmp(f[1], 
"hard") == 0) + __do_restart(); + else + return CLI_CMD_SYNTAX; + } else if (f_cnt == 1) + hard = 0; + else + return CLI_CMD_SYNTAX; + +#ifndef USE_BACKUP + hard = 1; +#endif + + if (hard) { + ap_shutdown_soft(__do_restart); + terminate_all_sessions(0); + } else { + core_restart(1); + _exit(0); + } + + return CLI_CMD_OK; +} + +static void restart_help(char * const *fields, int fields_cnt, void *client) +{ + cli_send(client, "restart [soft|gracefully|hard] - restart daemon\r\n"); + cli_send(client, "\t\tsoft - restart daemon softly, e.g. keep existing connections if session backup is enabled (default)\r\n"); + cli_send(client, "\t\tgracefully - terminate all connections then restart\r\n"); + cli_send(client, "\t\thard - restart immediatly\r\n"); +} + + static void init(void) { cli_register_simple_cmd2(show_stat_exec, show_stat_help, 2, "show", "stat"); cli_register_simple_cmd2(terminate_exec, terminate_help, 1, "terminate"); cli_register_simple_cmd2(reload_exec, reload_help, 1, "reload"); + cli_register_simple_cmd2(restart_exec, restart_help, 1, "restart"); cli_register_simple_cmd2(shutdown_exec, shutdown_help, 1, "shutdown"); cli_register_simple_cmd2(exit_exec, exit_help, 1, "exit"); } diff --git a/accel-pppd/cli/telnet.c b/accel-pppd/cli/telnet.c index f7b43115..5cb75c7a 100644 --- a/accel-pppd/cli/telnet.c +++ b/accel-pppd/cli/telnet.c @@ -220,7 +220,7 @@ static int send_password_request(struct telnet_client_t *cln) static int send_prompt(struct telnet_client_t *cln) { - sprintf((char *)temp_buf, "%s%s# ", conf_cli_prompt, ppp_shutdown ? "(shutdown)" : ""); + sprintf((char *)temp_buf, "%s%s# ", conf_cli_prompt, ap_shutdown ? 
"(shutdown)" : ""); return telnet_send(cln, temp_buf, strlen((char *)temp_buf)); } @@ -292,7 +292,7 @@ static int telnet_input_char(struct telnet_client_t *cln, uint8_t c) return -1; } cln->auth = 1; - if (ppp_shutdown) { + if (ap_shutdown) { if (telnet_send(cln, MSG_SHUTDOWN_IN_PROGRESS, sizeof(MSG_SHUTDOWN_IN_PROGRESS))) return -1; } @@ -555,6 +555,8 @@ static int serv_read(struct triton_md_handler_t *h) continue; } + fcntl(sock, F_SETFD, fcntl(sock, F_GETFD) | FD_CLOEXEC); + conn = _malloc(sizeof(*conn)); memset(conn, 0, sizeof(*conn)); conn->hnd.fd = sock; @@ -600,7 +602,7 @@ static int serv_read(struct triton_md_handler_t *h) send_password_request(conn); else { conn->auth = 1; - if (ppp_shutdown) { + if (ap_shutdown) { if (telnet_send(conn, MSG_SHUTDOWN_IN_PROGRESS, sizeof(MSG_SHUTDOWN_IN_PROGRESS))) continue; } diff --git a/accel-pppd/ctrl/CMakeLists.txt b/accel-pppd/ctrl/CMakeLists.txt index 6b37bc4a..9b6a11d6 100644 --- a/accel-pppd/ctrl/CMakeLists.txt +++ b/accel-pppd/ctrl/CMakeLists.txt @@ -1,3 +1,4 @@ ADD_SUBDIRECTORY(pptp) ADD_SUBDIRECTORY(pppoe) ADD_SUBDIRECTORY(l2tp) +ADD_SUBDIRECTORY(ipoe) diff --git a/accel-pppd/ctrl/ipoe/CMakeLists.txt b/accel-pppd/ctrl/ipoe/CMakeLists.txt new file mode 100644 index 00000000..fdcb3a49 --- /dev/null +++ b/accel-pppd/ctrl/ipoe/CMakeLists.txt @@ -0,0 +1,26 @@ +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +SET(sources + ipoe.c + dhcpv4.c + dhcpv4_options.c + ipoe_netlink.c + backup.c +) + +IF (LUA) + include(FindLua51) + IF (NOT LUA51_FOUND) + MESSAGE(FATAL_ERROR "lua not found") + ENDIF (NOT LUA51_FOUND) + INCLUDE_DIRECTORIES(${LUA_INCLUDE_DIR}) + ADD_DEFINITIONS(-DUSE_LUA) + SET(sources ${sources} lua.c lua_lpack.c) +ENDIF (LUA) + +ADD_LIBRARY(ipoe SHARED ${sources}) +IF (LUA) + TARGET_LINK_LIBRARIES(ipoe ${LUA_LIBRARIES}) +ENDIF(LUA) + +INSTALL(TARGETS ipoe LIBRARY DESTINATION lib/accel-ppp) diff --git a/accel-pppd/ctrl/ipoe/backup.c b/accel-pppd/ctrl/ipoe/backup.c new file mode 100644 index 00000000..8347a4e5 
--- /dev/null +++ b/accel-pppd/ctrl/ipoe/backup.c @@ -0,0 +1,217 @@ +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <netinet/in.h> +#include <net/ethernet.h> + +#include "triton.h" +#include "events.h" +#include "log.h" +#include "memdebug.h" + +#include "ipoe.h" +#include "backup.h" +#include "ap_session_backup.h" + +#define IPOE_TAG_HWADDR 1 +#define IPOE_TAG_CLIENT_ID 2 +#define IPOE_TAG_AGENT_CIRCUIT_ID 3 +#define IPOE_TAG_AGENT_REMOTE_ID 4 +#define IPOE_TAG_XID 5 +#define IPOE_TAG_GIADDR 6 +#define IPOE_TAG_CALLING_SID 7 +#define IPOE_TAG_CALLED_SID 8 +#define IPOE_TAG_IFNAME 9 + +#define IPOE_TAG_IFINDEX 100 + + +#define add_tag(id, data, size) if (!backup_add_tag(m, id, 0, data, size)) return -1; +#define add_tag_i(id, data, size) if (!backup_add_tag(m, id, 1, data, size)) return -1; + +static LIST_HEAD(ds_list); + +static void restore_complete(void); + +#ifdef USE_BACKUP +static int session_save(struct ap_session *ses, struct backup_mod *m) +{ + struct ipoe_session *conn = container_of(ses, typeof(*conn), ses); + + add_tag(IPOE_TAG_HWADDR, conn->hwaddr, 6); + add_tag(IPOE_TAG_CALLING_SID, ses->ctrl->calling_station_id, strlen(ses->ctrl->calling_station_id)); + add_tag(IPOE_TAG_CALLED_SID, ses->ctrl->called_station_id, strlen(ses->ctrl->called_station_id)); + add_tag(IPOE_TAG_XID, &conn->xid, 4); + add_tag(IPOE_TAG_GIADDR, &conn->giaddr, 4); + + if (conn->client_id) + add_tag(IPOE_TAG_CLIENT_ID, conn->client_id->data, conn->client_id->len); + if (conn->agent_circuit_id) + add_tag(IPOE_TAG_AGENT_CIRCUIT_ID, conn->agent_circuit_id->data, conn->agent_circuit_id->len); + if (conn->agent_circuit_id) + add_tag(IPOE_TAG_AGENT_REMOTE_ID, conn->agent_remote_id->data, conn->agent_remote_id->len); + + add_tag(IPOE_TAG_IFNAME, conn->serv->ifname, strlen(conn->serv->ifname) + 1); + + add_tag_i(IPOE_TAG_IFINDEX, &conn->ifindex, 4); + + return 0; +} + +static int session_restore(struct ap_session *ses, struct backup_mod *m) +{ + struct ipoe_session 
*conn = container_of(ses, typeof(*conn), ses); + + + return 0; +} + +static void set_dhcpv4_opt(struct dhcp_opt **opt, struct backup_tag *t, uint8_t **ptr) +{ + *opt = (struct dhcp_opt *)(*ptr); + (*opt)->len = t->size; + memcpy((*opt)->data, t->data, t->size); + (*ptr) += sizeof(**opt) + t->size; +} + +static struct ap_session *ctrl_restore(struct backup_mod *m) +{ + struct backup_tag *t; + struct ipoe_session *ses; + struct ipoe_serv *serv; + struct backup_tag *ifname = NULL; + int dlen = 0; + uint8_t *ptr; + struct ipoe_session_info *info; + + //if (!m->data->internal) + // return NULL; + + list_for_each_entry(t, &m->tag_list, entry) { + switch(t->id) { + case IPOE_TAG_CLIENT_ID: + case IPOE_TAG_AGENT_CIRCUIT_ID: + case IPOE_TAG_AGENT_REMOTE_ID: + dlen += sizeof(struct dhcp_opt) + t->size; + break; + case IPOE_TAG_IFNAME: + ifname = t; + break; + } + } + + if (!ifname) + return NULL; + + serv = ipoe_find_serv((char *)ifname->data); + if (!serv) + return NULL; + + ses = ipoe_session_alloc(); + if (!ses) + return NULL; + + if (dlen) + ses->data = _malloc(dlen); + + ptr = ses->data; + + list_for_each_entry(t, &m->tag_list, entry) { + switch(t->id) { + case IPOE_TAG_HWADDR: + memcpy(ses->hwaddr, t->data, 6); + break; + case IPOE_TAG_CALLING_SID: + ses->ctrl.calling_station_id = _malloc(t->size + 1); + memcpy(ses->ctrl.calling_station_id, t->data, t->size); + ses->ctrl.calling_station_id[t->size] = 0; + break; + case IPOE_TAG_CALLED_SID: + ses->ctrl.called_station_id = _malloc(t->size + 1); + memcpy(ses->ctrl.called_station_id, t->data, t->size); + ses->ctrl.called_station_id[t->size] = 0; + break; + case IPOE_TAG_XID: + ses->xid = *(uint32_t *)t->data; + break; + case IPOE_TAG_GIADDR: + ses->giaddr = *(uint32_t *)t->data; + break; + case IPOE_TAG_CLIENT_ID: + set_dhcpv4_opt(&ses->client_id, t, &ptr); + break; + case IPOE_TAG_AGENT_CIRCUIT_ID: + set_dhcpv4_opt(&ses->agent_circuit_id, t, &ptr); + break; + case IPOE_TAG_AGENT_REMOTE_ID: + 
set_dhcpv4_opt(&ses->agent_remote_id, t, &ptr); + break; + case IPOE_TAG_IFINDEX: + ses->ifindex = *(uint32_t *)t->data; + break; + } + } + + ses->serv = serv; + + triton_context_register(&ses->ctx, &ses->ses); + triton_context_wakeup(&ses->ctx); + + pthread_mutex_lock(&serv->lock); + list_add_tail(&ses->entry, &serv->sessions); + pthread_mutex_unlock(&serv->lock); + + if (ses->ifindex != -1) { + list_for_each_entry(info, &ds_list, entry) { + if (info->ifindex == ses->ifindex) { + list_del(&info->entry); + _free(info); + break; + } + } + } + + return &ses->ses; +} + +static struct backup_module mod = { + .id = MODID_IPOE, + .save = session_save, + .restore = session_restore, + .ctrl_restore = ctrl_restore, + .restore_complete = restore_complete, +}; +#endif + +static void dump_sessions(void) +{ + ipoe_nl_get_sessions(&ds_list); + +#ifndef USE_BACKUP + restore_complete(); +#endif +} + +static void restore_complete(void) +{ + struct ipoe_session_info *info; + + while (!list_empty(&ds_list)) { + info = list_entry(ds_list.next, typeof(*info), entry); + ipoe_nl_delete(info->ifindex); + list_del(&info->entry); + _free(info); + } +} + +static void init(void) +{ + dump_sessions(); + +#ifdef USE_BACKUP + backup_register_module(&mod); +#endif +} + +DEFINE_INIT(100, init); + diff --git a/accel-pppd/ctrl/ipoe/dhcpv4.c b/accel-pppd/ctrl/ipoe/dhcpv4.c new file mode 100644 index 00000000..78911811 --- /dev/null +++ b/accel-pppd/ctrl/ipoe/dhcpv4.c @@ -0,0 +1,603 @@ +#include <unistd.h> +#include <stdlib.h> +#include <stdio.h> +#include <errno.h> +#include <string.h> +#include <pthread.h> +#include <fcntl.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <net/ethernet.h> +#include <netinet/ip.h> +#include <netinet/udp.h> +#include <netpacket/packet.h> +#include <arpa/inet.h> +#include <linux/if.h> + +#include "events.h" +#include "list.h" +#include "triton.h" +#include "log.h" +#include "mempool.h" +#include "memdebug.h" +#include "ap_session.h" +#include "ipdb.h" + 
+#include "dhcpv4.h" + +#define DHCP_SERV_PORT 67 +#define DHCP_CLIENT_PORT 68 +#define DHCP_MAGIC "\x63\x82\x53\x63" + + +#define BUF_SIZE 4096 + + +static int conf_verbose; +static in_addr_t conf_dns1; +static in_addr_t conf_dns2; + +static mempool_t pack_pool; +static mempool_t opt_pool; + +static int dhcpv4_read(struct triton_md_handler_t *h); + +struct dhcpv4_serv *dhcpv4_create(struct triton_context_t *ctx, const char *ifname) +{ + struct dhcpv4_serv *serv; + int sock, raw_sock; + struct sockaddr_in addr; + struct sockaddr_ll ll_addr; + struct ifreq ifr; + int f = 1; + + memset(&ifr, 0, sizeof(ifr)); + + strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); + if (ioctl(sock_fd, SIOCGIFINDEX, &ifr)) { + log_error("dhcpv4(%s): ioctl(SIOCGIFINDEX): %s\n", ifname, strerror(errno)); + return NULL; + } + + raw_sock = socket(AF_PACKET, SOCK_RAW, ntohs(ETH_P_IP)); + if (raw_sock < 0) { + log_error("dhcpv4: packet socket is not supported by kernel\n"); + return NULL; + } + + memset(&ll_addr, 0, sizeof(ll_addr)); + ll_addr.sll_family = AF_PACKET; + ll_addr.sll_ifindex = ifr.ifr_ifindex; + ll_addr.sll_protocol = ntohs(ETH_P_IP); + + if (bind(raw_sock, (struct sockaddr *)&ll_addr, sizeof(ll_addr))) { + log_error("dhcpv4(%s): bind: %s\n", ifname, strerror(errno)); + close(raw_sock); + return NULL; + } + + memset(&addr, 0, sizeof(addr)); + + addr.sin_family = AF_INET; + addr.sin_port = htons(DHCP_SERV_PORT); + addr.sin_addr.s_addr = htonl(INADDR_ANY); + + sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); + + if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &f, sizeof(f))) + log_error("setsockopt(SO_REUSEADDR): %s\n", strerror(errno)); + + + if (setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &f, sizeof(f))) { + log_error("setsockopt(SO_BROADCAST): %s\n", strerror(errno)); + goto out_err; + } + + if (bind(sock, &addr, sizeof(addr))) { + log_error("bind: %s\n", strerror(errno)); + goto out_err; + } + + if (setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, ifname, strlen(ifname))) { + 
log_error("setsockopt(SO_BINDTODEVICE): %s\n", strerror(errno)); + goto out_err; + } + + if (ioctl(sock, SIOCGIFHWADDR, &ifr)) { + log_error("dhcpv4(%s): ioctl(SIOCGIFHWADDR): %s\n", ifname, strerror(errno)); + goto out_err; + } + + fcntl(raw_sock, F_SETFL, O_NONBLOCK); + fcntl(raw_sock, F_SETFD, fcntl(raw_sock, F_GETFD) | FD_CLOEXEC); + + fcntl(sock, F_SETFL, O_NONBLOCK); + fcntl(sock, F_SETFD, fcntl(sock, F_GETFD) | FD_CLOEXEC); + + serv = _malloc(sizeof(*serv)); + memset(serv, 0, sizeof(*serv)); + + memcpy(serv->hwaddr, ifr.ifr_hwaddr.sa_data, ETH_ALEN); + + serv->ctx = ctx; + serv->hnd.fd = sock; + serv->hnd.read = dhcpv4_read; + serv->raw_sock = raw_sock; + + triton_md_register_handler(ctx, &serv->hnd); + triton_md_enable_handler(&serv->hnd, MD_MODE_READ); + + return serv; + +out_err: + close(raw_sock); + close(sock); + return NULL; +} + +void dhcpv4_free(struct dhcpv4_serv *serv) +{ + triton_md_unregister_handler(&serv->hnd); + close(serv->hnd.fd); + _free(serv); +} + +void dhcpv4_print_packet(struct dhcpv4_packet *pack, void (*print)(const char *fmt, ...)) +{ + const char *msg_name[] = {"Discover", "Offer", "Request", "Decline", "Ack", "Nak", "Release", "Inform"}; + + print("[DHCPv4 %s xid=%x ", msg_name[pack->msg_type - 1], pack->hdr->xid); + + if (pack->hdr->ciaddr) + print("ciaddr=%i.%i.%i.%i ", + pack->hdr->ciaddr & 0xff, + (pack->hdr->ciaddr >> 8) & 0xff, + (pack->hdr->ciaddr >> 16) & 0xff, + (pack->hdr->ciaddr >> 24) & 0xff); + + if (pack->hdr->yiaddr) + print("yiaddr=%i.%i.%i.%i ", + pack->hdr->yiaddr & 0xff, + (pack->hdr->yiaddr >> 8) & 0xff, + (pack->hdr->yiaddr >> 16) & 0xff, + (pack->hdr->yiaddr >> 24) & 0xff); + + if (pack->hdr->siaddr) + print("siaddr=%i.%i.%i.%i ", + pack->hdr->siaddr & 0xff, + (pack->hdr->siaddr >> 8) & 0xff, + (pack->hdr->siaddr >> 16) & 0xff, + (pack->hdr->siaddr >> 24) & 0xff); + + if (pack->hdr->giaddr) + print("giaddr=%i.%i.%i.%i ", + pack->hdr->giaddr & 0xff, + (pack->hdr->giaddr >> 8) & 0xff, + (pack->hdr->giaddr >> 16) 
& 0xff, + (pack->hdr->giaddr >> 24) & 0xff); + + print("chaddr=%02x:%02x:%02x:%02x:%02x:%02x ", + pack->hdr->chaddr[0], + pack->hdr->chaddr[1], + pack->hdr->chaddr[2], + pack->hdr->chaddr[3], + pack->hdr->chaddr[4], + pack->hdr->chaddr[5], + pack->hdr->chaddr[6]); + + dhcpv4_print_options(pack, print); + + print("]\n"); +} + +static int parse_opt82(struct dhcpv4_packet *pack, struct dhcpv4_option *opt) +{ + uint8_t *ptr = opt->data; + uint8_t *endptr = ptr + opt->len; + int type, len; + struct dhcpv4_option *opt1; + + while (ptr < endptr) { + type = *ptr++; + len = *ptr++; + if (ptr + len > endptr) + return -1; + if (type == 1 || type == 2) { + opt1 = mempool_alloc(opt_pool); + if (!opt1) { + log_emerg("out of memory\n"); + return -1; + } + + opt1->type = type; + opt1->len = len; + opt1->data = ptr; + + if (type == 1) + pack->agent_circuit_id = opt1; + else + pack->agent_remote_id = opt1; + } + + ptr += len; + } + + return 0; +} + +static int dhcpv4_parse_packet(struct dhcpv4_packet *pack, int len) +{ + struct dhcpv4_option *opt; + uint8_t *ptr, *endptr = pack->data + len; + + if (len < sizeof(struct dhcpv4_hdr)) { + if (conf_verbose) + log_warn("dhcpv4: short packet received\n"); + return -1; + } + + if (pack->hdr->op != DHCP_OP_REQUEST) + return -1; + + if (pack->hdr->htype != 1) + return -1; + + if (pack->hdr->hlen != 6) + return -1; + + if (memcmp(pack->hdr->magic, DHCP_MAGIC, 4)) + return -1; + + ptr = pack->data + sizeof(struct dhcpv4_hdr); + + while (ptr < endptr) { + if (*ptr == 0) { + ptr++; + continue; + } + + if (*ptr == 0xff) + break; + + opt = mempool_alloc(opt_pool); + if (!opt) { + log_emerg("out of memory\n"); + return -1; + } + memset(opt, 0, sizeof(*opt)); + opt->type = *ptr++; + opt->len = *ptr++; + opt->data = ptr; + ptr += opt->len; + + if (ptr > endptr) + return -1; + + list_add_tail(&opt->entry, &pack->options); + + if (opt->type == 53) + pack->msg_type = opt->data[0]; + else if (opt->type == 82) + parse_opt82(pack, opt); + else if (opt->type 
== 50) + pack->request_ip = *(uint32_t *)opt->data; + else if (opt->type == 54) + pack->server_id = *(uint32_t *)opt->data; + } + + if (pack->msg_type == 0 || pack->msg_type > 8) + return -1; + + if (dhcpv4_check_options(pack)) + return -1; + + /*if (conf_verbose) { + log_info2("recv "); + print_packet(pack, log_info2); + }*/ + + return 0; +} + +static struct dhcpv4_packet *dhcpv4_packet_alloc() +{ + struct dhcpv4_packet *pack = mempool_alloc(pack_pool); + + if (!pack) + return NULL; + + memset(pack, 0, sizeof(*pack)); + + INIT_LIST_HEAD(&pack->options); + + pack->hdr = (struct dhcpv4_hdr *)pack->data; + pack->ptr = (uint8_t *)(pack->hdr + 1); + + memcpy(pack->hdr->magic, DHCP_MAGIC, 4); + + return pack; +} + +static int dhcpv4_read(struct triton_md_handler_t *h) +{ + struct dhcpv4_packet *pack; + struct dhcpv4_serv *serv = container_of(h, typeof(*serv), hnd); + struct sockaddr_in addr; + socklen_t len; + int n; + + while (1) { + pack = dhcpv4_packet_alloc(); + if (!pack) { + log_emerg("out of memory\n"); + return 1; + } + + len = sizeof(addr); + n = recvfrom(h->fd, pack->data, BUF_SIZE, 0, &addr, &len); + if (n == -1) { + mempool_free(pack); + if (errno == EAGAIN) + return 0; + log_error("dhcpv4: recv: %s\n", strerror(errno)); + continue; + } + + if (dhcpv4_parse_packet(pack, n)) { + dhcpv4_packet_free(pack); + continue; + } + + if (serv->recv) + serv->recv(serv, pack); + } +} + +uint16_t ip_csum(uint16_t *buf, int len) +{ + uint32_t sum=0; + int i; + + for (i=0; i < len; i += 2) + sum += *buf++; + + // take only 16 bits out of the 32 bit sum and add up the carries + while (sum >> 16) + sum = (sum & 0xffff) + (sum >> 16); + + // one's complement the result + sum = ~sum; + + return sum & 0xffff; +} + + +static int dhcpv4_send(struct dhcpv4_serv *serv, struct dhcpv4_packet *pack, in_addr_t saddr, in_addr_t daddr) +{ + uint8_t hdr[sizeof(struct ether_header) + sizeof(struct iphdr) + sizeof(struct udphdr)]; + struct ether_header *eth = (struct ether_header *)hdr; + 
struct iphdr *ip = (struct iphdr *)(eth + 1); + struct udphdr *udp = (struct udphdr *)(ip + 1); + int len = pack->ptr - pack->data; + struct iovec iov[2]; + + memcpy(eth->ether_dhost, pack->hdr->chaddr, ETH_ALEN); + memcpy(eth->ether_shost, serv->hwaddr, ETH_ALEN); + eth->ether_type = htons(ETH_P_IP); + + ip->ihl = 5; + ip->version = 4; + ip->tos = 0x10; + ip->tot_len = ntohs(sizeof(*ip) + sizeof(*udp) + len); + ip->id = 0; + ip->frag_off = 0; + ip->ttl = 128; + ip->protocol = IPPROTO_UDP; + ip->check = 0; + ip->saddr = saddr; + ip->daddr = daddr; + ip->check = ip_csum((uint16_t *)ip, 20); + + udp->source = ntohs(DHCP_SERV_PORT); + udp->dest = ntohs(DHCP_CLIENT_PORT); + udp->len = htons(sizeof(*udp) + len); + udp->check = 0; + + iov[0].iov_base = hdr; + iov[0].iov_len = sizeof(hdr); + iov[1].iov_base = pack->data; + iov[1].iov_len = len; + + len = writev(serv->raw_sock, iov, 2); + + if (len < 0) + return -1; + + return 0; +} + +void dhcpv4_packet_free(struct dhcpv4_packet *pack) +{ + struct dhcpv4_option *opt; + + while (!list_empty(&pack->options)) { + opt = list_entry(pack->options.next, typeof(*opt), entry); + list_del(&opt->entry); + mempool_free(opt); + } + + if (pack->agent_circuit_id) + mempool_free(pack->agent_circuit_id); + + if (pack->agent_remote_id) + mempool_free(pack->agent_remote_id); + + mempool_free(pack); +} + +int dhcpv4_packet_add_opt(struct dhcpv4_packet *pack, int type, const void *data, int len) +{ + struct dhcpv4_option *opt = mempool_alloc(opt_pool); + + if (!opt) { + log_emerg("out of memory\n"); + return -1; + } + + *pack->ptr++ = type; + *pack->ptr++ = len; + + opt->type = type; + opt->len = len; + opt->data = pack->ptr; + pack->ptr += len; + + memcpy(opt->data, data, len); + + list_add_tail(&opt->entry, &pack->options); + + return 0; +} + +int dhcpv4_send_reply(int msg_type, struct dhcpv4_serv *serv, struct dhcpv4_packet *req, struct ap_session *ses, int lease_time) +{ + struct dhcpv4_packet *pack; + int val, r; + struct dns { + 
in_addr_t dns1; + in_addr_t dns2; + } dns; + + pack = dhcpv4_packet_alloc(); + if (!pack) { + log_emerg("out of memory\n"); + return -1; + } + + memcpy(pack->hdr, req->hdr, sizeof(*req->hdr)); + + pack->hdr->op = DHCP_OP_REPLY; + pack->hdr->ciaddr = 0; + pack->hdr->yiaddr = ses->ipv4->peer_addr; + if (msg_type == DHCPOFFER) + pack->hdr->siaddr = ses->ipv4->addr; + else + pack->hdr->siaddr = 0; + + if (dhcpv4_packet_add_opt(pack, 53, &msg_type, 1)) + goto out_err; + + if (dhcpv4_packet_add_opt(pack, 54, &ses->ipv4->addr, 4)) + goto out_err; + + val = ntohl(lease_time); + if (dhcpv4_packet_add_opt(pack, 51, &val, 4)) + goto out_err; + + if (dhcpv4_packet_add_opt(pack, 3, &ses->ipv4->addr, 4)) + goto out_err; + + val = htonl(~((1 << (32 - ses->ipv4->mask)) - 1)); + if (dhcpv4_packet_add_opt(pack, 1, &val, 4)) + goto out_err; + + if (conf_dns1 && conf_dns2) { + dns.dns1 = conf_dns1; + dns.dns2 = conf_dns2; + if (dhcpv4_packet_add_opt(pack, 6, &dns, 8)) + goto out_err; + } else if (conf_dns1) { + if (dhcpv4_packet_add_opt(pack, 6, &conf_dns1, 4)) + goto out_err; + } + + *pack->ptr++ = 255; + + if (conf_verbose) { + pack->msg_type = msg_type; + log_ppp_info2("send "); + dhcpv4_print_packet(pack, log_ppp_info2); + } + + r = dhcpv4_send(serv, pack, ses->ipv4->addr, ses->ipv4->peer_addr); + + dhcpv4_packet_free(pack); + + return r; + +out_err: + dhcpv4_packet_free(pack); + return -1; +} + +int dhcpv4_send_nak(struct dhcpv4_serv *serv, struct dhcpv4_packet *req) +{ + struct dhcpv4_packet *pack; + int val, r; + + pack = dhcpv4_packet_alloc(); + if (!pack) { + log_emerg("out of memory\n"); + return -1; + } + + memcpy(pack->hdr, req->hdr, sizeof(*req->hdr)); + + pack->hdr->op = DHCP_OP_REPLY; + pack->hdr->ciaddr = 0; + pack->hdr->yiaddr = 0; + pack->hdr->siaddr = 0; + + val = DHCPNAK; + if (dhcpv4_packet_add_opt(pack, 53, &val, 1)) + goto out_err; + + *pack->ptr++ = 255; + + if (conf_verbose) { + pack->msg_type = DHCPNAK; + log_info2("send "); + dhcpv4_print_packet(pack, 
log_info2); + } + + r = dhcpv4_send(serv, pack, 0, 0xffffffff); + + dhcpv4_packet_free(pack); + + return r; + +out_err: + dhcpv4_packet_free(pack); + return -1; + + return 0; +} + +static void load_config() +{ + const char *opt; + + opt = conf_get_opt("ipoe", "verbose"); + if (opt) + conf_verbose = atoi(opt); + + opt = conf_get_opt("dns", "dns1"); + if (opt) + conf_dns1 = inet_addr(opt); + + opt = conf_get_opt("dns", "dns2"); + if (opt) + conf_dns2 = inet_addr(opt); +} + +static void init() +{ + pack_pool = mempool_create(BUF_SIZE + sizeof(struct dhcpv4_packet)); + opt_pool = mempool_create(sizeof(struct dhcpv4_option)); + + load_config(); + + triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); +} + +DEFINE_INIT(100, init); diff --git a/accel-pppd/ctrl/ipoe/dhcpv4.h b/accel-pppd/ctrl/ipoe/dhcpv4.h new file mode 100644 index 00000000..52e90a3e --- /dev/null +++ b/accel-pppd/ctrl/ipoe/dhcpv4.h @@ -0,0 +1,89 @@ +#ifndef __DHCPV4_H +#define __DHCPV4_H + +#include <stdint.h> +#include "list.h" + +#include "triton.h" + +#define __packed __attribute__((packed)) + +#define DHCP_OP_REQUEST 1 +#define DHCP_OP_REPLY 2 + +#define DHCPDISCOVER 1 +#define DHCPOFFER 2 +#define DHCPREQUEST 3 +#define DHCPDECLINE 4 +#define DHCPACK 5 +#define DHCPNAK 6 +#define DHCPRELEASE 7 +#define DHCPINFORM 8 + +struct dhcpv4_hdr +{ + uint8_t op; + uint8_t htype; + uint8_t hlen; + uint8_t hops; + uint32_t xid; + uint16_t sec; + uint16_t flags; + uint32_t ciaddr; + uint32_t yiaddr; + uint32_t siaddr; + uint32_t giaddr; + uint8_t chaddr[16]; + char sname[64]; + char file[128]; + uint8_t magic[4]; +} __packed; + +struct dhcpv4_option +{ + struct list_head entry; + uint8_t type; + uint8_t len; + uint8_t *data; +}; + +struct dhcpv4_packet +{ + struct dhcpv4_hdr *hdr; + struct list_head options; + struct dhcpv4_option *client_id; + struct dhcpv4_option *agent_circuit_id; + struct dhcpv4_option *agent_remote_id; + uint32_t request_ip; + uint32_t server_id; + int msg_type; 
+ uint8_t *ptr; + uint8_t data[0]; +}; + +struct dhcpv4_serv +{ + struct triton_context_t *ctx; + struct triton_md_handler_t hnd; + int raw_sock; + uint8_t hwaddr[6]; + void (*recv)(struct dhcpv4_serv *serv, struct dhcpv4_packet *pack); +}; + +struct ap_session; + +struct dhcpv4_serv *dhcpv4_create(struct triton_context_t *ctx, const char *ifname); +void dhcpv4_free(struct dhcpv4_serv *); + + +int dhcpv4_send_reply(int msg_type, struct dhcpv4_serv *serv, struct dhcpv4_packet *req, struct ap_session *ses, int lease_time); +int dhcpv4_send_nak(struct dhcpv4_serv *serv, struct dhcpv4_packet *req); + +void dhcpv4_packet_free(struct dhcpv4_packet *pack); + +int dhcpv4_check_options(struct dhcpv4_packet *); +void dhcpv4_print_options(struct dhcpv4_packet *, void (*)(const char *, ...)); + +void dhcpv4_print_packet(struct dhcpv4_packet *pack, void (*print)(const char *fmt, ...)); + +#endif diff --git a/accel-pppd/ctrl/ipoe/dhcpv4_options.c b/accel-pppd/ctrl/ipoe/dhcpv4_options.c new file mode 100644 index 00000000..82e64902 --- /dev/null +++ b/accel-pppd/ctrl/ipoe/dhcpv4_options.c @@ -0,0 +1,290 @@ +#include <unistd.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <ctype.h> +#include <arpa/inet.h> + +#include "dhcpv4.h" + +struct known_option +{ + int type; + int min_len; + int max_len; + int elem_size; + const char *name; + void (*print)(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); +}; + +static void print_int(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); +static void print_uint(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); +static void print_ip(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); +static void print_str(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); +static void print_hex(const struct dhcpv4_option *opt, int elem_size, void (*print)(const 
char *fmt, ...)); +static void print_route(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); +static void print_classless_route(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); +static void print_message_type(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); +static void print_request_list(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); +static void print_relay_agent(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)); + +static struct known_option options[] = { + { 1, 4, 4, 4, "Subnet", print_ip }, + { 2, 4, 4, 4, "Time-Offset", print_int }, + { 3, 4, 255, 4, "Router", print_ip }, + { 4, 4, 255, 4, "Time-Server", print_ip }, + { 5, 4, 255, 4, "Name-Server", print_ip }, + { 6, 4, 255, 4, "DNS", print_ip }, + //{ 7, 4, 255, 4, "log-server", print_ip }, + //{ 8, 4, 255, 4, "cookie-server", print_ip }, + //{ 9, 4, 255, 4, "lpr-server", print_ip }, + //{ 10, 4, 255, 4, "impress-server", print_ip }, + //{ 11, 4, 255, 4, "resourse-location", print_ip }, + { 12, 1, 255, 1, "Host-Name", print_str }, + //{ 13, 4, 255, 4, "impress-server", print_ip }, + { 15, 1, 255, 1, "Domain-Name", print_str }, + { 26, 2, 2, 2, "MTU", print_int }, + { 28, 4, 4, 4, "Broadcast", print_ip }, + { 33, 8, 255, 8, "Route", print_route }, + { 42, 4, 4, 4, "NTP", print_ip }, + { 43, 1, 255, 1, "Vendor-Specific", print_hex }, + { 50, 4, 4, 4, "Request-IP", print_ip }, + { 51, 4, 4, 4, "Lease-Time", print_uint }, + { 53, 1, 1, 1, "Message-Type", print_message_type }, + { 54, 4, 4, 4, "Server-ID", print_ip }, + { 55, 1, 255, 1, "Request-List", print_request_list }, + { 56, 1, 255, 1, "Message", print_str }, + { 57, 2, 2, 2, "Max-Message-Size", print_uint }, + { 58, 4, 4, 4, "T1", print_uint }, + { 59, 4, 4, 4, "T2", print_uint }, + { 60, 1, 255, 1, "Vendor-Class", print_hex }, + { 61, 2, 255, 1, "Client-ID", print_hex }, + 
{ 82, 3, 255, 1, "Relay-Agent", print_relay_agent }, + { 121, 5, 255, 1, "Classless-Route", print_classless_route }, + { 0 }, +}; + +int dhcpv4_check_options(struct dhcpv4_packet *pack) +{ + struct dhcpv4_option *opt; + struct known_option *kopt; + + list_for_each_entry(opt, &pack->options, entry) { + for (kopt = options; kopt->type; kopt++) { + if (kopt->type != opt->type) + continue; + if (opt->len < kopt->min_len) + return -1; + if (opt->len > kopt->max_len) + return -1; + if (opt->len % kopt->elem_size != 0) + return -1; + break; + } + } + + return 0; +} + +void dhcpv4_print_options(struct dhcpv4_packet *pack, void (*print)(const char *fmt, ...)) +{ + struct dhcpv4_option *opt; + struct known_option *kopt; + int n = 0; + + list_for_each_entry(opt, &pack->options, entry) { + if (n) + print(" <"); + else + print("<"); + n++; + for (kopt = options; kopt->type && kopt->type != opt->type; kopt++); + if (kopt->type) { + print("%s ", kopt->name); + kopt->print(opt, kopt->elem_size, print); + } else { + print("Option-%i "); + print_hex(opt, 1, print); + } + print(">"); + } +} + + +static void print_int(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + if (opt->len == 2) + print("%i", ntohs(*(int16_t *)(opt->data))); + else + print("%i", ntohl(*(int32_t *)(opt->data))); +} + +static void print_uint(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + if (opt->len == 2) + print("%u", ntohs(*(uint16_t *)(opt->data))); + else + print("%u", ntohl(*(uint32_t *)(opt->data))); +} + +static void print_ip(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + int i, n = opt->len / elem_size; + uint32_t ip; + + for (i = 0; i < n; i++) { + ip = ntohl(*(uint32_t *)(opt->data + i*elem_size)); + + if (i) + print(","); + + print("%i.%i.%i.%i", + (ip >> 24) & 0xff, + (ip >> 16) & 0xff, + (ip >> 8) & 0xff, + ip & 0xff); + } +} + +static void print_str(const struct 
dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + const char *ptr = (const char *)opt->data; + const char *endptr = ptr + opt->len; + + for(; ptr < endptr; ptr++) + print("%c", *ptr); +} + +static void print_hex(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + const uint8_t *ptr = opt->data; + const uint8_t *endptr = ptr + opt->len; + + for(; ptr < endptr; ptr++) + print("%02x", *ptr); +} + +static void print_route(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + int i, n = opt->len / 8; + uint32_t ip, gw; + + for (i = 0; i < n; i++) { + ip = ntohl(*(uint32_t *)(opt->data + i*8)); + gw = ntohl(*(uint32_t *)(opt->data + i*8 + 4)); + + if (i) + print(","); + + print("%i.%i.%i.%i via %i.%i.%i.%i", + (ip >> 24) & 0xff, + (ip >> 16) & 0xff, + (ip >> 8) & 0xff, + ip & 0xff, + (gw >> 24) & 0xff, + (gw >> 16) & 0xff, + (gw >> 8) & 0xff, + gw & 0xff); + } +} + +static void print_message_type(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + const char *msg_name[] = {"", "Discover", "Offer", "Request", "Decline", "Ack", "Nak", "Release", "Inform"}; + + print("%s", msg_name[opt->data[0]]); +} + +static void print_request_list(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + int i; + struct known_option *kopt; + + for (i = 0; i < opt->len; i++) { + if (i) + print(","); + for (kopt = options; kopt->type && kopt->type != opt->data[i]; kopt++); + if (kopt->type) + print("%s", kopt->name); + else + print("%i", opt->data[i]); + } +} + +static void print_relay_agent(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + const uint8_t *ptr = opt->data; + const uint8_t *endptr = ptr + opt->len; + const uint8_t *endptr1; + int type, len; + + while (ptr < endptr) { + if (ptr != opt->data) + print(" "); + type = *ptr++; + len = *ptr++; + /*if (ptr + len > endptr) { + 
print(" invalid"); + return; + }*/ + if (type == 1) + print("{Agent-Circuit-ID "); + else if (type == 2) + print("{Agent-Remote-ID "); + else + print("{Option-%i ", type); + + endptr1 = ptr + len; + for (;ptr < endptr1; ptr++) { + if (!isprint(*ptr)) { + print("_"); + break; + } + print("%c", *ptr); + } + for (;ptr < endptr1; ptr++) + print("%02x", *ptr); + print("}"); + } +} + +static void print_classless_route(const struct dhcpv4_option *opt, int elem_size, void (*print)(const char *fmt, ...)) +{ + const uint8_t *ptr = opt->data; + const uint8_t *endptr = ptr + opt->len; + int mask, i, mask1 = 0; + uint32_t ip; + uint32_t gw; + + while (ptr < endptr) { + if (ptr != opt->data) + print(","); + + mask = *ptr++; + ip = ntohl(*(uint32_t *)ptr); + for (i = 0; i < mask; i++) + mask1 |= (1 << (32 - i)); + ip &= mask1; + if (mask <= 8) + ptr++; + else if (mask <= 16) + ptr += 2; + else if (mask <= 24) + ptr += 3; + else + ptr += 4; + gw = ntohl(*(uint32_t *)ptr); + ptr += 4; + + print("%i.%i.%i.%i/%i via %i.%i.%i.%i", + (ip >> 24) & 0xff, + (ip >> 16) & 0xff, + (ip >> 8) & 0xff, + ip & 0xff, + mask, + (gw >> 24) & 0xff, + (gw >> 16) & 0xff, + (gw >> 8) & 0xff, + gw & 0xff); + } +} diff --git a/accel-pppd/ctrl/ipoe/if_ipoe.h b/accel-pppd/ctrl/ipoe/if_ipoe.h new file mode 120000 index 00000000..1f0e053e --- /dev/null +++ b/accel-pppd/ctrl/ipoe/if_ipoe.h @@ -0,0 +1 @@ +../../../drivers/ipoe/ipoe.h
\ No newline at end of file diff --git a/accel-pppd/ctrl/ipoe/ipoe.c b/accel-pppd/ctrl/ipoe/ipoe.c new file mode 100644 index 00000000..bfe65fb1 --- /dev/null +++ b/accel-pppd/ctrl/ipoe/ipoe.c @@ -0,0 +1,1137 @@ +#include <unistd.h> +#include <stdlib.h> +#include <stdio.h> +#include <stdarg.h> +#include <errno.h> +#include <string.h> +#include <fcntl.h> +#include <time.h> +#include <arpa/inet.h> +#include <netinet/in.h> +#include <net/ethernet.h> +#include <netinet/ip.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <linux/if.h> + +#include <pcre.h> + +#include "events.h" +#include "list.h" +#include "triton.h" +#include "log.h" +#include "mempool.h" +#include "utils.h" +#include "cli.h" +#include "ap_session.h" +#include "pwdb.h" +#include "ipdb.h" + +#include "iplink.h" +#include "connlimit.h" + +#include "ipoe.h" + +#include "memdebug.h" + +#define USERNAME_IFNAME 0 +#define USERNAME_LUA 1 + +#define MODE_L2 0 +#define MODE_L3 1 + +static int conf_dhcpv4 = 1; +static int conf_up = 0; +static int conf_mode = 0; +static int conf_shared = 1; +//static int conf_dhcpv6; +static int conf_username; +static int conf_unit_cache; + +#ifdef USE_LUA +static const char *conf_lua_username_func; +#endif + +static int conf_offer_timeout = 3; +static in_addr_t conf_gw_address; +static int conf_netmask = 24; +static int conf_lease_time = 600; +static int conf_lease_timeout = 660; +static int conf_verbose; + +static unsigned int stat_starting; +static unsigned int stat_active; + +static mempool_t ses_pool; + +static LIST_HEAD(serv_list); + +struct iplink_arg +{ + pcre *re; + const char *opt; +}; + +struct unit_cache +{ + struct list_head entry; + int ifindex; +}; + +static pthread_mutex_t uc_lock = PTHREAD_MUTEX_INITIALIZER; +static LIST_HEAD(uc_list); +static int uc_size; +static mempool_t uc_pool; + +static void ipoe_session_finished(struct ap_session *s); +static void ipoe_drop_sessions(struct ipoe_serv *serv, struct ipoe_session *skip); +static void 
ipoe_serv_close(struct triton_context_t *ctx); + +static struct ipoe_session *ipoe_session_lookup(struct ipoe_serv *serv, struct dhcpv4_packet *pack) +{ + struct ipoe_session *ses; + struct ipoe_session *ses1 = NULL; + + list_for_each_entry(ses, &serv->sessions, entry) { + if (pack->hdr->giaddr != ses->giaddr) + continue; + + if (pack->agent_circuit_id && !ses->agent_circuit_id) + continue; + + if (pack->agent_remote_id && !ses->agent_remote_id) + continue; + + if (pack->client_id && !ses->client_id) + continue; + + if (!pack->agent_circuit_id && ses->agent_circuit_id) + continue; + + if (!pack->agent_remote_id && ses->agent_remote_id) + continue; + + if (!pack->client_id && ses->client_id) + continue; + + if (pack->agent_circuit_id) { + if (pack->agent_circuit_id->len != ses->agent_circuit_id->len) + continue; + if (memcmp(pack->agent_circuit_id->data, ses->agent_circuit_id->data, pack->agent_circuit_id->len)) + continue; + } + + if (pack->agent_remote_id) { + if (pack->agent_remote_id->len != ses->agent_remote_id->len) + continue; + if (memcmp(pack->agent_remote_id->data, ses->agent_remote_id->data, pack->agent_remote_id->len)) + continue; + } + + if (pack->client_id) { + if (pack->client_id->len != ses->client_id->len) + continue; + if (memcmp(pack->client_id->data, ses->client_id->data, pack->client_id->len)) + continue; + } + + if (memcmp(pack->hdr->chaddr, ses->hwaddr, 6)) + continue; + + ses1 = ses; + + if (pack->hdr->xid != ses->xid) + continue; + + return ses; + } + + return ses1; +} + +static void ipoe_session_timeout(struct triton_timer_t *t) +{ + struct ipoe_session *ses = container_of(t, typeof(*ses), timer); + + triton_timer_del(t); + + log_ppp_info2("session timed out\n"); + + ap_session_terminate(&ses->ses, TERM_LOST_CARRIER, 0); +} + +static void ipoe_session_set_username(struct ipoe_session *ses) +{ +#ifdef USE_LUA + if (conf_username == USERNAME_LUA) { + ipoe_lua_set_username(ses, conf_lua_username_func); + } else +#endif + ses->ses.username = 
_strdup(ses->ses.ifname); +} + +static void ipoe_session_start(struct ipoe_session *ses) +{ + int r; + char *passwd; + struct ifreq ifr; + struct unit_cache *uc; + + if (ses->serv->opt_shared == 0) + strncpy(ses->ses.ifname, ses->serv->ifname, AP_IFNAME_LEN); + else if (ses->ifindex == -1) { + pthread_mutex_lock(&uc_lock); + if (!list_empty(&uc_list)) { + uc = list_entry(uc_list.next, typeof(*uc), entry); + ses->ifindex = uc->ifindex; + list_del(&uc->entry); + --uc_size; + pthread_mutex_unlock(&uc_lock); + mempool_free(uc); + } else { + pthread_mutex_unlock(&uc_lock); + ses->ifindex = ipoe_nl_create(0, 0, ses->serv->opt_mode == MODE_L2 ? ses->serv->ifname : NULL, ses->hwaddr); + if (ses->ifindex == -1) { + log_ppp_error("ipoe: failed to create interface\n"); + ipoe_session_finished(&ses->ses); + return; + } + } + + memset(&ifr, 0, sizeof(ifr)); + ifr.ifr_ifindex = ses->ifindex; + if (ioctl(sock_fd, SIOCGIFNAME, &ifr, sizeof(ifr))) { + log_ppp_error("ipoe: failed to get interface name\n"); + ses->ifindex = -1; + ipoe_session_finished(&ses->ses); + return; + } + + strncpy(ses->ses.ifname, ifr.ifr_name, AP_IFNAME_LEN); + ses->ses.ifindex = ses->ifindex; + } + + if (!ses->ses.username) { + ipoe_session_set_username(ses); + + if (!ses->ses.username) { + ipoe_session_finished(&ses->ses); + return; + } + } + + triton_event_fire(EV_CTRL_STARTING, &ses->ses); + triton_event_fire(EV_CTRL_STARTED, &ses->ses); + + ap_session_starting(&ses->ses); + + r = pwdb_check(&ses->ses, ses->ses.username, 0); + if (r == PWDB_NO_IMPL) { + passwd = pwdb_get_passwd(&ses->ses, ses->ses.username); + if (!passwd) + r = PWDB_DENIED; + else { + r = PWDB_SUCCESS; + _free(passwd); + } + } + + if (r == PWDB_DENIED) { + if (conf_ppp_verbose) + log_ppp_warn("authentication failed\n"); + ap_session_terminate(&ses->ses, TERM_AUTH_ERROR, 0); + return; + } + + ses->ses.ipv4 = ipdb_get_ipv4(&ses->ses); + if (!ses->ses.ipv4) { + log_ppp_warn("no free IPv4 address\n"); + ap_session_terminate(&ses->ses, 
TERM_AUTH_ERROR, 0); + return; + } + + if (conf_gw_address) + ses->ses.ipv4->addr = conf_gw_address; + + if (conf_netmask) + ses->ses.ipv4->mask = conf_netmask; + else if (!ses->ses.ipv4->mask) + ses->ses.ipv4->mask = 24; + + if (ses->dhcpv4_request) { + dhcpv4_send_reply(DHCPOFFER, ses->serv->dhcpv4, ses->dhcpv4_request, &ses->ses, conf_lease_time); + + dhcpv4_packet_free(ses->dhcpv4_request); + ses->dhcpv4_request = NULL; + + ses->timer.expire = ipoe_session_timeout; + ses->timer.expire_tv.tv_sec = conf_offer_timeout; + triton_timer_add(&ses->ctx, &ses->timer, 0); + } else { + if (ipoe_nl_modify(ses->ifindex, ses->giaddr, ses->ses.ipv4->peer_addr, NULL, NULL)) + ap_session_terminate(&ses->ses, TERM_NAS_ERROR, 0); + else + ap_session_activate(&ses->ses); + } +} + +static void ipoe_session_activate(struct ipoe_session *ses) +{ + ap_session_activate(&ses->ses); + + if (ses->dhcpv4_request) { + if (ses->ses.state == AP_STATE_ACTIVE) + dhcpv4_send_reply(DHCPACK, ses->serv->dhcpv4, ses->dhcpv4_request, &ses->ses, conf_lease_time); + else + dhcpv4_send_nak(ses->serv->dhcpv4, ses->dhcpv4_request); + + dhcpv4_packet_free(ses->dhcpv4_request); + ses->dhcpv4_request = NULL; + } +} + +static void ipoe_session_keepalive(struct ipoe_session *ses) +{ + if (ses->timer.tpd) + triton_timer_mod(&ses->timer, 0); + + ses->xid = ses->dhcpv4_request->hdr->xid; + + if (ses->ses.state == AP_STATE_ACTIVE) + dhcpv4_send_reply(DHCPACK, ses->serv->dhcpv4, ses->dhcpv4_request, &ses->ses, conf_lease_time); + else + dhcpv4_send_nak(ses->serv->dhcpv4, ses->dhcpv4_request); + + dhcpv4_packet_free(ses->dhcpv4_request); + ses->dhcpv4_request = NULL; +} + +static void ipoe_session_started(struct ap_session *s) +{ + struct ipoe_session *ses = container_of(s, typeof(*ses), ses); + + log_ppp_debug("ipoe: session started\n"); + + ses->timer.expire = ipoe_session_timeout; + ses->timer.expire_tv.tv_sec = conf_lease_timeout; + if (ses->timer.tpd) + triton_timer_mod(&ses->timer, 0); +} + +static void 
ipoe_session_free(struct ipoe_session *ses) +{ + struct unit_cache *uc; + + if (ses->timer.tpd) + triton_timer_del(&ses->timer); + + if (ses->dhcpv4_request) + dhcpv4_packet_free(ses->dhcpv4_request); + + if (ses->ctrl.called_station_id) + _free(ses->ctrl.called_station_id); + + if (ses->ctrl.calling_station_id) + _free(ses->ctrl.calling_station_id); + + triton_context_unregister(&ses->ctx); + + if (ses->data) + _free(ses->data); + + if (ses->ifindex != -1) { + if (uc_size < conf_unit_cache && ipoe_nl_modify(ses->ifindex, 0, 0, "", NULL)) { + uc = mempool_alloc(uc_pool); + uc->ifindex = ses->ifindex; + pthread_mutex_lock(&uc_lock); + list_add_tail(&uc->entry, &uc_list); + ++uc_size; + pthread_mutex_unlock(&uc_lock); + } else + ipoe_nl_delete(ses->ifindex); + } + + mempool_free(ses); +} + +static void ipoe_session_finished(struct ap_session *s) +{ + struct ipoe_session *ses = container_of(s, typeof(*ses), ses); + int serv_close; + + log_ppp_debug("ipoe: session finished\n"); + + pthread_mutex_lock(&ses->serv->lock); + list_del(&ses->entry); + serv_close = ses->serv->need_close && list_empty(&ses->serv->sessions); + pthread_mutex_unlock(&ses->serv->lock); + + if (serv_close) + ipoe_serv_close(&ses->serv->ctx); + + triton_context_call(&ses->ctx, (triton_event_func)ipoe_session_free, ses); +} + +static void ipoe_session_terminate(struct ap_session *s, int hard) +{ + ap_session_finished(s); +} + + +static void ipoe_session_close(struct triton_context_t *ctx) +{ + struct ipoe_session *ses = container_of(ctx, typeof(*ses), ctx); + + if (ses->ses.state) + ap_session_terminate(&ses->ses, TERM_ADMIN_RESET, 1); + else + ipoe_session_finished(&ses->ses); +} + +static struct ipoe_session *ipoe_session_create_dhcpv4(struct ipoe_serv *serv, struct dhcpv4_packet *pack) +{ + struct ipoe_session *ses; + int dlen = 0; + uint8_t *ptr; + + ses = mempool_alloc(ses_pool); + if (!ses) { + log_emerg("out of memery\n"); + return NULL; + } + + memset(ses, 0, sizeof(*ses)); + + 
ap_session_init(&ses->ses); + + ses->serv = serv; + ses->ifindex = -1; + ses->dhcpv4_request = pack; + + ses->xid = pack->hdr->xid; + memcpy(ses->hwaddr, pack->hdr->chaddr, 6); + ses->giaddr = pack->hdr->giaddr; + + if (pack->agent_circuit_id) + dlen += sizeof(struct dhcp_opt) + pack->agent_circuit_id->len; + + if (pack->agent_remote_id) + dlen += sizeof(struct dhcp_opt) + pack->agent_remote_id->len; + + if (pack->client_id) + dlen += sizeof(struct dhcp_opt) + pack->client_id->len; + + if (dlen) { + ses->data = _malloc(dlen); + if (!ses->data) { + log_emerg("out of memery\n"); + mempool_free(ses); + return NULL; + } + ptr = ses->data; + } + + if (pack->agent_circuit_id) { + ses->agent_circuit_id = (struct dhcp_opt *)ptr; + ses->agent_circuit_id->len = pack->agent_circuit_id->len; + memcpy(ses->agent_circuit_id->data, pack->agent_circuit_id->data, pack->agent_circuit_id->len); + ptr += sizeof(struct dhcp_opt) + pack->agent_circuit_id->len; + } + + if (pack->agent_remote_id) { + ses->agent_remote_id = (struct dhcp_opt *)ptr; + ses->agent_remote_id->len = pack->agent_remote_id->len; + memcpy(ses->agent_remote_id->data, pack->agent_remote_id->data, pack->agent_remote_id->len); + ptr += sizeof(struct dhcp_opt) + pack->agent_remote_id->len; + } + + if (pack->client_id) { + ses->client_id = (struct dhcp_opt *)ptr; + ses->client_id->len = pack->client_id->len; + memcpy(ses->client_id->data, pack->client_id->data, pack->client_id->len); + ptr += sizeof(struct dhcp_opt) + pack->client_id->len; + } + + ses->ctx.before_switch = log_switch; + ses->ctx.close = ipoe_session_close; + ses->ctrl.ctx = &ses->ctx; + ses->ctrl.started = ipoe_session_started; + ses->ctrl.finished = ipoe_session_finished; + ses->ctrl.terminate = ipoe_session_terminate; + ses->ctrl.type = CTRL_TYPE_IPOE; + ses->ctrl.name = "ipoe"; + + ses->ctrl.calling_station_id = _malloc(19); + ses->ctrl.called_station_id = _strdup(serv->ifname); + + ptr = ses->hwaddr; + sprintf(ses->ctrl.calling_station_id, 
"%02x:%02x:%02x:%02x:%02x:%02x", + ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5]); + + ses->ses.ctrl = &ses->ctrl; + ses->ses.chan_name = ses->ctrl.calling_station_id; + + triton_context_register(&ses->ctx, &ses->ses); + + triton_context_wakeup(&ses->ctx); + + //pthread_mutex_lock(&serv->lock); + list_add_tail(&ses->entry, &serv->sessions); + //pthread_mutex_unlock(&serv->lock); + + triton_context_call(&ses->ctx, (triton_event_func)ipoe_session_start, ses); + + return ses; +} + +static void ipoe_recv_dhcpv4(struct dhcpv4_serv *dhcpv4, struct dhcpv4_packet *pack) +{ + struct ipoe_serv *serv = container_of(dhcpv4->ctx, typeof(*serv), ctx); + struct ipoe_session *ses; + //struct dhcpv4_packet *reply; + + if (ap_shutdown) + return; + + pthread_mutex_lock(&serv->lock); + if (pack->msg_type == DHCPDISCOVER) { + ses = ipoe_session_lookup(serv, pack); + if (!ses) { + ses = ipoe_session_create_dhcpv4(serv, pack); + + if (conf_verbose && ses) { + log_switch(dhcpv4->ctx, &ses->ses); + log_ppp_info2("recv "); + dhcpv4_print_packet(pack, log_ppp_info2); + } + } else { + log_switch(dhcpv4->ctx, &ses->ses); + + if (conf_verbose) { + log_ppp_info2("recv "); + dhcpv4_print_packet(pack, log_ppp_info2); + } + + if (ses->ses.ipv4 && ses->ses.state == AP_STATE_ACTIVE && pack->request_ip == ses->ses.ipv4->peer_addr) + dhcpv4_send_reply(DHCPOFFER, dhcpv4, pack, &ses->ses, conf_lease_time); + + dhcpv4_packet_free(pack); + } + } else if (pack->msg_type == DHCPREQUEST) { + ses = ipoe_session_lookup(serv, pack); + + if (!ses) { + if (conf_verbose) { + log_info2("recv "); + dhcpv4_print_packet(pack, log_info2); + } + + dhcpv4_send_nak(dhcpv4, pack); + } else { + if (!ses->ses.ipv4 || + (pack->server_id && (pack->server_id != ses->ses.ipv4->addr || pack->request_ip != ses->ses.ipv4->peer_addr)) || + (pack->hdr->ciaddr && (pack->hdr->xid != ses->xid || pack->hdr->ciaddr != ses->ses.ipv4->peer_addr))) { + + if (conf_verbose) { + log_info2("recv "); + dhcpv4_print_packet(pack, log_info2); + } + 
+ if (ses->ses.ipv4 && pack->server_id == ses->ses.ipv4->addr && pack->request_ip && pack->request_ip != ses->ses.ipv4->peer_addr) + dhcpv4_send_nak(dhcpv4, pack); + + ap_session_terminate(&ses->ses, TERM_USER_REQUEST, 0); + } else { + if (conf_verbose) { + log_switch(dhcpv4->ctx, &ses->ses); + log_ppp_info2("recv "); + dhcpv4_print_packet(pack, log_ppp_info2); + } + + if (serv->opt_shared == 0) + ipoe_drop_sessions(serv, ses); + + if (ses->ses.state == AP_STATE_STARTING && !ses->dhcpv4_request) { + ses->dhcpv4_request = pack; + pack = NULL; + triton_context_call(&ses->ctx, (triton_event_func)ipoe_session_activate, ses); + } else if (ses->ses.state == AP_STATE_ACTIVE && !ses->dhcpv4_request) { + ses->dhcpv4_request = pack; + pack = NULL; + triton_context_call(&ses->ctx, (triton_event_func)ipoe_session_keepalive, ses); + } + } + } + if (pack) + dhcpv4_packet_free(pack); + } else if (pack->msg_type == DHCPDECLINE || pack->msg_type == DHCPRELEASE) { + ses = ipoe_session_lookup(serv, pack); + if (ses) { + if (conf_verbose) { + log_switch(dhcpv4->ctx, &ses->ses); + log_ppp_info2("recv "); + dhcpv4_print_packet(pack, log_ppp_info2); + } + + ap_session_terminate(&ses->ses, TERM_USER_REQUEST, 0); + } + dhcpv4_packet_free(pack); + } + pthread_mutex_unlock(&serv->lock); +} + +static struct ipoe_session *ipoe_session_create_up(struct ipoe_serv *serv, struct ethhdr *eth, struct iphdr *iph) +{ + struct ipoe_session *ses; + + if (ap_shutdown) + return NULL; + + ses = mempool_alloc(ses_pool); + if (!ses) { + log_emerg("out of memery\n"); + return NULL; + } + + memset(ses, 0, sizeof(*ses)); + + ap_session_init(&ses->ses); + + ses->serv = serv; + ses->ifindex = -1; + + memcpy(ses->hwaddr, eth->h_source, 6); + + ses->ctx.before_switch = log_switch; + ses->ctx.close = ipoe_session_close; + ses->ctrl.ctx = &ses->ctx; + ses->ctrl.started = ipoe_session_started; + ses->ctrl.finished = ipoe_session_finished; + ses->ctrl.terminate = ipoe_session_terminate; + ses->ctrl.type = 
CTRL_TYPE_IPOE; + ses->ctrl.name = "ipoe"; + + ses->giaddr = iph->saddr; + + ses->ctrl.calling_station_id = _malloc(17); + ses->ctrl.called_station_id = _malloc(17); + + u_inet_ntoa(iph->saddr, ses->ctrl.calling_station_id); + u_inet_ntoa(iph->daddr, ses->ctrl.called_station_id); + + ses->ses.username = _strdup(ses->ctrl.calling_station_id); + + ses->ses.ctrl = &ses->ctrl; + ses->ses.chan_name = ses->ctrl.calling_station_id; + + triton_context_register(&ses->ctx, &ses->ses); + + triton_context_wakeup(&ses->ctx); + + //pthread_mutex_lock(&serv->lock); + list_add_tail(&ses->entry, &serv->sessions); + //pthread_mutex_unlock(&serv->lock); + + triton_context_call(&ses->ctx, (triton_event_func)ipoe_session_start, ses); + + return ses; +} + +struct ipoe_session *ipoe_session_alloc(void) +{ + struct ipoe_session *ses; + + ses = mempool_alloc(ses_pool); + if (!ses) { + log_emerg("out of memery\n"); + return NULL; + } + + memset(ses, 0, sizeof(*ses)); + + ap_session_init(&ses->ses); + + ses->ifindex = -1; + + ses->ctx.before_switch = log_switch; + ses->ctx.close = ipoe_session_close; + ses->ctrl.ctx = &ses->ctx; + ses->ctrl.started = ipoe_session_started; + ses->ctrl.finished = ipoe_session_finished; + ses->ctrl.terminate = ipoe_session_terminate; + ses->ctrl.type = CTRL_TYPE_IPOE; + ses->ctrl.name = "ipoe"; + + ses->ses.ctrl = &ses->ctrl; + ses->ses.chan_name = ses->ctrl.calling_station_id; + + return ses; +} + +void ipoe_recv_up(int ifindex, struct ethhdr *eth, struct iphdr *iph) +{ + struct ipoe_serv *serv; + struct ipoe_session *ses; + + list_for_each_entry(serv, &serv_list, entry) { + if (serv->ifindex != ifindex) + continue; + + if (!serv->opt_up) + return; + + pthread_mutex_lock(&serv->lock); + list_for_each_entry(ses, &serv->sessions, entry) { + if (ses->giaddr == iph->saddr) { + pthread_mutex_unlock(&serv->lock); + return; + } + } + pthread_mutex_unlock(&serv->lock); + + ipoe_session_create_up(serv, eth, iph); + + break; + } +} + +static void ipoe_serv_close(struct 
triton_context_t *ctx) +{ + struct ipoe_serv *serv = container_of(ctx, typeof(*serv), ctx); + + pthread_mutex_lock(&serv->lock); + if (!list_empty(&serv->sessions)) { + serv->need_close = 1; + pthread_mutex_unlock(&serv->lock); + return; + } + pthread_mutex_unlock(&serv->lock); + + if (serv->dhcpv4) + dhcpv4_free(serv->dhcpv4); + + triton_context_unregister(ctx); + + _free(serv->ifname); + _free(serv); +} + +static int show_stat_exec(const char *cmd, char * const *fields, int fields_cnt, void *client) +{ + cli_send(client, "ipoe:\r\n"); + cli_sendv(client," starting: %u\r\n", stat_starting); + cli_sendv(client," active: %u\r\n", stat_active); + + return CLI_CMD_OK; +} + +void __export ipoe_get_stat(unsigned int **starting, unsigned int **active) +{ + *starting = &stat_starting; + *active = &stat_active; +} + +static void __terminate(struct ap_session *ses) +{ + ap_session_terminate(ses, TERM_NAS_REQUEST, 0); +} + +static void ipoe_drop_sessions(struct ipoe_serv *serv, struct ipoe_session *skip) +{ + struct ipoe_session *ses; + + list_for_each_entry(ses, &serv->sessions, entry) { + if (ses == skip) + continue; + + if (ses->ses.state == AP_STATE_ACTIVE) + ap_session_ifdown(&ses->ses); + + triton_context_call(&ses->ctx, (triton_event_func)__terminate, &ses->ses); + } +} + +struct ipoe_serv *ipoe_find_serv(const char *ifname) +{ + struct ipoe_serv *serv; + + list_for_each_entry(serv, &serv_list, entry) { + if (strcmp(serv->ifname, ifname) == 0) + return serv; + } + + return NULL; +} + +static void add_interface(const char *ifname, int ifindex, const char *opt) +{ + char *str0, *str, *ptr1, *ptr2; + int end; + struct ipoe_serv *serv; + int opt_shared = conf_shared; + int opt_dhcpv4 = 0; + int opt_up = 0; + int opt_mode = conf_mode; + + str0 = strchr(opt, ','); + if (str0) { + str0 = _strdup(str0 + 1); + str = str0; + + while (1) { + for (ptr1 = str + 1; *ptr1 && *ptr1 != '='; ptr1++); + + if (!*ptr1) + goto parse_err; + + *ptr1 = 0; + + for (ptr2 = ++ptr1; *ptr2 && 
*ptr2 != ','; ptr2++); + + end = *ptr2 == 0; + + if (!end) + *ptr2 = 0; + + if (ptr2 == ptr1) + goto parse_err; + + if (strcmp(str, "start") == 0) { + if (!strcmp(ptr1, "up")) + opt_up = 1; + else if (!strcmp(ptr1, "dhcpv4")) + opt_dhcpv4 = 1; + else + goto parse_err; + } else if (strcmp(str, "shared") == 0) { + opt_shared = atoi(ptr1); + } else if (strcmp(str, "mode") == 0) { + if (!strcmp(ptr1, "L2")) + opt_mode = MODE_L2; + else if (!strcmp(ptr1, "L3")) + opt_mode = MODE_L3; + else + goto parse_err; + } else + goto parse_err; + + if (end) + break; + + str = ptr2 + 1; + } + + _free(str0); + } + + if (!opt_up && !opt_dhcpv4) { + opt_up = conf_up; + opt_dhcpv4 = conf_dhcpv4; + } + + list_for_each_entry(serv, &serv_list, entry) { + if (strcmp(ifname, serv->ifname)) + continue; + + serv->active = 1; + serv->ifindex = ifindex; + + if ((opt_shared && !serv->opt_shared) || (!opt_shared && serv->opt_shared)) { + ipoe_drop_sessions(serv, NULL); + serv->opt_shared = opt_shared; + } + + if (opt_dhcpv4 && !serv->dhcpv4) { + serv->dhcpv4 = dhcpv4_create(&serv->ctx, serv->ifname); + if (serv->dhcpv4) + serv->dhcpv4->recv = ipoe_recv_dhcpv4; + } else if (!opt_dhcpv4 && serv->dhcpv4) { + dhcpv4_free(serv->dhcpv4); + serv->dhcpv4 = NULL; + } + + serv->opt_up = opt_up; + serv->opt_mode = conf_mode; + + return; + } + + serv = _malloc(sizeof(*serv)); + memset(serv, 0, sizeof(*serv)); + serv->ctx.close = ipoe_serv_close; + serv->ifname = _strdup(ifname); + serv->ifindex = ifindex; + serv->opt_shared = opt_shared; + serv->opt_dhcpv4 = opt_dhcpv4; + serv->opt_up = opt_up; + serv->opt_mode = opt_mode; + serv->active = 1; + INIT_LIST_HEAD(&serv->sessions); + pthread_mutex_init(&serv->lock, NULL); + + triton_context_register(&serv->ctx, NULL); + + if (serv->opt_dhcpv4) { + serv->dhcpv4 = dhcpv4_create(&serv->ctx, serv->ifname); + if (serv->dhcpv4) + serv->dhcpv4->recv = ipoe_recv_dhcpv4; + } + + triton_context_wakeup(&serv->ctx); + + list_add_tail(&serv->entry, &serv_list); + + return; + 
+parse_err: + log_error("ipoe: failed to parse '%s'\n", opt); + _free(str0); +} + +static void load_interface(const char *opt) +{ + const char *ptr; + struct ifreq ifr; + + for (ptr = opt; *ptr && *ptr != ','; ptr++); + + if (ptr - opt >= sizeof(ifr.ifr_name)) + return; + + memcpy(ifr.ifr_name, opt, ptr - opt); + ifr.ifr_name[ptr - opt] = 0; + + if (ioctl(sock_fd, SIOCGIFINDEX, &ifr)) { + log_error("ipoe: '%s': ioctl(SIOCGIFINDEX): %s\n", ifr.ifr_name, strerror(errno)); + return; + } + + add_interface(ifr.ifr_name, ifr.ifr_ifindex, opt); +} + +static int __load_interface_re(int index, int flags, const char *name, struct iplink_arg *arg) +{ + if (pcre_exec(arg->re, NULL, name, strlen(name), 0, 0, NULL, 0) < 0) + return 0; + + add_interface(name, index, arg->opt); + + return 0; +} + +static void load_interface_re(const char *opt) +{ + pcre *re = NULL; + const char *pcre_err; + char *pattern; + const char *ptr; + int pcre_offset; + struct iplink_arg arg; + + for (ptr = opt; *ptr && *ptr != ','; ptr++); + + pattern = _malloc(ptr - (opt + 3) + 1); + memcpy(pattern, opt + 3, ptr - (opt + 3)); + pattern[ptr - (opt + 3)] = 0; + + re = pcre_compile2(pattern, 0, NULL, &pcre_err, &pcre_offset, NULL); + + if (!re) { + log_error("ipoe: %s at %i\r\n", pcre_err, pcre_offset); + return; + } + + arg.re = re; + arg.opt = opt; + + iplink_list((iplink_list_func)__load_interface_re, &arg); + + pcre_free(re); + _free(pattern); +} + +static void load_interfaces(struct conf_sect_t *sect) +{ + struct ipoe_serv *serv; + struct conf_option_t *opt; + struct list_head *pos, *n; + + list_for_each_entry(serv, &serv_list, entry) + serv->active = 0; + + list_for_each_entry(opt, §->items, entry) { + if (strcmp(opt->name, "interface")) + continue; + if (!opt->val) + continue; + + if (strlen(opt->val) > 3 && memcmp(opt->val, "re:", 3) == 0) + load_interface_re(opt->val); + else + load_interface(opt->val); + } + + list_for_each_safe(pos, n, &serv_list) { + serv = list_entry(pos, typeof(*serv), entry); 
+ if (!serv->active) { + ipoe_drop_sessions(serv, NULL); + list_del(&serv->entry); + triton_context_call(&serv->ctx, (triton_event_func)ipoe_serv_close, &serv->ctx); + } + } +} + +static void parse_local_net(const char *opt) +{ + const char *ptr; + char str[17]; + in_addr_t addr; + int mask; + char *endptr; + + ptr = strchr(opt, '/'); + if (ptr) { + memcpy(str, opt, ptr - opt); + str[ptr - opt] = 0; + addr = inet_addr(str); + if (addr == INADDR_NONE) + goto out_err; + mask = strtoul(ptr + 1, &endptr, 10); + if (mask > 32) + goto out_err; + } else { + addr = inet_addr(opt); + if (addr == INADDR_NONE) + goto out_err; + mask = 24; + } + + mask = (1 << mask) - 1; + + ipoe_nl_add_net(addr & mask, mask); + + return; + +out_err: + log_error("ipoe: failed to parse 'local-net=%s'\n", opt); +} + +static void load_local_nets(struct conf_sect_t *sect) +{ + struct conf_option_t *opt; + + ipoe_nl_delete_nets(); + + list_for_each_entry(opt, §->items, entry) { + if (strcmp(opt->name, "local-net")) + continue; + if (!opt->val) + continue; + parse_local_net(opt->val); + } +} + +static void load_config(void) +{ + const char *opt; + struct conf_sect_t *s = conf_get_section("ipoe"); + struct conf_option_t *opt1; + + if (!s) + return; + + opt = conf_get_opt("ipoe", "username"); + if (opt) { + if (strcmp(opt, "ifname") == 0) + conf_username = USERNAME_IFNAME; +#ifdef USE_LUA + else if (strlen(opt) > 4 && memcmp(opt, "lua:", 4) == 0) { + conf_username = USERNAME_LUA; + conf_lua_username_func = opt + 4; + } +#endif + else + log_emerg("ipoe: unknown username value '%s'\n", opt); + } + + opt = conf_get_opt("ipoe", "gw-ip-address"); + if (opt) + conf_gw_address = inet_addr(opt); + else + conf_gw_address = 0; + + opt = conf_get_opt("ipoe", "netmask"); + if (opt) { + conf_netmask = atoi(opt); + if (conf_netmask <= 0 || conf_netmask > 32) { + log_error("ipoe: invalid netmask %s\n", opt); + conf_netmask = 0; + } + } else + conf_netmask = 0; + + opt = conf_get_opt("ipoe", "verbose"); + if (opt) + 
conf_verbose = atoi(opt); + + opt = conf_get_opt("ipoe", "lease-time"); + if (opt) + conf_lease_time = atoi(opt); + + opt = conf_get_opt("ipoe", "lease-timeout"); + if (opt) + conf_lease_timeout = atoi(opt); + + opt = conf_get_opt("ipoe", "unit-cache"); + if (opt) + conf_unit_cache = atoi(opt); + + opt = conf_get_opt("ipoe", "shared"); + if (opt) + conf_shared = atoi(opt); + else + conf_shared = 1; + + opt = conf_get_opt("ipoe", "mode"); + if (opt) { + if (!strcmp(opt, "L2")) + conf_mode = MODE_L2; + else if (!strcmp(opt, "L3")) + conf_mode = MODE_L3; + else + log_emerg("ipoe: failed to parse 'mode=%s'\n", opt); + } else + conf_mode = MODE_L2; + + conf_dhcpv4 = 0; + conf_up = 0; + + list_for_each_entry(opt1, &s->items, entry) { + if (strcmp(opt1->name, "start")) + continue; + if (!strcmp(opt1->val, "dhcpv4")) + conf_dhcpv4 = 1; + else if (!strcmp(opt1->val, "up")) + conf_up = 1; + } + + if (!conf_dhcpv4 && !conf_up) + conf_dhcpv4 = 1; + + load_interfaces(s); + load_local_nets(s); +} + +static void ipoe_init(void) +{ + ses_pool = mempool_create(sizeof(struct ipoe_session)); + uc_pool = mempool_create(sizeof(struct unit_cache)); + + load_config(); + + cli_register_simple_cmd2(show_stat_exec, NULL, 2, "show", "stat"); + + triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); +} + +DEFINE_INIT(20, ipoe_init); diff --git a/accel-pppd/ctrl/ipoe/ipoe.h b/accel-pppd/ctrl/ipoe/ipoe.h new file mode 100644 index 00000000..48bd631e --- /dev/null +++ b/accel-pppd/ctrl/ipoe/ipoe.h @@ -0,0 +1,81 @@ +#ifndef __IPOE_H +#define __IPOE_H + +#include <stdint.h> +#include <pthread.h> + +#include "triton.h" +#include "ap_session.h" +#include "dhcpv4.h" + +struct ipoe_serv +{ + struct list_head entry; + struct triton_context_t ctx; + char *ifname; + int ifindex; + int active; + struct list_head sessions; + struct dhcpv4_serv *dhcpv4; + pthread_mutex_t lock; + int opt_mode; + int opt_shared:1; + int opt_dhcpv4:1; + int opt_up:1; + int need_close:1; +}; + +struct 
dhcp_opt +{ + uint8_t len; + uint8_t data[0]; +}; + +struct ipoe_session +{ + struct list_head entry; + struct triton_context_t ctx; + struct triton_timer_t timer; + struct ipoe_serv *serv; + struct ap_ctrl ctrl; + struct ap_session ses; + uint8_t hwaddr[6]; + struct dhcp_opt *client_id; + struct dhcp_opt *agent_circuit_id; + struct dhcp_opt *agent_remote_id; + uint32_t xid; + uint32_t giaddr; + uint8_t *data; + struct dhcpv4_packet *dhcpv4_request; + int ifindex; +}; + +struct ipoe_session_info +{ + struct list_head entry; + int ifindex; + uint32_t addr; + uint32_t peer_addr; +}; + +#ifdef USE_LUA +int ipoe_lua_set_username(struct ipoe_session *, const char *func); +#endif + +struct iphdr; +struct ethhdr; + +void ipoe_recv_up(int ifindex, struct ethhdr *eth, struct iphdr *iph); +struct ipoe_session *ipoe_session_alloc(void); + +struct ipoe_serv *ipoe_find_serv(const char *ifname); + +void ipoe_nl_add_net(uint32_t addr, int mask); +void ipoe_nl_delete_nets(void); +int ipoe_nl_create(uint32_t peer_addr, uint32_t addr, const char *ifname, uint8_t *hwaddr); +void ipoe_nl_delete(int ifindex); +int ipoe_nl_modify(int ifindex, uint32_t peer_addr, uint32_t addr, const char *ifname, uint8_t *hwaddr); +void ipoe_nl_get_sessions(struct list_head *list); + +#endif + diff --git a/accel-pppd/ctrl/ipoe/ipoe_netlink.c b/accel-pppd/ctrl/ipoe/ipoe_netlink.c new file mode 100644 index 00000000..b92ec928 --- /dev/null +++ b/accel-pppd/ctrl/ipoe/ipoe_netlink.c @@ -0,0 +1,480 @@ +#include <unistd.h> +#include <stdlib.h> +#include <stdio.h> +#include <errno.h> +#include <string.h> +#include <pthread.h> +#include <fcntl.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <net/ethernet.h> +#include <netinet/ip.h> +#include <arpa/inet.h> +#include <linux/if.h> +#include <linux/genetlink.h> + +#include "triton.h" +#include "log.h" +#include "genl.h" +#include "libnetlink.h" + +#include "ipoe.h" +#include "if_ipoe.h" + +#include "memdebug.h" + +#define PKT_ATTR_MAX 256 + +static 
struct rtnl_handle rth; +static struct triton_md_handler_t up_hnd; +static int ipoe_genl_id; + +void ipoe_nl_delete_nets(void) +{ + struct nlmsghdr *nlh; + struct genlmsghdr *ghdr; + struct { + struct nlmsghdr n; + char buf[1024]; + } req; + + if (rth.fd == -1) + return; + + nlh = &req.n; + nlh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + nlh->nlmsg_type = ipoe_genl_id; + + ghdr = NLMSG_DATA(&req.n); + ghdr->cmd = IPOE_CMD_DEL_NET; + + addattr32(nlh, 1024, IPOE_ATTR_ADDR, 0); + + if (rtnl_talk(&rth, nlh, 0, 0, nlh, NULL, NULL, 0) < 0 ) + log_error("ipoe: nl_del_net: error talking to kernel\n"); +} + +void ipoe_nl_add_net(uint32_t addr, int mask) +{ + struct nlmsghdr *nlh; + struct genlmsghdr *ghdr; + struct { + struct nlmsghdr n; + char buf[1024]; + } req; + + if (rth.fd == -1) + return; + + nlh = &req.n; + nlh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + nlh->nlmsg_type = ipoe_genl_id; + + ghdr = NLMSG_DATA(&req.n); + ghdr->cmd = IPOE_CMD_ADD_NET; + + addattr32(nlh, 1024, IPOE_ATTR_ADDR, addr); + addattr32(nlh, 1024, IPOE_ATTR_MASK, mask); + + if (rtnl_talk(&rth, nlh, 0, 0, nlh, NULL, NULL, 0) < 0 ) + log_error("ipoe: nl_add_net: error talking to kernel\n"); +} + +int ipoe_nl_create(uint32_t peer_addr, uint32_t addr, const char *ifname, uint8_t *hwaddr) +{ + struct rtnl_handle rth; + struct nlmsghdr *nlh; + struct genlmsghdr *ghdr; + struct rtattr *tb[IPOE_ATTR_MAX + 1]; + struct rtattr *attrs; + int len; + int ret = -1; + struct { + struct nlmsghdr n; + char buf[1024]; + } req; + union { + uint8_t hwaddr[6]; + uint64_t u64; + } u; + + if (rtnl_open_byproto(&rth, 0, NETLINK_GENERIC)) { + log_ppp_error("ipoe: cannot open generic netlink socket\n"); + return -1; + } + + nlh = &req.n; + nlh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + nlh->nlmsg_type = ipoe_genl_id; + + ghdr = NLMSG_DATA(&req.n); + ghdr->cmd = IPOE_CMD_CREATE; + + if 
(peer_addr) + addattr32(nlh, 1024, IPOE_ATTR_PEER_ADDR, peer_addr); + + if (addr) + addattr32(nlh, 1024, IPOE_ATTR_ADDR, addr); + + if (hwaddr) { + memcpy(u.hwaddr, hwaddr, 6); + addattr_l(nlh, 1024, IPOE_ATTR_HWADDR, &u.u64, 8); + } + + if (ifname) + addattr_l(nlh, 1024, IPOE_ATTR_IFNAME, ifname, strlen(ifname) + 1); + + if (rtnl_talk(&rth, nlh, 0, 0, nlh, NULL, NULL, 0) < 0 ) + log_ppp_error("ipoe: nl_create: error talking to kernel\n"); + + if (nlh->nlmsg_type != ipoe_genl_id) { + log_ppp_error("ipoe: not a IPoE message %d\n", nlh->nlmsg_type); + goto out; + } + + ghdr = NLMSG_DATA(nlh); + + if (ghdr->cmd != IPOE_CMD_CREATE) { + log_ppp_error("ipoe: unknown IPoE command %d\n", ghdr->cmd); + goto out; + } + + len = nlh->nlmsg_len - NLMSG_LENGTH(GENL_HDRLEN); + + if (len < 0) { + log_ppp_error("ipoe: wrong IPoE message len %d\n", len); + goto out; + } + + attrs = (struct rtattr *)((char *)ghdr + GENL_HDRLEN); + parse_rtattr(tb, IPOE_ATTR_MAX, attrs, len); + + if (!tb[IPOE_ATTR_IFINDEX]) { + log_ppp_error("ipoe: missing IPOE_ATTR_IFINDEX attribute\n"); + goto out; + } + + ret = *(uint32_t *)(RTA_DATA(tb[IPOE_ATTR_IFINDEX])); + +out: + rtnl_close(&rth); + + return ret; +} + +int ipoe_nl_modify(int ifindex, uint32_t peer_addr, uint32_t addr, const char *ifname, uint8_t *hwaddr) +{ + struct rtnl_handle rth; + struct nlmsghdr *nlh; + struct genlmsghdr *ghdr; + int ret = 0; + struct { + struct nlmsghdr n; + char buf[1024]; + } req; + union { + uint8_t hwaddr[6]; + uint64_t u64; + } u; + + if (rtnl_open_byproto(&rth, 0, NETLINK_GENERIC)) { + log_ppp_error("ipoe: cannot open generic netlink socket\n"); + return -1; + } + + nlh = &req.n; + nlh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + nlh->nlmsg_type = ipoe_genl_id; + + ghdr = NLMSG_DATA(&req.n); + ghdr->cmd = IPOE_CMD_MODIFY; + + addattr32(nlh, 1024, IPOE_ATTR_IFINDEX, ifindex); + addattr32(nlh, 1024, IPOE_ATTR_PEER_ADDR, peer_addr); + addattr32(nlh, 1024, IPOE_ATTR_ADDR, 
addr); + + if (hwaddr) { + memcpy(u.hwaddr, hwaddr, 6); + addattr_l(nlh, 1024, IPOE_ATTR_HWADDR, &u.u64, 8); + } + + if (ifname) + addattr_l(nlh, 1024, IPOE_ATTR_IFNAME, ifname, strlen(ifname) + 1); + + if (rtnl_talk(&rth, nlh, 0, 0, nlh, NULL, NULL, 0) < 0 ) { + log_ppp_error("ipoe: nl_create: error talking to kernel\n"); + ret = -1; + } + + rtnl_close(&rth); + + return ret; +} + +static int dump_session(const struct sockaddr_nl *addr, struct nlmsghdr *n, void *arg) +{ + struct list_head *list = arg; + struct ipoe_session_info *info; + struct rtattr *tb[IPOE_ATTR_MAX + 1]; + struct genlmsghdr *ghdr = NLMSG_DATA(n); + int len = n->nlmsg_len; + struct rtattr *attrs; + + if (ghdr->cmd != IPOE_CMD_GET) { + log_error("ipoe: dump_session: got unexpected command %d\n", ghdr->cmd); + return 0; + } + + len -= NLMSG_LENGTH(GENL_HDRLEN); + if (len < 0 ) { + log_error("ipoe: dump_session: wrong message length %i\n", len); + return -1; + } + + attrs = (struct rtattr *)((char *)ghdr + GENL_HDRLEN); + parse_rtattr(tb, IPOE_ATTR_MAX, attrs, len); + + info = _malloc(sizeof(*info)); + if (!info) { + log_emerg("out of memory\n"); + return -1; + } + + memset(info, 0, sizeof(*info)); + + if (tb[IPOE_ATTR_IFINDEX]) + info->ifindex = *(uint32_t *)(RTA_DATA(tb[IPOE_ATTR_IFINDEX])); + else { + log_error("ipoe: dump_session: IPOE_ATTR_IFINDEX is absent\n"); + _free(info); + return 0; + } + + if (tb[IPOE_ATTR_ADDR]) + info->addr = *(uint32_t *)(RTA_DATA(tb[IPOE_ATTR_ADDR])); + + if (tb[IPOE_ATTR_PEER_ADDR]) + info->peer_addr = *(uint32_t *)(RTA_DATA(tb[IPOE_ATTR_PEER_ADDR])); + + list_add_tail(&info->entry, list); + + return 0; +} + +void ipoe_nl_get_sessions(struct list_head *list) +{ + struct nlmsghdr *nlh; + struct genlmsghdr *ghdr; + struct { + struct nlmsghdr n; + char buf[1024]; + } req; + + if (rth.fd == -1) + return; + + nlh = &req.n; + nlh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); + nlh->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST; + nlh->nlmsg_type = ipoe_genl_id; + 
nlh->nlmsg_seq = rth.dump = ++rth.seq; + + ghdr = NLMSG_DATA(&req.n); + ghdr->cmd = IPOE_CMD_GET; + + if (rtnl_send(&rth, (char *)nlh, nlh->nlmsg_len) < 0) { + log_emerg("ipoe: failed to send dump request: %s\n", strerror(errno)); + return; + } + + rtnl_dump_filter(&rth, dump_session, list, NULL, NULL); +} + +void ipoe_nl_delete(int ifindex) +{ + struct rtnl_handle rth; + struct nlmsghdr *nlh; + struct genlmsghdr *ghdr; + struct { + struct nlmsghdr n; + char buf[1024]; + } req; + + if (rtnl_open_byproto(&rth, 0, NETLINK_GENERIC)) { + log_ppp_error("ipoe: cannot open generic netlink socket\n"); + return; + } + + nlh = &req.n; + nlh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + nlh->nlmsg_type = ipoe_genl_id; + + ghdr = NLMSG_DATA(&req.n); + ghdr->cmd = IPOE_CMD_DELETE; + + addattr32(nlh, 128, IPOE_ATTR_IFINDEX, ifindex); + + if (rtnl_talk(&rth, nlh, 0, 0, nlh, NULL, NULL, 0) < 0 ) + log_ppp_error("ipoe: nl_delete: error talking to kernel\n"); + + rtnl_close(&rth); +} + +static void ipoe_up_handler(const struct sockaddr_nl *addr, struct nlmsghdr *h) +{ + struct rtattr *tb[PKT_ATTR_MAX + 1]; + struct rtattr *tb2[IPOE_ATTR_MAX + 1]; + struct genlmsghdr *ghdr = NLMSG_DATA(h); + int len = h->nlmsg_len; + struct rtattr *attrs; + int i; + int ifindex; + struct iphdr *iph; + struct ethhdr *eth; + + if (ghdr->cmd != IPOE_REP_PKT) + return; + + len -= NLMSG_LENGTH(GENL_HDRLEN); + + if (len < 0) { + log_warn("ipoe: wrong controller message length %d\n", len); + return; + } + + attrs = (struct rtattr *)((char *)ghdr + GENL_HDRLEN); + parse_rtattr(tb, PKT_ATTR_MAX, attrs, len); + + for (i = 1; i < PKT_ATTR_MAX; i++) { + if (!tb[i]) + break; + + parse_rtattr_nested(tb2, IPOE_ATTR_MAX, tb[i]); + + if (!tb2[IPOE_ATTR_ETH_HDR] || !tb2[IPOE_ATTR_IP_HDR] || !tb2[IPOE_ATTR_IFINDEX]) + continue; + + ifindex = *(uint32_t *)(RTA_DATA(tb2[IPOE_ATTR_IFINDEX])); + iph = (struct iphdr *)(RTA_DATA(tb2[IPOE_ATTR_IP_HDR])); + eth = (struct ethhdr 
*)(RTA_DATA(tb2[IPOE_ATTR_ETH_HDR])); + + ipoe_recv_up(ifindex, eth, iph); + } +} + +static int ipoe_up_read(struct triton_md_handler_t *h) +{ + int status; + struct nlmsghdr *hdr; + struct sockaddr_nl nladdr; + struct iovec iov; + struct msghdr msg = { + .msg_name = &nladdr, + .msg_namelen = sizeof(nladdr), + .msg_iov = &iov, + .msg_iovlen = 1, + }; + char buf[8192]; + + memset(&nladdr, 0, sizeof(nladdr)); + nladdr.nl_family = AF_NETLINK; + nladdr.nl_pid = 0; + nladdr.nl_groups = 0; + + iov.iov_base = buf; + while (1) { + iov.iov_len = sizeof(buf); + status = recvmsg(h->fd, &msg, 0); + + if (status < 0) { + if (errno == EAGAIN) + break; + log_error("ipoe: netlink error: %s\n", strerror(errno)); + if (errno == ENOBUFS) + continue; + return 0; + } + if (status == 0) { + log_error("ipoe: EOF on netlink\n"); + return 0; + } + if (msg.msg_namelen != sizeof(nladdr)) { + log_error("ipoe: netlink sender address length == %d\n", msg.msg_namelen); + return 0; + } + for (hdr = (struct nlmsghdr*)buf; status >= sizeof(*hdr); ) { + int len = hdr->nlmsg_len; + int l = len - sizeof(*h); + + if (l<0 || len>status) { + if (msg.msg_flags & MSG_TRUNC) { + log_warn("ipoe: truncated netlink message\n"); + continue; + } + log_error("ipoe: malformed netlink message\n"); + continue; + } + + ipoe_up_handler(&nladdr, hdr); + + status -= NLMSG_ALIGN(len); + hdr = (struct nlmsghdr*)((char*)hdr + NLMSG_ALIGN(len)); + } + if (msg.msg_flags & MSG_TRUNC) { + log_warn("ipoe: netlink message truncated\n"); + continue; + } + if (status) { + log_error("ipoe: netlink remnant of size %d\n", status); + return 0; + } + } + + return 0; +} + +static void ipoe_up_close(struct triton_context_t *ctx) +{ + triton_md_unregister_handler(&up_hnd); + triton_context_unregister(ctx); +} + +static struct triton_context_t up_ctx = { + .close = ipoe_up_close, +}; + +static struct triton_md_handler_t up_hnd = { + .read = ipoe_up_read, +}; + +static void init(void) +{ + int mcg_id = genl_resolve_mcg(IPOE_GENL_NAME, 
IPOE_GENL_MCG_PKT, &ipoe_genl_id); + if (mcg_id == -1) { + log_warn("ipoe: unclassified packet handling is disabled\n"); + rth.fd = -1; + return; + } + + if (rtnl_open_byproto(&rth, 1 << (mcg_id - 1), NETLINK_GENERIC)) { + log_error("ipoe: cannot open generic netlink socket\n"); + rth.fd = -1; + return; + } + + fcntl(rth.fd, F_SETFL, O_NONBLOCK); + fcntl(rth.fd, F_SETFD, fcntl(rth.fd, F_GETFD) | FD_CLOEXEC); + + triton_context_register(&up_ctx, NULL); + up_hnd.fd = rth.fd; + triton_md_register_handler(&up_ctx, &up_hnd); + triton_md_enable_handler(&up_hnd, MD_MODE_READ); + triton_context_wakeup(&up_ctx); +} + +DEFINE_INIT(19, init); diff --git a/accel-pppd/ctrl/ipoe/lua.c b/accel-pppd/ctrl/ipoe/lua.c new file mode 100644 index 00000000..0b24635f --- /dev/null +++ b/accel-pppd/ctrl/ipoe/lua.c @@ -0,0 +1,286 @@ +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <pthread.h> + +/* Include the Lua API header files. */ +#include <lua.h> +#include <lauxlib.h> +#include <lualib.h> + +#include "events.h" +#include "log.h" +#include "utils.h" + +#include "ipoe.h" + +#include "memdebug.h" + +#define IPOE_PACKET4 "ipoe.packet4" + +static const char *conf_filename; +static int serial; +static int file_error; + +static __thread lua_State *L; +static __thread int __serial; +static pthread_key_t __key; + +static int packet4_hdr(lua_State *L); +static int packet4_ifname(lua_State *L); +static int packet4_option(lua_State *L); +static int packet4_options(lua_State *L); +static int packet4_agent_circuit_id(lua_State *L); +static int packet4_agent_remote_id(lua_State *L); + +int luaopen_lpack(lua_State *L); + +static const struct luaL_reg packet4_lib [] = { + {"hdr", packet4_hdr}, + {"ifname", packet4_ifname}, + {"option", packet4_option}, + {"options", packet4_options}, + {"agent_circuit_id", packet4_agent_circuit_id}, + {"agent_remote_id", packet4_agent_remote_id}, + {NULL, NULL} +}; + +static int luaopen_packet4(lua_State *L) +{ + luaL_newmetatable(L, 
IPOE_PACKET4); + + lua_pushstring(L, "__index"); + lua_pushvalue(L, -2); /* pushes the metatable */ + lua_settable(L, -3); /* metatable.__index = metatable */ + + + luaI_openlib(L, NULL, packet4_lib, 0); + + luaI_openlib(L, "packet4", packet4_lib, 0); + + return 1; +} + +static int packet4_hdr(lua_State *L) +{ + struct ipoe_session *ses = luaL_checkudata(L, 1, IPOE_PACKET4); + const char *name = luaL_checkstring(L, 2); + char str[20]; + uint8_t *ptr; + + if (!ses) + return 0; + + if (!strcmp(name, "xid")) + lua_pushinteger(L, ses->dhcpv4_request->hdr->xid); + else if (!strcmp(name, "ciaddr")) { + u_inet_ntoa(ses->dhcpv4_request->hdr->ciaddr, str); + lua_pushstring(L, str); + } else if (!strcmp(name, "giaddr")) { + u_inet_ntoa(ses->dhcpv4_request->hdr->giaddr, str); + lua_pushstring(L, str); + } else if (!strcmp(name, "chaddr")) { + ptr = ses->dhcpv4_request->hdr->chaddr; + sprintf(str, "%02x:%02x:%02x:%02x:%02x:%02x", + ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5]); + lua_pushstring(L, str); + } + + return 1; +} + +static int packet4_ifname(lua_State *L) +{ + struct ipoe_session *ses = luaL_checkudata(L, 1, IPOE_PACKET4); + + if (!ses) + return 0; + + lua_pushstring(L, ses->serv->ifname); + + return 1; +} + +static int packet4_option(lua_State *L) +{ + struct ipoe_session *ses = luaL_checkudata(L, 1, IPOE_PACKET4); + int type = luaL_checkinteger(L, 2); + struct dhcpv4_option *opt; + + list_for_each_entry(opt, &ses->dhcpv4_request->options, entry) { + if (opt->type == type) { + lua_pushlstring(L, (char *)opt->data, opt->len); + return 1; + } + } + + lua_pushnil(L); + + return 1; +} + +static int packet4_options(lua_State *L) +{ + struct ipoe_session *ses = luaL_checkudata(L, 1, IPOE_PACKET4); + struct dhcpv4_option *opt; + int i = 1; + + if (!ses) + return 0; + + lua_newtable(L); + + list_for_each_entry(opt, &ses->dhcpv4_request->options, entry) { + lua_pushinteger(L, opt->type); + lua_rawseti(L, -2, i++); + } + + return 1; +} + +static int 
packet4_agent_circuit_id(lua_State *L) +{ + struct ipoe_session *ses = luaL_checkudata(L, 1, IPOE_PACKET4); + + if (!ses) + return 0; + + if (ses->agent_circuit_id) + lua_pushlstring(L, (char *)ses->agent_circuit_id->data, ses->agent_circuit_id->len); + else + lua_pushnil(L); + + return 1; +} + +static int packet4_agent_remote_id(lua_State *L) +{ + struct ipoe_session *ses = luaL_checkudata(L, 1, IPOE_PACKET4); + + if (!ses) + return 0; + + if (ses->agent_remote_id) + lua_pushlstring(L, (char *)ses->agent_remote_id->data, ses->agent_remote_id->len); + else + lua_pushnil(L); + + return 1; +} + +static void init_lua() +{ + __serial = serial; + + L = lua_open(); + + luaL_openlibs(L); + + luaopen_lpack(L); + luaopen_packet4(L); + + if (luaL_loadfile(L, conf_filename)) + goto out_err; + + if (lua_pcall(L, 0, 0, 0)) + goto out_err; + + lua_pushlightuserdata(L, L); + luaL_getmetatable(L, IPOE_PACKET4); + lua_setmetatable(L, -2); + + lua_settop(L, 0); + + file_error = 0; + + pthread_setspecific(__key, L); + + return; + +out_err: + file_error = 1; + log_ppp_error("ipoe: lua: %s\n", lua_tostring(L, -1)); + lua_close(L); + L = NULL; +} + +/*static void stackDump (lua_State *L) { + int i=lua_gettop(L); + printf(" ---------------- Stack Dump ----------------" ); + while( i ) { + int t = lua_type(L, i); + switch (t) { + case LUA_TSTRING: + printf("%d:`%s'\n", i, lua_tostring(L, i)); + break; + case LUA_TBOOLEAN: + printf("%d: %s\n",i,lua_toboolean(L, i) ? 
"true" : "false"); + break; + case LUA_TNUMBER: + printf("%d: %g\n", i, lua_tonumber(L, i)); + break; + default: printf("%d: %s\n", i, lua_typename(L, t)); break; + } + i--; + } + printf("--------------- Stack Dump Finished ---------------" ); + }*/ + + +int ipoe_lua_set_username(struct ipoe_session *ses, const char *func) +{ + if (file_error && serial == __serial) + return -1; + + if (L && serial != __serial) { + lua_close(L); + init_lua(); + } else if (!L) + init_lua(); + + if (!L) + return -1; + + lua_getglobal(L, func); + lua_pushlightuserdata(L, ses); + + if (lua_pcall(L, 1, 1, 0)) { + log_ppp_error("ipoe: lua: %s\n", lua_tostring(L, -1)); + goto out_err; + } + + if (!lua_isstring(L, -1)) { + log_ppp_error("ipoe: lua: function '%s' must return a string\n", func); + goto out_err; + } + + ses->ses.username = _strdup(lua_tostring(L, -1)); + + lua_settop(L, 0); + + return 0; + +out_err: + file_error = 1; + lua_close(L); + L = NULL; + return -1; +} + +static void load_config() +{ + conf_filename = conf_get_opt("ipoe", "lua-file"); + + serial++; +} + +static void init() +{ + load_config(); + + pthread_key_create(&__key, (void (*)(void *))lua_close); + + triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); +} + +DEFINE_INIT(100, init); diff --git a/accel-pppd/ctrl/ipoe/lua_lpack.c b/accel-pppd/ctrl/ipoe/lua_lpack.c new file mode 100644 index 00000000..0dc3046d --- /dev/null +++ b/accel-pppd/ctrl/ipoe/lua_lpack.c @@ -0,0 +1,271 @@ +/* +* lpack.c +* a Lua library for packing and unpacking binary data +* Luiz Henrique de Figueiredo <lhf@tecgraf.puc-rio.br> +* 29 Jun 2007 19:27:20 +* This code is hereby placed in the public domain. +* with contributions from Ignacio Castaño <castanyo@yahoo.es> and +* Roberto Ierusalimschy <roberto@inf.puc-rio.br>. 
+*/ + +#define OP_ZSTRING 'z' /* zero-terminated string */ +#define OP_BSTRING 'p' /* string preceded by length byte */ +#define OP_WSTRING 'P' /* string preceded by length word */ +#define OP_SSTRING 'a' /* string preceded by length size_t */ +#define OP_STRING 'A' /* string */ +#define OP_FLOAT 'f' /* float */ +#define OP_DOUBLE 'd' /* double */ +#define OP_NUMBER 'n' /* Lua number */ +#define OP_CHAR 'c' /* char */ +#define OP_BYTE 'b' /* byte = unsigned char */ +#define OP_SHORT 'h' /* short */ +#define OP_USHORT 'H' /* unsigned short */ +#define OP_INT 'i' /* int */ +#define OP_UINT 'I' /* unsigned int */ +#define OP_LONG 'l' /* long */ +#define OP_ULONG 'L' /* unsigned long */ +#define OP_LITTLEENDIAN '<' /* little endian */ +#define OP_BIGENDIAN '>' /* big endian */ +#define OP_NATIVE '=' /* native endian */ + +#include <ctype.h> +#include <stdint.h> +#include <string.h> + +#include "lua.h" +#include "lualib.h" +#include "lauxlib.h" + +static void badcode(lua_State *L, int c) +{ + char s[]="bad code `?'"; + s[sizeof(s)-3]=c; + luaL_argerror(L,1,s); +} + +static int doendian(int c) +{ + int x=1; + int e=*(char*)&x; + if (c==OP_LITTLEENDIAN) return !e; + if (c==OP_BIGENDIAN) return e; + if (c==OP_NATIVE) return 0; + return 0; +} + +static void doswap(int swap, void *p, size_t n) +{ + if (swap) + { + char *a=p; + int i,j; + for (i=0, j=n-1, n=n/2; n--; i++, j--) + { + char t=a[i]; a[i]=a[j]; a[j]=t; + } + } +} + +#define UNPACKNUMBER(OP,T) \ + case OP: \ + { \ + T a; \ + int m=sizeof(a); \ + if (i+m>len) goto done; \ + memcpy(&a,s+i,m); \ + i+=m; \ + doswap(swap,&a,m); \ + lua_pushinteger(L,(lua_Integer)a); \ + ++n; \ + break; \ + } + +#define UNPACKSTRING(OP,T) \ + case OP: \ + { \ + T l; \ + int m=sizeof(l); \ + if (i+m>len) goto done; \ + memcpy(&l,s+i,m); \ + doswap(swap,&l,m); \ + if (i+m+l>len) goto done; \ + i+=m; \ + lua_pushlstring(L,s+i,l); \ + i+=l; \ + ++n; \ + break; \ + } + +static int l_unpack(lua_State *L) /** unpack(s,f,[init]) */ +{ + size_t 
len; + const char *s=luaL_checklstring(L,1,&len); + const char *f=luaL_checkstring(L,2); + int i=luaL_optnumber(L,3,1)-1; + int n=0; + int swap=0; + lua_pushnil(L); + while (*f) + { + int c=*f++; + int N=1; + if (isdigit(*f)) + { + N=0; + while (isdigit(*f)) N=10*N+(*f++)-'0'; + if (N==0 && c==OP_STRING) { lua_pushliteral(L,""); ++n; } + } + while (N--) switch (c) + { + case OP_LITTLEENDIAN: + case OP_BIGENDIAN: + case OP_NATIVE: + { + swap=doendian(c); + N=0; + break; + } + case OP_STRING: + { + ++N; + if (i+N>len) goto done; + lua_pushlstring(L,s+i,N); + i+=N; + ++n; + N=0; + break; + } + case OP_ZSTRING: + { + size_t l; + if (i>=len) goto done; + l=strlen(s+i); + lua_pushlstring(L,s+i,l); + i+=l+1; + ++n; + break; + } + UNPACKSTRING(OP_BSTRING, unsigned char) + UNPACKSTRING(OP_WSTRING, unsigned short) + UNPACKSTRING(OP_SSTRING, size_t) + UNPACKNUMBER(OP_NUMBER, lua_Number) + UNPACKNUMBER(OP_DOUBLE, double) + UNPACKNUMBER(OP_FLOAT, float) + UNPACKNUMBER(OP_CHAR, int8_t) + UNPACKNUMBER(OP_BYTE, uint8_t) + UNPACKNUMBER(OP_SHORT, int16_t) + UNPACKNUMBER(OP_USHORT, uint16_t) + UNPACKNUMBER(OP_INT, int32_t) + UNPACKNUMBER(OP_UINT, uint32_t) + UNPACKNUMBER(OP_LONG, int64_t) + UNPACKNUMBER(OP_ULONG, uint64_t) + case ' ': case ',': + break; + default: + badcode(L,c); + break; + } + } +done: + lua_pushnumber(L,i+1); + lua_replace(L,-n-2); + return n+1; +} + +#define PACKNUMBER(OP,T) \ + case OP: \ + { \ + T a=(T)luaL_checknumber(L,i++); \ + doswap(swap,&a,sizeof(a)); \ + luaL_addlstring(&b,(void*)&a,sizeof(a)); \ + break; \ + } + +#define PACKSTRING(OP,T) \ + case OP: \ + { \ + size_t l; \ + const char *a=luaL_checklstring(L,i++,&l); \ + T ll=(T)l; \ + doswap(swap,&ll,sizeof(ll)); \ + luaL_addlstring(&b,(void*)&ll,sizeof(ll)); \ + luaL_addlstring(&b,a,l); \ + break; \ + } + +static int l_pack(lua_State *L) /** pack(f,...) 
*/ +{ + int i=2; + const char *f=luaL_checkstring(L,1); + int swap=0; + luaL_Buffer b; + luaL_buffinit(L,&b); + while (*f) + { + int c=*f++; + int N=1; + if (isdigit(*f)) + { + N=0; + while (isdigit(*f)) N=10*N+(*f++)-'0'; + } + while (N--) switch (c) + { + case OP_LITTLEENDIAN: + case OP_BIGENDIAN: + case OP_NATIVE: + { + swap=doendian(c); + N=0; + break; + } + case OP_STRING: + case OP_ZSTRING: + { + size_t l; + const char *a=luaL_checklstring(L,i++,&l); + luaL_addlstring(&b,a,l+(c==OP_ZSTRING)); + break; + } + PACKSTRING(OP_BSTRING, unsigned char) + PACKSTRING(OP_WSTRING, unsigned short) + PACKSTRING(OP_SSTRING, size_t) + PACKNUMBER(OP_NUMBER, lua_Number) + PACKNUMBER(OP_DOUBLE, double) + PACKNUMBER(OP_FLOAT, float) + PACKNUMBER(OP_CHAR, int8_t) + PACKNUMBER(OP_BYTE, uint8_t) + PACKNUMBER(OP_SHORT, int16_t) + PACKNUMBER(OP_USHORT, uint16_t) + PACKNUMBER(OP_INT, int32_t) + PACKNUMBER(OP_UINT, uint32_t) + PACKNUMBER(OP_LONG, int64_t) + PACKNUMBER(OP_ULONG, uint64_t) + case ' ': case ',': + break; + default: + badcode(L,c); + break; + } + } + luaL_pushresult(&b); + return 1; +} + +static const luaL_reg R[] = +{ + {"pack", l_pack}, + {"unpack", l_unpack}, + {NULL, NULL} +}; + +int luaopen_lpack(lua_State *L) +{ +#ifdef USE_GLOBALS + lua_register(L,"bpack",l_pack); + lua_register(L,"bunpack",l_unpack); +#else + luaI_openlib(L, LUA_STRLIBNAME, R, 0); +#endif + return 0; +} diff --git a/accel-pppd/ctrl/l2tp/l2tp.c b/accel-pppd/ctrl/l2tp/l2tp.c index d8a98f8d..0ad8649c 100644 --- a/accel-pppd/ctrl/l2tp/l2tp.c +++ b/accel-pppd/ctrl/l2tp/l2tp.c @@ -94,7 +94,7 @@ struct l2tp_conn_t int state1; int state2; - struct ppp_ctrl_t ctrl; + struct ap_ctrl ctrl; struct ppp_t ppp; }; @@ -130,7 +130,7 @@ static void l2tp_disconnect(struct l2tp_conn_t *conn) if (conn->state == STATE_PPP) { __sync_sub_and_fetch(&stat_active, 1); conn->state = STATE_FIN; - ppp_terminate(&conn->ppp, TERM_USER_REQUEST, 1); + ap_session_terminate(&conn->ppp.ses, TERM_USER_REQUEST, 1); } else if 
(conn->state != STATE_FIN) __sync_sub_and_fetch(&stat_starting, 1); @@ -144,7 +144,7 @@ static void l2tp_disconnect(struct l2tp_conn_t *conn) if (conn->tunnel_fd != -1) close(conn->tunnel_fd); - triton_event_fire(EV_CTRL_FINISHED, &conn->ppp); + triton_event_fire(EV_CTRL_FINISHED, &conn->ppp.ses); log_ppp_info1("disconnected\n"); @@ -156,8 +156,8 @@ static void l2tp_disconnect(struct l2tp_conn_t *conn) l2tp_packet_free(pack); } - if (conn->ppp.chan_name) - _free(conn->ppp.chan_name); + if (conn->ppp.ses.chan_name) + _free(conn->ppp.ses.chan_name); if (conn->challenge_len) _free(conn->challenge.octets); _free(conn->ctrl.calling_station_id); @@ -193,8 +193,9 @@ out_err: return -1; } -static void l2tp_ppp_started(struct ppp_t *ppp) +static void l2tp_ppp_started(struct ap_session *ses) { + struct ppp_t *ppp = container_of(ses, typeof(*ppp), ses); struct l2tp_conn_t *conn = container_of(ppp, typeof(*conn), ppp); log_ppp_debug("l2tp: ppp started\n"); @@ -203,8 +204,9 @@ static void l2tp_ppp_started(struct ppp_t *ppp) triton_timer_add(&conn->ctx, &conn->hello_timer, 0); } -static void l2tp_ppp_finished(struct ppp_t *ppp) +static void l2tp_ppp_finished(struct ap_session *ses) { + struct ppp_t *ppp = container_of(ses, typeof(*ppp), ses); struct l2tp_conn_t *conn = container_of(ppp, typeof(*conn), ppp); log_ppp_debug("l2tp: ppp finished\n"); @@ -223,7 +225,7 @@ static void l2tp_conn_close(struct triton_context_t *ctx) if (conn->state == STATE_PPP) { __sync_sub_and_fetch(&stat_active, 1); conn->state = STATE_FIN; - ppp_terminate(&conn->ppp, TERM_ADMIN_RESET, 1); + ap_session_terminate(&conn->ppp.ses, TERM_ADMIN_RESET, 1); } if (l2tp_terminate(conn, 0, 0)) @@ -343,11 +345,11 @@ static int l2tp_tunnel_alloc(struct l2tp_serv_t *serv, struct l2tp_packet_t *pac u_inet_ntoa(addr.sin_addr.s_addr, conn->ctrl.called_station_id); ppp_init(&conn->ppp); - conn->ppp.ctrl = &conn->ctrl; + conn->ppp.ses.ctrl = &conn->ctrl; conn->ppp.fd = -1; conn->tunnel_fd = -1; - 
triton_context_register(&conn->ctx, &conn->ppp); + triton_context_register(&conn->ctx, &conn->ppp.ses); triton_md_register_handler(&conn->ctx, &conn->hnd); triton_md_enable_handler(&conn->hnd, MD_MODE_READ); triton_context_wakeup(&conn->ctx); @@ -419,9 +421,9 @@ static int l2tp_connect(struct l2tp_conn_t *conn) return -1; } - conn->ppp.chan_name = _strdup(inet_ntoa(conn->addr.sin_addr)); + conn->ppp.ses.chan_name = _strdup(inet_ntoa(conn->addr.sin_addr)); - triton_event_fire(EV_CTRL_STARTED, &conn->ppp); + triton_event_fire(EV_CTRL_STARTED, &conn->ppp.ses); if (establish_ppp(&conn->ppp)) return -1; @@ -654,7 +656,7 @@ static int l2tp_recv_SCCRQ(struct l2tp_serv_t *serv, struct l2tp_packet_t *pack, struct l2tp_attr_t *router_id = NULL; struct l2tp_attr_t *challenge = NULL; - if (ppp_shutdown) + if (ap_shutdown) return 0; if (triton_module_loaded("connlimit") && connlimit_check(cl_key_from_ipv4(pack->addr.sin_addr.s_addr))) @@ -863,7 +865,7 @@ static int l2tp_recv_CDN(struct l2tp_conn_t *conn, struct l2tp_packet_t *pack) if (conn->state == STATE_PPP) { __sync_sub_and_fetch(&stat_active, 1); conn->state = STATE_FIN; - ppp_terminate(&conn->ppp, TERM_USER_REQUEST, 1); + ap_session_terminate(&conn->ppp.ses, TERM_USER_REQUEST, 1); } if (l2tp_terminate(conn, 0, 0)) diff --git a/accel-pppd/ctrl/pppoe/pppoe.c b/accel-pppd/ctrl/pppoe/pppoe.c index f616ade4..2659a6f5 100644 --- a/accel-pppd/ctrl/pppoe/pppoe.c +++ b/accel-pppd/ctrl/pppoe/pppoe.c @@ -48,7 +48,7 @@ struct pppoe_conn_t struct pppoe_tag *tr101; uint8_t cookie[COOKIE_LENGTH]; - struct ppp_ctrl_t ctrl; + struct ap_ctrl ctrl; struct ppp_t ppp; #ifdef RADIUS struct rad_plugin_t radius; @@ -112,7 +112,7 @@ static void disconnect(struct pppoe_conn_t *conn) if (conn->ppp_started) { dpado_check_prev(__sync_fetch_and_sub(&stat_active, 1)); conn->ppp_started = 0; - ppp_terminate(&conn->ppp, TERM_USER_REQUEST, 1); + ap_session_terminate(&conn->ppp.ses, TERM_USER_REQUEST, 1); } pppoe_send_PADT(conn); @@ -120,7 +120,7 @@ static 
void disconnect(struct pppoe_conn_t *conn) close(conn->disc_sock); - triton_event_fire(EV_CTRL_FINISHED, &conn->ppp); + triton_event_fire(EV_CTRL_FINISHED, &conn->ppp.ses); log_ppp_info1("disconnected\n"); @@ -149,13 +149,14 @@ static void disconnect(struct pppoe_conn_t *conn) mempool_free(conn); } -static void ppp_started(struct ppp_t *ppp) +static void ppp_started(struct ap_session *ses) { log_ppp_debug("pppoe: ppp started\n"); } -static void ppp_finished(struct ppp_t *ppp) +static void ppp_finished(struct ap_session *ses) { + struct ppp_t *ppp = container_of(ses, typeof(*ppp), ses); struct pppoe_conn_t *conn = container_of(ppp, typeof(*conn), ppp); log_ppp_debug("pppoe: ppp finished\n"); @@ -172,7 +173,7 @@ static void pppoe_conn_close(struct triton_context_t *ctx) struct pppoe_conn_t *conn = container_of(ctx, typeof(*conn), ctx); if (conn->ppp_started) - ppp_terminate(&conn->ppp, TERM_ADMIN_RESET, 0); + ap_session_terminate(&conn->ppp.ses, TERM_ADMIN_RESET, 0); else disconnect(conn); } @@ -261,6 +262,7 @@ static struct pppoe_conn_t *allocate_channel(struct pppoe_serv_t *serv, const ui conn->ctrl.ctx = &conn->ctx; conn->ctrl.started = ppp_started; conn->ctrl.finished = ppp_finished; + conn->ctrl.terminate = ppp_terminate; conn->ctrl.max_mtu = MAX_PPPOE_MTU; conn->ctrl.type = CTRL_TYPE_PPPOE; conn->ctrl.name = "pppoe"; @@ -285,14 +287,14 @@ static struct pppoe_conn_t *allocate_channel(struct pppoe_serv_t *serv, const ui ppp_init(&conn->ppp); - conn->ppp.ctrl = &conn->ctrl; - conn->ppp.chan_name = conn->ctrl.calling_station_id; + conn->ppp.ses.ctrl = &conn->ctrl; + conn->ppp.ses.chan_name = conn->ctrl.calling_station_id; - triton_context_register(&conn->ctx, &conn->ppp); + triton_context_register(&conn->ctx, &conn->ppp.ses); triton_context_wakeup(&conn->ctx); - triton_event_fire(EV_CTRL_STARTING, &conn->ppp); - triton_event_fire(EV_CTRL_STARTED, &conn->ppp); + triton_event_fire(EV_CTRL_STARTING, &conn->ppp.ses); + triton_event_fire(EV_CTRL_STARTED, 
&conn->ppp.ses); conn->disc_sock = dup(serv->hnd.fd); @@ -334,7 +336,7 @@ static void connect_channel(struct pppoe_conn_t *conn) if (conn->tr101 && triton_module_loaded("radius")) { conn->radius.send_access_request = pppoe_rad_send_access_request; conn->radius.send_accounting_request = pppoe_rad_send_accounting_request; - rad_register_plugin(&conn->ppp, &conn->radius); + rad_register_plugin(&conn->ppp.ses, &conn->radius); } #endif @@ -704,7 +706,7 @@ static void pado_timer(struct triton_timer_t *t) { struct delayed_pado_t *pado = container_of(t, typeof(*pado), timer); - if (!ppp_shutdown) + if (!ap_shutdown) pppoe_send_PADO(pado->serv, pado->addr, pado->host_uniq, pado->relay_sid, pado->service_name); free_delayed_pado(pado); @@ -774,7 +776,7 @@ static void pppoe_recv_PADI(struct pppoe_serv_t *serv, uint8_t *pack, int size) __sync_add_and_fetch(&stat_PADI_recv, 1); - if (ppp_shutdown || pado_delay == -1) + if (ap_shutdown || pado_delay == -1) return; if (check_padi_limit(serv, ethhdr->h_source)) { @@ -886,7 +888,7 @@ static void pppoe_recv_PADR(struct pppoe_serv_t *serv, uint8_t *pack, int size) __sync_add_and_fetch(&stat_PADR_recv, 1); - if (ppp_shutdown) + if (ap_shutdown) return; if (!memcmp(ethhdr->h_dest, bc_addr, ETH_ALEN)) { @@ -972,7 +974,7 @@ static void pppoe_recv_PADR(struct pppoe_serv_t *serv, uint8_t *pack, int size) pthread_mutex_lock(&serv->lock); conn = find_channel(serv, (uint8_t *)ac_cookie_tag->tag_data); - if (conn && !conn->ppp.username) { + if (conn && !conn->ppp.ses.username) { __sync_add_and_fetch(&stat_PADR_dup_recv, 1); pppoe_send_PADS(conn); } @@ -1277,7 +1279,7 @@ out_err: static void _conn_stop(struct pppoe_conn_t *conn) { - ppp_terminate(&conn->ppp, TERM_ADMIN_RESET, 0); + ap_session_terminate(&conn->ppp.ses, TERM_ADMIN_RESET, 0); } static void _server_stop(struct pppoe_serv_t *serv) diff --git a/accel-pppd/ctrl/pptp/pptp.c b/accel-pppd/ctrl/pptp/pptp.c index 2f0c5efd..dddf5edc 100644 --- a/accel-pppd/ctrl/pptp/pptp.c +++ 
b/accel-pppd/ctrl/pptp/pptp.c @@ -50,7 +50,7 @@ struct pptp_conn_t int out_size; int out_pos; - struct ppp_ctrl_t ctrl; + struct ap_ctrl ctrl; struct ppp_t ppp; }; @@ -68,8 +68,8 @@ static unsigned int stat_active; static int pptp_read(struct triton_md_handler_t *h); static int pptp_write(struct triton_md_handler_t *h); static void pptp_timeout(struct triton_timer_t *); -static void ppp_started(struct ppp_t *); -static void ppp_finished(struct ppp_t *); +static void ppp_started(struct ap_session *); +static void ppp_finished(struct ap_session *); static void disconnect(struct pptp_conn_t *conn) { @@ -87,18 +87,18 @@ static void disconnect(struct pptp_conn_t *conn) if (conn->state == STATE_PPP) { __sync_sub_and_fetch(&stat_active, 1); conn->state = STATE_CLOSE; - ppp_terminate(&conn->ppp, TERM_LOST_CARRIER, 1); + ap_session_terminate(&conn->ppp.ses, TERM_LOST_CARRIER, 1); } else if (conn->state != STATE_CLOSE) __sync_sub_and_fetch(&stat_starting, 1); - triton_event_fire(EV_CTRL_FINISHED, &conn->ppp); + triton_event_fire(EV_CTRL_FINISHED, &conn->ppp.ses); log_ppp_info1("disconnected\n"); triton_context_unregister(&conn->ctx); - if (conn->ppp.chan_name) - _free(conn->ppp.chan_name); + if (conn->ppp.ses.chan_name) + _free(conn->ppp.ses.chan_name); _free(conn->in_buf); _free(conn->out_buf); @@ -325,9 +325,9 @@ static int pptp_out_call_rqst(struct pptp_conn_t *conn) conn->call_id = src_addr.sa_addr.pptp.call_id; conn->peer_call_id = msg->call_id; conn->ppp.fd = pptp_sock; - conn->ppp.chan_name = _strdup(inet_ntoa(dst_addr.sa_addr.pptp.sin_addr)); + conn->ppp.ses.chan_name = _strdup(inet_ntoa(dst_addr.sa_addr.pptp.sin_addr)); - triton_event_fire(EV_CTRL_STARTED, &conn->ppp); + triton_event_fire(EV_CTRL_STARTED, &conn->ppp.ses); if (establish_ppp(&conn->ppp)) { close(pptp_sock); @@ -379,7 +379,7 @@ static int pptp_call_clear_rqst(struct pptp_conn_t *conn) if (conn->state == STATE_PPP) { __sync_sub_and_fetch(&stat_active, 1); conn->state = STATE_CLOSE; - 
ppp_terminate(&conn->ppp, TERM_USER_REQUEST, 1); + ap_session_terminate(&conn->ppp.ses, TERM_USER_REQUEST, 1); } return send_pptp_call_disconnect_notify(conn, 4); @@ -561,7 +561,7 @@ static void pptp_close(struct triton_context_t *ctx) if (conn->state == STATE_PPP) { __sync_sub_and_fetch(&stat_active, 1); conn->state = STATE_CLOSE; - ppp_terminate(&conn->ppp, TERM_ADMIN_RESET, 1); + ap_session_terminate(&conn->ppp.ses, TERM_ADMIN_RESET, 1); if (send_pptp_call_disconnect_notify(conn, 3)) { triton_context_call(&conn->ctx, (void (*)(void*))disconnect, conn); return; @@ -578,12 +578,13 @@ static void pptp_close(struct triton_context_t *ctx) else triton_timer_add(ctx, &conn->timeout_timer, 0); } -static void ppp_started(struct ppp_t *ppp) +static void ppp_started(struct ap_session *ses) { log_ppp_debug("pptp: ppp started\n"); } -static void ppp_finished(struct ppp_t *ppp) +static void ppp_finished(struct ap_session *ses) { + struct ppp_t *ppp = container_of(ses, typeof(*ppp), ses); struct pptp_conn_t *conn = container_of(ppp, typeof(*conn), ppp); if (conn->state != STATE_CLOSE) { @@ -628,7 +629,7 @@ static int pptp_connect(struct triton_md_handler_t *h) continue; } - if (ppp_shutdown) { + if (ap_shutdown) { close(sock); continue; } @@ -667,6 +668,7 @@ static int pptp_connect(struct triton_md_handler_t *h) conn->ctrl.ctx = &conn->ctx; conn->ctrl.started = ppp_started; conn->ctrl.finished = ppp_finished; + conn->ctrl.terminate = ppp_terminate; conn->ctrl.max_mtu = PPTP_MAX_MTU; conn->ctrl.type = CTRL_TYPE_PPTP; conn->ctrl.name = "pptp"; @@ -679,15 +681,15 @@ static int pptp_connect(struct triton_md_handler_t *h) u_inet_ntoa(addr.sin_addr.s_addr, conn->ctrl.called_station_id); ppp_init(&conn->ppp); - conn->ppp.ctrl = &conn->ctrl; + conn->ppp.ses.ctrl = &conn->ctrl; - triton_context_register(&conn->ctx, &conn->ppp); + triton_context_register(&conn->ctx, &conn->ppp.ses); triton_md_register_handler(&conn->ctx, &conn->hnd); triton_md_enable_handler(&conn->hnd,MD_MODE_READ); 
triton_timer_add(&conn->ctx, &conn->timeout_timer, 0); triton_context_wakeup(&conn->ctx); - triton_event_fire(EV_CTRL_STARTING, &conn->ppp); + triton_event_fire(EV_CTRL_STARTING, &conn->ppp.ses); __sync_add_and_fetch(&stat_starting, 1); } diff --git a/accel-pppd/extra/chap-secrets.c b/accel-pppd/extra/chap-secrets.c index db1bbb2f..c0e620b1 100644 --- a/accel-pppd/extra/chap-secrets.c +++ b/accel-pppd/extra/chap-secrets.c @@ -23,7 +23,7 @@ static struct ipdb_t ipdb; struct cs_pd_t { - struct ppp_pd_t pd; + struct ap_private pd; struct ipv4db_item_t ip; char *passwd; char *rate; @@ -93,7 +93,7 @@ static int split(char *buf, char **ptr) } -static struct cs_pd_t *create_pd(struct ppp_t *ppp, const char *username) +static struct cs_pd_t *create_pd(struct ap_session *ses, const char *username) { FILE *f; char *buf; @@ -161,7 +161,7 @@ found: if (n >= 4) pd->rate = _strdup(ptr[3]); - list_add_tail(&pd->pd.entry, &ppp->pd_list); + list_add_tail(&pd->pd.entry, &ses->pd_list); fclose(f); _free(buf); @@ -169,11 +169,11 @@ found: return pd; } -static struct cs_pd_t *find_pd(struct ppp_t *ppp) +static struct cs_pd_t *find_pd(struct ap_session *ses) { - struct ppp_pd_t *pd; + struct ap_private *pd; - list_for_each_entry(pd, &ppp->pd_list, entry) { + list_for_each_entry(pd, &ses->pd_list, entry) { if (pd->key == &pd_key) { return container_of(pd, typeof(struct cs_pd_t), pd); } @@ -182,9 +182,9 @@ static struct cs_pd_t *find_pd(struct ppp_t *ppp) return NULL; } -static void ev_ppp_finished(struct ppp_t *ppp) +static void ev_ses_finished(struct ap_session *ses) { - struct cs_pd_t *pd = find_pd(ppp); + struct cs_pd_t *pd = find_pd(ses); if (!pd) return; @@ -196,11 +196,11 @@ static void ev_ppp_finished(struct ppp_t *ppp) _free(pd); } -static void ev_ppp_pre_up(struct ppp_t *ppp) +static void ev_ses_pre_up(struct ap_session *ses) { - struct cs_pd_t *pd = find_pd(ppp); + struct cs_pd_t *pd = find_pd(ses); struct ev_shaper_t ev = { - .ppp = ppp, + .ses = ses, }; if (!pd) @@ -212,14 
+212,14 @@ static void ev_ppp_pre_up(struct ppp_t *ppp) } } -static struct ipv4db_item_t *get_ip(struct ppp_t *ppp) +static struct ipv4db_item_t *get_ip(struct ap_session *ses) { struct cs_pd_t *pd; if (!conf_gw_ip_address) return NULL; - pd = find_pd(ppp); + pd = find_pd(ses); if (!pd) return NULL; @@ -230,12 +230,12 @@ static struct ipv4db_item_t *get_ip(struct ppp_t *ppp) return &pd->ip; } -static char* get_passwd(struct pwdb_t *pwdb, struct ppp_t *ppp, const char *username) +static char* get_passwd(struct pwdb_t *pwdb, struct ap_session *ses, const char *username) { - struct cs_pd_t *pd = find_pd(ppp); + struct cs_pd_t *pd = find_pd(ses); if (!pd) - pd = create_pd(ppp, username); + pd = create_pd(ses, username); if (!pd) return NULL; @@ -275,8 +275,8 @@ static void init(void) pwdb_register(&pwdb); ipdb_register(&ipdb); - triton_event_register_handler(EV_PPP_FINISHED, (triton_event_func)ev_ppp_finished); - triton_event_register_handler(EV_PPP_PRE_UP, (triton_event_func)ev_ppp_pre_up); + triton_event_register_handler(EV_SES_FINISHED, (triton_event_func)ev_ses_finished); + triton_event_register_handler(EV_SES_PRE_UP, (triton_event_func)ev_ses_pre_up); triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); } diff --git a/accel-pppd/extra/ippool.c b/accel-pppd/extra/ippool.c index fc4fd182..ccb3b2a2 100644 --- a/accel-pppd/extra/ippool.c +++ b/accel-pppd/extra/ippool.c @@ -9,6 +9,8 @@ #include "ipdb.h" #include "list.h" #include "spinlock.h" +#include "backup.h" +#include "ap_session_backup.h" #ifdef RADIUS #include "radius.h" @@ -23,6 +25,8 @@ struct ippool_t struct list_head gw_list; struct list_head tunnel_list; struct list_head items; + uint32_t startip; + uint32_t endip; spinlock_t lock; }; @@ -143,7 +147,7 @@ static int parse2(const char *str, uint32_t *begin, uint32_t *end) return 0; } -static void add_range(struct list_head *list, const char *name) +static void add_range(struct ippool_t *p, struct list_head *list, const char *name) 
{ uint32_t i,startip, endip; struct ipaddr_t *ip; @@ -161,6 +165,9 @@ static void add_range(struct list_head *list, const char *name) list_add_tail(&ip->entry, list); cnt++; } + + p->startip = startip; + p->endip = endip; } static void generate_pool(struct ippool_t *p) @@ -205,13 +212,13 @@ static void generate_pool(struct ippool_t *p) } } -static struct ipv4db_item_t *get_ip(struct ppp_t *ppp) +static struct ipv4db_item_t *get_ip(struct ap_session *ses) { struct ippool_item_t *it; struct ippool_t *p; - if (ppp->ipv4_pool_name) - p = find_pool(ppp->ipv4_pool_name, 0); + if (ses->ipv4_pool_name) + p = find_pool(ses->ipv4_pool_name, 0); else p = def_pool; @@ -229,7 +236,7 @@ static struct ipv4db_item_t *get_ip(struct ppp_t *ppp) return it ? &it->it : NULL; } -static void put_ip(struct ppp_t *ppp, struct ipv4db_item_t *it) +static void put_ip(struct ap_session *ses, struct ipv4db_item_t *it) { struct ippool_item_t *pit = container_of(it, typeof(*pit), it); @@ -243,13 +250,96 @@ static struct ipdb_t ipdb = { .put_ipv4 = put_ip, }; +#ifdef USE_BACKUP +static void put_ip_b(struct ap_session *ses, struct ipv4db_item_t *it) +{ + _free(it); +} + +static struct ipdb_t ipdb_b = { + .put_ipv4 = put_ip_b, +}; + +static int session_save(struct ap_session *ses, struct backup_mod *m) +{ + if (!ses->ipv4 || ses->ipv4->owner != &ipdb) + return -2; + + return 0; +} + +static int session_restore(struct ap_session *ses, struct backup_mod *m) +{ + struct backup_tag *tag; + in_addr_t addr = 0, peer_addr; + struct ippool_t *p; + struct ippool_item_t *it, *it0 = NULL; + + m = backup_find_mod(m->data, MODID_COMMON); + + list_for_each_entry(tag, &m->tag_list, entry) { + switch (tag->id) { + case SES_TAG_IPV4_ADDR: + addr = *(in_addr_t *)tag->data; + break; + case SES_TAG_IPV4_PEER_ADDR: + peer_addr = *(in_addr_t *)tag->data; + break; + } + } + + spin_lock(&def_pool->lock); + list_for_each_entry(it, &def_pool->items, entry) { + if (peer_addr == it->it.peer_addr && addr == it->it.addr) { + 
list_del(&it->entry); + it0 = it; + break; + } + } + spin_unlock(&def_pool->lock); + + if (!it0) { + list_for_each_entry(p, &pool_list, entry) { + spin_lock(&p->lock); + list_for_each_entry(it, &p->items, entry) { + if (peer_addr == it->it.peer_addr && addr == it->it.addr) { + list_del(&it->entry); + it0 = it; + break; + } + } + spin_unlock(&p->lock); + if (it0) + break; + } + } + + if (it0) + ses->ipv4 = &it0->it; + else { + ses->ipv4 = _malloc(sizeof(*ses->ipv4)); + ses->ipv4->addr = addr; + ses->ipv4->peer_addr = peer_addr; + ses->ipv4->owner = &ipdb_b; + } + + return 0; +} + +static struct backup_module backup_mod = { + .id = MODID_IPPOOL, + .save = session_save, + .restore = session_restore, +}; +#endif + #ifdef RADIUS -static int parse_attr(struct ppp_t *ppp, struct rad_attr_t *attr) +static int parse_attr(struct ap_session *ses, struct rad_attr_t *attr) { if (attr->len > sizeof("ip:addr-pool=") && memcmp(attr->val.string, "ip:addr-pool=", sizeof("ip:addr-pool=") - 1) == 0) - ppp->ipv4_pool_name = _strdup(attr->val.string + sizeof("ip:addr-pool=") - 1); + ses->ipv4_pool_name = _strdup(attr->val.string + sizeof("ip:addr-pool=") - 1); else if (!attr->vendor) - ppp->ipv4_pool_name = _strdup(attr->val.string); + ses->ipv4_pool_name = _strdup(attr->val.string); else return -1; @@ -269,7 +359,7 @@ static void ev_radius_access_accept(struct ev_radius_t *ev) continue; if (attr->attr->id != conf_attr) continue; - if (parse_attr(ev->ppp, attr)) + if (parse_attr(ev->ses, attr)) continue; break; } @@ -348,11 +438,11 @@ static void ippool_init(void) p = pool_name ? 
find_pool(pool_name + 1, 1) : def_pool; if (!strcmp(opt->name, "gw")) - add_range(&p->gw_list, opt->val); + add_range(p, &p->gw_list, opt->val); else if (!strcmp(opt->name, "tunnel")) - add_range(&p->tunnel_list, opt->val); + add_range(p, &p->tunnel_list, opt->val); else if (!opt->val) - add_range(&p->tunnel_list, opt->name); + add_range(p, &p->tunnel_list, opt->name); } } @@ -363,6 +453,10 @@ static void ippool_init(void) ipdb_register(&ipdb); +#ifdef USE_BACKUP + backup_register_module(&backup_mod); +#endif + #ifdef RADIUS if (triton_module_loaded("radius")) triton_event_register_handler(EV_RADIUS_ACCESS_ACCEPT, (triton_event_func)ev_radius_access_accept); diff --git a/accel-pppd/extra/ipv6pool.c b/accel-pppd/extra/ipv6pool.c index 59bda253..5a9dde96 100644 --- a/accel-pppd/extra/ipv6pool.c +++ b/accel-pppd/extra/ipv6pool.c @@ -129,7 +129,7 @@ err: _free(val); } -static struct ipv6db_item_t *get_ip(struct ppp_t *ppp) +static struct ipv6db_item_t *get_ip(struct ap_session *ses) { struct ippool_item_t *it; @@ -146,7 +146,7 @@ static struct ipv6db_item_t *get_ip(struct ppp_t *ppp) return it ? &it->it : NULL; } -static void put_ip(struct ppp_t *ppp, struct ipv6db_item_t *it) +static void put_ip(struct ap_session *ses, struct ipv6db_item_t *it) { struct ippool_item_t *pit = container_of(it, typeof(*pit), it); @@ -155,7 +155,7 @@ static void put_ip(struct ppp_t *ppp, struct ipv6db_item_t *it) spin_unlock(&pool_lock); } -static struct ipv6db_prefix_t *get_dp(struct ppp_t *ppp) +static struct ipv6db_prefix_t *get_dp(struct ap_session *ses) { struct dppool_item_t *it; @@ -170,7 +170,7 @@ static struct ipv6db_prefix_t *get_dp(struct ppp_t *ppp) return it ? 
&it->it : NULL; } -static void put_dp(struct ppp_t *ppp, struct ipv6db_prefix_t *it) +static void put_dp(struct ap_session *ses, struct ipv6db_prefix_t *it) { struct dppool_item_t *pit = container_of(it, typeof(*pit), it); diff --git a/accel-pppd/extra/logwtmp.c b/accel-pppd/extra/logwtmp.c index 5848102e..9f31d694 100644 --- a/accel-pppd/extra/logwtmp.c +++ b/accel-pppd/extra/logwtmp.c @@ -14,20 +14,20 @@ #include "memdebug.h" -static void ev_ppp_started(struct ppp_t *ppp) +static void ev_ses_started(struct ap_session *ses) { - logwtmp(ppp->ifname, ppp->username, ppp->ctrl->calling_station_id); + logwtmp(ses->ifname, ses->username, ses->ctrl->calling_station_id); } -static void ev_ppp_finished(struct ppp_t *ppp) +static void ev_ses_finished(struct ap_session *ses) { - logwtmp(ppp->ifname, "", ""); + logwtmp(ses->ifname, "", ""); } static void init(void) { - triton_event_register_handler(EV_PPP_STARTED, (triton_event_func)ev_ppp_started); - triton_event_register_handler(EV_PPP_FINISHED, (triton_event_func)ev_ppp_finished); + triton_event_register_handler(EV_SES_STARTED, (triton_event_func)ev_ses_started); + triton_event_register_handler(EV_SES_FINISHED, (triton_event_func)ev_ses_finished); } DEFINE_INIT(200, init); diff --git a/accel-pppd/extra/net-snmp/sessionTable.h b/accel-pppd/extra/net-snmp/sessionTable.h index f5619d98..645ddd35 100644 --- a/accel-pppd/extra/net-snmp/sessionTable.h +++ b/accel-pppd/extra/net-snmp/sessionTable.h @@ -75,7 +75,7 @@ typedef netsnmp_data_list sessionTable_registration; */ struct sessionTable_data_s { - char ifname[PPP_IFNAME_LEN]; + char ifname[AP_IFNAME_LEN]; char *username; in_addr_t peer_addr; int type; @@ -96,7 +96,7 @@ typedef struct sessionTable_mib_index_s { /* * sesSID(1)/OCTETSTR/ASN_OCTET_STR/char(char)//L/A/w/e/R/d/h */ - char sesSID[PPP_SESSIONID_LEN]; + char sesSID[AP_SESSIONID_LEN]; size_t sesSID_len; @@ -111,7 +111,7 @@ typedef struct sessionTable_mib_index_s { * POSSIBLE LENGHT FOR EVERY VARIABLE LENGTH INDEX! 
* Guessing 128 - col/entry(2) - oid len(10) */ -#define MAX_sessionTable_IDX_LEN PPP_SESSIONID_LEN + 1 +#define MAX_sessionTable_IDX_LEN AP_SESSIONID_LEN + 1 /* ********************************************************************* diff --git a/accel-pppd/extra/net-snmp/sessionTable_data_access.c b/accel-pppd/extra/net-snmp/sessionTable_data_access.c index 0d38e5df..4821d38d 100644 --- a/accel-pppd/extra/net-snmp/sessionTable_data_access.c +++ b/accel-pppd/extra/net-snmp/sessionTable_data_access.c @@ -197,23 +197,23 @@ sessionTable_container_load(netsnmp_container *container) { sessionTable_rowreq_ctx *rowreq_ctx; size_t count = 0; - struct ppp_t *ppp; + struct ap_session *ses; time_t t; time(&t); DEBUGMSGTL(("verbose:sessionTable:sessionTable_container_load","called\n")); - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { rowreq_ctx = sessionTable_allocate_rowreq_ctx(NULL, NULL); if (NULL == rowreq_ctx) { - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); snmp_log(LOG_ERR, "memory allocation failed\n"); return MFD_RESOURCE_UNAVAILABLE; } if(MFD_SUCCESS != sessionTable_indexes_set(rowreq_ctx - , ppp->sessionid, PPP_SESSIONID_LEN + , ses->sessionid, AP_SESSIONID_LEN )) { snmp_log(LOG_ERR,"error setting index while loading " "sessionTable data->\n"); @@ -221,24 +221,24 @@ sessionTable_container_load(netsnmp_container *container) continue; } - strcpy(rowreq_ctx->data->ifname, ppp->ifname); + strcpy(rowreq_ctx->data->ifname, ses->ifname); - if (ppp->username) - rowreq_ctx->data->username = strdup(ppp->username); + if (ses->username) + rowreq_ctx->data->username = strdup(ses->username); else - ppp->username = strdup(""); + ses->username = strdup(""); - rowreq_ctx->data->peer_addr = ppp->ipv4 ? 
ppp->ipv4->peer_addr : 0; - rowreq_ctx->data->type = ppp->ctrl->type; - rowreq_ctx->data->state = ppp->state; - rowreq_ctx->data->uptime = (ppp->stop_time ? ppp->stop_time : t) - ppp->start_time; - rowreq_ctx->data->calling_sid = strdup(ppp->ctrl->calling_station_id); - rowreq_ctx->data->called_sid = strdup(ppp->ctrl->called_station_id); + rowreq_ctx->data->peer_addr = ses->ipv4 ? ses->ipv4->peer_addr : 0; + rowreq_ctx->data->type = ses->ctrl->type; + rowreq_ctx->data->state = ses->state; + rowreq_ctx->data->uptime = (ses->stop_time ? ses->stop_time : t) - ses->start_time; + rowreq_ctx->data->calling_sid = strdup(ses->ctrl->calling_station_id); + rowreq_ctx->data->called_sid = strdup(ses->ctrl->called_station_id); CONTAINER_INSERT(container, rowreq_ctx); ++count; } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); DEBUGMSGT(("verbose:sessionTable:sessionTable_container_load", "inserted %d records\n", count)); diff --git a/accel-pppd/extra/net-snmp/statPPP.c b/accel-pppd/extra/net-snmp/statPPP.c index 7199b570..db8918b7 100644 --- a/accel-pppd/extra/net-snmp/statPPP.c +++ b/accel-pppd/extra/net-snmp/statPPP.c @@ -53,7 +53,7 @@ init_statPPP(void) statPPPStarting_oid, OID_LENGTH(statPPPStarting_oid), HANDLER_CAN_RONLY); winfo = netsnmp_create_watcher_info( - &ppp_stat.starting, sizeof(ppp_stat.starting), + &ap_session_stat.starting, sizeof(ap_session_stat.starting), ASN_INTEGER, WATCHER_FIXED_SIZE); if (netsnmp_register_watched_scalar( reg, winfo ) < 0 ) { snmp_log( LOG_ERR, "Failed to register watched statPPPStarting" ); @@ -67,7 +67,7 @@ init_statPPP(void) statPPPActive_oid, OID_LENGTH(statPPPActive_oid), HANDLER_CAN_RONLY); winfo = netsnmp_create_watcher_info( - &ppp_stat.active, sizeof(ppp_stat.active), + &ap_session_stat.active, sizeof(ap_session_stat.active), ASN_INTEGER, WATCHER_FIXED_SIZE); if (netsnmp_register_watched_scalar( reg, winfo ) < 0 ) { snmp_log( LOG_ERR, "Failed to register watched statPPPActive" ); @@ -81,7 +81,7 @@ 
init_statPPP(void) statPPPFinishing_oid, OID_LENGTH(statPPPFinishing_oid), HANDLER_CAN_RONLY); winfo = netsnmp_create_watcher_info( - &ppp_stat.finishing, sizeof(ppp_stat.finishing), + &ap_session_stat.finishing, sizeof(ap_session_stat.finishing), ASN_INTEGER, WATCHER_FIXED_SIZE); if (netsnmp_register_watched_scalar( reg, winfo ) < 0 ) { snmp_log( LOG_ERR, "Failed to register watched statPPPFinishing" ); diff --git a/accel-pppd/extra/net-snmp/terminate.c b/accel-pppd/extra/net-snmp/terminate.c index abe92df0..23d355a2 100644 --- a/accel-pppd/extra/net-snmp/terminate.c +++ b/accel-pppd/extra/net-snmp/terminate.c @@ -16,81 +16,81 @@ #include "terminate.h" -static void __terminate(struct ppp_t *ppp) +static void __terminate(struct ap_session *ses) { - ppp_terminate(ppp, TERM_ADMIN_RESET, 0); + ap_session_terminate(ses, TERM_ADMIN_RESET, 0); } static void terminate_by_sid(const char *val) { - struct ppp_t *ppp; + struct ap_session *ses; - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { - if (strncmp(ppp->sessionid, val, PPP_SESSIONID_LEN)) + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + if (strncmp(ses->sessionid, val, AP_SESSIONID_LEN)) continue; - triton_context_call(ppp->ctrl->ctx, (triton_event_func)__terminate, ppp); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate, ses); break; } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); } static void terminate_by_ifname(const char *val, size_t len) { - struct ppp_t *ppp; + struct ap_session *ses; size_t n; - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { - n = strlen(ppp->ifname); + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + n = strlen(ses->ifname); if (n != len) continue; - if (strncmp(ppp->ifname, val, len)) + if (strncmp(ses->ifname, val, len)) continue; - triton_context_call(ppp->ctrl->ctx, (triton_event_func)__terminate, ppp); + 
triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate, ses); break; } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); } static void terminate_by_ip(const char *val, size_t len) { char str[len + 1]; in_addr_t addr; - struct ppp_t *ppp; + struct ap_session *ses; strncpy(str, val, len); str[len] = 0; addr = inet_addr(str); - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { - if (!ppp->ipv4 || ppp->ipv4->peer_addr != addr) + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + if (!ses->ipv4 || ses->ipv4->peer_addr != addr) continue; - triton_context_call(ppp->ctrl->ctx, (triton_event_func)__terminate, ppp); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate, ses); break; } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); } static void terminate_by_username(const char *val, size_t len) { - struct ppp_t *ppp; + struct ap_session *ses; size_t n; - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { - if (!ppp->username) + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + if (!ses->username) continue; - n = strlen(ppp->username); + n = strlen(ses->username); if (n != len) continue; - if (strncmp(ppp->username, val, len)) + if (strncmp(ses->username, val, len)) continue; - triton_context_call(ppp->ctrl->ctx, (triton_event_func)__terminate, ppp); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate, ses); } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); } @@ -154,7 +154,7 @@ handle_termBySID(netsnmp_mib_handler *handler, */ case MODE_SET_RESERVE1: /* or you could use netsnmp_check_vb_type_and_size instead */ - ret = netsnmp_check_vb_type_and_size(requests->requestvb, ASN_OCTET_STR, PPP_SESSIONID_LEN); + ret = netsnmp_check_vb_type_and_size(requests->requestvb, ASN_OCTET_STR, AP_SESSIONID_LEN); if ( ret != SNMP_ERR_NOERROR ) { 
netsnmp_set_request_error(reqinfo, requests, ret ); } diff --git a/accel-pppd/extra/pppd_compat.c b/accel-pppd/extra/pppd_compat.c index d89a6a6a..fcb83c25 100644 --- a/accel-pppd/extra/pppd_compat.c +++ b/accel-pppd/extra/pppd_compat.c @@ -36,8 +36,8 @@ static void *pd_key; struct pppd_compat_pd_t { - struct ppp_pd_t pd; - struct ppp_t *ppp; + struct ap_private pd; + struct ap_session *ses; struct sigchld_handler_t ip_pre_up_hnd; struct sigchld_handler_t ip_up_hnd; struct sigchld_handler_t ip_change_hnd; @@ -53,31 +53,31 @@ struct pppd_compat_pd_t in_addr_t ipv4_peer_addr; }; -static struct pppd_compat_pd_t *find_pd(struct ppp_t *ppp); +static struct pppd_compat_pd_t *find_pd(struct ap_session *ses); static void fill_argv(char **argv, struct pppd_compat_pd_t *pd, char *path); static void fill_env(char **env, struct pppd_compat_pd_t *pd); #ifdef RADIUS -static void remove_radattr(struct ppp_t *ppp); -static void write_radattr(struct ppp_t *ppp, struct rad_packet_t *pack, int save_old); +static void remove_radattr(struct ap_session *ses); +static void write_radattr(struct ap_session *ses, struct rad_packet_t *pack, int save_old); #endif static void ip_pre_up_handler(struct sigchld_handler_t *h, int status) { struct pppd_compat_pd_t *pd = container_of(h, typeof(*pd), ip_pre_up_hnd); if (conf_verbose) { - log_switch(NULL, pd->ppp); + log_switch(NULL, pd->ses); log_ppp_info2("pppd_compat: ip-pre-up finished (%i)\n", status); } sched_yield(); pd->res = status; - triton_context_wakeup(pd->ppp->ctrl->ctx); + triton_context_wakeup(pd->ses->ctrl->ctx); } static void ip_up_handler(struct sigchld_handler_t *h, int status) { struct pppd_compat_pd_t *pd = container_of(h, typeof(*pd), ip_up_hnd); if (conf_verbose) { - log_switch(NULL, pd->ppp); + log_switch(NULL, pd->ses); log_ppp_info2("pppd_compat: ip-up finished (%i)\n", status); } } @@ -86,29 +86,30 @@ static void ip_down_handler(struct sigchld_handler_t *h, int status) { struct pppd_compat_pd_t *pd = container_of(h, 
typeof(*pd), ip_down_hnd); if (conf_verbose) { - log_switch(NULL, pd->ppp); + log_switch(NULL, pd->ses); log_ppp_info2("pppd_compat: ip-down finished (%i)\n", status); } sched_yield(); - triton_context_wakeup(pd->ppp->ctrl->ctx); + triton_context_wakeup(pd->ses->ctrl->ctx); } static void ip_change_handler(struct sigchld_handler_t *h, int status) { struct pppd_compat_pd_t *pd = container_of(h, typeof(*pd), ip_change_hnd); if (conf_verbose) { - log_switch(NULL, pd->ppp); + log_switch(NULL, pd->ses); log_ppp_info2("pppd_compat: ip-change finished (%i)\n", status); } sched_yield(); pd->res = status; - triton_context_wakeup(pd->ppp->ctrl->ctx); + triton_context_wakeup(pd->ses->ctrl->ctx); } -static void ev_ppp_starting(struct ppp_t *ppp) +static void ev_ses_starting(struct ap_session *ses) { - struct pppd_compat_pd_t *pd = _malloc(sizeof(*pd)); - + struct pppd_compat_pd_t *pd; + + pd = _malloc(sizeof(*pd)); if (!pd) { log_emerg("pppd_compat: out of memory\n"); return; @@ -116,15 +117,15 @@ static void ev_ppp_starting(struct ppp_t *ppp) memset(pd, 0, sizeof(*pd)); pd->pd.key = &pd_key; - pd->ppp = ppp; + pd->ses = ses; pd->ip_pre_up_hnd.handler = ip_pre_up_handler; pd->ip_up_hnd.handler = ip_up_handler; pd->ip_down_hnd.handler = ip_down_handler; pd->ip_change_hnd.handler = ip_change_handler; - list_add_tail(&pd->pd.entry, &ppp->pd_list); + list_add_tail(&pd->pd.entry, &ses->pd_list); } -static void ev_ppp_pre_up(struct ppp_t *ppp) +static void ev_ses_pre_up(struct ap_session *ses) { pid_t pid; char *argv[8]; @@ -132,14 +133,14 @@ static void ev_ppp_pre_up(struct ppp_t *ppp) char ipaddr[17]; char peer_ipaddr[17]; char peername[64]; - struct pppd_compat_pd_t *pd = find_pd(ppp); + struct pppd_compat_pd_t *pd = find_pd(ses); if (!pd) return; - if (ppp->ipv4) { - pd->ipv4_addr = ppp->ipv4->addr; - pd->ipv4_peer_addr = ppp->ipv4->peer_addr; + if (ses->ipv4) { + pd->ipv4_addr = ses->ipv4->addr; + pd->ipv4_peer_addr = ses->ipv4->peer_addr; } argv[4] = ipaddr; @@ -163,7 +164,7 @@ 
static void ev_ppp_pre_up(struct ppp_t *ppp) pthread_mutex_lock(&pd->ip_pre_up_hnd.lock); pthread_mutex_unlock(&pd->ip_pre_up_hnd.lock); if (pd->res != 0) { - ppp_terminate(ppp, pd->res > 127 ? TERM_NAS_ERROR : TERM_ADMIN_RESET, 0); + ap_session_terminate(ses, pd->res > 127 ? TERM_NAS_ERROR : TERM_ADMIN_RESET, 0); return; } } else if (pid == 0) { @@ -179,7 +180,7 @@ static void ev_ppp_pre_up(struct ppp_t *ppp) } } -static void ev_ppp_started(struct ppp_t *ppp) +static void ev_ses_started(struct ap_session *ses) { pid_t pid; char *argv[8]; @@ -187,7 +188,7 @@ static void ev_ppp_started(struct ppp_t *ppp) char ipaddr[17]; char peer_ipaddr[17]; char peername[64]; - struct pppd_compat_pd_t *pd = find_pd(ppp); + struct pppd_compat_pd_t *pd = find_pd(ses); if (!pd) return; @@ -224,28 +225,32 @@ static void ev_ppp_started(struct ppp_t *ppp) pd->started = 1; } -static void ev_ppp_finishing(struct ppp_t *ppp) +static void ev_ses_finishing(struct ap_session *ses) { struct ifpppstatsreq ifreq; - struct pppd_compat_pd_t *pd = find_pd(ppp); + struct pppd_compat_pd_t *pd = find_pd(ses); if (!pd) return; - memset(&ifreq, 0, sizeof(ifreq)); - ifreq.stats_ptr = (void *)&ifreq.stats; - strcpy(ifreq.ifr__name, ppp->ifname); + if (ses->ctrl->type == CTRL_TYPE_IPOE) { - if (ioctl(sock_fd, SIOCGPPPSTATS, &ifreq)) { - log_ppp_error("pppd_compat: failed to get ppp statistics: %s\n", strerror(errno)); - return; - } + } else { + memset(&ifreq, 0, sizeof(ifreq)); + ifreq.stats_ptr = (void *)&ifreq.stats; + strcpy(ifreq.ifr__name, ses->ifname); - pd->bytes_sent = ifreq.stats.p.ppp_obytes; - pd->bytes_rcvd = ifreq.stats.p.ppp_ibytes; + if (ioctl(sock_fd, SIOCGPPPSTATS, &ifreq)) { + log_ppp_error("pppd_compat: failed to get ppp statistics: %s\n", strerror(errno)); + return; + } + + pd->bytes_sent = ifreq.stats.p.ppp_obytes; + pd->bytes_rcvd = ifreq.stats.p.ppp_ibytes; + } } -static void ev_ppp_finished(struct ppp_t *ppp) +static void ev_ses_finished(struct ap_session *ses) { pid_t pid; char 
*argv[8]; @@ -256,7 +261,7 @@ static void ev_ppp_finished(struct ppp_t *ppp) char connect_time[24]; char bytes_sent[24]; char bytes_rcvd[24]; - struct pppd_compat_pd_t *pd = find_pd(ppp); + struct pppd_compat_pd_t *pd = find_pd(ses); if (!pd) return; @@ -319,7 +324,7 @@ static void ev_ppp_finished(struct ppp_t *ppp) skip: #ifdef RADIUS if (pd->radattr_saved) - remove_radattr(ppp); + remove_radattr(ses); #endif list_del(&pd->pd.entry); @@ -329,9 +334,12 @@ skip: #ifdef RADIUS static void ev_radius_access_accept(struct ev_radius_t *ev) { - struct pppd_compat_pd_t *pd = find_pd(ev->ppp); + struct pppd_compat_pd_t *pd = find_pd(ev->ses); + + if (!pd) + return; - write_radattr(ev->ppp, ev->reply, 0); + write_radattr(ev->ses, ev->reply, 0); pd->radattr_saved = 1; } @@ -344,12 +352,12 @@ static void ev_radius_coa(struct ev_radius_t *ev) char ipaddr[17]; char peer_ipaddr[17]; char peername[64]; - struct pppd_compat_pd_t *pd = find_pd(ev->ppp); + struct pppd_compat_pd_t *pd = find_pd(ev->ses); if (!pd) return; - write_radattr(ev->ppp, ev->request, 1); + write_radattr(ev->ses, ev->request, 1); argv[4] = ipaddr; argv[5] = peer_ipaddr; @@ -378,7 +386,7 @@ static void ev_radius_coa(struct ev_radius_t *ev) log_error("pppd_compat: fork: %s\n", strerror(errno)); } -static void remove_radattr(struct ppp_t *ppp) +static void remove_radattr(struct ap_session *ses) { char *fname; @@ -388,17 +396,17 @@ static void remove_radattr(struct ppp_t *ppp) return; } - sprintf(fname, "%s.%s", conf_radattr_prefix, ppp->ifname); + sprintf(fname, "%s.%s", conf_radattr_prefix, ses->ifname); if (unlink(fname)) { log_ppp_warn("pppd_compat: failed to remove '%s': %s\n", fname, strerror(errno)); } - sprintf(fname, "%s_old.%s", conf_radattr_prefix, ppp->ifname); + sprintf(fname, "%s_old.%s", conf_radattr_prefix, ses->ifname); unlink(fname); _free(fname); } -static void write_radattr(struct ppp_t *ppp, struct rad_packet_t *pack, int save_old) +static void write_radattr(struct ap_session *ses, struct 
rad_packet_t *pack, int save_old) { struct rad_attr_t *attr; struct rad_dict_value_t *val; @@ -421,9 +429,9 @@ static void write_radattr(struct ppp_t *ppp, struct rad_packet_t *pack, int save } } - sprintf(fname1, "%s.%s", conf_radattr_prefix, ppp->ifname); + sprintf(fname1, "%s.%s", conf_radattr_prefix, ses->ifname); if (save_old) { - sprintf(fname2, "%s_old.%s", conf_radattr_prefix, ppp->ifname); + sprintf(fname2, "%s_old.%s", conf_radattr_prefix, ses->ifname); if (rename(fname1, fname2)) { log_ppp_warn("pppd_compat: rename: %s\n", strerror(errno)); } @@ -467,40 +475,40 @@ static void write_radattr(struct ppp_t *ppp, struct rad_packet_t *pack, int save } #endif -static struct pppd_compat_pd_t *find_pd(struct ppp_t *ppp) +static struct pppd_compat_pd_t *find_pd(struct ap_session *ses) { - struct ppp_pd_t *pd; + struct ap_private *pd; struct pppd_compat_pd_t *cpd; - list_for_each_entry(pd, &ppp->pd_list, entry) { + list_for_each_entry(pd, &ses->pd_list, entry) { if (pd->key == &pd_key) { cpd = container_of(pd, typeof(*cpd), pd); return cpd; } } - log_ppp_warn("pppd_compat: pd not found\n"); + //log_ppp_warn("pppd_compat: pd not found\n"); return NULL; } static void fill_argv(char **argv, struct pppd_compat_pd_t *pd, char *path) { argv[0] = path; - argv[1] = pd->ppp->ifname; + argv[1] = pd->ses->ifname; argv[2] = "none"; argv[3] = "0"; u_inet_ntoa(pd->ipv4_addr, argv[4]); u_inet_ntoa(pd->ipv4_peer_addr, argv[5]); - argv[6] = pd->ppp->ctrl->calling_station_id; + argv[6] = pd->ses->ctrl->calling_station_id; argv[7] = NULL; } static void fill_env(char **env, struct pppd_compat_pd_t *pd) { - snprintf(env[0], 64, "PEERNAME=%s", pd->ppp->username); + snprintf(env[0], 64, "PEERNAME=%s", pd->ses->username); - if (pd->ppp->stop_time && env[1]) { - snprintf(env[1], 24, "CONNECT_TIME=%lu", pd->ppp->stop_time - pd->ppp->start_time); + if (pd->ses->stop_time && env[1]) { + snprintf(env[1], 24, "CONNECT_TIME=%lu", pd->ses->stop_time - pd->ses->start_time); snprintf(env[2], 24, 
"BYTES_SENT=%u", pd->bytes_sent); snprintf(env[3], 24, "BYTES_RCVD=%u", pd->bytes_rcvd); } @@ -534,11 +542,11 @@ static void init(void) if (opt && atoi(opt) > 0) conf_verbose = 1; - triton_event_register_handler(EV_PPP_STARTING, (triton_event_func)ev_ppp_starting); - triton_event_register_handler(EV_PPP_PRE_UP, (triton_event_func)ev_ppp_pre_up); - triton_event_register_handler(EV_PPP_STARTED, (triton_event_func)ev_ppp_started); - triton_event_register_handler(EV_PPP_FINISHING, (triton_event_func)ev_ppp_finishing); - triton_event_register_handler(EV_PPP_PRE_FINISHED, (triton_event_func)ev_ppp_finished); + triton_event_register_handler(EV_SES_STARTING, (triton_event_func)ev_ses_starting); + triton_event_register_handler(EV_SES_PRE_UP, (triton_event_func)ev_ses_pre_up); + triton_event_register_handler(EV_SES_STARTED, (triton_event_func)ev_ses_started); + triton_event_register_handler(EV_SES_FINISHING, (triton_event_func)ev_ses_finishing); + triton_event_register_handler(EV_SES_PRE_FINISHED, (triton_event_func)ev_ses_finished); #ifdef RADIUS if (triton_module_loaded("radius")) { triton_event_register_handler(EV_RADIUS_ACCESS_ACCEPT, (triton_event_func)ev_radius_access_accept); diff --git a/accel-pppd/extra/shaper_tbf.c b/accel-pppd/extra/shaper_tbf.c index 718a5c58..7c16475d 100644 --- a/accel-pppd/extra/shaper_tbf.c +++ b/accel-pppd/extra/shaper_tbf.c @@ -58,7 +58,7 @@ struct shaper_pd_t { struct list_head entry; struct ppp_t *ppp; - struct ppp_pd_t pd; + struct ap_private pd; int temp_down_speed; int temp_up_speed; int down_speed; @@ -427,7 +427,7 @@ out: static struct shaper_pd_t *find_pd(struct ppp_t *ppp, int create) { - struct ppp_pd_t *pd; + struct ap_private *pd; struct shaper_pd_t *spd; list_for_each_entry(pd, &ppp->pd_list, entry) { @@ -732,7 +732,7 @@ static void ev_radius_access_accept(struct ev_radius_t *ev) } if (down_speed > 0 && up_speed > 0) { - if (!install_shaper(ev->ppp->ifname, down_speed, down_burst, up_speed, up_burst)) { + if 
(!install_shaper(ev->ppp->ses.ifname, down_speed, down_burst, up_speed, up_burst)) { if (conf_verbose) log_ppp_info2("tbf: installed shaper %i/%i (Kbit)\n", down_speed, up_speed); } @@ -760,7 +760,7 @@ static void ev_radius_coa(struct ev_radius_t *ev) pd->up_speed = 0; if (conf_verbose) log_ppp_info2("tbf: removed shaper\n"); - remove_shaper(ev->ppp->ifname); + remove_shaper(ev->ppp->ses.ifname); } return; } @@ -769,13 +769,13 @@ static void ev_radius_coa(struct ev_radius_t *ev) pd->down_speed = pd->cur_tr->down_speed; pd->up_speed = pd->cur_tr->up_speed; - if (remove_shaper(ev->ppp->ifname)) { + if (remove_shaper(ev->ppp->ses.ifname)) { ev->res = -1; return; } if (pd->down_speed > 0 || pd->up_speed > 0) { - if (install_shaper(ev->ppp->ifname, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst)) { + if (install_shaper(ev->ppp->ses.ifname, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst)) { ev->res= -1; return; } else { @@ -827,7 +827,7 @@ static void ev_shaper(struct ev_shaper_t *ev) } if (pd->down_speed > 0 && pd->up_speed > 0) { - if (!install_shaper(ev->ppp->ifname, down_speed, down_burst, up_speed, up_burst)) { + if (!install_shaper(ev->ppp->ses.ifname, down_speed, down_burst, up_speed, up_burst)) { if (conf_verbose) log_ppp_info2("tbf: installed shaper %i/%i (Kbit)\n", down_speed, up_speed); } @@ -845,7 +845,7 @@ static void ev_ppp_pre_up(struct ppp_t *ppp) pd->temp_up_speed = temp_up_speed; pd->down_speed = temp_down_speed; pd->up_speed = temp_up_speed; - if (!install_shaper(ppp->ifname, temp_down_speed, 0, temp_up_speed, 0)) { + if (!install_shaper(ppp->ses.ifname, temp_down_speed, 0, temp_up_speed, 0)) { if (conf_verbose) log_ppp_info2("tbf: installed shaper %i/%i (Kbit)\n", temp_down_speed, temp_up_speed); } @@ -875,16 +875,16 @@ static void shaper_change_help(char * const *f, int f_cnt, void *cli) static void shaper_change(struct shaper_pd_t *pd) { if (pd->down_speed || 
pd->up_speed) - remove_shaper(pd->ppp->ifname); + remove_shaper(pd->ppp->ses.ifname); if (pd->temp_down_speed || pd->temp_up_speed) { pd->down_speed = pd->temp_down_speed; pd->up_speed = pd->temp_up_speed; - install_shaper(pd->ppp->ifname, pd->temp_down_speed, 0, pd->temp_up_speed, 0); + install_shaper(pd->ppp->ses.ifname, pd->temp_down_speed, 0, pd->temp_up_speed, 0); } else if (pd->cur_tr->down_speed || pd->cur_tr->up_speed) { pd->down_speed = pd->cur_tr->down_speed; pd->up_speed = pd->cur_tr->up_speed; - install_shaper(pd->ppp->ifname, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst); + install_shaper(pd->ppp->ses.ifname, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst); } } @@ -921,7 +921,7 @@ static int shaper_change_exec(const char *cmd, char * const *f, int f_cnt, void pthread_rwlock_rdlock(&shaper_lock); list_for_each_entry(pd, &shaper_list, entry) { - if (all || !strcmp(f[2], pd->ppp->ifname)) { + if (all || !strcmp(f[2], pd->ppp->ses.ifname)) { if (temp) { pd->temp_down_speed = down_speed; pd->temp_up_speed = up_speed; @@ -935,7 +935,7 @@ static int shaper_change_exec(const char *cmd, char * const *f, int f_cnt, void pd->cur_tr->up_speed = up_speed; pd->cur_tr->up_burst = up_burst; } - triton_context_call(pd->ppp->ctrl->ctx, (triton_event_func)shaper_change, pd); + triton_context_call(pd->ppp->ses.ctrl->ctx, (triton_event_func)shaper_change, pd); if (!all) { found = 1; break; @@ -958,12 +958,12 @@ static void shaper_restore_help(char * const *f, int f_cnt, void *cli) static void shaper_restore(struct shaper_pd_t *pd) { - remove_shaper(pd->ppp->ifname); + remove_shaper(pd->ppp->ses.ifname); if (pd->cur_tr) { pd->down_speed = pd->cur_tr->down_speed; pd->up_speed = pd->cur_tr->up_speed; - install_shaper(pd->ppp->ifname, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst); + install_shaper(pd->ppp->ses.ifname, pd->cur_tr->down_speed, 
pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst); } else { pd->down_speed = 0; pd->up_speed = 0; @@ -991,10 +991,10 @@ static int shaper_restore_exec(const char *cmd, char * const *f, int f_cnt, void list_for_each_entry(pd, &shaper_list, entry) { if (!pd->temp_down_speed) continue; - if (all || !strcmp(f[2], pd->ppp->ifname)) { + if (all || !strcmp(f[2], pd->ppp->ses.ifname)) { pd->temp_down_speed = 0; pd->temp_up_speed = 0; - triton_context_call(pd->ppp->ctrl->ctx, (triton_event_func)shaper_restore, pd); + triton_context_call(pd->ppp->ses.ctrl->ctx, (triton_event_func)shaper_restore, pd); if (!all) { found = 1; break; @@ -1056,13 +1056,13 @@ static void update_shaper_tr(struct shaper_pd_t *pd) if (pd->down_speed || pd->up_speed) { if (pd->cur_tr && pd->down_speed == pd->cur_tr->down_speed && pd->up_speed == pd->cur_tr->up_speed) return; - remove_shaper(pd->ppp->ifname); + remove_shaper(pd->ppp->ses.ifname); } if (pd->cur_tr && (pd->cur_tr->down_speed || pd->cur_tr->up_speed)) { pd->down_speed = pd->cur_tr->down_speed; pd->up_speed = pd->cur_tr->up_speed; - if (!install_shaper(pd->ppp->ifname, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst)) { + if (!install_shaper(pd->ppp->ses.ifname, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst)) { if (conf_verbose) log_ppp_info2("tbf: changed shaper %i/%i (Kbit)\n", pd->cur_tr->down_speed, pd->cur_tr->up_speed); } @@ -1082,7 +1082,7 @@ static void time_range_begin_timer(struct triton_timer_t *t) pthread_rwlock_rdlock(&shaper_lock); list_for_each_entry(pd, &shaper_list, entry) - triton_context_call(pd->ppp->ctrl->ctx, (triton_event_func)update_shaper_tr, pd); + triton_context_call(pd->ppp->ses.ctrl->ctx, (triton_event_func)update_shaper_tr, pd); pthread_rwlock_unlock(&shaper_lock); } @@ -1096,7 +1096,7 @@ static void time_range_end_timer(struct triton_timer_t *t) pthread_rwlock_rdlock(&shaper_lock); list_for_each_entry(pd, 
&shaper_list, entry) - triton_context_call(pd->ppp->ctrl->ctx, (triton_event_func)update_shaper_tr, pd); + triton_context_call(pd->ppp->ses.ctrl->ctx, (triton_event_func)update_shaper_tr, pd); pthread_rwlock_unlock(&shaper_lock); } @@ -1336,7 +1336,7 @@ static void init(void) triton_event_register_handler(EV_RADIUS_COA, (triton_event_func)ev_radius_coa); } #endif - triton_event_register_handler(EV_PPP_PRE_UP, (triton_event_func)ev_ppp_pre_up); + triton_event_register_handler(EV_SES_PRE_UP, (triton_event_func)ev_ppp_pre_up); triton_event_register_handler(EV_CTRL_FINISHED, (triton_event_func)ev_ctrl_finished); triton_event_register_handler(EV_SHAPER, (triton_event_func)ev_shaper); triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); diff --git a/accel-pppd/ifcfg.c b/accel-pppd/ifcfg.c new file mode 100644 index 00000000..0b4927b6 --- /dev/null +++ b/accel-pppd/ifcfg.c @@ -0,0 +1,217 @@ +#include <unistd.h> +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> +#include <errno.h> +#include <limits.h> +#include <arpa/inet.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <linux/route.h> +#include "linux_ppp.h" + +#include "triton.h" +#include "events.h" +#include "ppp.h" +#include "ipdb.h" +#include "log.h" +#include "backup.h" + +// from /usr/include/linux/ipv6.h +struct in6_ifreq { + struct in6_addr ifr6_addr; + __u32 ifr6_prefixlen; + int ifr6_ifindex; +}; + +static void devconf(struct ap_session *ses, const char *attr, const char *val) +{ + int fd; + char fname[PATH_MAX]; + + sprintf(fname, "/proc/sys/net/ipv6/conf/%s/%s", ses->ifname, attr); + fd = open(fname, O_WRONLY); + if (!fd) { + log_ppp_error("failed to open '%s': %s\n", fname, strerror(errno)); + return; + } + + write(fd, val, strlen(val)); + + close(fd); +} + +static void build_addr(struct ipv6db_addr_t *a, uint64_t intf_id, struct in6_addr *addr) +{ + memcpy(addr, &a->addr, sizeof(*addr)); + + if (a->prefix_len <= 
64) + *(uint64_t *)(addr->s6_addr + 8) = intf_id; + else + *(uint64_t *)(addr->s6_addr + 8) |= intf_id & ((1 << (128 - a->prefix_len)) - 1); +} + +void ap_session_ifup(struct ap_session *ses) +{ + struct ipv6db_addr_t *a; + struct ifreq ifr; + //struct rtentry rt; + struct in6_ifreq ifr6; + struct npioctl np; + struct sockaddr_in addr; + struct ppp_t *ppp; + + triton_event_fire(EV_SES_ACCT_START, ses); + if (ses->stop_time) + return; + + triton_event_fire(EV_SES_PRE_UP, ses); + if (ses->stop_time) + return; + + memset(&ifr, 0, sizeof(ifr)); + strcpy(ifr.ifr_name, ses->ifname); + +#ifdef USE_BACKUP + if (!ses->backup || !ses->backup->internal) { +#endif + if (ses->ipv4) { + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = ses->ipv4->addr; + memcpy(&ifr.ifr_addr, &addr, sizeof(addr)); + + if (ioctl(sock_fd, SIOCSIFADDR, &ifr)) + log_ppp_error("failed to set IPv4 address: %s\n", strerror(errno)); + + /*if (ses->ctrl->type == CTRL_TYPE_IPOE) { + addr.sin_addr.s_addr = 0xffffffff; + memcpy(&ifr.ifr_netmask, &addr, sizeof(addr)); + if (ioctl(sock_fd, SIOCSIFNETMASK, &ifr)) + log_ppp_error("failed to set IPv4 nask: %s\n", strerror(errno)); + }*/ + + addr.sin_addr.s_addr = ses->ipv4->peer_addr; + + /*if (ses->ctrl->type == CTRL_TYPE_IPOE) { + memset(&rt, 0, sizeof(rt)); + memcpy(&rt.rt_dst, &addr, sizeof(addr)); + rt.rt_flags = RTF_HOST | RTF_UP; + rt.rt_metric = 1; + rt.rt_dev = ifr.ifr_name; + if (ioctl(sock_fd, SIOCADDRT, &rt, sizeof(rt))) + log_ppp_error("failed to add route: %s\n", strerror(errno)); + } else*/ { + memcpy(&ifr.ifr_dstaddr, &addr, sizeof(addr)); + + if (ioctl(sock_fd, SIOCSIFDSTADDR, &ifr)) + log_ppp_error("failed to set peer IPv4 address: %s\n", strerror(errno)); + } + } + + if (ses->ipv6) { + devconf(ses, "accept_ra", "0"); + devconf(ses, "autoconf", "0"); + devconf(ses, "forwarding", "1"); + + memset(&ifr6, 0, sizeof(ifr6)); + + if (ses->ctrl->type != CTRL_TYPE_IPOE) { + ifr6.ifr6_addr.s6_addr32[0] = 
htons(0xfe80); + *(uint64_t *)(ifr6.ifr6_addr.s6_addr + 8) = ses->ipv6->intf_id; + ifr6.ifr6_prefixlen = 64; + ifr6.ifr6_ifindex = ses->ifindex; + + if (ioctl(sock6_fd, SIOCSIFADDR, &ifr6)) + log_ppp_error("faild to set LL IPv6 address: %s\n", strerror(errno)); + } + + list_for_each_entry(a, &ses->ipv6->addr_list, entry) { + if (a->prefix_len == 128) + continue; + + build_addr(a, ses->ipv6->intf_id, &ifr6.ifr6_addr); + ifr6.ifr6_prefixlen = a->prefix_len; + + if (ioctl(sock6_fd, SIOCSIFADDR, &ifr6)) + log_ppp_error("failed to add IPv6 address: %s\n", strerror(errno)); + } + } + + if (ioctl(sock_fd, SIOCGIFFLAGS, &ifr)) + log_ppp_error("failed to get interface flags: %s\n", strerror(errno)); + + ifr.ifr_flags |= IFF_UP; + + if (ioctl(sock_fd, SIOCSIFFLAGS, &ifr)) + log_ppp_error("failed to set interface flags: %s\n", strerror(errno)); + + if (ses->ctrl->type != CTRL_TYPE_IPOE) { + ppp = container_of(ses, typeof(*ppp), ses); + if (ses->ipv4) { + np.protocol = PPP_IP; + np.mode = NPMODE_PASS; + + if (ioctl(ppp->unit_fd, PPPIOCSNPMODE, &np)) + log_ppp_error("failed to set NP (IPv4) mode: %s\n", strerror(errno)); + } + + if (ses->ipv6) { + np.protocol = PPP_IPV6; + np.mode = NPMODE_PASS; + + if (ioctl(ppp->unit_fd, PPPIOCSNPMODE, &np)) + log_ppp_error("failed to set NP (IPv6) mode: %s\n", strerror(errno)); + } + } +#ifdef USE_BACKUP + } +#endif + + ses->ctrl->started(ses); + + triton_event_fire(EV_SES_STARTED, ses); +} + +void __export ap_session_ifdown(struct ap_session *ses) +{ + struct ifreq ifr; + struct sockaddr_in addr; + struct in6_ifreq ifr6; + struct ipv6db_addr_t *a; + + memset(&ifr, 0, sizeof(ifr)); + strcpy(ifr.ifr_name, ses->ifname); + + ioctl(sock_fd, SIOCSIFFLAGS, &ifr); + + if (ses->ipv4) { + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + memcpy(&ifr.ifr_addr,&addr,sizeof(addr)); + ioctl(sock_fd, SIOCSIFADDR, &ifr); + } + + if (ses->ipv6) { + memset(&ifr6, 0, sizeof(ifr6)); + ifr6.ifr6_addr.s6_addr32[0] = htons(0xfe80); + *(uint64_t 
*)(ifr6.ifr6_addr.s6_addr + 8) = ses->ipv6->intf_id; + ifr6.ifr6_prefixlen = 64; + ifr6.ifr6_ifindex = ses->ifindex; + + ioctl(sock6_fd, SIOCDIFADDR, &ifr6); + + list_for_each_entry(a, &ses->ipv6->addr_list, entry) { + if (a->prefix_len == 128) + continue; + + build_addr(a, ses->ipv6->intf_id, &ifr6.ifr6_addr); + ifr6.ifr6_prefixlen = a->prefix_len; + + ioctl(sock6_fd, SIOCDIFADDR, &ifr6); + } + } +} + diff --git a/accel-pppd/include/ap_session.h b/accel-pppd/include/ap_session.h new file mode 100644 index 00000000..ab6f699c --- /dev/null +++ b/accel-pppd/include/ap_session.h @@ -0,0 +1,114 @@ +#ifndef __AP_SESSION_H__ +#define __AP_SESSION_H__ + +#define AP_SESSIONID_LEN 16 +#define AP_IFNAME_LEN 16 + +#define AP_STATE_STARTING 1 +#define AP_STATE_ACTIVE 2 +#define AP_STATE_FINISHING 3 +#define AP_STATE_RESTORE 4 + +#define TERM_USER_REQUEST 1 +#define TERM_SESSION_TIMEOUT 2 +#define TERM_ADMIN_RESET 3 +#define TERM_USER_ERROR 4 +#define TERM_NAS_ERROR 5 +#define TERM_NAS_REQUEST 6 +#define TERM_NAS_REBOOT 7 +#define TERM_AUTH_ERROR 8 +#define TERM_LOST_CARRIER 9 + +#define CTRL_TYPE_PPTP 1 +#define CTRL_TYPE_L2TP 2 +#define CTRL_TYPE_PPPOE 3 +#define CTRL_TYPE_IPOE 4 + +#define MPPE_UNSET -2 +#define MPPE_ALLOW -1 +#define MPPE_DENY 0 +#define MPPE_PREFER 1 +#define MPPE_REQUIRE 2 + +struct ap_session; +struct backup_data; + +struct ap_ctrl +{ + struct triton_context_t *ctx; + int type; + const char *name; + int max_mtu; + int mppe; + char *calling_station_id; + char *called_station_id; + void (*started)(struct ap_session*); + void (*finished)(struct ap_session *); + void (*terminate)(struct ap_session *, int hard); +}; + +struct ap_private +{ + struct list_head entry; + void *key; +}; + +struct ap_session +{ + struct list_head entry; + + int state; + char *chan_name; + char ifname[AP_IFNAME_LEN]; + int unit_idx; + int ifindex; + char sessionid[AP_SESSIONID_LEN+1]; + time_t start_time; + time_t stop_time; + char *username; + struct ipv4db_item_t *ipv4; + struct 
ipv6db_item_t *ipv6; + char *ipv4_pool_name; + char *ipv6_pool_name; + + struct ap_ctrl *ctrl; + +#ifdef USE_BACKUP + struct backup_data *backup; +#endif + + int terminating:1; + int terminated:1; + int terminate_cause; + + struct list_head pd_list; +}; + +struct ap_session_stat +{ + unsigned int active; + unsigned int starting; + unsigned int finishing; +}; + + +extern pthread_rwlock_t ses_lock; +extern struct list_head ses_list; +extern int ap_shutdown; +extern int sock_fd; // internet socket for ioctls +extern int sock6_fd; // internet socket for ioctls +extern int urandom_fd; +extern struct ap_session_stat ap_session_stat; + +void ap_session_init(struct ap_session *ses); +int ap_session_starting(struct ap_session *ses); +void ap_session_finished(struct ap_session *ses); +void ap_session_terminate(struct ap_session *ses, int cause, int hard); +void ap_session_activate(struct ap_session *ses); + +void ap_session_ifup(struct ap_session *ses); +void ap_session_ifdown(struct ap_session *ses); + +void ap_shutdown_soft(void (*cb)(void)); + +#endif diff --git a/accel-pppd/include/ap_session_backup.h b/accel-pppd/include/ap_session_backup.h new file mode 100644 index 00000000..e6382ff5 --- /dev/null +++ b/accel-pppd/include/ap_session_backup.h @@ -0,0 +1,23 @@ +#ifndef __AP_SESSION_BACKUP_H +#define __AP_SESSION_BACKUP_H + +#define SES_TAG_USERNAME 1 +#define SES_TAG_SESSIONID 2 +#define SES_TAG_START_TIME 3 +#define SES_TAG_IPV4_ADDR 4 +#define SES_TAG_IPV4_PEER_ADDR 5 +#define SES_TAG_IPV6_INTFID 6 +#define SES_TAG_IPV6_PEER_INTFID 7 +#define SES_TAG_IPV6_ADDR 8 +#define SES_TAG_IFINDEX 9 +#define SES_TAG_IFNAME 10 + + +struct ses_tag_ipv6 +{ + struct in6_addr addr; + uint8_t prefix_len; +} __attribute__((packed)); + +#endif + diff --git a/accel-pppd/include/backup.h b/accel-pppd/include/backup.h new file mode 120000 index 00000000..0271d056 --- /dev/null +++ b/accel-pppd/include/backup.h @@ -0,0 +1 @@ +../backup/backup.h
\ No newline at end of file diff --git a/accel-pppd/include/events.h b/accel-pppd/include/events.h index 5d6fea7e..ca04d757 100644 --- a/accel-pppd/include/events.h +++ b/accel-pppd/include/events.h @@ -4,19 +4,19 @@ #include <stdint.h> #include <netinet/in.h> -#define EV_PPP_STARTING 1 -#define EV_PPP_STARTED 2 -#define EV_PPP_FINISHING 3 -#define EV_PPP_FINISHED 4 -#define EV_PPP_AUTHORIZED 5 +#define EV_SES_STARTING 1 +#define EV_SES_STARTED 2 +#define EV_SES_FINISHING 3 +#define EV_SES_FINISHED 4 +#define EV_SES_AUTHORIZED 5 #define EV_CTRL_STARTING 6 #define EV_CTRL_STARTED 7 #define EV_CTRL_FINISHED 8 -#define EV_PPP_PRE_UP 9 -#define EV_PPP_ACCT_START 10 +#define EV_SES_PRE_UP 9 +#define EV_SES_ACCT_START 10 #define EV_CONFIG_RELOAD 11 -#define EV_PPP_AUTH_FAILED 12 -#define EV_PPP_PRE_FINISHED 13 +#define EV_SES_AUTH_FAILED 12 +#define EV_SES_PRE_FINISHED 13 #define EV_IP_CHANGED 100 #define EV_SHAPER 101 #define EV_MPPE_KEYS 102 @@ -24,11 +24,12 @@ #define EV_RADIUS_ACCESS_ACCEPT 200 #define EV_RADIUS_COA 201 +struct ap_session; struct ppp_t; struct rad_packet_t; struct ev_radius_t { - struct ppp_t *ppp; + struct ap_session *ses; struct rad_packet_t *request; struct rad_packet_t *reply; int res; @@ -45,13 +46,13 @@ struct ev_mppe_keys_t struct ev_shaper_t { - struct ppp_t *ppp; + struct ap_session *ses; const char *val; }; struct ev_dns_t { - struct ppp_t *ppp; + struct ap_session *ses; in_addr_t dns1; in_addr_t dns2; }; diff --git a/accel-pppd/include/genl.h b/accel-pppd/include/genl.h new file mode 120000 index 00000000..d790809f --- /dev/null +++ b/accel-pppd/include/genl.h @@ -0,0 +1 @@ +../libnetlink/genl.h
\ No newline at end of file diff --git a/accel-pppd/include/iplink.h b/accel-pppd/include/iplink.h new file mode 120000 index 00000000..7f0f09d4 --- /dev/null +++ b/accel-pppd/include/iplink.h @@ -0,0 +1 @@ +../libnetlink/iplink.h
\ No newline at end of file diff --git a/accel-pppd/include/libnetlink.h b/accel-pppd/include/libnetlink.h new file mode 120000 index 00000000..d494ddb4 --- /dev/null +++ b/accel-pppd/include/libnetlink.h @@ -0,0 +1 @@ +../libnetlink/libnetlink.h
\ No newline at end of file diff --git a/accel-pppd/ipdb.c b/accel-pppd/ipdb.c index f87d4788..7c3ebb82 100644 --- a/accel-pppd/ipdb.c +++ b/accel-pppd/ipdb.c @@ -5,7 +5,7 @@ static LIST_HEAD(ipdb_handlers); -struct ipv4db_item_t __export *ipdb_get_ipv4(struct ppp_t *ppp) +struct ipv4db_item_t __export *ipdb_get_ipv4(struct ap_session *ses) { struct ipdb_t *ipdb; struct ipv4db_item_t *it; @@ -13,7 +13,7 @@ struct ipv4db_item_t __export *ipdb_get_ipv4(struct ppp_t *ppp) list_for_each_entry(ipdb, &ipdb_handlers, entry) { if (!ipdb->get_ipv4) continue; - it = ipdb->get_ipv4(ppp); + it = ipdb->get_ipv4(ses); if (it) return it; } @@ -21,13 +21,13 @@ struct ipv4db_item_t __export *ipdb_get_ipv4(struct ppp_t *ppp) return NULL; } -void __export ipdb_put_ipv4(struct ppp_t *ppp, struct ipv4db_item_t *it) +void __export ipdb_put_ipv4(struct ap_session *ses, struct ipv4db_item_t *it) { if (it->owner->put_ipv4) - it->owner->put_ipv4(ppp, it); + it->owner->put_ipv4(ses, it); } -struct ipv6db_item_t __export *ipdb_get_ipv6(struct ppp_t *ppp) +struct ipv6db_item_t __export *ipdb_get_ipv6(struct ap_session *ses) { struct ipdb_t *ipdb; struct ipv6db_item_t *it; @@ -35,7 +35,7 @@ struct ipv6db_item_t __export *ipdb_get_ipv6(struct ppp_t *ppp) list_for_each_entry(ipdb, &ipdb_handlers, entry) { if (!ipdb->get_ipv6) continue; - it = ipdb->get_ipv6(ppp); + it = ipdb->get_ipv6(ses); if (it) return it; } @@ -43,13 +43,13 @@ struct ipv6db_item_t __export *ipdb_get_ipv6(struct ppp_t *ppp) return NULL; } -void __export ipdb_put_ipv6(struct ppp_t *ppp, struct ipv6db_item_t *it) +void __export ipdb_put_ipv6(struct ap_session *ses, struct ipv6db_item_t *it) { if (it->owner->put_ipv6) - it->owner->put_ipv6(ppp, it); + it->owner->put_ipv6(ses, it); } -struct ipv6db_prefix_t __export *ipdb_get_ipv6_prefix(struct ppp_t *ppp) +struct ipv6db_prefix_t __export *ipdb_get_ipv6_prefix(struct ap_session *ses) { struct ipdb_t *ipdb; struct ipv6db_prefix_t *it; @@ -57,7 +57,7 @@ struct ipv6db_prefix_t 
__export *ipdb_get_ipv6_prefix(struct ppp_t *ppp) list_for_each_entry(ipdb, &ipdb_handlers, entry) { if (!ipdb->get_ipv6_prefix) continue; - it = ipdb->get_ipv6_prefix(ppp); + it = ipdb->get_ipv6_prefix(ses); if (it) return it; } @@ -65,10 +65,10 @@ struct ipv6db_prefix_t __export *ipdb_get_ipv6_prefix(struct ppp_t *ppp) return NULL; } -void __export ipdb_put_ipv6_prefix(struct ppp_t *ppp, struct ipv6db_prefix_t *it) +void __export ipdb_put_ipv6_prefix(struct ap_session *ses, struct ipv6db_prefix_t *it) { if (it->owner->put_ipv6_prefix) - it->owner->put_ipv6_prefix(ppp, it); + it->owner->put_ipv6_prefix(ses, it); } diff --git a/accel-pppd/ipdb.h b/accel-pppd/ipdb.h index 2781bd08..69cb12f3 100644 --- a/accel-pppd/ipdb.h +++ b/accel-pppd/ipdb.h @@ -11,6 +11,7 @@ struct ipv4db_item_t struct ipdb_t *owner; in_addr_t addr; in_addr_t peer_addr; + int mask; }; struct ipv6db_addr_t @@ -40,24 +41,24 @@ struct ipdb_t { struct list_head entry; - struct ipv4db_item_t *(*get_ipv4)(struct ppp_t *ppp); - void (*put_ipv4)(struct ppp_t *ppp, struct ipv4db_item_t *); + struct ipv4db_item_t *(*get_ipv4)(struct ap_session *ses); + void (*put_ipv4)(struct ap_session *ses, struct ipv4db_item_t *); - struct ipv6db_item_t *(*get_ipv6)(struct ppp_t *ppp); - void (*put_ipv6)(struct ppp_t *ppp, struct ipv6db_item_t *); + struct ipv6db_item_t *(*get_ipv6)(struct ap_session *ses); + void (*put_ipv6)(struct ap_session *ses, struct ipv6db_item_t *); - struct ipv6db_prefix_t *(*get_ipv6_prefix)(struct ppp_t *ppp); - void (*put_ipv6_prefix)(struct ppp_t *ppp, struct ipv6db_prefix_t *); + struct ipv6db_prefix_t *(*get_ipv6_prefix)(struct ap_session *ses); + void (*put_ipv6_prefix)(struct ap_session *ses, struct ipv6db_prefix_t *); }; -struct ipv4db_item_t *ipdb_get_ipv4(struct ppp_t *ppp); -void ipdb_put_ipv4(struct ppp_t *ppp, struct ipv4db_item_t *); +struct ipv4db_item_t *ipdb_get_ipv4(struct ap_session *ses); +void ipdb_put_ipv4(struct ap_session *ses, struct ipv4db_item_t *); -struct 
ipv6db_item_t *ipdb_get_ipv6(struct ppp_t *ppp); -void ipdb_put_ipv6(struct ppp_t *ppp, struct ipv6db_item_t *); +struct ipv6db_item_t *ipdb_get_ipv6(struct ap_session *ses); +void ipdb_put_ipv6(struct ap_session *ses, struct ipv6db_item_t *); -struct ipv6db_prefix_t __export *ipdb_get_ipv6_prefix(struct ppp_t *ppp); -void __export ipdb_put_ipv6_prefix(struct ppp_t *ppp, struct ipv6db_prefix_t *it); +struct ipv6db_prefix_t __export *ipdb_get_ipv6_prefix(struct ap_session *ses); +void __export ipdb_put_ipv6_prefix(struct ap_session *ses, struct ipv6db_prefix_t *it); void ipdb_register(struct ipdb_t *); diff --git a/accel-pppd/ipv6/dhcpv6.c b/accel-pppd/ipv6/dhcpv6.c index ddc977ba..93888245 100644 --- a/accel-pppd/ipv6/dhcpv6.c +++ b/accel-pppd/ipv6/dhcpv6.c @@ -42,7 +42,7 @@ static int conf_dnssl_size; struct dhcpv6_pd { - struct ppp_pd_t pd; + struct ap_private pd; struct dhcpv6_opt_clientid *clientid; uint32_t addr_iaid; uint32_t dp_iaid; @@ -56,22 +56,22 @@ static struct triton_context_t dhcpv6_ctx; static uint8_t *buf; static void *pd_key; -static void ev_ppp_started(struct ppp_t *ppp) +static void ev_ppp_started(struct ap_session *ses) { struct ipv6_mreq mreq; struct dhcpv6_pd *pd; - if (!ppp->ipv6) + if (!ses->ipv6) return; pd = _malloc(sizeof(*pd)); memset(pd, 0, sizeof(*pd)); pd->pd.key = &pd_key; - list_add_tail(&pd->pd.entry, &ppp->pd_list); + list_add_tail(&pd->pd.entry, &ses->pd_list); memset(&mreq, 0, sizeof(mreq)); - mreq.ipv6mr_interface = ppp->ifindex; + mreq.ipv6mr_interface = ses->ifindex; mreq.ipv6mr_multiaddr.s6_addr32[0] = htonl(0xff020000); mreq.ipv6mr_multiaddr.s6_addr32[3] = htonl(0x010002); @@ -81,11 +81,11 @@ static void ev_ppp_started(struct ppp_t *ppp) } } -static struct dhcpv6_pd *find_pd(struct ppp_t *ppp) +static struct dhcpv6_pd *find_pd(struct ap_session *ses) { - struct ppp_pd_t *pd; + struct ap_private *pd; - list_for_each_entry(pd, &ppp->pd_list, entry) { + list_for_each_entry(pd, &ses->pd_list, entry) { if (pd->key == &pd_key) 
return container_of(pd, struct dhcpv6_pd, pd); } @@ -93,9 +93,9 @@ static struct dhcpv6_pd *find_pd(struct ppp_t *ppp) return NULL; } -static void ev_ppp_finished(struct ppp_t *ppp) +static void ev_ppp_finished(struct ap_session *ses) { - struct dhcpv6_pd *pd = find_pd(ppp); + struct dhcpv6_pd *pd = find_pd(ses); if (!pd) return; @@ -106,7 +106,7 @@ static void ev_ppp_finished(struct ppp_t *ppp) _free(pd->clientid); if (pd->ipv6_dp) - ipdb_put_ipv6_prefix(ppp, pd->ipv6_dp); + ipdb_put_ipv6_prefix(ses, pd->ipv6_dp); _free(pd); } @@ -119,8 +119,8 @@ static void dhcpv6_send(struct dhcpv6_packet *reply) addr.sin6_family = AF_INET6; addr.sin6_port = htons(DHCPV6_CLIENT_PORT); addr.sin6_addr.s6_addr32[0] = htons(0xfe80); - *(uint64_t *)(addr.sin6_addr.s6_addr + 8) = reply->ppp->ipv6->peer_intf_id; - addr.sin6_scope_id = reply->ppp->ifindex; + *(uint64_t *)(addr.sin6_addr.s6_addr + 8) = reply->ses->ipv6->peer_intf_id; + addr.sin6_scope_id = reply->ses->ifindex; sendto(dhcpv6_hnd.fd, reply->hdr, reply->endptr - (void *)reply->hdr, 0, (struct sockaddr *)&addr, sizeof(addr)); } @@ -135,7 +135,7 @@ static void build_addr(struct ipv6db_addr_t *a, uint64_t intf_id, struct in6_add *(uint64_t *)(addr->s6_addr + 8) |= intf_id & ((1 << (128 - a->prefix_len)) - 1); } -static void insert_dp_routes(struct ppp_t *ppp, struct dhcpv6_pd *pd) +static void insert_dp_routes(struct ap_session *ses, struct dhcpv6_pd *pd) { struct ipv6db_addr_t *a; struct ipv6db_addr_t *p; @@ -145,7 +145,7 @@ static void insert_dp_routes(struct ppp_t *ppp, struct dhcpv6_pd *pd) int err; memset(&rt6, 0, sizeof(rt6)); - rt6.rtmsg_ifindex = ppp->ifindex; + rt6.rtmsg_ifindex = ses->ifindex; rt6.rtmsg_flags = RTF_UP; list_for_each_entry(p, &pd->ipv6_dp->prefix_list, entry) { @@ -155,8 +155,8 @@ static void insert_dp_routes(struct ppp_t *ppp, struct dhcpv6_pd *pd) if (conf_route_via_gw) { rt6.rtmsg_flags |= RTF_GATEWAY; - list_for_each_entry(a, &ppp->ipv6->addr_list, entry) { - build_addr(a, ppp->ipv6->peer_intf_id, 
&rt6.rtmsg_gateway); + list_for_each_entry(a, &ses->ipv6->addr_list, entry) { + build_addr(a, ses->ipv6->peer_intf_id, &rt6.rtmsg_gateway); if (ioctl(sock6_fd, SIOCADDRT, &rt6)) { err = errno; inet_ntop(AF_INET6, &p->addr, str1, sizeof(str1)); @@ -252,7 +252,7 @@ static void dhcpv6_send_reply(struct dhcpv6_packet *req, struct dhcpv6_pd *pd, i if (req->hdr->type == D6_RENEW && pd->addr_iaid != ia_na->iaid) { insert_status(reply, opt1, D6_STATUS_NoBinding); - } else if (list_empty(&req->ppp->ipv6->addr_list) || f) { + } else if (list_empty(&req->ses->ipv6->addr_list) || f) { insert_status(reply, opt1, D6_STATUS_NoAddrsAvail); } else { @@ -261,11 +261,11 @@ static void dhcpv6_send_reply(struct dhcpv6_packet *req, struct dhcpv6_pd *pd, i f = 1; - list_for_each_entry(a, &req->ppp->ipv6->addr_list, entry) { + list_for_each_entry(a, &req->ses->ipv6->addr_list, entry) { opt2 = dhcpv6_nested_option_alloc(reply, opt1, D6_OPTION_IAADDR, sizeof(*ia_addr) - sizeof(struct dhcpv6_opt_hdr)); ia_addr = (struct dhcpv6_opt_ia_addr *)opt2->hdr; - build_addr(a, req->ppp->ipv6->peer_intf_id, &ia_addr->addr); + build_addr(a, req->ses->ipv6->peer_intf_id, &ia_addr->addr); ia_addr->pref_lifetime = htonl(conf_pref_lifetime); ia_addr->valid_lifetime = htonl(conf_valid_lifetime); @@ -279,8 +279,8 @@ static void dhcpv6_send_reply(struct dhcpv6_packet *req, struct dhcpv6_pd *pd, i continue; f1 = 0; - list_for_each_entry(a, &req->ppp->ipv6->addr_list, entry) { - build_addr(a, req->ppp->ipv6->peer_intf_id, &addr); + list_for_each_entry(a, &req->ses->ipv6->addr_list, entry) { + build_addr(a, req->ses->ipv6->peer_intf_id, &addr); if (memcmp(&addr, &ia_addr->addr, sizeof(addr))) continue; f1 = 1; @@ -316,7 +316,7 @@ static void dhcpv6_send_reply(struct dhcpv6_packet *req, struct dhcpv6_pd *pd, i ia_na->T2 = conf_pref_lifetime == -1 ? 
-1 : htonl((conf_pref_lifetime * 4) / 5); if (!pd->ipv6_dp) - pd->ipv6_dp = ipdb_get_ipv6_prefix(req->ppp); + pd->ipv6_dp = ipdb_get_ipv6_prefix(req->ses); if ((req->hdr->type == D6_RENEW) && pd->dp_iaid != ia_na->iaid) { insert_status(reply, opt1, D6_STATUS_NoBinding); @@ -327,7 +327,7 @@ static void dhcpv6_send_reply(struct dhcpv6_packet *req, struct dhcpv6_pd *pd, i if (req->hdr->type == D6_REQUEST || req->rapid_commit) { pd->dp_iaid = ia_na->iaid; if (!pd->dp_active) - insert_dp_routes(req->ppp, pd); + insert_dp_routes(req->ses, pd); } f2 = 1; @@ -447,8 +447,8 @@ static void dhcpv6_send_reply2(struct dhcpv6_packet *req, struct dhcpv6_pd *pd, f1 = 0; if (!f) { - list_for_each_entry(a, &req->ppp->ipv6->addr_list, entry) { - build_addr(a, req->ppp->ipv6->peer_intf_id, &addr); + list_for_each_entry(a, &req->ses->ipv6->addr_list, entry) { + build_addr(a, req->ses->ipv6->peer_intf_id, &addr); if (memcmp(&addr, &ia_addr->addr, sizeof(addr))) continue; f1 = 1; @@ -489,7 +489,7 @@ static void dhcpv6_send_reply2(struct dhcpv6_packet *req, struct dhcpv6_pd *pd, ia_na->T2 = conf_pref_lifetime == -1 ? 
-1 : htonl((conf_pref_lifetime * 4) / 5); if (!pd->ipv6_dp) - pd->ipv6_dp = ipdb_get_ipv6_prefix(req->ppp); + pd->ipv6_dp = ipdb_get_ipv6_prefix(req->ses); f3 = 0; @@ -558,7 +558,7 @@ static void dhcpv6_send_reply2(struct dhcpv6_packet *req, struct dhcpv6_pd *pd, static void dhcpv6_recv_solicit(struct dhcpv6_packet *req) { - struct dhcpv6_pd *pd = find_pd(req->ppp); + struct dhcpv6_pd *pd = find_pd(req->ses); if (!pd) return; @@ -590,7 +590,7 @@ static void dhcpv6_recv_solicit(struct dhcpv6_packet *req) static void dhcpv6_recv_request(struct dhcpv6_packet *req) { - struct dhcpv6_pd *pd = find_pd(req->ppp); + struct dhcpv6_pd *pd = find_pd(req->ses); if (!pd) return; @@ -618,7 +618,7 @@ static void dhcpv6_recv_request(struct dhcpv6_packet *req) static void dhcpv6_recv_renew(struct dhcpv6_packet *req) { - struct dhcpv6_pd *pd = find_pd(req->ppp); + struct dhcpv6_pd *pd = find_pd(req->ses); if (!pd) return; @@ -655,7 +655,7 @@ static void dhcpv6_recv_renew(struct dhcpv6_packet *req) static void dhcpv6_recv_information_request(struct dhcpv6_packet *req) { - struct dhcpv6_pd *pd = find_pd(req->ppp); + struct dhcpv6_pd *pd = find_pd(req->ses); if (req->rapid_commit) { log_ppp_error("dhcpv6: unexpected Rapid-Commit option\n"); @@ -669,7 +669,7 @@ static void dhcpv6_recv_information_request(struct dhcpv6_packet *req) static void dhcpv6_recv_rebind(struct dhcpv6_packet *req) { - struct dhcpv6_pd *pd = find_pd(req->ppp); + struct dhcpv6_pd *pd = find_pd(req->ses); if (!pd) return; @@ -747,7 +747,7 @@ static int dhcpv6_read(struct triton_md_handler_t *h) struct sockaddr_in6 addr; socklen_t len = sizeof(addr); struct dhcpv6_packet *pkt; - struct ppp_t *ppp; + struct ap_session *ses; while (1) { n = recvfrom(h->fd, buf, BUF_SIZE, 0, &addr, &len); @@ -768,26 +768,26 @@ static int dhcpv6_read(struct triton_md_handler_t *h) continue; } - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { - if (ppp->state != PPP_STATE_ACTIVE) + 
pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + if (ses->state != AP_STATE_ACTIVE) continue; - if (!ppp->ipv6) + if (!ses->ipv6) continue; - if (ppp->ifindex != addr.sin6_scope_id) + if (ses->ifindex != addr.sin6_scope_id) continue; - if (ppp->ipv6->peer_intf_id != *(uint64_t *)(addr.sin6_addr.s6_addr + 8)) + if (ses->ipv6->peer_intf_id != *(uint64_t *)(addr.sin6_addr.s6_addr + 8)) continue; - pkt->ppp = ppp; + pkt->ses = ses; - triton_context_call(ppp->ctrl->ctx, (triton_event_func)dhcpv6_recv_packet, pkt); + triton_context_call(ses->ctrl->ctx, (triton_event_func)dhcpv6_recv_packet, pkt); break; } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); } return 0; @@ -991,8 +991,8 @@ static void init(void) triton_context_wakeup(&dhcpv6_ctx); triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); - triton_event_register_handler(EV_PPP_STARTED, (triton_event_func)ev_ppp_started); - triton_event_register_handler(EV_PPP_FINISHED, (triton_event_func)ev_ppp_finished); + triton_event_register_handler(EV_SES_STARTED, (triton_event_func)ev_ppp_started); + triton_event_register_handler(EV_SES_FINISHED, (triton_event_func)ev_ppp_finished); } DEFINE_INIT(10, init); diff --git a/accel-pppd/ipv6/dhcpv6.h b/accel-pppd/ipv6/dhcpv6.h index d1c0267c..4afb5fa6 100644 --- a/accel-pppd/ipv6/dhcpv6.h +++ b/accel-pppd/ipv6/dhcpv6.h @@ -167,7 +167,7 @@ struct dhcpv6_option struct ppp_t; struct dhcpv6_packet { - struct ppp_t *ppp; + struct ap_session *ses; struct dhcpv6_msg_hdr *hdr; struct dhcpv6_opt_clientid *clientid; diff --git a/accel-pppd/ipv6/dhcpv6_packet.c b/accel-pppd/ipv6/dhcpv6_packet.c index 026b6afd..71184911 100644 --- a/accel-pppd/ipv6/dhcpv6_packet.c +++ b/accel-pppd/ipv6/dhcpv6_packet.c @@ -212,7 +212,7 @@ struct dhcpv6_packet *dhcpv6_packet_alloc_reply(struct dhcpv6_packet *req, int t memset(pkt, 0, sizeof(*pkt)); INIT_LIST_HEAD(&pkt->opt_list); - pkt->ppp = req->ppp; + pkt->ses = req->ses; 
pkt->hdr = _malloc(BUF_SIZE); if (!pkt->hdr) { diff --git a/accel-pppd/ipv6/nd.c b/accel-pppd/ipv6/nd.c index e6040a07..79a228cb 100644 --- a/accel-pppd/ipv6/nd.c +++ b/accel-pppd/ipv6/nd.c @@ -79,8 +79,8 @@ struct nd_opt_dnssl_info_local struct ipv6_nd_handler_t { - struct ppp_t *ppp; - struct ppp_pd_t pd; + struct ap_session *ses; + struct ap_private pd; struct triton_md_handler_t hnd; struct triton_timer_t timer; int ra_sent; @@ -120,7 +120,7 @@ static void ipv6_nd_send_ra(struct ipv6_nd_handler_t *h, struct sockaddr_in6 *ad adv->nd_ra_retransmit = htonl(conf_AdvRetransTimer); pinfo = (struct nd_opt_prefix_info *)(adv + 1); - list_for_each_entry(a, &h->ppp->ipv6->addr_list, entry) { + list_for_each_entry(a, &h->ses->ipv6->addr_list, entry) { if (a->prefix_len > 64) continue; @@ -136,7 +136,7 @@ static void ipv6_nd_send_ra(struct ipv6_nd_handler_t *h, struct sockaddr_in6 *ad } /*rinfo = (struct nd_opt_route_info_local *)pinfo; - list_for_each_entry(a, &h->ppp->ipv6->route_list, entry) { + list_for_each_entry(a, &h->ses->ipv6->route_list, entry) { memset(rinfo, 0, sizeof(*rinfo)); rinfo->nd_opt_ri_type = ND_OPT_ROUTE_INFORMATION; rinfo->nd_opt_ri_len = 3; @@ -186,7 +186,7 @@ static void send_ra_timer(struct triton_timer_t *t) addr.sin6_family = AF_INET6; addr.sin6_addr.s6_addr32[0] = htonl(0xff020000); addr.sin6_addr.s6_addr32[3] = htonl(0x1); - addr.sin6_scope_id = h->ppp->ifindex; + addr.sin6_scope_id = h->ses->ifindex; if (h->ra_sent++ == conf_init_ra) { h->timer.period = conf_MaxRtrAdvInterval * 1000; @@ -233,7 +233,7 @@ static int ipv6_nd_read(struct triton_md_handler_t *_h) continue; } - /*if (*(uint64_t *)(addr.sin6_addr.s6_addr + 8) != *(uint64_t *)(h->ppp->ipv6_addr.s6_addr + 8)) { + /*if (*(uint64_t *)(addr.sin6_addr.s6_addr + 8) != *(uint64_t *)(h->ses->ipv6_addr.s6_addr + 8)) { log_ppp_warn("ipv6_nd: received icmp packet from unknown address\n"); continue; }*/ @@ -246,7 +246,7 @@ static int ipv6_nd_read(struct triton_md_handler_t *_h) return 0; } 
-static int ipv6_nd_start(struct ppp_t *ppp) +static int ipv6_nd_start(struct ap_session *ses) { int sock; struct icmp6_filter filter; @@ -267,8 +267,8 @@ static int ipv6_nd_start(struct ppp_t *ppp) memset(&addr, 0, sizeof(addr)); addr.sin6_family = AF_INET6; addr.sin6_addr.s6_addr32[0] = htons(0xfe80); - *(uint64_t *)(addr.sin6_addr.s6_addr + 8) = ppp->ipv6->intf_id; - addr.sin6_scope_id = ppp->ifindex; + *(uint64_t *)(addr.sin6_addr.s6_addr + 8) = ses->ipv6->intf_id; + addr.sin6_scope_id = ses->ifindex; if (bind(sock, (struct sockaddr *)&addr, sizeof(addr))) { log_ppp_error("ipv6_nd: bind: %s %i\n", strerror(errno), errno); @@ -307,7 +307,7 @@ static int ipv6_nd_start(struct ppp_t *ppp) } memset(&mreq, 0, sizeof(mreq)); - mreq.ipv6mr_interface = ppp->ifindex; + mreq.ipv6mr_interface = ses->ifindex; mreq.ipv6mr_multiaddr.s6_addr32[0] = htonl(0xff020000); mreq.ipv6mr_multiaddr.s6_addr32[3] = htonl(0x2); @@ -320,18 +320,18 @@ static int ipv6_nd_start(struct ppp_t *ppp) h = _malloc(sizeof(*h)); memset(h, 0, sizeof(*h)); - h->ppp = ppp; + h->ses = ses; h->pd.key = &pd_key; h->hnd.fd = sock; h->hnd.read = ipv6_nd_read; h->timer.expire = send_ra_timer; h->timer.period = conf_init_ra_interval * 1000; - list_add_tail(&h->pd.entry, &ppp->pd_list); + list_add_tail(&h->pd.entry, &ses->pd_list); - triton_md_register_handler(ppp->ctrl->ctx, &h->hnd); + triton_md_register_handler(ses->ctrl->ctx, &h->hnd); triton_md_enable_handler(&h->hnd, MD_MODE_READ); - triton_timer_add(ppp->ctrl->ctx, &h->timer, 0); + triton_timer_add(ses->ctrl->ctx, &h->timer, 0); return 0; @@ -340,11 +340,11 @@ out_err: return -1; } -static struct ipv6_nd_handler_t *find_pd(struct ppp_t *ppp) +static struct ipv6_nd_handler_t *find_pd(struct ap_session *ses) { - struct ppp_pd_t *pd; + struct ap_private *pd; - list_for_each_entry(pd, &ppp->pd_list, entry) { + list_for_each_entry(pd, &ses->pd_list, entry) { if (pd->key == &pd_key) return container_of(pd, typeof(struct ipv6_nd_handler_t), pd); } @@ -352,17 
+352,17 @@ static struct ipv6_nd_handler_t *find_pd(struct ppp_t *ppp) return NULL; } -static void ev_ppp_started(struct ppp_t *ppp) +static void ev_ses_started(struct ap_session *ses) { - if (!ppp->ipv6) + if (!ses->ipv6) return; - ipv6_nd_start(ppp); + ipv6_nd_start(ses); } -static void ev_ppp_finishing(struct ppp_t *ppp) +static void ev_ses_finishing(struct ap_session *ses) { - struct ipv6_nd_handler_t *h = find_pd(ppp); + struct ipv6_nd_handler_t *h = find_pd(ses); if (!h) return; @@ -531,8 +531,8 @@ static void init(void) load_config(); triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); - triton_event_register_handler(EV_PPP_STARTED, (triton_event_func)ev_ppp_started); - triton_event_register_handler(EV_PPP_FINISHING, (triton_event_func)ev_ppp_finishing); + triton_event_register_handler(EV_SES_STARTED, (triton_event_func)ev_ses_started); + triton_event_register_handler(EV_SES_FINISHING, (triton_event_func)ev_ses_finishing); } DEFINE_INIT(5, init); diff --git a/accel-pppd/libnetlink/genl.c b/accel-pppd/libnetlink/genl.c new file mode 100644 index 00000000..7d745566 --- /dev/null +++ b/accel-pppd/libnetlink/genl.c @@ -0,0 +1,108 @@ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <syslog.h> +#include <fcntl.h> +#include <net/if_arp.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <string.h> +#include <errno.h> +#include <time.h> +#include <sys/uio.h> +#include <linux/genetlink.h> + +#include "triton.h" +#include "log.h" + +#include "libnetlink.h" + +#define GENL_MAX_FAM_GRPS 128 + +int __export genl_resolve_mcg(const char *family, const char *name, int *fam_id) +{ + struct rtnl_handle rth; + struct nlmsghdr *nlh; + struct genlmsghdr *ghdr; + struct rtattr *tb[CTRL_ATTR_MAX + 1]; + struct rtattr *tb2[GENL_MAX_FAM_GRPS + 1]; + struct rtattr *tb3[CTRL_ATTR_MCAST_GRP_MAX + 1]; + struct rtattr *attrs; + int i, len, ret = -1; + struct { + struct nlmsghdr n; + char buf[4096]; + } req; + + if 
(rtnl_open_byproto(&rth, 0, NETLINK_GENERIC)) { + log_error("genl: cannot open rtnetlink\n"); + return -1; + } + + nlh = &req.n; + nlh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + nlh->nlmsg_type = GENL_ID_CTRL; + + ghdr = NLMSG_DATA(&req.n); + ghdr->cmd = CTRL_CMD_GETFAMILY; + + addattr_l(nlh, 128, CTRL_ATTR_FAMILY_NAME, family, strlen(family) + 1); + + if (rtnl_talk(&rth, nlh, 0, 0, nlh, NULL, NULL, 0) < 0 ) { + log_error("genl: error talking to kernel\n"); + goto out; + } + + if (nlh->nlmsg_type != GENL_ID_CTRL) { + log_error("genl: not a controller message %d\n", nlh->nlmsg_type); + goto out; + } + + ghdr = NLMSG_DATA(nlh); + + if (ghdr->cmd != CTRL_CMD_NEWFAMILY) { + log_error("genl: unknown controller command %d\n", ghdr->cmd); + goto out; + } + + len = nlh->nlmsg_len - NLMSG_LENGTH(GENL_HDRLEN); + + if (len < 0) { + log_error("genl: wrong controller message len %d\n", len); + goto out; + } + + attrs = (struct rtattr *)((char *)ghdr + GENL_HDRLEN); + parse_rtattr(tb, CTRL_ATTR_MAX, attrs, len); + + if (!tb[CTRL_ATTR_FAMILY_ID]) { + log_error("genl: missing CTRL_FAMILY_ID attribute\n"); + goto out; + } + + if (!tb[CTRL_ATTR_MCAST_GROUPS]) + goto out; + + if (fam_id) + *fam_id = *(uint32_t *)(RTA_DATA(tb[CTRL_ATTR_FAMILY_ID])); + + parse_rtattr_nested(tb2, GENL_MAX_FAM_GRPS, tb[CTRL_ATTR_MCAST_GROUPS]); + + for (i = 1; i < GENL_MAX_FAM_GRPS; i++) { + if (tb2[i]) { + parse_rtattr_nested(tb3, CTRL_ATTR_MCAST_GRP_MAX, tb2[i]); + if (!tb3[CTRL_ATTR_MCAST_GRP_ID] || !tb3[CTRL_ATTR_MCAST_GRP_NAME]) + continue; + if (strcmp(RTA_DATA(tb3[CTRL_ATTR_MCAST_GRP_NAME]), name)) + continue; + ret = *(uint32_t *)(RTA_DATA(tb3[CTRL_ATTR_MCAST_GRP_ID])); + break; + } + } + +out: + + rtnl_close(&rth); + return ret; +} diff --git a/accel-pppd/libnetlink/genl.h b/accel-pppd/libnetlink/genl.h new file mode 100644 index 00000000..5239d869 --- /dev/null +++ b/accel-pppd/libnetlink/genl.h @@ -0,0 +1,6 @@ +#ifndef __GENL_H +#define __GENL_H + 
+int genl_resolve_mcg(const char *family, const char *name, int *fam_id); + +#endif diff --git a/accel-pppd/libnetlink/iplink.c b/accel-pppd/libnetlink/iplink.c new file mode 100644 index 00000000..ba3ada06 --- /dev/null +++ b/accel-pppd/libnetlink/iplink.c @@ -0,0 +1,77 @@ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <syslog.h> +#include <fcntl.h> +#include <net/if_arp.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <string.h> +#include <errno.h> +#include <time.h> +#include <sys/uio.h> + +#include "libnetlink.h" +#include "iplink.h" +#include "triton.h" +#include "log.h" + +struct arg +{ + iplink_list_func func; + void *arg; +}; + +static int store_nlmsg(const struct sockaddr_nl *who, struct nlmsghdr *n, void *arg) +{ + struct ifinfomsg *ifi = NLMSG_DATA(n); + struct rtattr *tb[IFLA_MAX + 1]; + struct arg *a = arg; + + if (n->nlmsg_type != RTM_NEWLINK) + return 0; + + if (n->nlmsg_len < NLMSG_LENGTH(sizeof(*ifi))) + return -1; + + memset(tb, 0, sizeof(tb)); + parse_rtattr(tb, IFLA_MAX, IFLA_RTA(ifi), IFLA_PAYLOAD(n)); + + if (tb[IFLA_IFNAME] == NULL) + return 0; + + //printf("%i %s\n", ifi->ifi_index, RTA_DATA(tb[IFLA_IFNAME])); + + return a->func(ifi->ifi_index, ifi->ifi_flags, RTA_DATA(tb[IFLA_IFNAME]), a->arg); +} + +int __export iplink_list(iplink_list_func func, void *arg) +{ + struct rtnl_handle rth; + struct arg a = { .func = func, .arg = arg }; + + if (rtnl_open(&rth, 0)) { + log_emerg("iplink: cannot open rtnetlink\n"); + return -1; + } + + if (rtnl_wilddump_request(&rth, AF_PACKET, RTM_GETLINK) < 0) { + log_emerg("iplink: cannot send dump request\n"); + goto out_err; + } + + if (rtnl_dump_filter(&rth, store_nlmsg, &a, NULL, NULL) < 0) { + log_emerg("iplink: dump terminated\n"); + goto out_err; + } + + rtnl_close(&rth); + + return 0; + +out_err: + rtnl_close(&rth); + + return -1; +} + diff --git a/accel-pppd/libnetlink/iplink.h b/accel-pppd/libnetlink/iplink.h new file mode 100644 index 00000000..70c7c600 --- 
/dev/null +++ b/accel-pppd/libnetlink/iplink.h @@ -0,0 +1,8 @@ +#ifndef __IPLINK_H +#define __IPLINK_H + +typedef int (*iplink_list_func)(int index, int flags, const char *name, void *arg); + +int iplink_list(iplink_list_func func, void *arg); + +#endif diff --git a/accel-pppd/shaper/libnetlink.c b/accel-pppd/libnetlink/libnetlink.c index 74cd5cb5..808e4339 100644 --- a/accel-pppd/shaper/libnetlink.c +++ b/accel-pppd/libnetlink/libnetlink.c @@ -26,9 +26,11 @@ #include "libnetlink.h" #include "log.h" +#define __export __attribute__((visibility("default"))) + int rcvbuf = 1024 * 1024; -void rtnl_close(struct rtnl_handle *rth) +void __export rtnl_close(struct rtnl_handle *rth) { if (rth->fd >= 0) { close(rth->fd); @@ -36,7 +38,7 @@ void rtnl_close(struct rtnl_handle *rth) } } -int rtnl_open_byproto(struct rtnl_handle *rth, unsigned subscriptions, +int __export rtnl_open_byproto(struct rtnl_handle *rth, unsigned subscriptions, int protocol) { socklen_t addr_len; @@ -85,12 +87,12 @@ int rtnl_open_byproto(struct rtnl_handle *rth, unsigned subscriptions, return 0; } -int rtnl_open(struct rtnl_handle *rth, unsigned subscriptions) +int __export rtnl_open(struct rtnl_handle *rth, unsigned subscriptions) { return rtnl_open_byproto(rth, subscriptions, NETLINK_ROUTE); } -int rtnl_wilddump_request(struct rtnl_handle *rth, int family, int type) +int __export rtnl_wilddump_request(struct rtnl_handle *rth, int family, int type) { struct { struct nlmsghdr nlh; @@ -108,12 +110,12 @@ int rtnl_wilddump_request(struct rtnl_handle *rth, int family, int type) return send(rth->fd, (void*)&req, sizeof(req), 0); } -int rtnl_send(struct rtnl_handle *rth, const char *buf, int len) +int __export rtnl_send(struct rtnl_handle *rth, const char *buf, int len) { return send(rth->fd, buf, len, 0); } -int rtnl_send_check(struct rtnl_handle *rth, const char *buf, int len) +int __export rtnl_send_check(struct rtnl_handle *rth, const char *buf, int len) { struct nlmsghdr *h; int status; @@ -146,7 +148,7 
@@ int rtnl_send_check(struct rtnl_handle *rth, const char *buf, int len) return 0; } -int rtnl_dump_request(struct rtnl_handle *rth, int type, void *req, int len) +int __export rtnl_dump_request(struct rtnl_handle *rth, int type, void *req, int len) { struct nlmsghdr nlh; struct sockaddr_nl nladdr; @@ -173,7 +175,7 @@ int rtnl_dump_request(struct rtnl_handle *rth, int type, void *req, int len) return sendmsg(rth->fd, &msg, 0); } -int rtnl_dump_filter_l(struct rtnl_handle *rth, +int __export rtnl_dump_filter_l(struct rtnl_handle *rth, const struct rtnl_dump_filter_arg *arg) { struct sockaddr_nl nladdr; @@ -266,7 +268,7 @@ skip_it: } } -int rtnl_dump_filter(struct rtnl_handle *rth, +int __export rtnl_dump_filter(struct rtnl_handle *rth, rtnl_filter_t filter, void *arg1, rtnl_filter_t junk, @@ -280,7 +282,7 @@ int rtnl_dump_filter(struct rtnl_handle *rth, return rtnl_dump_filter_l(rth, a); } -int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, pid_t peer, +int __export rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, pid_t peer, unsigned groups, struct nlmsghdr *answer, rtnl_filter_t junk, void *jarg, int ignore_einval) @@ -405,7 +407,7 @@ int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, pid_t peer, } } -int rtnl_listen(struct rtnl_handle *rtnl, +int __export rtnl_listen(struct rtnl_handle *rtnl, rtnl_filter_t handler, void *jarg) { @@ -480,7 +482,7 @@ int rtnl_listen(struct rtnl_handle *rtnl, } } -int rtnl_from_file(FILE *rtnl, rtnl_filter_t handler, +int __export rtnl_from_file(FILE *rtnl, rtnl_filter_t handler, void *jarg) { int status; @@ -535,7 +537,7 @@ int rtnl_from_file(FILE *rtnl, rtnl_filter_t handler, } } -int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data) +int __export addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data) { int len = RTA_LENGTH(4); struct rtattr *rta; @@ -551,7 +553,7 @@ int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data) return 0; } -int addattr_l(struct nlmsghdr *n, 
int maxlen, int type, const void *data, +int __export addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, int alen) { int len = RTA_LENGTH(alen); @@ -569,7 +571,7 @@ int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, return 0; } -int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len) +int __export addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len) { if (NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len) > maxlen) { log_error("libnetlink: ""addraw_l ERROR: message exceeded bound of %d\n",maxlen); @@ -582,7 +584,7 @@ int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len) return 0; } -struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type) +struct rtattr __export *addattr_nest(struct nlmsghdr *n, int maxlen, int type) { struct rtattr *nest = NLMSG_TAIL(n); @@ -590,13 +592,13 @@ struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type) return nest; } -int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest) +int __export addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest) { nest->rta_len = (void *)NLMSG_TAIL(n) - (void *)nest; return n->nlmsg_len; } -struct rtattr *addattr_nest_compat(struct nlmsghdr *n, int maxlen, int type, +struct rtattr __export *addattr_nest_compat(struct nlmsghdr *n, int maxlen, int type, const void *data, int len) { struct rtattr *start = NLMSG_TAIL(n); @@ -606,7 +608,7 @@ struct rtattr *addattr_nest_compat(struct nlmsghdr *n, int maxlen, int type, return start; } -int addattr_nest_compat_end(struct nlmsghdr *n, struct rtattr *start) +int __export addattr_nest_compat_end(struct nlmsghdr *n, struct rtattr *start) { struct rtattr *nest = (void *)start + NLMSG_ALIGN(start->rta_len); @@ -615,7 +617,7 @@ int addattr_nest_compat_end(struct nlmsghdr *n, struct rtattr *start) return n->nlmsg_len; } -int rta_addattr32(struct rtattr *rta, int maxlen, int type, __u32 data) +int __export rta_addattr32(struct rtattr *rta, int maxlen, int 
type, __u32 data) { int len = RTA_LENGTH(4); struct rtattr *subrta; @@ -632,7 +634,7 @@ int rta_addattr32(struct rtattr *rta, int maxlen, int type, __u32 data) return 0; } -int rta_addattr_l(struct rtattr *rta, int maxlen, int type, +int __export rta_addattr_l(struct rtattr *rta, int maxlen, int type, const void *data, int alen) { struct rtattr *subrta; @@ -650,7 +652,7 @@ int rta_addattr_l(struct rtattr *rta, int maxlen, int type, return 0; } -int parse_rtattr(struct rtattr *tb[], int max, struct rtattr *rta, int len) +int __export parse_rtattr(struct rtattr *tb[], int max, struct rtattr *rta, int len) { memset(tb, 0, sizeof(struct rtattr *) * (max + 1)); while (RTA_OK(rta, len)) { diff --git a/accel-pppd/shaper/libnetlink.h b/accel-pppd/libnetlink/libnetlink.h index f68bf8a1..f68bf8a1 100644 --- a/accel-pppd/shaper/libnetlink.h +++ b/accel-pppd/libnetlink/libnetlink.h diff --git a/accel-pppd/log.c b/accel-pppd/log.c index 43e98a97..b19f64b4 100644 --- a/accel-pppd/log.c +++ b/accel-pppd/log.c @@ -25,8 +25,8 @@ struct log_pd_t { - struct ppp_pd_t pd; - struct ppp_t *ppp; + struct ap_private pd; + struct ap_session *ses; struct list_head msgs; struct log_msg_t *msg; int authorized:1; @@ -48,7 +48,7 @@ static mempool_t msg_pool; static mempool_t _msg_pool; static mempool_t chunk_pool; -static __thread struct ppp_t *cur_ppp; +static __thread struct ap_session *cur_ses; static __thread struct _log_msg_t *cur_msg; static __thread char *stat_buf; static pthread_key_t stat_buf_key; @@ -59,15 +59,15 @@ static FILE *debug_file; static void _log_free_msg(struct _log_msg_t *msg); static struct log_msg_t *clone_msg(struct _log_msg_t *msg); static int add_msg(struct _log_msg_t *msg, const char *buf); -//static struct log_pd_t *find_pd(struct ppp_t *ppp); -static void write_msg(FILE *f, struct _log_msg_t *msg, struct ppp_t *ppp); +//static struct log_pd_t *find_pd(struct ap_session *ses); +static void write_msg(FILE *f, struct _log_msg_t *msg, struct ap_session *ses); static 
void stat_buf_free(void *ptr) { _free(ptr); } -static void do_log(int level, const char *fmt, va_list ap, struct ppp_t *ppp) +static void do_log(int level, const char *fmt, va_list ap, struct ap_session *ses) { struct log_target_t *t; struct log_msg_t *m; @@ -96,13 +96,13 @@ static void do_log(int level, const char *fmt, va_list ap, struct ppp_t *ppp) return; if (debug_file) - write_msg(debug_file, cur_msg, ppp); + write_msg(debug_file, cur_msg, ses); list_for_each_entry(t, &targets, entry) { m = clone_msg(cur_msg); if (!m) break; - t->log(t, m, ppp); + t->log(t, m, ses); } out: @@ -183,7 +183,7 @@ void __export log_ppp_error(const char *fmt,...) if (log_level >= LOG_ERROR) { va_list ap; va_start(ap, fmt); - do_log(LOG_ERROR, fmt, ap, cur_ppp); + do_log(LOG_ERROR, fmt, ap, cur_ses); va_end(ap); } } @@ -193,7 +193,7 @@ void __export log_ppp_warn(const char *fmt,...) if (log_level >= LOG_WARN) { va_list ap; va_start(ap, fmt); - do_log(LOG_WARN, fmt, ap, cur_ppp); + do_log(LOG_WARN, fmt, ap, cur_ses); va_end(ap); } } @@ -203,7 +203,7 @@ void __export log_ppp_info1(const char *fmt,...) if (log_level >= LOG_INFO1) { va_list ap; va_start(ap, fmt); - do_log(LOG_INFO1, fmt, ap, cur_ppp); + do_log(LOG_INFO1, fmt, ap, cur_ses); va_end(ap); } } @@ -213,7 +213,7 @@ void __export log_ppp_info2(const char *fmt,...) if (log_level >= LOG_INFO2) { va_list ap; va_start(ap, fmt); - do_log(LOG_INFO2, fmt, ap, cur_ppp); + do_log(LOG_INFO2, fmt, ap, cur_ses); va_end(ap); } } @@ -223,7 +223,7 @@ void __export log_ppp_debug(const char *fmt,...) if (log_level >= LOG_DEBUG) { va_list ap; va_start(ap, fmt); - do_log(LOG_DEBUG, fmt, ap, cur_ppp); + do_log(LOG_DEBUG, fmt, ap, cur_ses); va_end(ap); } } @@ -232,7 +232,7 @@ void __export log_ppp_msg(const char *fmt,...) 
{ va_list ap; va_start(ap, fmt); - do_log(LOG_MSG, fmt, ap, cur_ppp); + do_log(LOG_MSG, fmt, ap, cur_ses); va_end(ap); } @@ -326,12 +326,12 @@ static int add_msg(struct _log_msg_t *msg, const char *buf) return 0; } -static void write_msg(FILE *f, struct _log_msg_t *msg, struct ppp_t *ppp) +static void write_msg(FILE *f, struct _log_msg_t *msg, struct ap_session *ses) { struct log_chunk_t *chunk; - if (ppp) - sprintf(stat_buf,"%s: %s: ", ppp->ifname, ppp->sessionid); + if (ses) + sprintf(stat_buf,"%s: %s: ", ses->ifname, ses->sessionid); else stat_buf[0] = 0; @@ -342,12 +342,12 @@ static void write_msg(FILE *f, struct _log_msg_t *msg, struct ppp_t *ppp) fflush(f); } -/*static struct log_pd_t *find_pd(struct ppp_t *ppp) +/*static struct log_pd_t *find_pd(struct ap_session *ses) { - struct ppp_pd_t *pd; + struct ap_private *pd; struct log_pd_t *lpd; - list_for_each_entry(pd, &ppp->pd_list, entry) { + list_for_each_entry(pd, &ses->pd_list, entry) { if (pd->key == &pd_key) { lpd = container_of(pd, typeof(*lpd), pd); return lpd; @@ -357,7 +357,7 @@ static void write_msg(FILE *f, struct _log_msg_t *msg, struct ppp_t *ppp) abort(); } -static void ev_ctrl_starting(struct ppp_t *ppp) +static void ev_ctrl_starting(struct ap_session *ses) { struct log_pd_t *lpd = _malloc(sizeof(*lpd)); if (!lpd) { @@ -369,10 +369,10 @@ static void ev_ctrl_starting(struct ppp_t *ppp) lpd->pd.key = &pd_key; lpd->ppp = ppp; INIT_LIST_HEAD(&lpd->msgs); - list_add_tail(&lpd->pd.entry, &ppp->pd_list); + list_add_tail(&lpd->pd.entry, &ses->pd_list); } -static void ev_ctrl_finished(struct ppp_t *ppp) +static void ev_ctrl_finished(struct ap_session *ses) { struct log_pd_t *lpd = find_pd(ppp); struct _log_msg_t *msg; @@ -414,7 +414,7 @@ static void ev_ctrl_finished(struct ppp_t *ppp) _free(lpd); } -static void ev_ppp_authorized(struct ppp_t *ppp) +static void ev_ppp_authorized(struct ap_session *ses) { struct log_pd_t *lpd = find_pd(ppp); struct _log_msg_t *msg; @@ -446,7 +446,7 @@ static void 
ev_ppp_authorized(struct ppp_t *ppp) void __export log_switch(struct triton_context_t *ctx, void *arg) { - cur_ppp = (struct ppp_t *)arg; + cur_ses = (struct ap_session *)arg; } diff --git a/accel-pppd/log.h b/accel-pppd/log.h index 58461465..2ff2ac80 100644 --- a/accel-pppd/log.h +++ b/accel-pppd/log.h @@ -8,7 +8,7 @@ #define LOG_MAX_SIZE 4096 #define LOG_CHUNK_SIZE 128 -struct ppp_t; +struct ap_session; struct triton_context_t; struct log_msg_t @@ -33,7 +33,7 @@ struct log_target_t { struct list_head entry; - void (*log)(struct log_target_t *, struct log_msg_t *, struct ppp_t *ppp); + void (*log)(struct log_target_t *, struct log_msg_t *, struct ap_session *ses); void (*reopen)(void); }; diff --git a/accel-pppd/logs/log_file.c b/accel-pppd/logs/log_file.c index 48f43510..1da3ee3f 100644 --- a/accel-pppd/logs/log_file.c +++ b/accel-pppd/logs/log_file.c @@ -42,14 +42,14 @@ struct log_file_t struct log_file_pd_t { - struct ppp_pd_t pd; + struct ap_private pd; struct log_file_t lf; unsigned long tmp; }; struct fail_log_pd_t { - struct ppp_pd_t pd; + struct ap_private pd; struct list_head msgs; }; @@ -285,7 +285,7 @@ static void queue_log_list(struct log_file_t *lf, struct list_head *l) } -static void set_hdr(struct log_msg_t *msg, struct ppp_t *ppp) +static void set_hdr(struct log_msg_t *msg, struct ap_session *ses) { struct tm tm; char timestamp[32]; @@ -295,28 +295,28 @@ static void set_hdr(struct log_msg_t *msg, struct ppp_t *ppp) strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", &tm); sprintf(msg->hdr->msg, "%s[%s]: %s: %s%s%s", conf_color ? level_color[msg->level] : "", timestamp, level_name[msg->level], - ppp ? ppp->ifname : "", - ppp ? ": " : "", + ses ? ses->ifname : "", + ses ? ": " : "", conf_color ? 
NORMAL_COLOR : ""); msg->hdr->len = strlen(msg->hdr->msg); } -static void general_log(struct log_target_t *t, struct log_msg_t *msg, struct ppp_t *ppp) +static void general_log(struct log_target_t *t, struct log_msg_t *msg, struct ap_session *ses) { - if (ppp && !conf_copy) { + if (ses && !conf_copy) { log_free_msg(msg); return; } - set_hdr(msg, ppp); + set_hdr(msg, ses); queue_log(log_file, msg); } -static struct ppp_pd_t *find_pd(struct ppp_t *ppp, void *pd_key) +static struct ap_private *find_pd(struct ap_session *ses, void *pd_key) { - struct ppp_pd_t *pd; + struct ap_private *pd; - list_for_each_entry(pd, &ppp->pd_list, entry) { + list_for_each_entry(pd, &ses->pd_list, entry) { if (pd->key == pd_key) { return pd; } @@ -325,9 +325,9 @@ static struct ppp_pd_t *find_pd(struct ppp_t *ppp, void *pd_key) return NULL; } -static struct log_file_pd_t *find_lpd(struct ppp_t *ppp, void *pd_key) +static struct log_file_pd_t *find_lpd(struct ap_session *ses, void *pd_key) { - struct ppp_pd_t *pd = find_pd(ppp, pd_key); + struct ap_private *pd = find_pd(ses, pd_key); if (!pd) return NULL; @@ -335,9 +335,9 @@ static struct log_file_pd_t *find_lpd(struct ppp_t *ppp, void *pd_key) return container_of(pd, struct log_file_pd_t, pd); } -static struct fail_log_pd_t *find_fpd(struct ppp_t *ppp, void *pd_key) +static struct fail_log_pd_t *find_fpd(struct ap_session *ses, void *pd_key) { - struct ppp_pd_t *pd = find_pd(ppp, pd_key); + struct ap_private *pd = find_pd(ses, pd_key); if (!pd) return NULL; @@ -346,63 +346,63 @@ static struct fail_log_pd_t *find_fpd(struct ppp_t *ppp, void *pd_key) } -static void per_user_log(struct log_target_t *t, struct log_msg_t *msg, struct ppp_t *ppp) +static void per_user_log(struct log_target_t *t, struct log_msg_t *msg, struct ap_session *ses) { struct log_file_pd_t *lpd; - if (!ppp) { + if (!ses) { log_free_msg(msg); return; } - lpd = find_lpd(ppp, &pd_key1); + lpd = find_lpd(ses, &pd_key1); if (!lpd) { log_free_msg(msg); return; } - set_hdr(msg, 
ppp); + set_hdr(msg, ses); queue_log(&lpd->lf, msg); } -static void per_session_log(struct log_target_t *t, struct log_msg_t *msg, struct ppp_t *ppp) +static void per_session_log(struct log_target_t *t, struct log_msg_t *msg, struct ap_session *ses) { struct log_file_pd_t *lpd; - if (!ppp) { + if (!ses) { log_free_msg(msg); return; } - lpd = find_lpd(ppp, &pd_key2); + lpd = find_lpd(ses, &pd_key2); if (!lpd) { log_free_msg(msg); return; } - set_hdr(msg, ppp); + set_hdr(msg, ses); queue_log(&lpd->lf, msg); } -static void fail_log(struct log_target_t *t, struct log_msg_t *msg, struct ppp_t *ppp) +static void fail_log(struct log_target_t *t, struct log_msg_t *msg, struct ap_session *ses) { struct fail_log_pd_t *fpd; - if (!ppp || !conf_fail_log) { + if (!ses || !conf_fail_log) { log_free_msg(msg); return; } - fpd = find_fpd(ppp, &pd_key3); + fpd = find_fpd(ses, &pd_key3); if (!fpd) { log_free_msg(msg); return; } - set_hdr(msg, ppp); + set_hdr(msg, ses); list_add_tail(&msg->entry, &fpd->msgs); } @@ -453,12 +453,12 @@ static void free_lpd(struct log_file_pd_t *lpd) } } -static void ev_ppp_authorized2(struct ppp_t *ppp) +static void ev_ses_authorized2(struct ap_session *ses) { struct fail_log_pd_t *fpd; struct log_msg_t *msg; - fpd = find_fpd(ppp, &pd_key3); + fpd = find_fpd(ses, &pd_key3); if (!fpd) return; @@ -472,12 +472,12 @@ static void ev_ppp_authorized2(struct ppp_t *ppp) mempool_free(fpd); } -static void ev_ppp_authorized1(struct ppp_t *ppp) +static void ev_ses_authorized1(struct ap_session *ses) { struct log_file_pd_t *lpd; char *fname; - lpd = find_lpd(ppp, &pd_key1); + lpd = find_lpd(ses, &pd_key1); if (!lpd) return; @@ -489,14 +489,14 @@ static void ev_ppp_authorized1(struct ppp_t *ppp) strcpy(fname, conf_per_user_dir); strcat(fname, "/"); - strcat(fname, ppp->username); + strcat(fname, ses->username); if (conf_per_session) { if (mkdir(fname, S_IRWXU) && errno != EEXIST) { log_emerg("log_file: mkdir '%s': %s'\n", fname, strerror(errno)); goto out_err; } 
strcat(fname, "/"); - strcat(fname, ppp->sessionid); + strcat(fname, ses->sessionid); } strcat(fname, ".log"); @@ -518,7 +518,7 @@ out_err: free_lpd(lpd); } -static void ev_ctrl_started(struct ppp_t *ppp) +static void ev_ctrl_started(struct ap_session *ses) { struct log_file_pd_t *lpd; struct fail_log_pd_t *fpd; @@ -534,7 +534,7 @@ static void ev_ctrl_started(struct ppp_t *ppp) lpd->pd.key = &pd_key1; log_file_init(&lpd->lf); lpd->lf.lpd = lpd; - list_add_tail(&lpd->pd.entry, &ppp->pd_list); + list_add_tail(&lpd->pd.entry, &ses->pd_list); } if (conf_per_session_dir) { @@ -568,7 +568,7 @@ static void ev_ctrl_started(struct ppp_t *ppp) _free(fname); - list_add_tail(&lpd->pd.entry, &ppp->pd_list); + list_add_tail(&lpd->pd.entry, &ses->pd_list); } if (conf_fail_log) { @@ -579,29 +579,29 @@ static void ev_ctrl_started(struct ppp_t *ppp) } memset(fpd, 0, sizeof(*fpd)); fpd->pd.key = &pd_key3; - list_add_tail(&fpd->pd.entry, &ppp->pd_list); + list_add_tail(&fpd->pd.entry, &ses->pd_list); INIT_LIST_HEAD(&fpd->msgs); } } -static void ev_ctrl_finished(struct ppp_t *ppp) +static void ev_ctrl_finished(struct ap_session *ses) { struct log_file_pd_t *lpd; struct fail_log_pd_t *fpd; char *fname; - fpd = find_fpd(ppp, &pd_key3); + fpd = find_fpd(ses, &pd_key3); if (fpd) { queue_log_list(fail_log_file, &fpd->msgs); list_del(&fpd->pd.entry); mempool_free(fpd); } - lpd = find_lpd(ppp, &pd_key1); + lpd = find_lpd(ses, &pd_key1); if (lpd) free_lpd(lpd); - lpd = find_lpd(ppp, &pd_key2); + lpd = find_lpd(ses, &pd_key2); if (lpd) { if (lpd->tmp) { fname = _malloc(PATH_MAX); @@ -619,12 +619,12 @@ static void ev_ctrl_finished(struct ppp_t *ppp) } } -static void ev_ppp_starting(struct ppp_t *ppp) +static void ev_ses_starting(struct ap_session *ses) { struct log_file_pd_t *lpd; char *fname1, *fname2; - lpd = find_lpd(ppp, &pd_key2); + lpd = find_lpd(ses, &pd_key2); if (!lpd) return; @@ -647,7 +647,7 @@ static void ev_ppp_starting(struct ppp_t *ppp) strcpy(fname2, conf_per_session_dir); 
strcat(fname2, "/"); - strcat(fname2, ppp->sessionid); + strcat(fname2, ses->sessionid); strcat(fname2, ".log"); if (rename(fname1, fname2)) @@ -753,17 +753,17 @@ static void init(void) if (conf_per_user_dir) { log_register_target(&per_user_target); - triton_event_register_handler(EV_PPP_AUTHORIZED, (triton_event_func)ev_ppp_authorized1); + triton_event_register_handler(EV_SES_AUTHORIZED, (triton_event_func)ev_ses_authorized1); } if (conf_per_session_dir) { log_register_target(&per_session_target); - triton_event_register_handler(EV_PPP_STARTING, (triton_event_func)ev_ppp_starting); + triton_event_register_handler(EV_SES_STARTING, (triton_event_func)ev_ses_starting); } if (conf_fail_log) { log_register_target(&fail_log_target); - triton_event_register_handler(EV_PPP_AUTHORIZED, (triton_event_func)ev_ppp_authorized2); + triton_event_register_handler(EV_SES_AUTHORIZED, (triton_event_func)ev_ses_authorized2); } triton_event_register_handler(EV_CTRL_STARTED, (triton_event_func)ev_ctrl_started); diff --git a/accel-pppd/logs/log_syslog.c b/accel-pppd/logs/log_syslog.c index 5a922f57..cbab5252 100644 --- a/accel-pppd/logs/log_syslog.c +++ b/accel-pppd/logs/log_syslog.c @@ -50,10 +50,10 @@ static void unpack_msg(struct log_msg_t *msg) log_buf[0] = 0; } -static void set_hdr(struct log_msg_t *msg, struct ppp_t *ppp) +static void set_hdr(struct log_msg_t *msg, struct ap_session *ses) { - if (ppp) { - if (snprintf(msg->hdr->msg, LOG_CHUNK_SIZE, "%s:%s: ", ppp->ifname, ppp->username ? ppp->username : "")) + if (ses) { + if (snprintf(msg->hdr->msg, LOG_CHUNK_SIZE, "%s:%s: ", ses->ifname, ses->username ? 
ses->username : "")) strcpy(msg->hdr->msg + LOG_CHUNK_SIZE - 3, ": "); } else msg->hdr->msg[0] = 0; @@ -104,9 +104,9 @@ static void queue_log(struct log_msg_t *msg) } -static void general_log(struct log_target_t *t, struct log_msg_t *msg, struct ppp_t *ppp) +static void general_log(struct log_target_t *t, struct log_msg_t *msg, struct ap_session *ses) { - set_hdr(msg, ppp); + set_hdr(msg, ses); if (syslog_ctx.tpd) queue_log(msg); diff --git a/accel-pppd/logs/log_tcp.c b/accel-pppd/logs/log_tcp.c index 3f76ff8a..b89b7d65 100644 --- a/accel-pppd/logs/log_tcp.c +++ b/accel-pppd/logs/log_tcp.c @@ -130,7 +130,7 @@ static void queue_log(struct tcp_target_t *t, struct log_msg_t *msg) } } -static void set_hdr(struct log_msg_t *msg, struct ppp_t *ppp) +static void set_hdr(struct log_msg_t *msg, struct ap_session *ses) { struct tm tm; char timestamp[32]; @@ -138,15 +138,15 @@ static void set_hdr(struct log_msg_t *msg, struct ppp_t *ppp) localtime_r(&msg->timestamp.tv_sec, &tm); strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", &tm); - sprintf(msg->hdr->msg, "[%s]: %s: %s: ", timestamp, level_name[msg->level], ppp ? ppp->ifname : ""); + sprintf(msg->hdr->msg, "[%s]: %s: %s: ", timestamp, level_name[msg->level], ses ? 
ses->ifname : ""); msg->hdr->len = strlen(msg->hdr->msg); } -static void general_log(struct log_target_t *lt, struct log_msg_t *msg, struct ppp_t *ppp) +static void general_log(struct log_target_t *lt, struct log_msg_t *msg, struct ap_session *ses) { struct tcp_target_t *t = container_of(lt, typeof(*t), target); - set_hdr(msg, ppp); + set_hdr(msg, ses); queue_log(t, msg); } diff --git a/accel-pppd/main.c b/accel-pppd/main.c index e153caf4..14180605 100644 --- a/accel-pppd/main.c +++ b/accel-pppd/main.c @@ -4,6 +4,11 @@ #include <stdlib.h> #include <stdio.h> #include <errno.h> +#include <fcntl.h> +#include <time.h> +#include <limits.h> +#include <malloc.h> +#include <dirent.h> #include <sys/stat.h> #include <sys/mman.h> #include <sys/resource.h> @@ -13,9 +18,16 @@ #include "memdebug.h" #include "log.h" #include "events.h" +#include "backup.h" + +#ifndef ARG_MAX +#define ARG_MAX 128*1024 +#endif static char *pid_file; static char *conf_file; +static char *conf_dump; +static sigset_t orig_set; static void change_limits(void) { @@ -50,15 +62,150 @@ static void config_reload_notify(int r) if (!r) triton_event_fire(EV_CONFIG_RELOAD, NULL); } + static void config_reload(int num) { triton_conf_reload(config_reload_notify); } +/*static void close_all_fd(void) +{ + DIR *dirp; + struct dirent ent, *res; + char path[128]; + int fd; + + sprintf(path, "/proc/%u/fd", getpid()); + + dirp = opendir(path); + if (!dirp) + return; + + while (1) { + if (readdir_r(dirp, &ent, &res)) + return; + if (!res) + break; + + fd = atol(ent.d_name); + if (fd > 2) + close(fd); + } + + closedir(dirp); +}*/ + +void core_restart(int soft) +{ + char fname[128]; + int fd, n, f = 0; + char cmdline[ARG_MAX]; + char exe[PATH_MAX]; + char *argv[16]; + char *ptr = cmdline, *endptr; + + if (fork()) { + //close_all_fd(); + return; + } + + pthread_sigmask(SIG_SETMASK, &orig_set, NULL); + + sprintf(fname, "/proc/%i/cmdline", getpid()); + + fd = open(fname, O_RDONLY); + n = read(fd, cmdline, ARG_MAX); + + endptr 
= ptr + n; + + n = 0; + while (ptr < endptr && n < 14) { + if (strcmp(ptr, "--internal")) + argv[n++] = ptr; + else if (soft) { + f = 1; + argv[n++] = ptr; + } + + while (ptr < endptr && *ptr++); + } + +#ifdef USE_BACKUP + if (soft) + backup_restore_fd(); +#endif + + sprintf(exe, "/proc/%u/exe", getpid()); + readlink(exe, exe, PATH_MAX); + + if (!f) + argv[n++] = "--internal"; + + argv[n++] = NULL; + + while (1) { + execv(exe, argv); + sleep(3); + } +} + +static void sigsegv(int num) +{ + char cmd[PATH_MAX]; + char fname[128]; + char exec_file[PATH_MAX]; + struct rlimit lim; + + pthread_sigmask(SIG_SETMASK, &orig_set, NULL); + + if (conf_dump) { + FILE *f; + unsigned int t = time(NULL); + + chdir(conf_dump); + + sprintf(fname, "cmd-%u", t); + f = fopen(fname, "w"); + if (!f) + goto out; + fprintf(f, "thread apply all bt full\ndetach\nquit\n"); + fclose(f); + + sprintf(exec_file, "/proc/%u/exe", getpid()); + readlink(exec_file, exec_file, PATH_MAX); + + sprintf(cmd, "gdb -x %s %s %d > dump-%u", fname, exec_file, getpid(), t); + + system(cmd); + + unlink(fname); + } + +out: +#ifdef USE_BACKUP + core_restart(1); +#else + core_restart(0); +#endif + + if (conf_dump) { + lim.rlim_cur = RLIM_INFINITY; + lim.rlim_max = RLIM_INFINITY; + + setrlimit(RLIMIT_CORE, &lim); + } + + abort(); +} + int main(int argc, char **argv) { sigset_t set; - int i, sig, goto_daemon = 0; + int i, sig, goto_daemon = 0, len; + pid_t pid = 0; + struct sigaction sa; + int pagesize = sysconf(_SC_PAGE_SIZE); + int internal = 0; if (argc < 2) goto usage; @@ -74,16 +221,29 @@ int main(int argc, char **argv) if (i == argc - 1) goto usage; conf_file = argv[++i]; - } + } else if (!strcmp(argv[i], "--dump")) { + if (i == argc - 1) + goto usage; + len = (strlen(argv[i + 1]) / pagesize + 1) * pagesize; + conf_dump = memalign(pagesize, len); + strcpy(conf_dump, argv[++i]); + mprotect(conf_dump, len, PROT_READ); + } else if (!strcmp(argv[i], "--internal")) + internal = 1; } if (!conf_file) goto usage; - + + if 
(internal) { + while (getppid() != 1) + sleep(1); + } + if (triton_init(conf_file)) _exit(EXIT_FAILURE); - if (goto_daemon) { + if (goto_daemon && pid != getpid()) { /*pid_t pid = fork(); if (pid > 0) _exit(EXIT_SUCCESS); @@ -121,15 +281,19 @@ int main(int argc, char **argv) triton_run(); - sigfillset(&set); - struct sigaction sa = { - .sa_handler = config_reload, - .sa_mask = set, - }; + sigfillset(&set); + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = config_reload; + sa.sa_mask = set; sigaction(SIGUSR1, &sa, NULL); + + sa.sa_handler = sigsegv; + sa.sa_mask = set; + sigaction(SIGSEGV, &sa, NULL); + sigdelset(&set, SIGKILL); sigdelset(&set, SIGSTOP); sigdelset(&set, SIGSEGV); @@ -142,7 +306,7 @@ int main(int argc, char **argv) sigdelset(&set, SIGUSR1); sigdelset(&set, 35); sigdelset(&set, 36); - pthread_sigmask(SIG_SETMASK, &set, NULL); + pthread_sigmask(SIG_SETMASK, &set, &orig_set); sigemptyset(&set); //sigaddset(&set, SIGINT); @@ -151,7 +315,11 @@ int main(int argc, char **argv) sigaddset(&set, SIGILL); sigaddset(&set, SIGFPE); sigaddset(&set, SIGBUS); - + +#ifdef USE_BACKUP + backup_restore(internal); +#endif + sigwait(&set, &sig); log_info1("terminate, sig = %i\n", sig); diff --git a/accel-pppd/ppp/ccp_mppe.c b/accel-pppd/ppp/ccp_mppe.c index 388e2f6c..ae2b2cae 100644 --- a/accel-pppd/ppp/ccp_mppe.c +++ b/accel-pppd/ppp/ccp_mppe.c @@ -61,10 +61,10 @@ static struct ccp_option_t *mppe_init(struct ppp_ccp_t *ccp) memset(mppe_opt, 0, sizeof(*mppe_opt)); int mppe; - if (ccp->ppp->ctrl->mppe == MPPE_UNSET) + if (ccp->ppp->ses.ctrl->mppe == MPPE_UNSET) mppe = conf_mppe; else - mppe = ccp->ppp->ctrl->mppe; + mppe = ccp->ppp->ses.ctrl->mppe; if (mppe != MPPE_ALLOW) mppe_opt->policy = mppe; @@ -78,6 +78,9 @@ static struct ccp_option_t *mppe_init(struct ppp_ccp_t *ccp) if (mppe == MPPE_REQUIRE || mppe == MPPE_PREFER) ccp->ld.passive = 0; + + if (mppe == MPPE_REQUIRE) + ccp->ld.optional = 0; mppe_opt->opt.id = CI_MPPE; mppe_opt->opt.len = 6; @@ -121,7 +124,7 @@ static 
int decrease_mtu(struct ppp_t *ppp) { struct ifreq ifr; - strcpy(ifr.ifr_name, ppp->ifname); + strcpy(ifr.ifr_name, ppp->ses.ifname); if (ioctl(sock_fd, SIOCGIFMTU, &ifr)) { log_ppp_error("mppe: failed to get MTU: %s\n", strerror(errno)); @@ -173,10 +176,10 @@ static int mppe_recv_conf_req(struct ppp_ccp_t *ccp, struct ccp_option_t *opt, u struct ccp_opt32_t *opt32 = (struct ccp_opt32_t *)ptr; int mppe; - if (ccp->ppp->ctrl->mppe == MPPE_UNSET) + if (ccp->ppp->ses.ctrl->mppe == MPPE_UNSET) mppe = conf_mppe; else - mppe = ccp->ppp->ctrl->mppe; + mppe = ccp->ppp->ses.ctrl->mppe; if (!ptr) { if (mppe_opt->policy == 2) @@ -299,12 +302,12 @@ static void ev_mppe_keys(struct ev_mppe_keys_t *ev) return; } - if (ccp->ppp->ctrl->mppe == MPPE_UNSET) + if (ccp->ppp->ses.ctrl->mppe == MPPE_UNSET) mppe = conf_mppe; else - mppe = ev->ppp->ctrl->mppe; + mppe = ev->ppp->ses.ctrl->mppe; - if (ev->ppp->ctrl->mppe == MPPE_UNSET) { + if (ev->ppp->ses.ctrl->mppe == MPPE_UNSET) { mppe_opt->policy = ev->policy; if (ev->policy == 2) { diff --git a/accel-pppd/ppp/ipcp_opt_dns.c b/accel-pppd/ppp/ipcp_opt_dns.c index 19183eb7..1ca8723b 100644 --- a/accel-pppd/ppp/ipcp_opt_dns.c +++ b/accel-pppd/ppp/ipcp_opt_dns.c @@ -143,11 +143,17 @@ static void dns2_print(void (*print)(const char *fmt,...),struct ipcp_option_t * static void ev_dns(struct ev_dns_t *ev) { struct dns_option_t *dns_opt; + struct ppp_t *ppp; - dns_opt = container_of(ipcp_find_option(ev->ppp, &dns1_opt_hnd), typeof(*dns_opt), opt); + if (ev->ses->ctrl->type == CTRL_TYPE_IPOE) + return; + + ppp = container_of(ev->ses, typeof(*ppp), ses); + + dns_opt = container_of(ipcp_find_option(ppp, &dns1_opt_hnd), typeof(*dns_opt), opt); dns_opt->addr = ev->dns1; - dns_opt = container_of(ipcp_find_option(ev->ppp, &dns2_opt_hnd), typeof(*dns_opt), opt); + dns_opt = container_of(ipcp_find_option(ppp, &dns2_opt_hnd), typeof(*dns_opt), opt); dns_opt->addr = ev->dns2; } diff --git a/accel-pppd/ppp/ipcp_opt_ipaddr.c 
b/accel-pppd/ppp/ipcp_opt_ipaddr.c index e965f5c2..44dd3ddd 100644 --- a/accel-pppd/ppp/ipcp_opt_ipaddr.c +++ b/accel-pppd/ppp/ipcp_opt_ipaddr.c @@ -58,9 +58,9 @@ static void ipaddr_free(struct ppp_ipcp_t *ipcp, struct ipcp_option_t *opt) { struct ipaddr_option_t *ipaddr_opt = container_of(opt, typeof(*ipaddr_opt), opt); - if (ipcp->ppp->ipv4) { - ipdb_put_ipv4(ipcp->ppp, ipcp->ppp->ipv4); - ipcp->ppp->ipv4 = NULL; + if (ipcp->ppp->ses.ipv4) { + ipdb_put_ipv4(&ipcp->ppp->ses, ipcp->ppp->ses.ipv4); + ipcp->ppp->ses.ipv4 = NULL; } _free(ipaddr_opt); @@ -68,40 +68,40 @@ static void ipaddr_free(struct ppp_ipcp_t *ipcp, struct ipcp_option_t *opt) static int check_exists(struct ppp_t *self_ppp, in_addr_t addr) { - struct ppp_t *ppp; + struct ap_session *ses; int r = 0; - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { - if (!ppp->terminating && ppp->ipv4 && ppp->ipv4->peer_addr == addr && ppp != self_ppp) { - log_ppp_warn("ppp: requested IPv4 address already assigned to %s\n", ppp->ifname); + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + if (!ses->terminating && ses->ipv4 && ses->ipv4->peer_addr == addr && ses != &self_ppp->ses) { + log_ppp_warn("ppp: requested IPv4 address already assigned to %s\n", ses->ifname); r = 1; break; } } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); return r; } static int alloc_ip(struct ppp_t *ppp) { - ppp->ipv4 = ipdb_get_ipv4(ppp); - if (!ppp->ipv4) { + ppp->ses.ipv4 = ipdb_get_ipv4(&ppp->ses); + if (!ppp->ses.ipv4) { log_ppp_warn("ppp: no free IPv4 address\n"); return IPCP_OPT_CLOSE; } - if (iprange_tunnel_check(ppp->ipv4->peer_addr)) { + if (iprange_tunnel_check(ppp->ses.ipv4->peer_addr)) { log_ppp_warn("ppp:ipcp: to avoid kernel soft lockup requested IP cannot be assigned (%i.%i.%i.%i)\n", - ppp->ipv4->peer_addr&0xff, - (ppp->ipv4->peer_addr >> 8)&0xff, - (ppp->ipv4->peer_addr >> 16)&0xff, - (ppp->ipv4->peer_addr >> 24)&0xff); + 
ppp->ses.ipv4->peer_addr&0xff, + (ppp->ses.ipv4->peer_addr >> 8)&0xff, + (ppp->ses.ipv4->peer_addr >> 16)&0xff, + (ppp->ses.ipv4->peer_addr >> 24)&0xff); return IPCP_OPT_FAIL; } - if (conf_check_exists && check_exists(ppp, ppp->ipv4->peer_addr)) + if (conf_check_exists && check_exists(ppp, ppp->ses.ipv4->peer_addr)) return IPCP_OPT_FAIL; return 0; @@ -113,7 +113,7 @@ static int ipaddr_send_conf_req(struct ppp_ipcp_t *ipcp, struct ipcp_option_t *o struct ipcp_opt32_t *opt32 = (struct ipcp_opt32_t *)ptr; int r; - if (!ipcp->ppp->ipv4) { + if (!ipcp->ppp->ses.ipv4) { r = alloc_ip(ipcp->ppp); if (r) return r; @@ -121,7 +121,7 @@ static int ipaddr_send_conf_req(struct ppp_ipcp_t *ipcp, struct ipcp_option_t *o opt32->hdr.id = CI_ADDR; opt32->hdr.len = 6; - opt32->val = ipcp->ppp->ipv4->addr; + opt32->val = ipcp->ppp->ses.ipv4->addr; return 6; } @@ -131,7 +131,7 @@ static int ipaddr_send_conf_nak(struct ppp_ipcp_t *ipcp, struct ipcp_option_t *o struct ipcp_opt32_t *opt32 = (struct ipcp_opt32_t *)ptr; opt32->hdr.id = CI_ADDR; opt32->hdr.len = 6; - opt32->val = ipcp->ppp->ipv4->peer_addr; + opt32->val = ipcp->ppp->ses.ipv4->peer_addr; return 6; } @@ -141,7 +141,7 @@ static int ipaddr_recv_conf_req(struct ppp_ipcp_t *ipcp, struct ipcp_option_t *o struct ipcp_opt32_t *opt32 = (struct ipcp_opt32_t *)ptr; int r; - if (!ipcp->ppp->ipv4) { + if (!ipcp->ppp->ses.ipv4) { r = alloc_ip(ipcp->ppp); if (r) return r; @@ -150,7 +150,7 @@ static int ipaddr_recv_conf_req(struct ppp_ipcp_t *ipcp, struct ipcp_option_t *o if (opt32->hdr.len != 6) return IPCP_OPT_REJ; - if (ipcp->ppp->ipv4->peer_addr == opt32->val) { + if (ipcp->ppp->ses.ipv4->peer_addr == opt32->val) { ipcp->delay_ack = ccp_ipcp_started(ipcp->ppp); return IPCP_OPT_ACK; } @@ -166,8 +166,8 @@ static void ipaddr_print(void (*print)(const char *fmt,...),struct ipcp_option_t if (ptr) in.s_addr = opt32->val; - else if (ipaddr_opt->ppp->ipv4) - in.s_addr = ipaddr_opt->ppp->ipv4->addr; + else if (ipaddr_opt->ppp->ses.ipv4) + 
in.s_addr = ipaddr_opt->ppp->ses.ipv4->addr; print("<addr %s>",inet_ntoa(in)); } diff --git a/accel-pppd/ppp/ipv6cp_opt_intfid.c b/accel-pppd/ppp/ipv6cp_opt_intfid.c index 9a2ddee0..21f0b931 100644 --- a/accel-pppd/ppp/ipv6cp_opt_intfid.c +++ b/accel-pppd/ppp/ipv6cp_opt_intfid.c @@ -72,9 +72,9 @@ static void ipaddr_free(struct ppp_ipv6cp_t *ipv6cp, struct ipv6cp_option_t *opt { struct ipaddr_option_t *ipaddr_opt=container_of(opt,typeof(*ipaddr_opt),opt); - if (ipv6cp->ppp->ipv6) { - ipdb_put_ipv6(ipv6cp->ppp, ipv6cp->ppp->ipv6); - ipv6cp->ppp->ipv6 = NULL; + if (ipv6cp->ppp->ses.ipv6) { + ipdb_put_ipv6(&ipv6cp->ppp->ses, ipv6cp->ppp->ses.ipv6); + ipv6cp->ppp->ses.ipv6 = NULL; } _free(ipaddr_opt); @@ -82,24 +82,24 @@ static void ipaddr_free(struct ppp_ipv6cp_t *ipv6cp, struct ipv6cp_option_t *opt static int check_exists(struct ppp_t *self_ppp) { - struct ppp_t *ppp; + struct ap_session *ses; struct ipv6db_addr_t *a1, *a2; int r = 0; - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(ppp, &ppp_list, entry) { - if (ppp->terminating) + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + if (ses->terminating) continue; - if (!ppp->ipv6) + if (!ses->ipv6) continue; - if (ppp == self_ppp) + if (ses == &self_ppp->ses) continue; - list_for_each_entry(a1, &ppp->ipv6->addr_list, entry) { - list_for_each_entry(a2, &self_ppp->ipv6->addr_list, entry) { + list_for_each_entry(a1, &ses->ipv6->addr_list, entry) { + list_for_each_entry(a2, &ses->ipv6->addr_list, entry) { if (a1->addr.s6_addr32[0] == a2->addr.s6_addr32[0] && a1->addr.s6_addr32[1] == a2->addr.s6_addr32[1]) { - log_ppp_warn("ppp: requested IPv6 address already assigned to %s\n", ppp->ifname); + log_ppp_warn("ppp: requested IPv6 address already assigned to %s\n", ses->ifname); r = 1; goto out; } @@ -107,7 +107,7 @@ static int check_exists(struct ppp_t *self_ppp) } } out: - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); return r; } @@ -148,9 +148,9 @@ static 
uint64_t generate_peer_intf_id(struct ppp_t *ppp) case INTF_ID_CSID: break; case INTF_ID_IPV4: - if (ppp->ipv4) { + if (ppp->ses.ipv4) { for (i = 0; i < 4; i++) { - sprintf(str, "%i", (ppp->ipv4->peer_addr >> (i*8)) & 0xff); + sprintf(str, "%i", (ppp->ses.ipv4->peer_addr >> (i*8)) & 0xff); sscanf(str, "%x", &n); u.addr16[i] = htons(n); } @@ -163,14 +163,14 @@ static uint64_t generate_peer_intf_id(struct ppp_t *ppp) static int alloc_ip(struct ppp_t *ppp) { - ppp->ipv6 = ipdb_get_ipv6(ppp); - if (!ppp->ipv6) { + ppp->ses.ipv6 = ipdb_get_ipv6(&ppp->ses); + if (!ppp->ses.ipv6) { log_ppp_warn("ppp: no free IPv6 address\n"); return IPV6CP_OPT_CLOSE; } - if (!ppp->ipv6->intf_id) - ppp->ipv6->intf_id = generate_intf_id(ppp); + if (!ppp->ses.ipv6->intf_id) + ppp->ses.ipv6->intf_id = generate_intf_id(ppp); if (conf_check_exists && check_exists(ppp)) return IPV6CP_OPT_FAIL; @@ -184,7 +184,7 @@ static int ipaddr_send_conf_req(struct ppp_ipv6cp_t *ipv6cp, struct ipv6cp_optio struct ipv6cp_opt64_t *opt64 = (struct ipv6cp_opt64_t *)ptr; int r; - if (!ipv6cp->ppp->ipv6) { + if (!ipv6cp->ppp->ses.ipv6) { r = alloc_ip(ipv6cp->ppp); if (r) return r; @@ -192,7 +192,7 @@ static int ipaddr_send_conf_req(struct ppp_ipv6cp_t *ipv6cp, struct ipv6cp_optio opt64->hdr.id = CI_INTFID; opt64->hdr.len = 10; - opt64->val = ipv6cp->ppp->ipv6->intf_id; + opt64->val = ipv6cp->ppp->ses.ipv6->intf_id; return 10; } @@ -202,7 +202,7 @@ static int ipaddr_send_conf_nak(struct ppp_ipv6cp_t *ipv6cp, struct ipv6cp_optio struct ipv6cp_opt64_t *opt64 = (struct ipv6cp_opt64_t *)ptr; opt64->hdr.id = CI_INTFID; opt64->hdr.len = 10; - opt64->val = ipv6cp->ppp->ipv6->peer_intf_id; + opt64->val = ipv6cp->ppp->ses.ipv6->peer_intf_id; return 10; } @@ -215,21 +215,21 @@ static int ipaddr_recv_conf_req(struct ppp_ipv6cp_t *ipv6cp, struct ipv6cp_optio if (opt64->hdr.len != 10) return IPV6CP_OPT_REJ; - if (!ipv6cp->ppp->ipv6) { + if (!ipv6cp->ppp->ses.ipv6) { r = alloc_ip(ipv6cp->ppp); if (r) return r; } if 
(conf_accept_peer_intf_id && opt64->val) - ipv6cp->ppp->ipv6->peer_intf_id = opt64->val; - else if (!ipv6cp->ppp->ipv6->peer_intf_id) { - ipv6cp->ppp->ipv6->peer_intf_id = generate_peer_intf_id(ipv6cp->ppp); - if (!ipv6cp->ppp->ipv6->peer_intf_id) + ipv6cp->ppp->ses.ipv6->peer_intf_id = opt64->val; + else if (!ipv6cp->ppp->ses.ipv6->peer_intf_id) { + ipv6cp->ppp->ses.ipv6->peer_intf_id = generate_peer_intf_id(ipv6cp->ppp); + if (!ipv6cp->ppp->ses.ipv6->peer_intf_id) return IPV6CP_OPT_TERMACK; } - if (opt64->val && ipv6cp->ppp->ipv6->peer_intf_id == opt64->val && opt64->val != ipv6cp->ppp->ipv6->intf_id) { + if (opt64->val && ipv6cp->ppp->ses.ipv6->peer_intf_id == opt64->val && opt64->val != ipv6cp->ppp->ses.ipv6->intf_id) { ipv6cp->delay_ack = ccp_ipcp_started(ipv6cp->ppp); ipaddr_opt->started = 1; return IPV6CP_OPT_ACK; @@ -247,7 +247,7 @@ static void ipaddr_print(void (*print)(const char *fmt,...), struct ipv6cp_optio if (ptr) *(uint64_t *)(a.s6_addr + 8) = opt64->val; else - *(uint64_t *)(a.s6_addr + 8) = ipaddr_opt->ppp->ipv6->intf_id; + *(uint64_t *)(a.s6_addr + 8) = ipaddr_opt->ppp->ses.ipv6->intf_id; print("<addr %x:%x:%x:%x>", ntohs(a.s6_addr16[4]), ntohs(a.s6_addr16[5]), ntohs(a.s6_addr16[6]), ntohs(a.s6_addr16[7])); } diff --git a/accel-pppd/ppp/lcp_opt_mru.c b/accel-pppd/ppp/lcp_opt_mru.c index 577e0191..10dc54e7 100644 --- a/accel-pppd/ppp/lcp_opt_mru.c +++ b/accel-pppd/ppp/lcp_opt_mru.c @@ -53,10 +53,10 @@ static struct lcp_option_t *mru_init(struct ppp_lcp_t *lcp) { struct mru_option_t *mru_opt=_malloc(sizeof(*mru_opt)); memset(mru_opt, 0, sizeof(*mru_opt)); - mru_opt->mru = (conf_mru && conf_mru <= lcp->ppp->ctrl->max_mtu) ? conf_mru : lcp->ppp->ctrl->max_mtu; + mru_opt->mru = (conf_mru && conf_mru <= lcp->ppp->ses.ctrl->max_mtu) ? conf_mru : lcp->ppp->ses.ctrl->max_mtu; if (mru_opt->mru > conf_max_mtu) mru_opt->mru = conf_max_mtu; - mru_opt->mtu = (conf_mtu && conf_mtu <= lcp->ppp->ctrl->max_mtu) ? 
conf_mtu : lcp->ppp->ctrl->max_mtu; + mru_opt->mtu = (conf_mtu && conf_mtu <= lcp->ppp->ses.ctrl->max_mtu) ? conf_mtu : lcp->ppp->ses.ctrl->max_mtu; if (mru_opt->mtu > conf_max_mtu) mru_opt->mtu = conf_max_mtu; mru_opt->opt.id = CI_MRU; @@ -107,7 +107,7 @@ static int mru_recv_conf_req(struct ppp_lcp_t *lcp, struct lcp_option_t *opt, ui if (opt16->hdr.len != 4) return LCP_OPT_REJ; - if (ntohs(opt16->val) < conf_min_mtu || ntohs(opt16->val) > lcp->ppp->ctrl->max_mtu || ntohs(opt16->val) > conf_max_mtu) + if (ntohs(opt16->val) < conf_min_mtu || ntohs(opt16->val) > lcp->ppp->ses.ctrl->max_mtu || ntohs(opt16->val) > conf_max_mtu) return LCP_OPT_NAK; mru_opt->mtu = ntohs(opt16->val); @@ -121,7 +121,7 @@ static int mru_recv_conf_ack(struct ppp_lcp_t *lcp, struct lcp_option_t *opt, ui .ifr_mtu = mru_opt->mtu, }; - strcpy(ifr.ifr_name, lcp->ppp->ifname); + strcpy(ifr.ifr_name, lcp->ppp->ses.ifname); if (ioctl(lcp->ppp->unit_fd, PPPIOCSMRU, &mru_opt->mru)) log_ppp_error("lcp:mru: failed to set MRU: %s\n", strerror(errno)); diff --git a/accel-pppd/ppp/ppp.c b/accel-pppd/ppp/ppp.c index 814240cd..9a006676 100644 --- a/accel-pppd/ppp/ppp.c +++ b/accel-pppd/ppp/ppp.c @@ -16,6 +16,7 @@ #include "triton.h" +#include "ap_session.h" #include "events.h" #include "ppp.h" #include "ppp_fsm.h" @@ -27,30 +28,13 @@ #include "memdebug.h" int __export conf_ppp_verbose; -int conf_sid_ucase; int conf_single_session = -1; int conf_unit_cache = 0; -pthread_rwlock_t __export ppp_lock = PTHREAD_RWLOCK_INITIALIZER; -__export LIST_HEAD(ppp_list); - -int __export sock_fd; -int __export sock6_fd; -int __export urandom_fd; - -int __export ppp_shutdown; - static mempool_t buf_pool; static LIST_HEAD(layers); -static unsigned long long seq; -#if __WORDSIZE == 32 -static spinlock_t seq_lock; -#endif - -__export struct ppp_stat_t ppp_stat; - struct layer_node_t { struct list_head entry; @@ -78,37 +62,19 @@ static void start_first_layer(struct ppp_t *); void __export ppp_init(struct ppp_t *ppp) { - 
memset(ppp,0,sizeof(*ppp)); + memset(ppp, 0, sizeof(*ppp)); INIT_LIST_HEAD(&ppp->layers); INIT_LIST_HEAD(&ppp->chan_handlers); INIT_LIST_HEAD(&ppp->unit_handlers); - INIT_LIST_HEAD(&ppp->pd_list); ppp->fd = -1; ppp->chan_fd = -1; ppp->unit_fd = -1; -} -static void generate_sessionid(struct ppp_t *ppp) -{ - unsigned long long sid; - -#if __WORDSIZE == 32 - spin_lock(&seq_lock); - sid = ++seq; - spin_unlock(&seq_lock); -#else - sid = __sync_add_and_fetch(&seq, 1); -#endif - - if (conf_sid_ucase) - sprintf(ppp->sessionid, "%016llX", sid); - else - sprintf(ppp->sessionid, "%016llx", sid); + ap_session_init(&ppp->ses); } int __export establish_ppp(struct ppp_t *ppp) { - struct ifreq ifr; struct pppunit_cache *uc = NULL; /* Open an instance of /dev/ppp and connect the channel to it */ @@ -142,7 +108,7 @@ int __export establish_ppp(struct ppp_t *ppp) if (uc) { ppp->unit_fd = uc->fd; - ppp->unit_idx = uc->unit_idx; + ppp->ses.unit_idx = uc->unit_idx; mempool_free(uc); } else { ppp->unit_fd = open("/dev/ppp", O_RDWR); @@ -153,8 +119,8 @@ int __export establish_ppp(struct ppp_t *ppp) fcntl(ppp->unit_fd, F_SETFD, fcntl(ppp->unit_fd, F_GETFD) | FD_CLOEXEC); - ppp->unit_idx = -1; - if (ioctl(ppp->unit_fd, PPPIOCNEWUNIT, &ppp->unit_idx) < 0) { + ppp->ses.unit_idx = -1; + if (ioctl(ppp->unit_fd, PPPIOCNEWUNIT, &ppp->ses.unit_idx) < 0) { log_ppp_error("ioctl(PPPIOCNEWUNIT): %s\n", strerror(errno)); goto exit_close_unit; } @@ -165,7 +131,7 @@ int __export establish_ppp(struct ppp_t *ppp) } } - if (ioctl(ppp->chan_fd, PPPIOCCONNECT, &ppp->unit_idx) < 0) { + if (ioctl(ppp->chan_fd, PPPIOCCONNECT, &ppp->ses.unit_idx) < 0) { log_ppp_error("ioctl(PPPIOCCONNECT): %s\n", strerror(errno)); goto exit_close_unit; } @@ -175,20 +141,9 @@ int __export establish_ppp(struct ppp_t *ppp) goto exit_close_unit; } - ppp->start_time = time(NULL); - generate_sessionid(ppp); - sprintf(ppp->ifname, "ppp%i", ppp->unit_idx); - - memset(&ifr, 0, sizeof(ifr)); - strcpy(ifr.ifr_name, ppp->ifname); - - if 
(ioctl(sock_fd, SIOCGIFINDEX, &ifr)) { - log_ppp_error("ppp: ioctl(SIOCGIFINDEX): %s\n", strerror(errno)); - goto exit_close_unit; - } - ppp->ifindex = ifr.ifr_ifindex; - - log_ppp_info1("connect: %s <--> %s(%s)\n", ppp->ifname, ppp->ctrl->name, ppp->chan_name); + sprintf(ppp->ses.ifname, "ppp%i", ppp->ses.unit_idx); + + log_ppp_info1("connect: %s <--> %s(%s)\n", ppp->ses.ifname, ppp->ses.ctrl->name, ppp->ses.chan_name); init_layers(ppp); @@ -203,22 +158,15 @@ int __export establish_ppp(struct ppp_t *ppp) ppp->chan_hnd.read = ppp_chan_read; ppp->unit_hnd.fd = ppp->unit_fd; ppp->unit_hnd.read = ppp_unit_read; - triton_md_register_handler(ppp->ctrl->ctx, &ppp->chan_hnd); - triton_md_register_handler(ppp->ctrl->ctx, &ppp->unit_hnd); + triton_md_register_handler(ppp->ses.ctrl->ctx, &ppp->chan_hnd); + triton_md_register_handler(ppp->ses.ctrl->ctx, &ppp->unit_hnd); triton_md_enable_handler(&ppp->chan_hnd, MD_MODE_READ); triton_md_enable_handler(&ppp->unit_hnd, MD_MODE_READ); - ppp->state = PPP_STATE_STARTING; - __sync_add_and_fetch(&ppp_stat.starting, 1); - - pthread_rwlock_wrlock(&ppp_lock); - list_add_tail(&ppp->entry, &ppp_list); - pthread_rwlock_unlock(&ppp_lock); - log_ppp_debug("ppp established\n"); - triton_event_fire(EV_PPP_STARTING, ppp); + ap_session_starting(&ppp->ses); start_first_layer(ppp); @@ -239,23 +187,7 @@ static void destablish_ppp(struct ppp_t *ppp) { struct pppunit_cache *uc; - triton_event_fire(EV_PPP_PRE_FINISHED, ppp); - - pthread_rwlock_wrlock(&ppp_lock); - list_del(&ppp->entry); - pthread_rwlock_unlock(&ppp_lock); - - switch (ppp->state) { - case PPP_STATE_ACTIVE: - __sync_sub_and_fetch(&ppp_stat.active, 1); - break; - case PPP_STATE_STARTING: - __sync_sub_and_fetch(&ppp_stat.starting, 1); - break; - case PPP_STATE_FINISHING: - __sync_sub_and_fetch(&ppp_stat.finishing, 1); - break; - } + triton_event_fire(EV_SES_PRE_FINISHED, ppp); triton_md_unregister_handler(&ppp->chan_hnd); triton_md_unregister_handler(&ppp->unit_hnd); @@ -263,7 +195,7 @@ 
static void destablish_ppp(struct ppp_t *ppp) if (uc_size < conf_unit_cache) { uc = mempool_alloc(uc_pool); uc->fd = ppp->unit_fd; - uc->unit_idx = ppp->unit_idx; + uc->unit_idx = ppp->ses.unit_idx; pthread_mutex_lock(&uc_lock); list_add_tail(&uc->entry, &uc_list); @@ -281,32 +213,11 @@ static void destablish_ppp(struct ppp_t *ppp) _free_layers(ppp); - ppp->terminated = 1; - log_ppp_debug("ppp destablished\n"); - triton_event_fire(EV_PPP_FINISHED, ppp); - ppp->ctrl->finished(ppp); - mempool_free(ppp->buf); - if (ppp->username) { - _free(ppp->username); - ppp->username = NULL; - } - - if (ppp->ipv4_pool_name) { - _free(ppp->ipv4_pool_name); - ppp->ipv4_pool_name = NULL; - } - - if (ppp->ipv6_pool_name) { - _free(ppp->ipv6_pool_name); - ppp->ipv6_pool_name = NULL; - } - - if (ppp_shutdown && !ppp_stat.starting && !ppp_stat.active && !ppp_stat.finishing) - kill(getpid(), SIGTERM); + ap_session_finished(&ppp->ses); } /*void print_buf(uint8_t *buf, int size) @@ -362,7 +273,7 @@ cont: //printf("ppp_chan_read: "); //print_buf(ppp->buf,ppp->buf_size); if (ppp->buf_size == 0) { - ppp_terminate(ppp, TERM_NAS_ERROR, 1); + ap_session_terminate(&ppp->ses, TERM_NAS_ERROR, 1); return 1; } @@ -376,7 +287,7 @@ cont: if (ppp_h->proto == proto) { ppp_h->recv(ppp_h); if (ppp->chan_fd == -1) { - //ppp->ctrl->finished(ppp); + //ppp->ses.ctrl->finished(ppp); return 1; } goto cont; @@ -410,7 +321,7 @@ cont: //print_buf(ppp->buf,ppp->buf_size); /*if (ppp->buf_size == 0) { - ppp_terminate(ppp, TERM_NAS_ERROR, 1); + ap_session_terminate(ppp, TERM_NAS_ERROR, 1); return 1; }*/ @@ -424,7 +335,7 @@ cont: if (ppp_h->proto == proto) { ppp_h->recv(ppp_h); if (ppp->unit_fd == -1) { - //ppp->ctrl->finished(ppp); + //ppp->ses.ctrl->finished(ppp); return 1; } goto cont; @@ -472,18 +383,15 @@ static void __ppp_layer_started(struct ppp_t *ppp, struct ppp_layer_data_t *d) if (n->entry.next == &ppp->layers) { - if (ppp->state == PPP_STATE_STARTING) { - ppp->state = PPP_STATE_ACTIVE; - 
__sync_sub_and_fetch(&ppp_stat.starting, 1); - __sync_add_and_fetch(&ppp_stat.active, 1); - ppp_ifup(ppp); + if (ppp->ses.state == AP_STATE_STARTING) { + ap_session_activate(&ppp->ses); } } else { n = list_entry(n->entry.next, typeof(*n), entry); list_for_each_entry(d, &n->items, entry) { d->starting = 1; if (d->layer->start(d)) { - ppp_terminate(ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&ppp->ses, TERM_NAS_ERROR, 0); return; } } @@ -527,41 +435,13 @@ void __export ppp_layer_finished(struct ppp_t *ppp, struct ppp_layer_data_t *d) destablish_ppp(ppp); } -void __export ppp_terminate(struct ppp_t *ppp, int cause, int hard) +void __export ppp_terminate(struct ap_session *ses, int hard) { + struct ppp_t *ppp = container_of(ses, typeof(*ppp), ses); struct layer_node_t *n; struct ppp_layer_data_t *d; int s = 0; - if (ppp->terminated) - return; - - if (!ppp->stop_time) - time(&ppp->stop_time); - - if (!ppp->terminate_cause) - ppp->terminate_cause = cause; - - if (ppp->terminating) { - if (hard) - destablish_ppp(ppp); - return; - } - - ppp->terminating = 1; - if (ppp->state == PPP_STATE_ACTIVE) - __sync_sub_and_fetch(&ppp_stat.active, 1); - else - __sync_sub_and_fetch(&ppp_stat.starting, 1); - __sync_add_and_fetch(&ppp_stat.finishing, 1); - ppp->state = PPP_STATE_FINISHING; - - log_ppp_debug("ppp_terminate\n"); - - ppp_ifdown(ppp); - - triton_event_fire(EV_PPP_FINISHING, ppp); - if (hard) { destablish_ppp(ppp); return; @@ -577,6 +457,7 @@ void __export ppp_terminate(struct ppp_t *ppp, int cause, int hard) } if (s) return; + destablish_ppp(ppp); } @@ -689,7 +570,7 @@ static void start_first_layer(struct ppp_t *ppp) list_for_each_entry(d, &n->items, entry) { d->starting = 1; if (d->layer->start(d)) { - ppp_terminate(ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&ppp->ses, TERM_NAS_ERROR, 0); return; } } @@ -710,44 +591,14 @@ struct ppp_layer_data_t *ppp_find_layer_data(struct ppp_t *ppp, struct ppp_layer return NULL; } -void ppp_shutdown_soft(void) -{ - ppp_shutdown = 
1; - - if (!ppp_stat.starting && !ppp_stat.active && !ppp_stat.finishing) - kill(getpid(), SIGTERM); -} - -static void save_seq(void) -{ - FILE *f; - char *opt = conf_get_opt("ppp", "seq-file"); - if (!opt) - opt = "/var/run/accel-ppp/seq"; - - f = fopen(opt, "w"); - if (f) { - fprintf(f, "%llu", seq); - fclose(f); - } -} - static void load_config(void) { - char *opt; + const char *opt; opt = conf_get_opt("ppp", "verbose"); if (opt && atoi(opt) > 0) conf_ppp_verbose = 1; - opt = conf_get_opt("ppp", "sid-case"); - if (opt) { - if (!strcmp(opt, "upper")) - conf_sid_ucase = 1; - else if (strcmp(opt, "lower")) - log_emerg("ppp: sid-case: invalid format\n"); - } - opt = conf_get_opt("ppp", "single-session"); if (opt) { if (!strcmp(opt, "deny")) @@ -766,49 +617,11 @@ static void load_config(void) static void init(void) { - char *opt; - FILE *f; - buf_pool = mempool_create(PPP_MRU); uc_pool = mempool_create(sizeof(struct pppunit_cache)); - sock_fd = socket(AF_INET, SOCK_DGRAM, 0); - if (sock_fd < 0) { - perror("socket"); - _exit(EXIT_FAILURE); - } - - fcntl(sock_fd, F_SETFD, fcntl(sock_fd, F_GETFD) | FD_CLOEXEC); - - sock6_fd = socket(AF_INET6, SOCK_DGRAM, 0); - if (sock6_fd < 0) - log_warn("ppp: kernel doesn't support ipv6\n"); - else - fcntl(sock6_fd, F_SETFD, fcntl(sock6_fd, F_GETFD) | FD_CLOEXEC); - - urandom_fd = open("/dev/urandom", O_RDONLY); - if (urandom_fd < 0) { - log_emerg("failed to open /dev/urandom: %s\n", strerror(errno)); - return; - } - - fcntl(urandom_fd, F_SETFD, fcntl(urandom_fd, F_GETFD) | FD_CLOEXEC); - - opt = conf_get_opt("ppp", "seq-file"); - if (!opt) - opt = "/var/run/accel-ppp/seq"; - - f = fopen(opt, "r"); - if (f) { - fscanf(f, "%llu", &seq); - fclose(f); - } else - seq = (unsigned long long)random() * (unsigned long long)random(); - load_config(); triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); - - atexit(save_seq); } DEFINE_INIT(2, init); diff --git a/accel-pppd/ppp/ppp.h b/accel-pppd/ppp/ppp.h index 
e37c75d3..f5e0922a 100644 --- a/accel-pppd/ppp/ppp.h +++ b/accel-pppd/ppp/ppp.h @@ -8,6 +8,7 @@ #include "triton.h" #include "list.h" +#include "ap_session.h" /* * Packet header = Code, id, length. @@ -15,7 +16,6 @@ #define PPP_HEADERLEN 4 #define PPP_MTU 1500 - /* * Protocol field values. */ @@ -39,60 +39,15 @@ #define PPP_CBCP 0xc029 /* Callback Control Protocol */ #define PPP_EAP 0xc227 /* Extensible Authentication Protocol */ -#define PPP_SESSIONID_LEN 16 -#define PPP_IFNAME_LEN 10 - -#define PPP_STATE_STARTING 1 -#define PPP_STATE_ACTIVE 2 -#define PPP_STATE_FINISHING 3 - -#define TERM_USER_REQUEST 1 -#define TERM_SESSION_TIMEOUT 2 -#define TERM_ADMIN_RESET 3 -#define TERM_USER_ERROR 4 -#define TERM_NAS_ERROR 5 -#define TERM_NAS_REQUEST 6 -#define TERM_NAS_REBOOT 7 -#define TERM_AUTH_ERROR 8 -#define TERM_LOST_CARRIER 9 - -#define CTRL_TYPE_PPTP 1 -#define CTRL_TYPE_L2TP 2 -#define CTRL_TYPE_PPPOE 3 - -#define MPPE_UNSET -2 -#define MPPE_ALLOW -1 -#define MPPE_DENY 0 -#define MPPE_PREFER 1 -#define MPPE_REQUIRE 2 - struct ppp_t; struct ipv4db_item_t; struct ipv6db_item_t; -struct ppp_ctrl_t -{ - struct triton_context_t *ctx; - int type; - const char *name; - int max_mtu; - int mppe; - char *calling_station_id; - char *called_station_id; - void (*started)(struct ppp_t*); - void (*finished)(struct ppp_t*); -}; - -struct ppp_pd_t -{ - struct list_head entry; - void *key; -}; - struct ppp_t { - struct list_head entry; + struct ap_session ses; + struct triton_md_handler_t chan_hnd; struct triton_md_handler_t unit_hnd; int fd; @@ -100,27 +55,6 @@ struct ppp_t int unit_fd; int chan_idx; - int unit_idx; - - int state; - char *chan_name; - char ifname[PPP_IFNAME_LEN]; - int ifindex; - char sessionid[PPP_SESSIONID_LEN+1]; - time_t start_time; - time_t stop_time; - char *username; - struct ipv4db_item_t *ipv4; - struct ipv6db_item_t *ipv6; - char *ipv4_pool_name; - char *ipv6_pool_name; - const char *comp; - - struct ppp_ctrl_t *ctrl; - - int terminating:1; - int 
terminated:1; - int terminate_cause; void *buf; int buf_size; @@ -130,9 +64,8 @@ struct ppp_t struct list_head layers; + const char *comp; struct ppp_lcp_t *lcp; - - struct list_head pd_list; }; struct ppp_layer_t; @@ -166,13 +99,6 @@ struct ppp_handler_t void (*recv_proto_rej)(struct ppp_handler_t *h); }; -struct ppp_stat_t -{ - unsigned int active; - unsigned int starting; - unsigned int finishing; -}; - struct ppp_t *alloc_ppp(void); void ppp_init(struct ppp_t *ppp); int establish_ppp(struct ppp_t *ppp); @@ -189,7 +115,7 @@ void ppp_layer_started(struct ppp_t *ppp,struct ppp_layer_data_t*); void ppp_layer_finished(struct ppp_t *ppp,struct ppp_layer_data_t*); void ppp_layer_passive(struct ppp_t *ppp,struct ppp_layer_data_t*); -void ppp_terminate(struct ppp_t *ppp, int hard, int cause); +void ppp_terminate(struct ap_session *ses, int hard); void ppp_register_chan_handler(struct ppp_t *, struct ppp_handler_t *); void ppp_register_unit_handler(struct ppp_t * ,struct ppp_handler_t *); @@ -199,20 +125,9 @@ int ppp_register_layer(const char *name, struct ppp_layer_t *); void ppp_unregister_layer(struct ppp_layer_t *); struct ppp_layer_data_t *ppp_find_layer_data(struct ppp_t *, struct ppp_layer_t *); -extern int ppp_shutdown; -void ppp_shutdown_soft(void); - int ppp_ipv6_nd_start(struct ppp_t *ppp, uint64_t intf_id); extern int conf_ppp_verbose; extern int conf_single_session; -extern pthread_rwlock_t ppp_lock; -extern struct list_head ppp_list; - -extern struct ppp_stat_t ppp_stat; - -extern int sock_fd; // internet socket for ioctls -extern int sock6_fd; // internet socket for ioctls -extern int urandom_fd; #endif diff --git a/accel-pppd/ppp/ppp_auth.c b/accel-pppd/ppp/ppp_auth.c index 7603d8cf..805bf741 100644 --- a/accel-pppd/ppp/ppp_auth.c +++ b/accel-pppd/ppp/ppp_auth.c @@ -312,60 +312,60 @@ static void auth_layer_free(struct ppp_layer_data_t *ld) log_ppp_debug("auth_layer_free\n"); - triton_cancel_call(ad->ppp->ctrl->ctx, (triton_event_func)__ppp_auth_started); 
+ triton_cancel_call(ad->ppp->ses.ctrl->ctx, (triton_event_func)__ppp_auth_started); _free(ad); } -static void ppp_terminate_sec(struct ppp_t *ppp) +static void __terminate_sec(struct ap_session *ses) { - ppp_terminate(ppp, TERM_NAS_REQUEST, 0); + ap_session_terminate(ses, TERM_NAS_REQUEST, 0); } static void __ppp_auth_started(struct ppp_t *ppp) { struct auth_layer_data_t *ad = container_of(ppp_find_layer_data(ppp, &auth_layer), typeof(*ad), ld); - if (ppp->terminating) + if (ppp->ses.terminating) return; log_ppp_debug("auth_layer_started\n"); ppp_layer_started(ppp, &ad->ld); - log_ppp_info1("%s: authentication successed\n", ppp->username); - triton_event_fire(EV_PPP_AUTHORIZED, ppp); + log_ppp_info1("%s: authentication successed\n", ppp->ses.username); + triton_event_fire(EV_SES_AUTHORIZED, ppp); } int __export ppp_auth_successed(struct ppp_t *ppp, char *username) { - struct ppp_t *p; + struct ap_session *ses; struct auth_layer_data_t *ad = container_of(ppp_find_layer_data(ppp, &auth_layer), typeof(*ad), ld); if (conf_single_session >= 0) { - pthread_rwlock_rdlock(&ppp_lock); - list_for_each_entry(p, &ppp_list, entry) { - if (p->username && !strcmp(p->username, username)) { + pthread_rwlock_rdlock(&ses_lock); + list_for_each_entry(ses, &ses_list, entry) { + if (ses->username && !strcmp(ses->username, username)) { if (conf_single_session == 0) { - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); log_ppp_info1("%s: second session denied\n", username); return -1; } else { if (conf_single_session == 1) { - ppp_ifdown(p); - triton_context_call(p->ctrl->ctx, (triton_event_func)ppp_terminate_sec, p); + ap_session_ifdown(ses); + triton_context_call(ses->ctrl->ctx, (triton_event_func)__terminate_sec, ses); } } } } - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_unlock(&ses_lock); } - pthread_rwlock_wrlock(&ppp_lock); - ppp->username = username; - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_wrlock(&ses_lock); + ppp->ses.username = username; 
+ pthread_rwlock_unlock(&ses_lock); - triton_context_call(ppp->ctrl->ctx, (triton_event_func)__ppp_auth_started, ppp); + triton_context_call(ppp->ses.ctrl->ctx, (triton_event_func)__ppp_auth_started, ppp); return 0; } @@ -373,16 +373,16 @@ int __export ppp_auth_successed(struct ppp_t *ppp, char *username) void __export ppp_auth_failed(struct ppp_t *ppp, char *username) { if (username) { - pthread_rwlock_wrlock(&ppp_lock); - if (!ppp->username) - ppp->username = _strdup(username); - pthread_rwlock_unlock(&ppp_lock); + pthread_rwlock_wrlock(&ses_lock); + if (!ppp->ses.username) + ppp->ses.username = _strdup(username); + pthread_rwlock_unlock(&ses_lock); log_ppp_info1("%s: authentication failed\n", username); log_info1("%s: authentication failed\n", username); - triton_event_fire(EV_PPP_AUTH_FAILED, ppp); + triton_event_fire(EV_SES_AUTH_FAILED, ppp); } else log_ppp_info1("authentication failed\n"); - ppp_terminate(ppp, TERM_AUTH_ERROR, 0); + ap_session_terminate(&ppp->ses, TERM_AUTH_ERROR, 0); } int __export ppp_auth_register_handler(struct ppp_auth_handler_t *h) diff --git a/accel-pppd/ppp/ppp_auth.h b/accel-pppd/ppp/ppp_auth.h index 6f86d260..b6a7b145 100644 --- a/accel-pppd/ppp/ppp_auth.h +++ b/accel-pppd/ppp/ppp_auth.h @@ -31,7 +31,7 @@ int ppp_auth_register_handler(struct ppp_auth_handler_t*); int ppp_auth_successed(struct ppp_t *ppp, char *username); void ppp_auth_failed(struct ppp_t *ppp, char *username); -int ppp_auth_restart(struct ppp_t *ppp); +int ppp_auth_restart(struct ppp_t *ppp); #endif diff --git a/accel-pppd/ppp/ppp_ccp.c b/accel-pppd/ppp/ppp_ccp.c index 7c4933a4..058e7b38 100644 --- a/accel-pppd/ppp/ppp_ccp.c +++ b/accel-pppd/ppp/ppp_ccp.c @@ -180,8 +180,9 @@ void ccp_layer_free(struct ppp_layer_data_t *ld) struct ppp_ccp_t *ccp = container_of(ld, typeof(*ccp), ld); log_ppp_debug("ccp_layer_free\n"); - - ccp_set_flags(ccp->ppp->unit_fd, 0, 0); + + if (ccp->ppp->unit_fd != -1) + ccp_set_flags(ccp->ppp->unit_fd, 0, 0); ppp_unregister_handler(ccp->ppp, 
&ccp->hnd); ccp_options_free(ccp); @@ -198,7 +199,7 @@ static void ccp_layer_up(struct ppp_fsm_t *fsm) log_ppp_debug("ccp_layer_started\n"); ccp->started = 1; if (ccp_set_flags(ccp->ppp->unit_fd, 1, 1)) { - ppp_terminate(ccp->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&ccp->ppp->ses, TERM_NAS_ERROR, 0); return; } ppp_layer_started(ccp->ppp, &ccp->ld); @@ -213,8 +214,8 @@ static void ccp_layer_finished(struct ppp_fsm_t *fsm) if (!ccp->started) ppp_layer_passive(ccp->ppp, &ccp->ld); - else if (!ccp->ppp->terminating) - ppp_terminate(ccp->ppp, TERM_USER_ERROR, 0); + else if (!ccp->ppp->ses.terminating) + ap_session_terminate(&ccp->ppp->ses, TERM_USER_ERROR, 0); fsm->fsm_state = FSM_Closed; } @@ -633,10 +634,10 @@ static void ccp_recv(struct ppp_handler_t*h) struct ppp_ccp_t *ccp = container_of(h, typeof(*ccp), hnd); int r; - if (!ccp->starting || ccp->fsm.fsm_state == FSM_Closed || ccp->ppp->terminating || ccp->ppp->state == PPP_STATE_ACTIVE) { + if (!ccp->starting || ccp->fsm.fsm_state == FSM_Closed || ccp->ppp->ses.terminating || ccp->ppp->ses.state == AP_STATE_ACTIVE) { if (conf_ppp_verbose) log_ppp_warn("CCP: discarding packet\n"); - if (ccp->fsm.fsm_state == FSM_Closed || !conf_ccp || ccp->ppp->state == PPP_STATE_ACTIVE) + if (ccp->fsm.fsm_state == FSM_Closed || !conf_ccp || ccp->ppp->ses.state == AP_STATE_ACTIVE) lcp_send_proto_rej(ccp->ppp, PPP_CCP); return; } @@ -686,11 +687,11 @@ static void ccp_recv(struct ppp_handler_t*h) ccp_free_conf_req(ccp); if (r == CCP_OPT_FAIL) - ppp_terminate(ccp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ccp->ppp->ses, TERM_USER_ERROR, 0); break; case CONFACK: if (ccp_recv_conf_ack(ccp, (uint8_t*)(hdr + 1), ntohs(hdr->len) - PPP_HDRLEN)) - ppp_terminate(ccp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ccp->ppp->ses, TERM_USER_ERROR, 0); else ppp_fsm_recv_conf_ack(&ccp->fsm); break; @@ -700,7 +701,7 @@ static void ccp_recv(struct ppp_handler_t*h) break; case CONFREJ: if (ccp_recv_conf_rej(ccp, (uint8_t*)(hdr + 
1),ntohs(hdr->len) - PPP_HDRLEN)) - ppp_terminate(ccp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ccp->ppp->ses, TERM_USER_ERROR, 0); else ppp_fsm_recv_conf_rej(&ccp->fsm); break; @@ -731,8 +732,8 @@ static void ccp_recv_proto_rej(struct ppp_handler_t *h) { struct ppp_ccp_t *ccp = container_of(h, typeof(*ccp), hnd); - if (!ccp->ld.passive) { - ppp_terminate(ccp->ppp, TERM_USER_ERROR, 0); + if (!ccp->ld.optional) { + ap_session_terminate(&ccp->ppp->ses, TERM_USER_ERROR, 0); return; } diff --git a/accel-pppd/ppp/ppp_fsm.c b/accel-pppd/ppp/ppp_fsm.c index 27021d18..babaf667 100644 --- a/accel-pppd/ppp/ppp_fsm.c +++ b/accel-pppd/ppp/ppp_fsm.c @@ -505,14 +505,14 @@ static void init_req_counter(struct ppp_fsm_t *layer,int timeout) layer->restart_counter = timeout; if (!layer->restart_timer.tpd) - triton_timer_add(layer->ppp->ctrl->ctx, &layer->restart_timer, 0); + triton_timer_add(layer->ppp->ses.ctrl->ctx, &layer->restart_timer, 0); } static void zero_req_counter(struct ppp_fsm_t *layer) { layer->restart_counter=0; if (!layer->restart_timer.tpd) - triton_timer_add(layer->ppp->ctrl->ctx, &layer->restart_timer, 0); + triton_timer_add(layer->ppp->ses.ctrl->ctx, &layer->restart_timer, 0); } static void restart_timer_func(struct triton_timer_t *t) diff --git a/accel-pppd/ppp/ppp_ifcfg.c b/accel-pppd/ppp/ppp_ifcfg.c index f916251d..bcad3f4b 100644 --- a/accel-pppd/ppp/ppp_ifcfg.c +++ b/accel-pppd/ppp/ppp_ifcfg.c @@ -59,52 +59,52 @@ void ppp_ifup(struct ppp_t *ppp) struct npioctl np; struct sockaddr_in addr; - triton_event_fire(EV_PPP_ACCT_START, ppp); + triton_event_fire(EV_SES_ACCT_START, ppp); if (ppp->stop_time) return; - triton_event_fire(EV_PPP_PRE_UP, ppp); + triton_event_fire(EV_SES_PRE_UP, ppp); if (ppp->stop_time) return; memset(&ifr, 0, sizeof(ifr)); strcpy(ifr.ifr_name, ppp->ifname); - if (ppp->ipv4) { + if (ppp->ses.ipv4) { memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; - addr.sin_addr.s_addr = ppp->ipv4->addr; + addr.sin_addr.s_addr = 
ppp->ses.ipv4->addr; memcpy(&ifr.ifr_addr,&addr,sizeof(addr)); if (ioctl(sock_fd, SIOCSIFADDR, &ifr)) log_ppp_error("ppp: failed to set IPv4 address: %s\n", strerror(errno)); - addr.sin_addr.s_addr = ppp->ipv4->peer_addr; + addr.sin_addr.s_addr = ppp->ses.ipv4->peer_addr; memcpy(&ifr.ifr_dstaddr,&addr,sizeof(addr)); if (ioctl(sock_fd, SIOCSIFDSTADDR, &ifr)) log_ppp_error("ppp: failed to set peer IPv4 address: %s\n", strerror(errno)); } - if (ppp->ipv6) { + if (ppp->ses.ipv6) { devconf(ppp, "accept_ra", "0"); devconf(ppp, "autoconf", "0"); devconf(ppp, "forwarding", "1"); memset(&ifr6, 0, sizeof(ifr6)); ifr6.ifr6_addr.s6_addr32[0] = htons(0xfe80); - *(uint64_t *)(ifr6.ifr6_addr.s6_addr + 8) = ppp->ipv6->intf_id; + *(uint64_t *)(ifr6.ifr6_addr.s6_addr + 8) = ppp->ses.ipv6->intf_id; ifr6.ifr6_prefixlen = 64; ifr6.ifr6_ifindex = ppp->ifindex; if (ioctl(sock6_fd, SIOCSIFADDR, &ifr6)) log_ppp_error("ppp: faild to set LL IPv6 address: %s\n", strerror(errno)); - list_for_each_entry(a, &ppp->ipv6->addr_list, entry) { + list_for_each_entry(a, &ppp->ses.ipv6->addr_list, entry) { if (a->prefix_len == 128) continue; - build_addr(a, ppp->ipv6->intf_id, &ifr6.ifr6_addr); + build_addr(a, ppp->ses.ipv6->intf_id, &ifr6.ifr6_addr); ifr6.ifr6_prefixlen = a->prefix_len; if (ioctl(sock6_fd, SIOCSIFADDR, &ifr6)) @@ -120,7 +120,7 @@ void ppp_ifup(struct ppp_t *ppp) if (ioctl(sock_fd, SIOCSIFFLAGS, &ifr)) log_ppp_error("ppp: failed to set interface flags: %s\n", strerror(errno)); - if (ppp->ipv4) { + if (ppp->ses.ipv4) { np.protocol = PPP_IP; np.mode = NPMODE_PASS; @@ -128,7 +128,7 @@ void ppp_ifup(struct ppp_t *ppp) log_ppp_error("ppp: failed to set NP (IPv4) mode: %s\n", strerror(errno)); } - if (ppp->ipv6) { + if (ppp->ses.ipv6) { np.protocol = PPP_IPV6; np.mode = NPMODE_PASS; @@ -136,9 +136,9 @@ void ppp_ifup(struct ppp_t *ppp) log_ppp_error("ppp: failed to set NP (IPv6) mode: %s\n", strerror(errno)); } - ppp->ctrl->started(ppp); + ppp->ses.ctrl->started(ppp); - 
triton_event_fire(EV_PPP_STARTED, ppp); + triton_event_fire(EV_SES_STARTED, ppp); } void __export ppp_ifdown(struct ppp_t *ppp) @@ -152,27 +152,27 @@ void __export ppp_ifdown(struct ppp_t *ppp) strcpy(ifr.ifr_name, ppp->ifname); ioctl(sock_fd, SIOCSIFFLAGS, &ifr); - if (ppp->ipv4) { + if (ppp->ses.ipv4) { memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; memcpy(&ifr.ifr_addr,&addr,sizeof(addr)); ioctl(sock_fd, SIOCSIFADDR, &ifr); } - if (ppp->ipv6) { + if (ppp->ses.ipv6) { memset(&ifr6, 0, sizeof(ifr6)); ifr6.ifr6_addr.s6_addr32[0] = htons(0xfe80); - *(uint64_t *)(ifr6.ifr6_addr.s6_addr + 8) = ppp->ipv6->intf_id; + *(uint64_t *)(ifr6.ifr6_addr.s6_addr + 8) = ppp->ses.ipv6->intf_id; ifr6.ifr6_prefixlen = 64; ifr6.ifr6_ifindex = ppp->ifindex; ioctl(sock6_fd, SIOCDIFADDR, &ifr6); - list_for_each_entry(a, &ppp->ipv6->addr_list, entry) { + list_for_each_entry(a, &ppp->ses.ipv6->addr_list, entry) { if (a->prefix_len == 128) continue; - build_addr(a, ppp->ipv6->intf_id, &ifr6.ifr6_addr); + build_addr(a, ppp->ses.ipv6->intf_id, &ifr6.ifr6_addr); ifr6.ifr6_prefixlen = a->prefix_len; ioctl(sock6_fd, SIOCDIFADDR, &ifr6); diff --git a/accel-pppd/ppp/ppp_ipcp.c b/accel-pppd/ppp/ppp_ipcp.c index 9ec92d49..c05e20b1 100644 --- a/accel-pppd/ppp/ppp_ipcp.c +++ b/accel-pppd/ppp/ppp_ipcp.c @@ -121,8 +121,8 @@ static void ipcp_start_timeout(struct triton_timer_t *t) triton_timer_del(t); - if (ipcp->ppp->state == PPP_STATE_STARTING) - ppp_terminate(ipcp->ppp, TERM_USER_ERROR, 0); + if (ipcp->ppp->ses.state == AP_STATE_STARTING) + ap_session_terminate(&ipcp->ppp->ses, TERM_USER_ERROR, 0); } int ipcp_layer_start(struct ppp_layer_data_t *ld) @@ -137,7 +137,7 @@ int ipcp_layer_start(struct ppp_layer_data_t *ld) if (ipcp->ld.passive) { ipcp->timeout.expire = ipcp_start_timeout; ipcp->timeout.expire_tv.tv_sec = START_TIMEOUT; - triton_timer_add(ipcp->ppp->ctrl->ctx, &ipcp->timeout, 0); + triton_timer_add(ipcp->ppp->ses.ctrl->ctx, &ipcp->timeout, 0); } else { 
ppp_fsm_lower_up(&ipcp->fsm); if (ppp_fsm_open(&ipcp->fsm)) @@ -202,17 +202,17 @@ static void ipcp_layer_finished(struct ppp_fsm_t *fsm) if (!ipcp->started) { if (conf_ipv4 == IPV4_REQUIRE) - ppp_terminate(ipcp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ipcp->ppp->ses, TERM_USER_ERROR, 0); else ppp_layer_passive(ipcp->ppp, &ipcp->ld); - } else if (!ipcp->ppp->terminating) - ppp_terminate(ipcp->ppp, TERM_USER_ERROR, 0); + } else if (!ipcp->ppp->ses.terminating) + ap_session_terminate(&ipcp->ppp->ses, TERM_USER_ERROR, 0); fsm->fsm_state = FSM_Closed; - if (ipcp->ppp->ipv4) { - ipdb_put_ipv4(ipcp->ppp, ipcp->ppp->ipv4); - ipcp->ppp->ipv4 = NULL; + if (ipcp->ppp->ses.ipv4) { + ipdb_put_ipv4(&ipcp->ppp->ses, ipcp->ppp->ses.ipv4); + ipcp->ppp->ses.ipv4 = NULL; } } @@ -446,12 +446,12 @@ static int ipcp_recv_conf_req(struct ppp_ipcp_t *ipcp, uint8_t *data, int size) } if (r == IPCP_OPT_CLOSE) { if (conf_ipv4 == IPV4_REQUIRE) - ppp_terminate(ipcp->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&ipcp->ppp->ses, TERM_NAS_ERROR, 0); else lcp_send_proto_rej(ipcp->ppp, PPP_IPCP); return 0; } - if (ipcp->ppp->stop_time) + if (ipcp->ppp->ses.stop_time) return -1; lopt->state = r; ropt->state = r; @@ -660,10 +660,10 @@ static void ipcp_recv(struct ppp_handler_t*h) int r; int delay_ack = ipcp->delay_ack; - if (!ipcp->starting || ipcp->fsm.fsm_state == FSM_Closed || ipcp->ppp->terminating || conf_ipv4 == IPV4_DENY) { + if (!ipcp->starting || ipcp->fsm.fsm_state == FSM_Closed || ipcp->ppp->ses.terminating || conf_ipv4 == IPV4_DENY) { if (conf_ppp_verbose) log_ppp_warn("IPCP: discarding packet\n"); - if (ipcp->ppp->terminating) + if (ipcp->ppp->ses.terminating) return; if (ipcp->fsm.fsm_state == FSM_Closed || conf_ipv4 == IPV4_DENY) lcp_send_proto_rej(ipcp->ppp, PPP_IPCP); @@ -689,7 +689,7 @@ static void ipcp_recv(struct ppp_handler_t*h) switch(hdr->code) { case CONFREQ: r = ipcp_recv_conf_req(ipcp,(uint8_t*)(hdr + 1), ntohs(hdr->len) - PPP_HDRLEN); - if (ipcp->ppp->stop_time) 
{ + if (ipcp->ppp->ses.stop_time) { ipcp_free_conf_req(ipcp); return; } @@ -721,11 +721,11 @@ static void ipcp_recv(struct ppp_handler_t*h) } ipcp_free_conf_req(ipcp); if (r == IPCP_OPT_FAIL) - ppp_terminate(ipcp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ipcp->ppp->ses, TERM_USER_ERROR, 0); break; case CONFACK: if (ipcp_recv_conf_ack(ipcp,(uint8_t*)(hdr + 1), ntohs(hdr->len) - PPP_HDRLEN)) - ppp_terminate(ipcp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ipcp->ppp->ses, TERM_USER_ERROR, 0); else ppp_fsm_recv_conf_ack(&ipcp->fsm); break; @@ -735,7 +735,7 @@ static void ipcp_recv(struct ppp_handler_t*h) break; case CONFREJ: if (ipcp_recv_conf_rej(ipcp, (uint8_t*)(hdr + 1), ntohs(hdr->len) - PPP_HDRLEN)) - ppp_terminate(ipcp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ipcp->ppp->ses, TERM_USER_ERROR, 0); else ppp_fsm_recv_conf_rej(&ipcp->fsm); break; @@ -743,13 +743,13 @@ static void ipcp_recv(struct ppp_handler_t*h) if (conf_ppp_verbose) log_ppp_info2("recv [IPCP TermReq id=%x]\n", hdr->id); ppp_fsm_recv_term_req(&ipcp->fsm); - ppp_terminate(ipcp->ppp, TERM_USER_REQUEST, 0); + ap_session_terminate(&ipcp->ppp->ses, TERM_USER_REQUEST, 0); break; case TERMACK: if (conf_ppp_verbose) log_ppp_info2("recv [IPCP TermAck id=%x]\n", hdr->id); //ppp_fsm_recv_term_ack(&ipcp->fsm); - //ppp_terminate(ipcp->ppp, 0); + //ap_session_terminate(&ipcp->ppp->ses, 0); break; case CODEREJ: if (conf_ppp_verbose) diff --git a/accel-pppd/ppp/ppp_ipv6cp.c b/accel-pppd/ppp/ppp_ipv6cp.c index 9e7bf784..ee666267 100644 --- a/accel-pppd/ppp/ppp_ipv6cp.c +++ b/accel-pppd/ppp/ppp_ipv6cp.c @@ -119,8 +119,8 @@ static void ipv6cp_start_timeout(struct triton_timer_t *t) triton_timer_del(t); - if (ipv6cp->ppp->state == PPP_STATE_STARTING) - ppp_terminate(ipv6cp->ppp, TERM_USER_ERROR, 0); + if (ipv6cp->ppp->ses.state == AP_STATE_STARTING) + ap_session_terminate(&ipv6cp->ppp->ses, TERM_USER_ERROR, 0); } int ipv6cp_layer_start(struct ppp_layer_data_t *ld) @@ -137,7 +137,7 @@ int 
ipv6cp_layer_start(struct ppp_layer_data_t *ld) if (ipv6cp->ld.passive) { ipv6cp->timeout.expire = ipv6cp_start_timeout; ipv6cp->timeout.expire_tv.tv_sec = START_TIMEOUT; - triton_timer_add(ipv6cp->ppp->ctrl->ctx, &ipv6cp->timeout, 0); + triton_timer_add(ipv6cp->ppp->ses.ctrl->ctx, &ipv6cp->timeout, 0); } else { ppp_fsm_lower_up(&ipv6cp->fsm); if (ppp_fsm_open(&ipv6cp->fsm)) @@ -202,17 +202,17 @@ static void ipv6cp_layer_finished(struct ppp_fsm_t *fsm) if (!ipv6cp->started) { if (conf_ipv6 == IPV6_REQUIRE) - ppp_terminate(ipv6cp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ipv6cp->ppp->ses, TERM_USER_ERROR, 0); else ppp_layer_passive(ipv6cp->ppp, &ipv6cp->ld); - } else if (!ipv6cp->ppp->terminating) - ppp_terminate(ipv6cp->ppp, TERM_USER_ERROR, 0); + } else if (!ipv6cp->ppp->ses.terminating) + ap_session_terminate(&ipv6cp->ppp->ses, TERM_USER_ERROR, 0); fsm->fsm_state = FSM_Closed; - if (ipv6cp->ppp->ipv6) { - ipdb_put_ipv6(ipv6cp->ppp, ipv6cp->ppp->ipv6); - ipv6cp->ppp->ipv6 = NULL; + if (ipv6cp->ppp->ses.ipv6) { + ipdb_put_ipv6(&ipv6cp->ppp->ses, ipv6cp->ppp->ses.ipv6); + ipv6cp->ppp->ses.ipv6 = NULL; } } @@ -446,12 +446,12 @@ static int ipv6cp_recv_conf_req(struct ppp_ipv6cp_t *ipv6cp, uint8_t *data, int } if (r == IPV6CP_OPT_CLOSE) { if (conf_ipv6 == IPV6_REQUIRE) - ppp_terminate(ipv6cp->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&ipv6cp->ppp->ses, TERM_NAS_ERROR, 0); else lcp_send_proto_rej(ipv6cp->ppp, PPP_IPV6CP); return 0; } - if (ipv6cp->ppp->stop_time) + if (ipv6cp->ppp->ses.stop_time) return -1; lopt->state = r; ropt->state = r; @@ -660,10 +660,10 @@ static void ipv6cp_recv(struct ppp_handler_t*h) int r; int delay_ack = ipv6cp->delay_ack; - if (!ipv6cp->starting || ipv6cp->fsm.fsm_state == FSM_Closed || ipv6cp->ppp->terminating || conf_ipv6 == IPV6_DENY) { + if (!ipv6cp->starting || ipv6cp->fsm.fsm_state == FSM_Closed || ipv6cp->ppp->ses.terminating || conf_ipv6 == IPV6_DENY) { if (conf_ppp_verbose) log_ppp_warn("IPV6CP: discarding packet\n"); 
- if (ipv6cp->ppp->terminating) + if (ipv6cp->ppp->ses.terminating) return; if (ipv6cp->fsm.fsm_state == FSM_Closed || conf_ipv6 == IPV6_DENY) lcp_send_proto_rej(ipv6cp->ppp, PPP_IPV6CP); @@ -689,7 +689,7 @@ static void ipv6cp_recv(struct ppp_handler_t*h) switch(hdr->code) { case CONFREQ: r = ipv6cp_recv_conf_req(ipv6cp,(uint8_t*)(hdr + 1), ntohs(hdr->len) - PPP_HDRLEN); - if (ipv6cp->ppp->stop_time) { + if (ipv6cp->ppp->ses.stop_time) { ipv6cp_free_conf_req(ipv6cp); return; } @@ -721,11 +721,11 @@ static void ipv6cp_recv(struct ppp_handler_t*h) } ipv6cp_free_conf_req(ipv6cp); if (r == IPV6CP_OPT_FAIL) - ppp_terminate(ipv6cp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ipv6cp->ppp->ses, TERM_USER_ERROR, 0); break; case CONFACK: if (ipv6cp_recv_conf_ack(ipv6cp,(uint8_t*)(hdr + 1), ntohs(hdr->len) - PPP_HDRLEN)) - ppp_terminate(ipv6cp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ipv6cp->ppp->ses, TERM_USER_ERROR, 0); else ppp_fsm_recv_conf_ack(&ipv6cp->fsm); break; @@ -735,7 +735,7 @@ static void ipv6cp_recv(struct ppp_handler_t*h) break; case CONFREJ: if (ipv6cp_recv_conf_rej(ipv6cp, (uint8_t*)(hdr + 1), ntohs(hdr->len) - PPP_HDRLEN)) - ppp_terminate(ipv6cp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&ipv6cp->ppp->ses, TERM_USER_ERROR, 0); else ppp_fsm_recv_conf_rej(&ipv6cp->fsm); break; @@ -743,13 +743,13 @@ static void ipv6cp_recv(struct ppp_handler_t*h) if (conf_ppp_verbose) log_ppp_info2("recv [IPV6CP TermReq id=%x]\n", hdr->id); ppp_fsm_recv_term_req(&ipv6cp->fsm); - ppp_terminate(ipv6cp->ppp, TERM_USER_REQUEST, 0); + ap_session_terminate(&ipv6cp->ppp->ses, TERM_USER_REQUEST, 0); break; case TERMACK: if (conf_ppp_verbose) log_ppp_info2("recv [IPV6CP TermAck id=%x]\n", hdr->id); //ppp_fsm_recv_term_ack(&ipv6cp->fsm); - //ppp_terminate(ipv6cp->ppp, 0); + //ap_session_terminate(&ipv6cp->ppp->ses, 0); break; case CODEREJ: if (conf_ppp_verbose) diff --git a/accel-pppd/ppp/ppp_lcp.c b/accel-pppd/ppp/ppp_lcp.c index c93c1003..0318e54d 100644 --- 
a/accel-pppd/ppp/ppp_lcp.c +++ b/accel-pppd/ppp/ppp_lcp.c @@ -138,7 +138,7 @@ void lcp_layer_finish(struct ppp_layer_data_t *ld) stop_echo(lcp); ppp_fsm_close(&lcp->fsm); } else - triton_context_call(lcp->ppp->ctrl->ctx, (triton_event_func)_lcp_layer_finished, lcp); + triton_context_call(lcp->ppp->ses.ctrl->ctx, (triton_event_func)_lcp_layer_finished, lcp); } void lcp_layer_free(struct ppp_layer_data_t *ld) @@ -151,7 +151,7 @@ void lcp_layer_free(struct ppp_layer_data_t *ld) ppp_unregister_handler(lcp->ppp, &lcp->hnd); lcp_options_free(lcp); ppp_fsm_free(&lcp->fsm); - triton_cancel_call(lcp->ppp->ctrl->ctx, (triton_event_func)_lcp_layer_finished); + triton_cancel_call(lcp->ppp->ses.ctrl->ctx, (triton_event_func)_lcp_layer_finished); _free(lcp); } @@ -186,12 +186,12 @@ static void lcp_layer_finished(struct ppp_fsm_t *fsm) stop_echo(lcp); if (lcp->started) { lcp->started = 0; - if (lcp->ppp->terminating) + if (lcp->ppp->ses.terminating) ppp_layer_finished(lcp->ppp, &lcp->ld); else - ppp_terminate(lcp->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&lcp->ppp->ses, TERM_NAS_ERROR, 0); } else - ppp_terminate(lcp->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&lcp->ppp->ses, TERM_NAS_ERROR, 0); } static void print_ropt(struct recv_opt_t *ropt) @@ -590,7 +590,7 @@ static void lcp_update_echo_timer(struct ppp_lcp_t *lcp) if (lcp->echo_timer.tpd) triton_timer_mod(&lcp->echo_timer, 0); else - triton_timer_add(lcp->ppp->ctrl->ctx, &lcp->echo_timer, 0); + triton_timer_add(lcp->ppp->ses.ctrl->ctx, &lcp->echo_timer, 0); } } } @@ -610,7 +610,7 @@ static void lcp_recv_echo_repl(struct ppp_lcp_t *lcp, uint8_t *data, int size) if (magic == lcp->magic) { log_ppp_error("lcp: echo: loop-back detected\n"); - ppp_terminate(lcp->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(&lcp->ppp->ses, TERM_NAS_ERROR, 0); } } @@ -659,7 +659,7 @@ static void send_echo_request(struct triton_timer_t *t) if (lcp->echo_sent == 2) { memset(&ifreq, 0, sizeof(ifreq)); ifreq.stats_ptr = (void 
*)&ifreq.stats; - strcpy(ifreq.ifr__name, lcp->ppp->ifname); + strcpy(ifreq.ifr__name, lcp->ppp->ses.ifname); if (ioctl(sock_fd, SIOCGPPPSTATS, &ifreq) == 0) lcp->last_ipackets = ifreq.stats.p.ppp_ipackets; @@ -669,7 +669,7 @@ static void send_echo_request(struct triton_timer_t *t) time(&ts); memset(&ifreq, 0, sizeof(ifreq)); ifreq.stats_ptr = (void *)&ifreq.stats; - strcpy(ifreq.ifr__name, lcp->ppp->ifname); + strcpy(ifreq.ifr__name, lcp->ppp->ses.ifname); if (ioctl(sock_fd, SIOCGPPPSTATS, &ifreq) == 0 && lcp->last_ipackets != ifreq.stats.p.ppp_ipackets) { lcp->echo_sent = 1; lcp_update_echo_timer(lcp); @@ -685,7 +685,7 @@ static void send_echo_request(struct triton_timer_t *t) if (f) { log_ppp_warn("lcp: no echo reply\n"); - ppp_terminate(lcp->ppp, TERM_LOST_CARRIER, 1); + ap_session_terminate(&lcp->ppp->ses, TERM_LOST_CARRIER, 1); return; } @@ -700,7 +700,7 @@ static void start_echo(struct ppp_lcp_t *lcp) lcp->echo_timer.period = conf_echo_interval * 1000; lcp->echo_timer.expire = send_echo_request; if (lcp->echo_timer.period && !lcp->echo_timer.tpd) - triton_timer_add(lcp->ppp->ctrl->ctx, &lcp->echo_timer, 0); + triton_timer_add(lcp->ppp->ses.ctrl->ctx, &lcp->echo_timer, 0); } static void stop_echo(struct ppp_lcp_t *lcp) { @@ -785,7 +785,7 @@ static void lcp_recv(struct ppp_handler_t*h) if ((hdr->code == CONFACK || hdr->code == CONFNAK || hdr->code == CONFREJ) && lcp->started) return; - if (lcp->fsm.fsm_state == FSM_Initial || lcp->fsm.fsm_state == FSM_Closed || (lcp->ppp->terminating && (hdr->code != TERMACK && hdr->code != TERMREQ))) { + if (lcp->fsm.fsm_state == FSM_Initial || lcp->fsm.fsm_state == FSM_Closed || (lcp->ppp->ses.terminating && (hdr->code != TERMACK && hdr->code != TERMREQ))) { /*if (conf_ppp_verbose) log_ppp_warn("LCP: discaring packet\n"); lcp_send_proto_rej(ccp->ppp, htons(PPP_CCP));*/ @@ -816,11 +816,11 @@ static void lcp_recv(struct ppp_handler_t*h) } lcp_free_conf_req(lcp); if (r == LCP_OPT_FAIL) - ppp_terminate(lcp->ppp, TERM_USER_ERROR, 
0); + ap_session_terminate(&lcp->ppp->ses, TERM_USER_ERROR, 0); break; case CONFACK: if (lcp_recv_conf_ack(lcp,(uint8_t*)(hdr + 1), ntohs(hdr->len) - PPP_HDRLEN)) - ppp_terminate(lcp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&lcp->ppp->ses, TERM_USER_ERROR, 0); else if (lcp->fsm.recv_id != lcp->fsm.id) break; @@ -834,7 +834,7 @@ static void lcp_recv(struct ppp_handler_t*h) break; case CONFREJ: if (lcp_recv_conf_rej(lcp,(uint8_t*)(hdr + 1), ntohs(hdr->len) - PPP_HDRLEN)) - ppp_terminate(lcp->ppp, TERM_USER_ERROR, 0); + ap_session_terminate(&lcp->ppp->ses, TERM_USER_ERROR, 0); else if (lcp->fsm.recv_id != lcp->fsm.id) break; @@ -844,7 +844,7 @@ static void lcp_recv(struct ppp_handler_t*h) if (conf_ppp_verbose) log_ppp_info2("recv [LCP TermReq id=%x]\n", hdr->id); ppp_fsm_recv_term_req(&lcp->fsm); - ppp_terminate(lcp->ppp, TERM_USER_REQUEST, 0); + ap_session_terminate(&lcp->ppp->ses, TERM_USER_REQUEST, 0); break; case TERMACK: if (conf_ppp_verbose) diff --git a/accel-pppd/pwdb.c b/accel-pppd/pwdb.c index 4c0ab02e..f4440b8f 100644 --- a/accel-pppd/pwdb.c +++ b/accel-pppd/pwdb.c @@ -8,7 +8,7 @@ static LIST_HEAD(pwdb_handlers); -int __export pwdb_check(struct ppp_t *ppp, const char *username, int type, ...) +int __export pwdb_check(struct ap_session *ses, const char *username, int type, ...) { struct pwdb_t *pwdb; int r, res = PWDB_NO_IMPL; @@ -19,7 +19,7 @@ int __export pwdb_check(struct ppp_t *ppp, const char *username, int type, ...) list_for_each_entry(pwdb, &pwdb_handlers, entry) { if (!pwdb->check) continue; - r = pwdb->check(pwdb, ppp, username, type, args); + r = pwdb->check(pwdb, ses, username, type, args); if (r == PWDB_NO_IMPL) continue; if (r == PWDB_SUCCESS) @@ -29,7 +29,7 @@ int __export pwdb_check(struct ppp_t *ppp, const char *username, int type, ...) 
return res; } -__export char *pwdb_get_passwd(struct ppp_t *ppp, const char *username) +__export char *pwdb_get_passwd(struct ap_session *ses, const char *username) { struct pwdb_t *pwdb; char *r = NULL; @@ -37,7 +37,7 @@ __export char *pwdb_get_passwd(struct ppp_t *ppp, const char *username) list_for_each_entry(pwdb, &pwdb_handlers, entry) { if (!pwdb->get_passwd) continue; - r = pwdb->get_passwd(pwdb, ppp, username); + r = pwdb->get_passwd(pwdb, ses, username); if (r) break; } diff --git a/accel-pppd/pwdb.h b/accel-pppd/pwdb.h index a1c2cedb..8d8ab408 100644 --- a/accel-pppd/pwdb.h +++ b/accel-pppd/pwdb.h @@ -4,7 +4,7 @@ #include <stdarg.h> #include "list.h" -struct ppp_t; +struct ap_session; #define PWDB_SUCCESS 0 #define PWDB_DENIED 1 @@ -13,12 +13,12 @@ struct ppp_t; struct pwdb_t { struct list_head entry; - int (*check)(struct pwdb_t *, struct ppp_t *, const char *username, int type, va_list args); - char* (*get_passwd)(struct pwdb_t *, struct ppp_t *, const char *username); + int (*check)(struct pwdb_t *, struct ap_session *, const char *username, int type, va_list args); + char* (*get_passwd)(struct pwdb_t *, struct ap_session *, const char *username); }; -int pwdb_check(struct ppp_t *, const char *username, int type, ...); -char *pwdb_get_passwd(struct ppp_t *, const char *username); +int pwdb_check(struct ap_session *, const char *username, int type, ...); +char *pwdb_get_passwd(struct ap_session *, const char *username); void pwdb_register(struct pwdb_t *); void pwdb_unregister(struct pwdb_t *); diff --git a/accel-pppd/radius/CMakeLists.txt b/accel-pppd/radius/CMakeLists.txt index 0c1789ef..d139fe1f 100644 --- a/accel-pppd/radius/CMakeLists.txt +++ b/accel-pppd/radius/CMakeLists.txt @@ -10,6 +10,10 @@ SET(sources radius.c ) +IF (BACKUP) + SET(sources ${sources} backup.c) +ENDIF (BACKUP) + ADD_DEFINITIONS(-DDICTIONARY="${CMAKE_INSTALL_PREFIX}/share/accel-ppp/radius/dictionary") ADD_LIBRARY(radius SHARED ${sources}) diff --git a/accel-pppd/radius/acct.c 
b/accel-pppd/radius/acct.c index 76ad3299..b5f7ff76 100644 --- a/accel-pppd/radius/acct.c +++ b/accel-pppd/radius/acct.c @@ -10,6 +10,8 @@ #include "crypto.h" #include "log.h" +#include "backup.h" +#include "ap_session_backup.h" #include "radius_p.h" #include "memdebug.h" @@ -32,40 +34,42 @@ static int req_set_RA(struct rad_req_t *req, const char *secret) return 0; } -static void req_set_stat(struct rad_req_t *req, struct ppp_t *ppp) +static void req_set_stat(struct rad_req_t *req, struct ap_session *ses) { struct ifpppstatsreq ifreq; time_t stop_time; - if (ppp->stop_time) - stop_time = ppp->stop_time; + if (ses->stop_time) + stop_time = ses->stop_time; else time(&stop_time); memset(&ifreq, 0, sizeof(ifreq)); ifreq.stats_ptr = (void *)&ifreq.stats; - strcpy(ifreq.ifr__name, ppp->ifname); + strcpy(ifreq.ifr__name, ses->ifname); - if (ioctl(sock_fd, SIOCGPPPSTATS, &ifreq)) { - log_ppp_error("radius: failed to get ppp statistics: %s\n", strerror(errno)); - return; - } + if (ses->ctrl->type != CTRL_TYPE_IPOE) { + if (ioctl(sock_fd, SIOCGPPPSTATS, &ifreq)) { + log_ppp_error("radius: failed to get ppp statistics: %s\n", strerror(errno)); + return; + } + + if (ifreq.stats.p.ppp_ibytes < req->rpd->acct_input_octets) + req->rpd->acct_input_gigawords++; + req->rpd->acct_input_octets = ifreq.stats.p.ppp_ibytes; - if (ifreq.stats.p.ppp_ibytes < req->rpd->acct_input_octets) - req->rpd->acct_input_gigawords++; - req->rpd->acct_input_octets = ifreq.stats.p.ppp_ibytes; - - if (ifreq.stats.p.ppp_obytes < req->rpd->acct_output_octets) - req->rpd->acct_output_gigawords++; - req->rpd->acct_output_octets = ifreq.stats.p.ppp_obytes; - - rad_packet_change_int(req->pack, NULL, "Acct-Input-Octets", ifreq.stats.p.ppp_ibytes); - rad_packet_change_int(req->pack, NULL, "Acct-Output-Octets", ifreq.stats.p.ppp_obytes); - rad_packet_change_int(req->pack, NULL, "Acct-Input-Packets", ifreq.stats.p.ppp_ipackets); - rad_packet_change_int(req->pack, NULL, "Acct-Output-Packets", 
ifreq.stats.p.ppp_opackets); - rad_packet_change_int(req->pack, NULL, "Acct-Input-Gigawords", req->rpd->acct_input_gigawords); - rad_packet_change_int(req->pack, NULL, "Acct-Output-Gigawords", req->rpd->acct_output_gigawords); - rad_packet_change_int(req->pack, NULL, "Acct-Session-Time", stop_time - ppp->start_time); + if (ifreq.stats.p.ppp_obytes < req->rpd->acct_output_octets) + req->rpd->acct_output_gigawords++; + req->rpd->acct_output_octets = ifreq.stats.p.ppp_obytes; + + rad_packet_change_int(req->pack, NULL, "Acct-Input-Octets", ifreq.stats.p.ppp_ibytes); + rad_packet_change_int(req->pack, NULL, "Acct-Output-Octets", ifreq.stats.p.ppp_obytes); + rad_packet_change_int(req->pack, NULL, "Acct-Input-Packets", ifreq.stats.p.ppp_ipackets); + rad_packet_change_int(req->pack, NULL, "Acct-Output-Packets", ifreq.stats.p.ppp_opackets); + rad_packet_change_int(req->pack, NULL, "Acct-Input-Gigawords", req->rpd->acct_input_gigawords); + rad_packet_change_int(req->pack, NULL, "Acct-Output-Gigawords", req->rpd->acct_output_gigawords); + } + rad_packet_change_int(req->pack, NULL, "Acct-Session-Time", stop_time - ses->start_time); } static int rad_acct_read(struct triton_md_handler_t *h) @@ -125,7 +129,7 @@ static void __rad_req_send(struct rad_req_t *req) if (rad_server_realloc(req)) { if (conf_acct_timeout) { log_ppp_warn("radius:acct: no servers available, terminating session...\n"); - ppp_terminate(req->rpd->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(req->rpd->ses, TERM_NAS_ERROR, 0); } break; } @@ -134,7 +138,7 @@ static void __rad_req_send(struct rad_req_t *req) rad_req_send(req, conf_interim_verbose); if (!req->hnd.tpd) { - triton_md_register_handler(req->rpd->ppp->ctrl->ctx, &req->hnd); + triton_md_register_handler(req->rpd->ses->ctrl->ctx, &req->hnd); triton_md_enable_handler(&req->hnd, MD_MODE_READ); } @@ -167,7 +171,7 @@ static void rad_acct_timeout(struct triton_timer_t *t) rad_server_fail(req->serv); if (rad_server_realloc(req)) { log_ppp_warn("radius:acct: 
no servers available, terminating session...\n"); - ppp_terminate(req->rpd->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(req->rpd->ses, TERM_NAS_ERROR, 0); return; } time(&req->rpd->acct_timestamp); @@ -201,10 +205,10 @@ static void rad_acct_interim_update(struct triton_timer_t *t) return; if (rpd->session_timeout.expire_tv.tv_sec && - rpd->session_timeout.expire_tv.tv_sec - (time(NULL) - rpd->ppp->start_time) < INTERIM_SAFE_TIME) + rpd->session_timeout.expire_tv.tv_sec - (time(NULL) - rpd->ses->start_time) < INTERIM_SAFE_TIME) return; - req_set_stat(rpd->acct_req, rpd->ppp); + req_set_stat(rpd->acct_req, rpd->ses); if (!rpd->acct_interim_interval) return; @@ -221,7 +225,7 @@ static void rad_acct_interim_update(struct triton_timer_t *t) __sync_add_and_fetch(&rpd->acct_req->serv->stat_interim_sent, 1); rpd->acct_req->timeout.period = conf_timeout * 1000; - triton_timer_add(rpd->ppp->ctrl->ctx, &rpd->acct_req->timeout, 0); + triton_timer_add(rpd->ses->ctrl->ctx, &rpd->acct_req->timeout, 0); } int rad_acct_start(struct radius_pd_t *rpd) @@ -233,7 +237,9 @@ int rad_acct_start(struct radius_pd_t *rpd) if (!conf_accounting) return 0; - rpd->acct_req = rad_req_alloc(rpd, CODE_ACCOUNTING_REQUEST, rpd->ppp->username); + if (!rpd->acct_req) + rpd->acct_req = rad_req_alloc(rpd, CODE_ACCOUNTING_REQUEST, rpd->ses->username); + if (!rpd->acct_req) return -1; @@ -244,7 +250,7 @@ int rad_acct_start(struct radius_pd_t *rpd) //if (rad_req_add_val(rpd->acct_req, "Acct-Status-Type", "Start", 4)) // goto out_err; - //if (rad_req_add_str(rpd->acct_req, "Acct-Session-Id", rpd->ppp->sessionid, PPP_SESSIONID_LEN, 1)) + //if (rad_req_add_str(rpd->acct_req, "Acct-Session-Id", rpd->ses->ionid, PPP_SESSIONID_LEN, 1)) // goto out_err; if (rpd->acct_req->reply) { @@ -257,75 +263,81 @@ int rad_acct_start(struct radius_pd_t *rpd) if (req_set_RA(rpd->acct_req, rpd->acct_req->serv->secret)) goto out_err; - while (1) { - - if (rad_server_req_enter(rpd->acct_req)) { - if 
(rad_server_realloc(rpd->acct_req)) { - log_ppp_warn("radius:acct_start: no servers available\n"); - goto out_err; - } - if (req_set_RA(rpd->acct_req, rpd->acct_req->serv->secret)) - goto out_err; - continue; - } +#ifdef USE_BACKUP + if (rpd->ses->state != AP_STATE_RESTORE || !rpd->ses->backup->internal) { +#endif + while (1) { - for (i = 0; i < conf_max_try; i++) { - if (conf_acct_delay_time) { - time(&ts); - rad_packet_change_int(rpd->acct_req->pack, NULL, "Acct-Delay-Time", ts - rpd->acct_timestamp); + if (rad_server_req_enter(rpd->acct_req)) { + if (rad_server_realloc(rpd->acct_req)) { + log_ppp_warn("radius:acct_start: no servers available\n"); + goto out_err; + } if (req_set_RA(rpd->acct_req, rpd->acct_req->serv->secret)) goto out_err; + continue; } - if (rad_req_send(rpd->acct_req, conf_verbose)) - goto out_err; + for (i = 0; i < conf_max_try; i++) { + if (conf_acct_delay_time) { + time(&ts); + rad_packet_change_int(rpd->acct_req->pack, NULL, "Acct-Delay-Time", ts - rpd->acct_timestamp); + if (req_set_RA(rpd->acct_req, rpd->acct_req->serv->secret)) + goto out_err; + } - __sync_add_and_fetch(&rpd->acct_req->serv->stat_acct_sent, 1); + if (rad_req_send(rpd->acct_req, conf_verbose)) + goto out_err; + + __sync_add_and_fetch(&rpd->acct_req->serv->stat_acct_sent, 1); - rad_req_wait(rpd->acct_req, conf_timeout); + rad_req_wait(rpd->acct_req, conf_timeout); - if (!rpd->acct_req->reply) { - if (conf_acct_delay_time) + if (!rpd->acct_req->reply) { + if (conf_acct_delay_time) + rpd->acct_req->pack->id++; + __sync_add_and_fetch(&rpd->acct_req->serv->stat_acct_lost, 1); + stat_accm_add(rpd->acct_req->serv->stat_acct_lost_1m, 1); + stat_accm_add(rpd->acct_req->serv->stat_acct_lost_5m, 1); + continue; + } + + dt = (rpd->acct_req->reply->tv.tv_sec - rpd->acct_req->pack->tv.tv_sec) * 1000 + + (rpd->acct_req->reply->tv.tv_nsec - rpd->acct_req->pack->tv.tv_nsec) / 1000000; + stat_accm_add(rpd->acct_req->serv->stat_acct_query_1m, dt); + 
stat_accm_add(rpd->acct_req->serv->stat_acct_query_5m, dt); + + if (rpd->acct_req->reply->id != rpd->acct_req->pack->id || rpd->acct_req->reply->code != CODE_ACCOUNTING_RESPONSE) { + rad_packet_free(rpd->acct_req->reply); + rpd->acct_req->reply = NULL; rpd->acct_req->pack->id++; - __sync_add_and_fetch(&rpd->acct_req->serv->stat_acct_lost, 1); - stat_accm_add(rpd->acct_req->serv->stat_acct_lost_1m, 1); - stat_accm_add(rpd->acct_req->serv->stat_acct_lost_5m, 1); - continue; + __sync_add_and_fetch(&rpd->acct_req->serv->stat_acct_lost, 1); + stat_accm_add(rpd->acct_req->serv->stat_acct_lost_1m, 1); + stat_accm_add(rpd->acct_req->serv->stat_acct_lost_5m, 1); + } else + break; } - dt = (rpd->acct_req->reply->tv.tv_sec - rpd->acct_req->pack->tv.tv_sec) * 1000 + - (rpd->acct_req->reply->tv.tv_nsec - rpd->acct_req->pack->tv.tv_nsec) / 1000000; - stat_accm_add(rpd->acct_req->serv->stat_acct_query_1m, dt); - stat_accm_add(rpd->acct_req->serv->stat_acct_query_5m, dt); - - if (rpd->acct_req->reply->id != rpd->acct_req->pack->id || rpd->acct_req->reply->code != CODE_ACCOUNTING_RESPONSE) { - rad_packet_free(rpd->acct_req->reply); - rpd->acct_req->reply = NULL; - rpd->acct_req->pack->id++; - __sync_add_and_fetch(&rpd->acct_req->serv->stat_acct_lost, 1); - stat_accm_add(rpd->acct_req->serv->stat_acct_lost_1m, 1); - stat_accm_add(rpd->acct_req->serv->stat_acct_lost_5m, 1); - } else - break; - } - - rad_server_req_exit(rpd->acct_req); + rad_server_req_exit(rpd->acct_req); - if (rpd->acct_req->reply) - break; + if (rpd->acct_req->reply) + break; - rad_server_fail(rpd->acct_req->serv); - if (rad_server_realloc(rpd->acct_req)) { - log_ppp_warn("radius:acct_start: no servers available\n"); - goto out_err; + rad_server_fail(rpd->acct_req->serv); + if (rad_server_realloc(rpd->acct_req)) { + log_ppp_warn("radius:acct_start: no servers available\n"); + goto out_err; + } + if (req_set_RA(rpd->acct_req, rpd->acct_req->serv->secret)) + goto out_err; } - if (req_set_RA(rpd->acct_req, 
rpd->acct_req->serv->secret)) - goto out_err; +#ifdef USE_BACKUP } +#endif rpd->acct_req->hnd.read = rad_acct_read; - triton_md_register_handler(rpd->ppp->ctrl->ctx, &rpd->acct_req->hnd); + triton_md_register_handler(rpd->ses->ctrl->ctx, &rpd->acct_req->hnd); if (triton_md_enable_handler(&rpd->acct_req->hnd, MD_MODE_READ)) goto out_err; @@ -334,7 +346,7 @@ int rad_acct_start(struct radius_pd_t *rpd) rpd->acct_interim_timer.expire = rad_acct_interim_update; rpd->acct_interim_timer.period = rpd->acct_interim_interval ? rpd->acct_interim_interval * 1000 : STAT_UPDATE_INTERVAL; - if (rpd->acct_interim_interval && triton_timer_add(rpd->ppp->ctrl->ctx, &rpd->acct_interim_timer, 0)) { + if (rpd->acct_interim_interval && triton_timer_add(rpd->ses->ctrl->ctx, &rpd->acct_interim_timer, 0)) { triton_md_unregister_handler(&rpd->acct_req->hnd); triton_timer_del(&rpd->acct_req->timeout); goto out_err; @@ -364,7 +376,7 @@ void rad_acct_stop(struct radius_pd_t *rpd) if (rpd->acct_req->timeout.tpd) triton_timer_del(&rpd->acct_req->timeout); - switch (rpd->ppp->terminate_cause) { + switch (rpd->ses->terminate_cause) { case TERM_USER_REQUEST: rad_packet_add_val(rpd->acct_req->pack, NULL, "Acct-Terminate-Cause", "User-Request"); break; @@ -392,7 +404,7 @@ void rad_acct_stop(struct radius_pd_t *rpd) break; } rad_packet_change_val(rpd->acct_req->pack, NULL, "Acct-Status-Type", "Stop"); - req_set_stat(rpd->acct_req, rpd->ppp); + req_set_stat(rpd->acct_req, rpd->ses); req_set_RA(rpd->acct_req, rpd->acct_req->serv->secret); /// !!! 
rad_req_add_val(rpd->acct_req, "Acct-Terminate-Cause", ""); diff --git a/accel-pppd/radius/auth.c b/accel-pppd/radius/auth.c index 3e5d281d..6cb4e803 100644 --- a/accel-pppd/radius/auth.c +++ b/accel-pppd/radius/auth.c @@ -243,13 +243,13 @@ int rad_auth_pap(struct radius_pd_t *rpd, const char *username, va_list args) _free(epasswd); if (conf_sid_in_auth) - if (rad_packet_add_str(req->pack, NULL, "Acct-Session-Id", rpd->ppp->sessionid)) + if (rad_packet_add_str(req->pack, NULL, "Acct-Session-Id", rpd->ses->sessionid)) return -1; r = rad_auth_send(req); if (r == PWDB_SUCCESS) { struct ev_radius_t ev = { - .ppp = rpd->ppp, + .ses = rpd->ses, .request = req->pack, .reply = req->reply, }; @@ -311,13 +311,13 @@ int rad_auth_chap_md5(struct radius_pd_t *rpd, const char *username, va_list arg } if (conf_sid_in_auth) - if (rad_packet_add_str(rpd->auth_req->pack, NULL, "Acct-Session-Id", rpd->ppp->sessionid)) + if (rad_packet_add_str(rpd->auth_req->pack, NULL, "Acct-Session-Id", rpd->ses->sessionid)) goto out; r = rad_auth_send(rpd->auth_req); if (r == PWDB_SUCCESS) { struct ev_radius_t ev = { - .ppp = rpd->ppp, + .ses = rpd->ses, .request = rpd->auth_req->pack, .reply = rpd->auth_req->reply, }; @@ -339,9 +339,12 @@ static void setup_mppe(struct rad_req_t *req, const uint8_t *challenge) uint8_t mppe_recv_key[16]; uint8_t mppe_send_key[16]; struct ev_mppe_keys_t ev_mppe = { - .ppp = req->rpd->ppp, + .ppp = container_of(req->rpd->ses, typeof(struct ppp_t), ses), }; + if (req->rpd->ses->ctrl->type == CTRL_TYPE_IPOE) + return; + list_for_each_entry(attr, &req->reply->attrs, entry) { if (attr->vendor && attr->vendor->id == Vendor_Microsoft) { switch (attr->attr->id) { @@ -426,14 +429,14 @@ int rad_auth_mschap_v1(struct radius_pd_t *rpd, const char *username, va_list ar } if (conf_sid_in_auth) - if (rad_packet_add_str(rpd->auth_req->pack, NULL, "Acct-Session-Id", rpd->ppp->sessionid)) + if (rad_packet_add_str(rpd->auth_req->pack, NULL, "Acct-Session-Id", rpd->ses->sessionid)) goto 
out; r = rad_auth_send(rpd->auth_req); if (r == PWDB_SUCCESS) { struct ev_radius_t ev = { - .ppp = rpd->ppp, + .ses = rpd->ses, .request = rpd->auth_req->pack, .reply = rpd->auth_req->reply, }; @@ -508,7 +511,7 @@ int rad_auth_mschap_v2(struct radius_pd_t *rpd, const char *username, va_list ar } if (conf_sid_in_auth) - if (rad_packet_add_str(rpd->auth_req->pack, NULL, "Acct-Session-Id", rpd->ppp->sessionid)) + if (rad_packet_add_str(rpd->auth_req->pack, NULL, "Acct-Session-Id", rpd->ses->sessionid)) goto out; r = rad_auth_send(rpd->auth_req); @@ -522,7 +525,7 @@ int rad_auth_mschap_v2(struct radius_pd_t *rpd, const char *username, va_list ar } if (r == PWDB_SUCCESS) { struct ev_radius_t ev = { - .ppp = rpd->ppp, + .ses = rpd->ses, .request = rpd->auth_req->pack, .reply = rpd->auth_req->reply, }; @@ -549,3 +552,31 @@ out: } +int rad_auth_null(struct radius_pd_t *rpd, const char *username, va_list args) +{ + struct rad_req_t *req; + int r = PWDB_DENIED; + + req = rad_req_alloc(rpd, CODE_ACCESS_REQUEST, username); + if (!req) + return PWDB_DENIED; + + if (conf_sid_in_auth) + if (rad_packet_add_str(req->pack, NULL, "Acct-Session-Id", rpd->ses->sessionid)) + return -1; + + r = rad_auth_send(req); + if (r == PWDB_SUCCESS) { + struct ev_radius_t ev = { + .ses = rpd->ses, + .request = req->pack, + .reply = req->reply, + }; + triton_event_fire(EV_RADIUS_ACCESS_ACCEPT, &ev); + } + + rad_req_free(req); + + return r; +} + diff --git a/accel-pppd/radius/backup.c b/accel-pppd/radius/backup.c new file mode 100644 index 00000000..4c40ec61 --- /dev/null +++ b/accel-pppd/radius/backup.c @@ -0,0 +1,163 @@ +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <netinet/in.h> +#include <net/ethernet.h> + +#include "log.h" +#include "memdebug.h" + +#include "backup.h" +#include "ap_session_backup.h" +#include "radius_p.h" + +#define RAD_TAG_INTERIM_INTERVAL 1 +#define RAD_TAG_SESSION_TIMEOUT 2 +#define RAD_TAG_IPV4_ADDR 3 +#define RAD_TAG_IPV6_ADDR 4 +#define 
RAD_TAG_IPV6_DP 5 +#define RAD_TAG_ATTR_CLASS 6 +#define RAD_TAG_ATTR_STATE 7 +#define RAD_TAG_TERMINATION_ACTION 8 +#define RAD_TAG_ACCT_SERVER_ADDR 9 +#define RAD_TAG_ACCT_SERVER_PORT 10 + + +#define add_tag(id, data, size) if (!backup_add_tag(m, id, 0, data, size)) return -1; +#define add_tag_int(id, data, size) if (!backup_add_tag(m, id, 1, data, size)) return -1; + +static int session_save(struct ap_session *ses, struct backup_mod *m) +{ + struct radius_pd_t *rpd = find_pd(ses); + uint64_t session_timeout = ses->start_time + rpd->session_timeout.expire_tv.tv_sec; + + if (!rpd) + return 0; + + if (!rpd->authenticated) + return -2; + + add_tag(RAD_TAG_INTERIM_INTERVAL, &rpd->acct_interim_interval, 4); + + if (rpd->session_timeout.tpd) + add_tag(RAD_TAG_SESSION_TIMEOUT, &session_timeout, 8); + + if (ses->ipv4 == &rpd->ipv4_addr) + add_tag(RAD_TAG_IPV4_ADDR, NULL, 0); + + if (ses->ipv6 == &rpd->ipv6_addr) + add_tag(RAD_TAG_IPV6_ADDR, NULL, 0); + + /*if (rpd->ipv6_pd_assigned) { + + }*/ + + if (rpd->attr_class) + add_tag(RAD_TAG_ATTR_CLASS, rpd->attr_class, rpd->attr_class_len); + + if (rpd->attr_state) + add_tag(RAD_TAG_ATTR_CLASS, rpd->attr_state, rpd->attr_state_len); + + add_tag(RAD_TAG_TERMINATION_ACTION, &rpd->termination_action, 4); + + if (rpd->acct_req) { + add_tag(RAD_TAG_ACCT_SERVER_ADDR, &rpd->acct_req->server_addr, 4); + add_tag(RAD_TAG_ACCT_SERVER_PORT, &rpd->acct_req->server_port, 2); + } + + return 0; +} + +static int session_restore(struct ap_session *ses, struct backup_mod *m) +{ + return 0; +} + +static void restore_ipv4_addr(struct ap_session *ses) +{ + struct backup_mod *m = backup_find_mod(ses->backup, MODID_COMMON); + struct backup_tag *tag; + + list_for_each_entry(tag, &m->tag_list, entry) { + switch (tag->id) { + case SES_TAG_IPV4_ADDR: + ses->ipv4->addr = *(in_addr_t *)tag->data; + break; + case SES_TAG_IPV4_PEER_ADDR: + ses->ipv4->peer_addr = *(in_addr_t *)tag->data; + break; + } + } +} + +static void restore_ipv6_addr(struct ap_session 
*ses) +{ + +} + +void radius_restore_session(struct ap_session *ses, struct radius_pd_t *rpd) +{ + struct backup_mod *m = backup_find_mod(ses->backup, MODID_RADIUS); + struct backup_tag *tag; + in_addr_t acct_addr = 0; + int acct_port; + + if (!m) + return; + + list_for_each_entry(tag, &m->tag_list, entry) { + switch (tag->id) { + case RAD_TAG_INTERIM_INTERVAL: + rpd->acct_interim_interval = *(uint32_t *)tag->data; + break; + case RAD_TAG_SESSION_TIMEOUT: + rpd->session_timeout.expire_tv.tv_sec = *(uint64_t *)tag->data - ses->start_time; + break; + case RAD_TAG_IPV4_ADDR: + ses->ipv4 = &rpd->ipv4_addr; + restore_ipv4_addr(ses); + break; + case RAD_TAG_IPV6_ADDR: + restore_ipv6_addr(ses); + break; + case RAD_TAG_ATTR_CLASS: + rpd->attr_class = _malloc(tag->size); + memcpy(rpd->attr_class, tag->data, tag->size); + rpd->attr_class_len = tag->size; + break; + case RAD_TAG_ATTR_STATE: + rpd->attr_state = _malloc(tag->size); + memcpy(rpd->attr_state, tag->data, tag->size); + rpd->attr_state_len = tag->size; + break; + case RAD_TAG_TERMINATION_ACTION: + rpd->termination_action = *(uint32_t *)tag->data; + break; + case RAD_TAG_ACCT_SERVER_ADDR: + acct_addr = *(in_addr_t *)tag->data; + break; + case RAD_TAG_ACCT_SERVER_PORT: + acct_port = *(uint16_t *)tag->data; + break; + } + } + + if (acct_addr) + rpd->acct_req = rad_req_alloc2(rpd, CODE_ACCOUNTING_REQUEST, rpd->ses->username, acct_addr, acct_port); + + rpd->authenticated = 1; +} + +static struct backup_module mod = { + .id = MODID_RADIUS, + .save = session_save, + .restore = session_restore, +}; + +static void init(void) +{ + backup_register_module(&mod); +} + +DEFINE_INIT(100, init); + diff --git a/accel-pppd/radius/dm_coa.c b/accel-pppd/radius/dm_coa.c index f6197e7c..a2757362 100644 --- a/accel-pppd/radius/dm_coa.c +++ b/accel-pppd/radius/dm_coa.c @@ -140,13 +140,13 @@ static void disconnect_request(struct radius_pd_t *rpd) rpd->dm_coa_req = NULL; pthread_mutex_unlock(&rpd->lock); - ppp_terminate(rpd->ppp, 
TERM_ADMIN_RESET, 0); + ap_session_terminate(rpd->ses, TERM_ADMIN_RESET, 0); } static void coa_request(struct radius_pd_t *rpd) { struct ev_radius_t ev = { - .ppp = rpd->ppp, + .ses = rpd->ses, .request = rpd->dm_coa_req, }; @@ -171,8 +171,8 @@ static void coa_request(struct radius_pd_t *rpd) void dm_coa_cancel(struct radius_pd_t *rpd) { - triton_cancel_call(rpd->ppp->ctrl->ctx, (triton_event_func)disconnect_request); - triton_cancel_call(rpd->ppp->ctrl->ctx, (triton_event_func)coa_request); + triton_cancel_call(rpd->ses->ctrl->ctx, (triton_event_func)disconnect_request); + triton_cancel_call(rpd->ses->ctrl->ctx, (triton_event_func)coa_request); rad_packet_free(rpd->dm_coa_req); } @@ -227,9 +227,9 @@ static int dm_coa_read(struct triton_md_handler_t *h) memcpy(&rpd->dm_coa_addr, &addr, sizeof(addr)); if (pack->code == CODE_DISCONNECT_REQUEST) - triton_context_call(rpd->ppp->ctrl->ctx, (triton_event_func)disconnect_request, rpd); + triton_context_call(rpd->ses->ctrl->ctx, (triton_event_func)disconnect_request, rpd); else - triton_context_call(rpd->ppp->ctrl->ctx, (triton_event_func)coa_request, rpd); + triton_context_call(rpd->ses->ctrl->ctx, (triton_event_func)coa_request, rpd); pthread_mutex_unlock(&rpd->lock); diff --git a/accel-pppd/radius/radius.c b/accel-pppd/radius/radius.c index 8f17fcc9..82ac979d 100644 --- a/accel-pppd/radius/radius.c +++ b/accel-pppd/radius/radius.c @@ -60,18 +60,18 @@ int rad_proc_attrs(struct rad_req_t *req) struct ev_dns_t dns; int res = 0; - dns.ppp = NULL; + dns.ses = NULL; req->rpd->acct_interim_interval = conf_acct_interim_interval; list_for_each_entry(attr, &req->reply->attrs, entry) { if (attr->vendor && attr->vendor->id == Vendor_Microsoft) { switch (attr->attr->id) { case MS_Primary_DNS_Server: - dns.ppp = req->rpd->ppp; + dns.ses = req->rpd->ses; dns.dns1 = attr->val.ipaddr; break; case MS_Secondary_DNS_Server: - dns.ppp = req->rpd->ppp; + dns.ses = req->rpd->ses; dns.dns2 = attr->val.ipaddr; break; } @@ -84,7 +84,6 @@ int 
rad_proc_attrs(struct rad_req_t *req) if (!conf_gw_ip_address) log_ppp_warn("radius: gw-ip-address not specified, cann't assign IP address...\n"); else { - req->rpd->ipv4_addr.owner = &ipdb; req->rpd->ipv4_addr.peer_addr = attr->val.ipaddr; req->rpd->ipv4_addr.addr = conf_gw_ip_address; } @@ -132,18 +131,18 @@ int rad_proc_attrs(struct rad_req_t *req) } } - if (dns.ppp) + if (dns.ses) triton_event_fire(EV_DNS, &dns); return res; } -static int check(struct pwdb_t *pwdb, struct ppp_t *ppp, const char *username, int type, va_list _args) +static int check(struct pwdb_t *pwdb, struct ap_session *ses, const char *username, int type, va_list _args) { int r = PWDB_NO_IMPL; va_list args; int chap_type; - struct radius_pd_t *rpd = find_pd(ppp); + struct radius_pd_t *rpd = find_pd(ses); va_copy(args, _args); @@ -164,7 +163,9 @@ static int check(struct pwdb_t *pwdb, struct ppp_t *ppp, const char *username, i r = rad_auth_mschap_v2(rpd, username, args); break; } - break; + case 0: + r = rad_auth_null(rpd, username, args); + break; } va_end(args); @@ -175,20 +176,19 @@ static int check(struct pwdb_t *pwdb, struct ppp_t *ppp, const char *username, i return r; } -static struct ipv4db_item_t *get_ipv4(struct ppp_t *ppp) +static struct ipv4db_item_t *get_ipv4(struct ap_session *ses) { - struct radius_pd_t *rpd = find_pd(ppp); + struct radius_pd_t *rpd = find_pd(ses); if (rpd->ipv4_addr.peer_addr) return &rpd->ipv4_addr; return NULL; } -static struct ipv6db_item_t *get_ipv6(struct ppp_t *ppp) +static struct ipv6db_item_t *get_ipv6(struct ap_session *ses) { - struct radius_pd_t *rpd = find_pd(ppp); + struct radius_pd_t *rpd = find_pd(ses); - rpd->ipv6_addr.owner = &ipdb; rpd->ipv6_addr.intf_id = 0; if (!list_empty(&rpd->ipv6_addr.addr_list)) @@ -197,14 +197,14 @@ static struct ipv6db_item_t *get_ipv6(struct ppp_t *ppp) return NULL; } -static struct ipv6db_prefix_t *get_ipv6_prefix(struct ppp_t *ppp) +static struct ipv6db_prefix_t *get_ipv6_prefix(struct ap_session *ses) { - struct 
radius_pd_t *rpd = find_pd(ppp); + struct radius_pd_t *rpd = find_pd(ses); - rpd->ipv6_dp.owner = &ipdb; - - if (!list_empty(&rpd->ipv6_dp.prefix_list)) + if (!list_empty(&rpd->ipv6_dp.prefix_list)) { + rpd->ipv6_dp_assigned = 1; return &rpd->ipv6_dp; + } return NULL; } @@ -214,66 +214,76 @@ static struct ipv6db_prefix_t *get_ipv6_prefix(struct ppp_t *ppp) static void session_timeout(struct triton_timer_t *t) { struct radius_pd_t *rpd = container_of(t, typeof(*rpd), session_timeout); + log_ppp_msg("radius: session timed out\n"); - if (rpd->ppp->stop_time) + if (rpd->ses->stop_time) return; - if (rpd->termination_action == Termination_Action_RADIUS_Request) { - if (ppp_auth_restart(rpd->ppp)) - ppp_terminate(rpd->ppp, TERM_SESSION_TIMEOUT, 0); + if (rpd->termination_action == Termination_Action_RADIUS_Request && rpd->ses->ctrl->type != CTRL_TYPE_IPOE) { + if (ppp_auth_restart(container_of(rpd->ses, struct ppp_t, ses))) + ap_session_terminate(rpd->ses, TERM_SESSION_TIMEOUT, 0); } else - ppp_terminate(rpd->ppp, TERM_SESSION_TIMEOUT, 0); + ap_session_terminate(rpd->ses, TERM_SESSION_TIMEOUT, 0); } -static void ppp_starting(struct ppp_t *ppp) +static void ses_starting(struct ap_session *ses) { struct radius_pd_t *rpd = mempool_alloc(rpd_pool); memset(rpd, 0, sizeof(*rpd)); rpd->pd.key = &pd_key; - rpd->ppp = ppp; + rpd->ses = ses; pthread_mutex_init(&rpd->lock, NULL); INIT_LIST_HEAD(&rpd->plugin_list); INIT_LIST_HEAD(&rpd->ipv6_addr.addr_list); INIT_LIST_HEAD(&rpd->ipv6_dp.prefix_list); + + rpd->ipv4_addr.owner = &ipdb; + rpd->ipv6_addr.owner = &ipdb; + rpd->ipv6_dp.owner = &ipdb; - list_add_tail(&rpd->pd.entry, &ppp->pd_list); + list_add_tail(&rpd->pd.entry, &ses->pd_list); pthread_rwlock_wrlock(&sessions_lock); list_add_tail(&rpd->entry, &sessions); pthread_rwlock_unlock(&sessions_lock); + +#ifdef USE_BACKUP + if (ses->state == AP_STATE_RESTORE && ses->backup) + radius_restore_session(ses, rpd); +#endif } -static void ppp_acct_start(struct ppp_t *ppp) +static void 
ses_acct_start(struct ap_session *ses) { - struct radius_pd_t *rpd = find_pd(ppp); + struct radius_pd_t *rpd = find_pd(ses); if (!rpd->authenticated) return; if (rad_acct_start(rpd)) { - ppp_terminate(rpd->ppp, TERM_NAS_ERROR, 0); + ap_session_terminate(rpd->ses, TERM_NAS_ERROR, 0); return; } if (rpd->session_timeout.expire_tv.tv_sec) { rpd->session_timeout.expire = session_timeout; - triton_timer_add(ppp->ctrl->ctx, &rpd->session_timeout, 0); + triton_timer_add(ses->ctrl->ctx, &rpd->session_timeout, 0); } } -static void ppp_finishing(struct ppp_t *ppp) +static void ses_finishing(struct ap_session *ses) { - struct radius_pd_t *rpd = find_pd(ppp); + struct radius_pd_t *rpd = find_pd(ses); if (!rpd->authenticated) return; rad_acct_stop(rpd); } -static void ppp_finished(struct ppp_t *ppp) +static void ses_finished(struct ap_session *ses) { - struct radius_pd_t *rpd = find_pd(ppp); + struct radius_pd_t *rpd = find_pd(ses); struct ipv6db_addr_t *a; pthread_rwlock_wrlock(&sessions_lock); @@ -317,12 +327,12 @@ static void ppp_finished(struct ppp_t *ppp) mempool_free(rpd); } -struct radius_pd_t *find_pd(struct ppp_t *ppp) +struct radius_pd_t *find_pd(struct ap_session *ses) { - struct ppp_pd_t *pd; + struct ap_private *pd; struct radius_pd_t *rpd; - list_for_each_entry(pd, &ppp->pd_list, entry) { + list_for_each_entry(pd, &ses->pd_list, entry) { if (pd->key == &pd_key) { rpd = container_of(pd, typeof(*rpd), pd); return rpd; @@ -339,17 +349,17 @@ struct radius_pd_t *rad_find_session(const char *sessionid, const char *username pthread_rwlock_rdlock(&sessions_lock); list_for_each_entry(rpd, &sessions, entry) { - if (!rpd->ppp->username) + if (!rpd->ses->username) continue; - if (sessionid && strcmp(sessionid, rpd->ppp->sessionid)) + if (sessionid && strcmp(sessionid, rpd->ses->sessionid)) continue; - if (username && strcmp(username, rpd->ppp->username)) + if (username && strcmp(username, rpd->ses->username)) continue; - if (port_id >= 0 && port_id != rpd->ppp->unit_idx) + if 
(port_id >= 0 && port_id != rpd->ses->unit_idx) continue; - if (ipaddr && rpd->ppp->ipv4 && ipaddr != rpd->ppp->ipv4->peer_addr) + if (ipaddr && rpd->ses->ipv4 && ipaddr != rpd->ses->ipv4->peer_addr) continue; - if (csid && rpd->ppp->ctrl->calling_station_id && strcmp(csid, rpd->ppp->ctrl->calling_station_id)) + if (csid && rpd->ses->ctrl->calling_station_id && strcmp(csid, rpd->ses->ctrl->calling_station_id)) continue; pthread_mutex_lock(&rpd->lock); pthread_rwlock_unlock(&sessions_lock); @@ -422,9 +432,9 @@ int rad_check_nas_pack(struct rad_packet_t *pack) return 0; } -void __export rad_register_plugin(struct ppp_t *ppp, struct rad_plugin_t *plugin) +void __export rad_register_plugin(struct ap_session *ses, struct rad_plugin_t *plugin) { - struct radius_pd_t *rpd = find_pd(ppp); + struct radius_pd_t *rpd = find_pd(ses); if (!rpd) return; @@ -575,10 +585,10 @@ static void radius_init(void) pwdb_register(&pwdb); ipdb_register(&ipdb); - triton_event_register_handler(EV_PPP_STARTING, (triton_event_func)ppp_starting); - triton_event_register_handler(EV_PPP_ACCT_START, (triton_event_func)ppp_acct_start); - triton_event_register_handler(EV_PPP_FINISHING, (triton_event_func)ppp_finishing); - triton_event_register_handler(EV_PPP_FINISHED, (triton_event_func)ppp_finished); + triton_event_register_handler(EV_SES_STARTING, (triton_event_func)ses_starting); + triton_event_register_handler(EV_SES_ACCT_START, (triton_event_func)ses_acct_start); + triton_event_register_handler(EV_SES_FINISHING, (triton_event_func)ses_finishing); + triton_event_register_handler(EV_SES_FINISHED, (triton_event_func)ses_finished); triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); } diff --git a/accel-pppd/radius/radius.h b/accel-pppd/radius/radius.h index 296fd133..ad27b898 100644 --- a/accel-pppd/radius/radius.h +++ b/accel-pppd/radius/radius.h @@ -102,9 +102,9 @@ struct rad_plugin_t int (*send_accounting_request)(struct rad_plugin_t *, struct rad_packet_t *pack); }; 
-struct ppp_t; +struct ap_session; -void rad_register_plugin(struct ppp_t *, struct rad_plugin_t *); +void rad_register_plugin(struct ap_session *, struct rad_plugin_t *); struct rad_dict_attr_t *rad_dict_find_attr(const char *name); struct rad_dict_attr_t *rad_dict_find_attr_id(struct rad_dict_vendor_t *vendor, int type); diff --git a/accel-pppd/radius/radius_p.h b/accel-pppd/radius/radius_p.h index 7422bbf3..07093517 100644 --- a/accel-pppd/radius/radius_p.h +++ b/accel-pppd/radius/radius_p.h @@ -15,10 +15,11 @@ struct rad_server_t; struct radius_pd_t { struct list_head entry; - struct ppp_pd_t pd; - struct ppp_t *ppp; + struct ap_private pd; + struct ap_session *ses; pthread_mutex_t lock; int authenticated:1; + int ipv6_dp_assigned:1; struct rad_req_t *auth_req; struct rad_req_t *acct_req; @@ -142,18 +143,20 @@ int rad_dict_load(const char *fname); void rad_dict_free(struct rad_dict_t *dict); struct rad_req_t *rad_req_alloc(struct radius_pd_t *rpd, int code, const char *username); +struct rad_req_t *rad_req_alloc2(struct radius_pd_t *rpd, int code, const char *username, in_addr_t addr, int port); int rad_req_acct_fill(struct rad_req_t *); void rad_req_free(struct rad_req_t *); int rad_req_send(struct rad_req_t *, int verbose); int rad_req_wait(struct rad_req_t *, int); -struct radius_pd_t *find_pd(struct ppp_t *ppp); +struct radius_pd_t *find_pd(struct ap_session *ses); int rad_proc_attrs(struct rad_req_t *req); int rad_auth_pap(struct radius_pd_t *rpd, const char *username, va_list args); int rad_auth_chap_md5(struct radius_pd_t *rpd, const char *username, va_list args); int rad_auth_mschap_v1(struct radius_pd_t *rpd, const char *username, va_list args); int rad_auth_mschap_v2(struct radius_pd_t *rpd, const char *username, va_list args); +int rad_auth_null(struct radius_pd_t *rpd, const char *username, va_list args); int rad_acct_start(struct radius_pd_t *rpd); void rad_acct_stop(struct radius_pd_t *rpd); @@ -168,6 +171,7 @@ int rad_packet_send(struct 
rad_packet_t *pck, int fd, struct sockaddr_in *addr); void dm_coa_cancel(struct radius_pd_t *pd); struct rad_server_t *rad_server_get(int); +struct rad_server_t *rad_server_get2(int, in_addr_t, int); void rad_server_put(struct rad_server_t *, int); int rad_server_req_enter(struct rad_req_t *); void rad_server_req_exit(struct rad_req_t *); @@ -175,6 +179,8 @@ int rad_server_realloc(struct rad_req_t *); void rad_server_fail(struct rad_server_t *); void rad_server_timeout(struct rad_server_t *); void rad_server_reply(struct rad_server_t *); + +void radius_restore_session(struct ap_session *ses, struct radius_pd_t *rpd); struct stat_accm_t; struct stat_accm_t *stat_accm_create(unsigned int time); diff --git a/accel-pppd/radius/req.c b/accel-pppd/radius/req.c index cc0b261a..cd9a170b 100644 --- a/accel-pppd/radius/req.c +++ b/accel-pppd/radius/req.c @@ -16,10 +16,12 @@ static int rad_req_read(struct triton_md_handler_t *h); static void rad_req_timeout(struct triton_timer_t *t); +static int make_socket(struct rad_req_t *req); -struct rad_req_t *rad_req_alloc(struct radius_pd_t *rpd, int code, const char *username) +static struct rad_req_t *__rad_req_alloc(struct radius_pd_t *rpd, int code, const char *username, in_addr_t addr, int port) { struct rad_plugin_t *plugin; + struct ppp_t *ppp = NULL; struct rad_req_t *req = _malloc(sizeof(*req)); if (!req) { @@ -27,6 +29,9 @@ struct rad_req_t *rad_req_alloc(struct radius_pd_t *rpd, int code, const char *u return NULL; } + if (rpd->ses->ctrl->type != CTRL_TYPE_IPOE) + ppp = container_of(rpd->ses, typeof(*ppp), ses); + memset(req, 0, sizeof(*req)); req->rpd = rpd; req->hnd.fd = -1; @@ -34,7 +39,11 @@ struct rad_req_t *rad_req_alloc(struct radius_pd_t *rpd, int code, const char *u req->type = code == CODE_ACCESS_REQUEST ? 
RAD_SERV_AUTH : RAD_SERV_ACCT; - req->serv = rad_server_get(req->type); + if (addr) + req->serv = rad_server_get2(req->type, addr, port); + else + req->serv = rad_server_get(req->type); + if (!req->serv) goto out_err; @@ -63,19 +72,30 @@ struct rad_req_t *rad_req_alloc(struct radius_pd_t *rpd, int code, const char *u if (conf_nas_ip_address) if (rad_packet_add_ipaddr(req->pack, NULL, "NAS-IP-Address", conf_nas_ip_address)) goto out_err; - if (rad_packet_add_int(req->pack, NULL, "NAS-Port", rpd->ppp->unit_idx)) - goto out_err; - if (rad_packet_add_val(req->pack, NULL, "NAS-Port-Type", "Virtual")) - goto out_err; - if (rad_packet_add_val(req->pack, NULL, "Service-Type", "Framed-User")) - goto out_err; - if (rad_packet_add_val(req->pack, NULL, "Framed-Protocol", "PPP")) - goto out_err; - if (rpd->ppp->ctrl->calling_station_id) - if (rad_packet_add_str(req->pack, NULL, "Calling-Station-Id", rpd->ppp->ctrl->calling_station_id)) + if (ppp) { + if (rad_packet_add_int(req->pack, NULL, "NAS-Port", ppp->ses.unit_idx)) + goto out_err; + } + + if (req->rpd->ses->ctrl->type == CTRL_TYPE_IPOE) { + if (rad_packet_add_val(req->pack, NULL, "NAS-Port-Type", "Ethernet")) + goto out_err; + } else { + if (rad_packet_add_val(req->pack, NULL, "NAS-Port-Type", "Virtual")) + goto out_err; + + if (rad_packet_add_val(req->pack, NULL, "Service-Type", "Framed-User")) + goto out_err; + + if (rad_packet_add_val(req->pack, NULL, "Framed-Protocol", "PPP")) goto out_err; - if (rpd->ppp->ctrl->called_station_id) - if (rad_packet_add_str(req->pack, NULL, "Called-Station-Id", rpd->ppp->ctrl->called_station_id)) + } + + if (rpd->ses->ctrl->calling_station_id) + if (rad_packet_add_str(req->pack, NULL, "Calling-Station-Id", rpd->ses->ctrl->calling_station_id)) + goto out_err; + if (rpd->ses->ctrl->called_station_id) + if (rad_packet_add_str(req->pack, NULL, "Called-Station-Id", rpd->ses->ctrl->called_station_id)) goto out_err; if (rpd->attr_class) if (rad_packet_add_octets(req->pack, NULL, "Class", 
rpd->attr_class, rpd->attr_class_len)) @@ -105,6 +125,26 @@ out_err: return NULL; } +struct rad_req_t *rad_req_alloc(struct radius_pd_t *rpd, int code, const char *username) +{ + return __rad_req_alloc(rpd, code, username, 0, 0); +} + +struct rad_req_t *rad_req_alloc2(struct radius_pd_t *rpd, int code, const char *username, in_addr_t addr, int port) +{ + struct rad_req_t *req = __rad_req_alloc(rpd, code, username, addr, port); + + if (!req) + return NULL; + + if (code == CODE_ACCOUNTING_REQUEST) + req->server_port = req->serv->acct_port; + + make_socket(req); + + return req; +} + int rad_req_acct_fill(struct rad_req_t *req) { struct ipv6db_addr_t *a; @@ -117,7 +157,7 @@ int rad_req_acct_fill(struct rad_req_t *req) return -1; if (rad_packet_add_val(req->pack, NULL, "Acct-Authentic", "RADIUS")) return -1; - if (rad_packet_add_str(req->pack, NULL, "Acct-Session-Id", req->rpd->ppp->sessionid)) + if (rad_packet_add_str(req->pack, NULL, "Acct-Session-Id", req->rpd->ses->sessionid)) return -1; if (rad_packet_add_int(req->pack, NULL, "Acct-Session-Time", 0)) return -1; @@ -137,14 +177,14 @@ int rad_req_acct_fill(struct rad_req_t *req) if (rad_packet_add_int(req->pack, NULL, "Acct-Delay-Time", 0)) return -1; } - if (req->rpd->ppp->ipv4) { - if (rad_packet_add_ipaddr(req->pack, NULL, "Framed-IP-Address", req->rpd->ppp->ipv4->peer_addr)) + if (req->rpd->ses->ipv4) { + if (rad_packet_add_ipaddr(req->pack, NULL, "Framed-IP-Address", req->rpd->ses->ipv4->peer_addr)) return -1; } - if (req->rpd->ppp->ipv6) { - if (rad_packet_add_ifid(req->pack, NULL, "Framed-Interface-Id", req->rpd->ppp->ipv6->peer_intf_id)) + if (req->rpd->ses->ipv6) { + if (rad_packet_add_ifid(req->pack, NULL, "Framed-Interface-Id", req->rpd->ses->ipv6->peer_intf_id)) return -1; - list_for_each_entry(a, &req->rpd->ppp->ipv6->addr_list, entry) { + list_for_each_entry(a, &req->rpd->ses->ipv6->addr_list, entry) { if (rad_packet_add_ipv6prefix(req->pack, NULL, "Framed-IPv6-Prefix", &a->addr, a->prefix_len)) return 
-1; } @@ -235,7 +275,7 @@ out_err: static void req_wakeup(struct rad_req_t *req) { - struct triton_context_t *ctx = req->rpd->ppp->ctrl->ctx; + struct triton_context_t *ctx = req->rpd->ses->ctrl->ctx; if (req->timeout.tpd) triton_timer_del(&req->timeout); triton_md_unregister_handler(&req->hnd); @@ -277,7 +317,7 @@ int rad_req_wait(struct rad_req_t *req, int timeout) req->hnd.read = rad_req_read; req->timeout.expire = rad_req_timeout; - triton_context_register(&req->ctx, req->rpd->ppp); + triton_context_register(&req->ctx, req->rpd->ses); triton_context_set_priority(&req->ctx, 1); triton_md_register_handler(&req->ctx, &req->hnd); triton_md_enable_handler(&req->hnd, MD_MODE_READ); diff --git a/accel-pppd/radius/serv.c b/accel-pppd/radius/serv.c index 30219f75..aae37190 100644 --- a/accel-pppd/radius/serv.c +++ b/accel-pppd/radius/serv.c @@ -24,9 +24,9 @@ static LIST_HEAD(serv_list); static void __free_server(struct rad_server_t *); -static struct rad_server_t *__rad_server_get(int type, struct rad_server_t *exclude) +static struct rad_server_t *__rad_server_get(int type, struct rad_server_t *exclude, in_addr_t addr, int port) { - struct rad_server_t *s, *s0 = NULL; + struct rad_server_t *s, *s0 = NULL, *s1 = NULL; struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); @@ -43,6 +43,15 @@ static struct rad_server_t *__rad_server_get(int type, struct rad_server_t *excl else if (type == RAD_SERV_ACCT && !s->acct_port) continue; + if (s->addr == addr) { + if (type == RAD_SERV_AUTH && port == s->auth_port) + s1 = s; + else if (type == RAD_SERV_ACCT && port == s->acct_port) + s1 = s; + else if (!s1) + s1 = s; + } + if (!s0) { s0 = s; continue; @@ -52,7 +61,9 @@ static struct rad_server_t *__rad_server_get(int type, struct rad_server_t *excl s0 = s; } - if (!s0) + if (s1) + s0 = s1; + else if (!s0) return NULL; __sync_add_and_fetch(&s0->client_cnt[type], 1); @@ -62,7 +73,12 @@ static struct rad_server_t *__rad_server_get(int type, struct rad_server_t *excl struct 
rad_server_t *rad_server_get(int type) { - return __rad_server_get(type, NULL); + return __rad_server_get(type, NULL, 0, 0); +} + +struct rad_server_t *rad_server_get2(int type, in_addr_t addr, int port) +{ + return __rad_server_get(type, NULL, addr, port); } void rad_server_put(struct rad_server_t *s, int type) @@ -132,12 +148,12 @@ void rad_server_req_exit(struct rad_req_t *req) pthread_mutex_unlock(&req->serv->lock); if (r) - triton_context_wakeup(r->rpd->ppp->ctrl->ctx); + triton_context_wakeup(r->rpd->ses->ctrl->ctx); } int rad_server_realloc(struct rad_req_t *req) { - struct rad_server_t *s = __rad_server_get(req->type, req->serv); + struct rad_server_t *s = __rad_server_get(req->type, req->serv, 0, 0); if (!s) return -1; @@ -182,7 +198,7 @@ void rad_server_fail(struct rad_server_t *s) while (!list_empty(&s->req_queue)) { r = list_entry(s->req_queue.next, typeof(*r), entry); list_del(&r->entry); - triton_context_wakeup(r->rpd->ppp->ctrl->ctx); + triton_context_wakeup(r->rpd->ses->ctrl->ctx); } } @@ -559,7 +575,7 @@ static void load_config(void) while (!list_empty(&s->req_queue)) { r = list_entry(s->req_queue.next, typeof(*r), entry); list_del(&r->entry); - triton_context_wakeup(r->rpd->ppp->ctrl->ctx); + triton_context_wakeup(r->rpd->ses->ctrl->ctx); } if (!s->client_cnt[0] && !s->client_cnt[1]) diff --git a/accel-pppd/session.c b/accel-pppd/session.c new file mode 100644 index 00000000..0ca115c3 --- /dev/null +++ b/accel-pppd/session.c @@ -0,0 +1,290 @@ +#include <stdio.h> +#include <unistd.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> +#include <errno.h> +#include <signal.h> +#include <fcntl.h> +#include <pthread.h> +#include <arpa/inet.h> +#include <features.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <linux/if.h> + +#include "triton.h" +#include "log.h" +#include "events.h" +#include "ap_session.h" +#include "backup.h" +#include "spinlock.h" +#include "mempool.h" +#include "memdebug.h" + +int conf_sid_ucase; 
+pthread_rwlock_t __export ses_lock = PTHREAD_RWLOCK_INITIALIZER; +__export LIST_HEAD(ses_list); + +#if __WORDSIZE == 32 +static spinlock_t seq_lock; +#endif + +int __export sock_fd; +int __export sock6_fd; +int __export urandom_fd; +int __export ap_shutdown; + +static long long unsigned seq; + +struct ap_session_stat __export ap_session_stat; + +static void (*shutdown_cb)(void); + +static void generate_sessionid(struct ap_session *ses); + +void __export ap_session_init(struct ap_session *ses) +{ + memset(ses, 0, sizeof(*ses)); + INIT_LIST_HEAD(&ses->pd_list); + ses->ifindex = -1; +} + +int __export ap_session_starting(struct ap_session *ses) +{ + struct ifreq ifr; + + if (ses->ifindex == -1) { + memset(&ifr, 0, sizeof(ifr)); + strcpy(ifr.ifr_name, ses->ifname); + + if (ioctl(sock_fd, SIOCGIFINDEX, &ifr)) { + log_ppp_error("ioctl(SIOCGIFINDEX): %s\n", strerror(errno)); + return -1; + } + ses->ifindex = ifr.ifr_ifindex; + } + + if (ses->state != AP_STATE_RESTORE) { + ses->start_time = time(NULL); + generate_sessionid(ses); + + ses->state = AP_STATE_STARTING; + } + + __sync_add_and_fetch(&ap_session_stat.starting, 1); + + pthread_rwlock_wrlock(&ses_lock); + list_add_tail(&ses->entry, &ses_list); + pthread_rwlock_unlock(&ses_lock); + + triton_event_fire(EV_SES_STARTING, ses); + + return 0; +} + +void __export ap_session_activate(struct ap_session *ses) +{ + ap_session_ifup(ses); + + ses->state = AP_STATE_ACTIVE; + __sync_sub_and_fetch(&ap_session_stat.starting, 1); + __sync_add_and_fetch(&ap_session_stat.active, 1); + +#ifdef USE_BACKUP + if (!ses->backup) + backup_save_session(ses); +#endif +} + +void __export ap_session_finished(struct ap_session *ses) +{ + ses->terminated = 1; + + pthread_rwlock_wrlock(&ses_lock); + list_del(&ses->entry); + pthread_rwlock_unlock(&ses_lock); + + switch (ses->state) { + case AP_STATE_ACTIVE: + __sync_sub_and_fetch(&ap_session_stat.active, 1); + break; + case AP_STATE_RESTORE: + case AP_STATE_STARTING: + 
__sync_sub_and_fetch(&ap_session_stat.starting, 1); + break; + case AP_STATE_FINISHING: + __sync_sub_and_fetch(&ap_session_stat.finishing, 1); + break; + } + + triton_event_fire(EV_SES_FINISHED, ses); + ses->ctrl->finished(ses); + + if (ses->username) { + _free(ses->username); + ses->username = NULL; + } + + if (ses->ipv4_pool_name) { + _free(ses->ipv4_pool_name); + ses->ipv4_pool_name = NULL; + } + + if (ses->ipv6_pool_name) { + _free(ses->ipv6_pool_name); + ses->ipv6_pool_name = NULL; + } + +#ifdef USE_BACKUP + if (ses->backup) + ses->backup->storage->free(ses->backup); +#endif + + if (ap_shutdown && !ap_session_stat.starting && !ap_session_stat.active && !ap_session_stat.finishing) { + if (shutdown_cb) + shutdown_cb(); + else + kill(getpid(), SIGTERM); + } +} + +void __export ap_session_terminate(struct ap_session *ses, int cause, int hard) +{ + if (ses->terminated) + return; + + if (!ses->stop_time) + time(&ses->stop_time); + + if (!ses->terminate_cause) + ses->terminate_cause = cause; + + if (ses->terminating) { + if (hard) + ses->ctrl->terminate(ses, hard); + return; + } + + if (ses->state == AP_STATE_ACTIVE) + __sync_sub_and_fetch(&ap_session_stat.active, 1); + else + __sync_sub_and_fetch(&ap_session_stat.starting, 1); + + __sync_add_and_fetch(&ap_session_stat.finishing, 1); + ses->terminating = 1; + ses->state = AP_STATE_FINISHING; + + log_ppp_debug("terminate\n"); + + ap_session_ifdown(ses); + + triton_event_fire(EV_SES_FINISHING, ses); + + ses->ctrl->terminate(ses, hard); +} + +void ap_shutdown_soft(void (*cb)(void)) +{ + ap_shutdown = 1; + shutdown_cb = cb; + + if (!ap_session_stat.starting && !ap_session_stat.active && !ap_session_stat.finishing) { + if (shutdown_cb) + shutdown_cb(); + else + kill(getpid(), SIGTERM); + } +} + +static void generate_sessionid(struct ap_session *ses) +{ + unsigned long long sid; + +#if __WORDSIZE == 32 + spin_lock(&seq_lock); + sid = ++seq; + spin_unlock(&seq_lock); +#else + sid = __sync_add_and_fetch(&seq, 1); +#endif + + 
if (conf_sid_ucase) + sprintf(ses->sessionid, "%016llX", sid); + else + sprintf(ses->sessionid, "%016llx", sid); +} + +static void save_seq(void) +{ + FILE *f; + char *opt = conf_get_opt("ppp", "seq-file"); + if (!opt) + opt = "/var/run/accel-ppp/seq"; + + f = fopen(opt, "w"); + if (f) { + fprintf(f, "%llu", seq); + fclose(f); + } +} + +static void load_config(void) +{ + char *opt; + + opt = conf_get_opt("common", "sid-case"); + if (opt) { + if (!strcmp(opt, "upper")) + conf_sid_ucase = 1; + else if (strcmp(opt, "lower")) + log_emerg("sid-case: invalid format\n"); + } +} + +static void init(void) +{ + const char *opt; + FILE *f; + + sock_fd = socket(AF_INET, SOCK_DGRAM, 0); + if (sock_fd < 0) { + perror("socket"); + _exit(EXIT_FAILURE); + } + + fcntl(sock_fd, F_SETFD, fcntl(sock_fd, F_GETFD) | FD_CLOEXEC); + + sock6_fd = socket(AF_INET6, SOCK_DGRAM, 0); + if (sock6_fd < 0) + log_warn("ppp: kernel doesn't support ipv6\n"); + else + fcntl(sock6_fd, F_SETFD, fcntl(sock6_fd, F_GETFD) | FD_CLOEXEC); + + urandom_fd = open("/dev/urandom", O_RDONLY); + if (urandom_fd < 0) { + log_emerg("failed to open /dev/urandom: %s\n", strerror(errno)); + return; + } + + fcntl(urandom_fd, F_SETFD, fcntl(urandom_fd, F_GETFD) | FD_CLOEXEC); + + opt = conf_get_opt("session", "seq-file"); + if (!opt) + opt = "/var/run/accel-ppp/seq"; + + f = fopen(opt, "r"); + if (f) { + fscanf(f, "%llu", &seq); + fclose(f); + } else + seq = (unsigned long long)random() * (unsigned long long)random(); + + load_config(); + triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); + + atexit(save_seq); +} + +DEFINE_INIT(2, init); + diff --git a/accel-pppd/session_backup.c b/accel-pppd/session_backup.c new file mode 100644 index 00000000..86073623 --- /dev/null +++ b/accel-pppd/session_backup.c @@ -0,0 +1,136 @@ +#include <string.h> +#include <stdlib.h> +#include <stdio.h> +#include <errno.h> +#include <unistd.h> +#include <netinet/in.h> + +#include "events.h" +#include "triton.h" 
+#include "log.h" +#include "ap_session.h" +#include "backup.h" +#include "ap_session_backup.h" +#include "ipdb.h" + +#include "memdebug.h" + +#ifdef USE_BACKUP + +#define add_tag(id, data, size) if (!backup_add_tag(m, id, 0, data, size)) return -1; +#define add_tag_int(id, data, size) if (!backup_add_tag(m, id, 1, data, size)) return -1; + +static int session_save(struct ap_session *ses, struct backup_mod *m) +{ + struct ipv6db_addr_t *a; + struct ses_tag_ipv6 ipv6; + + add_tag(SES_TAG_USERNAME, ses->username, strlen(ses->username)); + add_tag(SES_TAG_SESSIONID, ses->sessionid, AP_SESSIONID_LEN); + add_tag(SES_TAG_START_TIME, &ses->start_time, sizeof(time_t)); + add_tag(SES_TAG_IFNAME, ses->ifname, strlen(ses->ifname)); + add_tag_int(SES_TAG_IFINDEX, &ses->ifindex, 4); + + if (ses->ipv4) { + add_tag(SES_TAG_IPV4_ADDR, &ses->ipv4->addr, 4); + add_tag(SES_TAG_IPV4_PEER_ADDR, &ses->ipv4->peer_addr, 4); + } + + if (ses->ipv6) { + add_tag(SES_TAG_IPV6_INTFID, &ses->ipv6->intf_id, 8); + add_tag(SES_TAG_IPV6_PEER_INTFID, &ses->ipv6->peer_intf_id, 8); + list_for_each_entry(a, &ses->ipv6->addr_list, entry) { + ipv6.addr = a->addr; + ipv6.prefix_len = a->prefix_len; + add_tag(SES_TAG_IPV6_ADDR, &ipv6, sizeof(ipv6)); + } + } + + //add_tag_int(PPP_TAG_FD, &ses->fd, sizeof(ses->fd)); + //add_tag_int(PPP_TAG_CHAN_FD, &ses->chan_fd, sizeof(ses->chan_fd)); + //add_tag_int(PPP_TAG_UNIT_FD, &ses->unit_fd, sizeof(ses->unit_fd)); + //add_tag_int(PPP_TAG_UNIT, &ses->unit_idx, sizeof(ses->unit_idx)); + + //triton_event_fire(EV_PPP_SESSION_SAVE, &ev); + + return 0; +} + +static int session_restore(struct ap_session *ses, struct backup_mod *m) +{ + struct backup_tag *t; + + list_for_each_entry(t, &m->tag_list, entry) { + switch(t->id) { + case SES_TAG_USERNAME: + ses->username = _malloc(t->size + 1); + if (!ses->username) { + log_emerg("out of memory"); + return -1; + } + memcpy(ses->username, t->data, t->size); + ses->username[t->size] = 0; + break; + case SES_TAG_SESSIONID: + 
memcpy(ses->sessionid, t->data, AP_SESSIONID_LEN); + break; + case SES_TAG_IFNAME: + memcpy(ses->ifname, t->data, t->size); + ses->ifname[t->size] = 0; + break; + case SES_TAG_START_TIME: + ses->start_time = *(time_t *)t->data; + break; + case SES_TAG_IFINDEX: + if (ses->backup->internal) + ses->ifindex = *(uint32_t *)t->data; + break; + /*case PPP_TAG_FD: + ses->fd = *(int *)t->data; + break; + case PPP_TAG_CHAN_FD: + ses->chan_fd = *(int *)t->data; + break; + case PPP_TAG_UNIT_FD: + ses->chan_fd = *(int *)t->data; + break; + case PPP_TAG_UNIT: + ses->unit_idx = *(int *)t->data; + break; + case PPP_TAG_IPV4_ADDR: + if (!ses->ipv4) { + ses->ipv4 = _malloc(sizeof(*ses->ipv4)); + memset(ses->ipv4, 0, sizeof(*ses->ipv4)); + ses->ipv4->owner = &ipdb; + } + ses->ipv4->addr = *(in_addr_t *)t->data; + break; + case PPP_TAG_IPV4_PEER_ADDR: + if (!ses->ipv4) { + ses->ipv4 = _malloc(sizeof(*ses->ipv4)); + memset(ses->ipv4, 0, sizeof(*ses->ipv4)); + ses->ipv4->owner = &ipdb; + } + ses->ipv4->peer_addr = *(in_addr_t *)t->data; + break;*/ + } + } + + return 0; + //return establish_ses(ses); +} + +static struct backup_module mod = { + .id = MODID_COMMON, + .save = session_save, + .restore = session_restore, +}; + +static void init(void) +{ + backup_register_module(&mod); +} + +DEFINE_INIT(101, init); + +#endif diff --git a/accel-pppd/shaper/CMakeLists.txt b/accel-pppd/shaper/CMakeLists.txt index 515fd839..3c1ac951 100644 --- a/accel-pppd/shaper/CMakeLists.txt +++ b/accel-pppd/shaper/CMakeLists.txt @@ -1,4 +1,4 @@ -ADD_LIBRARY(shaper SHARED shaper.c limiter.c leaf_qdisc.c tc_core.c libnetlink.c) +ADD_LIBRARY(shaper SHARED shaper.c limiter.c leaf_qdisc.c tc_core.c) INSTALL(TARGETS shaper LIBRARY DESTINATION lib/accel-ppp diff --git a/accel-pppd/shaper/limiter.c b/accel-pppd/shaper/limiter.c index 5a3142bd..15eef19a 100644 --- a/accel-pppd/shaper/limiter.c +++ b/accel-pppd/shaper/limiter.c @@ -413,7 +413,7 @@ static int remove_htb_ifb(struct rtnl_handle *rth, int ifindex, int 
priority) return tc_qdisc_modify(rth, conf_ifb_ifindex, RTM_DELTCLASS, 0, &opt); } -int install_limiter(struct ppp_t *ppp, int down_speed, int down_burst, int up_speed, int up_burst) +int install_limiter(struct ap_session *ses, int down_speed, int down_burst, int up_speed, int up_burst) { struct rtnl_handle rth; int r; @@ -429,19 +429,19 @@ int install_limiter(struct ppp_t *ppp, int down_speed, int down_burst, int up_sp up_burst = up_burst ? up_burst : conf_up_burst_factor * up_speed; if (conf_down_limiter == LIM_TBF) - r = install_tbf(&rth, ppp->ifindex, down_speed, down_burst); + r = install_tbf(&rth, ses->ifindex, down_speed, down_burst); else { - r = install_htb(&rth, ppp->ifindex, down_speed, down_burst); + r = install_htb(&rth, ses->ifindex, down_speed, down_burst); if (r == 0) - r = install_leaf_qdisc(&rth, ppp->ifindex, 0x00010001, 0x00020000); + r = install_leaf_qdisc(&rth, ses->ifindex, 0x00010001, 0x00020000); } if (conf_up_limiter == LIM_POLICE) - r = install_police(&rth, ppp->ifindex, up_speed, up_burst); + r = install_police(&rth, ses->ifindex, up_speed, up_burst); else { - r = install_htb_ifb(&rth, ppp->ifindex, ppp->unit_idx + 1, up_speed, up_burst); + r = install_htb_ifb(&rth, ses->ifindex, ses->unit_idx + 1, up_speed, up_burst); if (r == 0) - r = install_leaf_qdisc(&rth, conf_ifb_ifindex, 0x00010001 + ppp->unit_idx + 1, (1 + ppp->unit_idx + 1) << 16); + r = install_leaf_qdisc(&rth, conf_ifb_ifindex, 0x00010001 + ses->unit_idx + 1, (1 + ses->unit_idx + 1) << 16); } rtnl_close(&rth); @@ -449,7 +449,7 @@ int install_limiter(struct ppp_t *ppp, int down_speed, int down_burst, int up_sp return r; } -int remove_limiter(struct ppp_t *ppp) +int remove_limiter(struct ap_session *ses) { struct rtnl_handle rth; @@ -458,11 +458,11 @@ int remove_limiter(struct ppp_t *ppp) return -1; } - remove_root(&rth, ppp->ifindex); - remove_ingress(&rth, ppp->ifindex); + remove_root(&rth, ses->ifindex); + remove_ingress(&rth, ses->ifindex); if (conf_up_limiter == LIM_HTB) - 
remove_htb_ifb(&rth, ppp->ifindex, ppp->unit_idx + 1); + remove_htb_ifb(&rth, ses->ifindex, ses->unit_idx + 1); return 0; } diff --git a/accel-pppd/shaper/shaper.c b/accel-pppd/shaper/shaper.c index 8ae2630a..2b749c9b 100644 --- a/accel-pppd/shaper/shaper.c +++ b/accel-pppd/shaper/shaper.c @@ -59,8 +59,8 @@ struct time_range_pd_t; struct shaper_pd_t { struct list_head entry; - struct ppp_t *ppp; - struct ppp_pd_t pd; + struct ap_session *ses; + struct ap_private pd; int temp_down_speed; int temp_up_speed; int down_speed; @@ -98,12 +98,12 @@ static struct triton_context_t shaper_ctx = { .before_switch = log_switch, }; -static struct shaper_pd_t *find_pd(struct ppp_t *ppp, int create) +static struct shaper_pd_t *find_pd(struct ap_session *ses, int create) { - struct ppp_pd_t *pd; + struct ap_private *pd; struct shaper_pd_t *spd; - list_for_each_entry(pd, &ppp->pd_list, entry) { + list_for_each_entry(pd, &ses->pd_list, entry) { if (pd->key == &pd_key) { spd = container_of(pd, typeof(*spd), pd); return spd; @@ -118,8 +118,8 @@ static struct shaper_pd_t *find_pd(struct ppp_t *ppp, int create) } memset(spd, 0, sizeof(*spd)); - spd->ppp = ppp; - list_add_tail(&spd->pd.entry, &ppp->pd_list); + spd->ses = ses; + list_add_tail(&spd->pd.entry, &ses->pd_list); spd->pd.key = &pd_key; INIT_LIST_HEAD(&spd->tr_list); @@ -279,7 +279,7 @@ static void ev_radius_access_accept(struct ev_radius_t *ev) { int down_speed, down_burst; int up_speed, up_burst; - struct shaper_pd_t *pd = find_pd(ev->ppp, 1); + struct shaper_pd_t *pd = find_pd(ev->ses, 1); if (!pd) return; @@ -307,7 +307,7 @@ static void ev_radius_access_accept(struct ev_radius_t *ev) } if (down_speed > 0 && up_speed > 0) { - if (!install_limiter(ev->ppp, down_speed, down_burst, up_speed, up_burst)) { + if (!install_limiter(ev->ses, down_speed, down_burst, up_speed, up_burst)) { if (conf_verbose) log_ppp_info2("shaper: installed shaper %i/%i (Kbit)\n", down_speed, up_speed); } @@ -316,7 +316,7 @@ static void 
ev_radius_access_accept(struct ev_radius_t *ev) static void ev_radius_coa(struct ev_radius_t *ev) { - struct shaper_pd_t *pd = find_pd(ev->ppp, 0); + struct shaper_pd_t *pd = find_pd(ev->ses, 0); if (!pd) { ev->res = -1; @@ -335,7 +335,7 @@ static void ev_radius_coa(struct ev_radius_t *ev) pd->up_speed = 0; if (conf_verbose) log_ppp_info2("shaper: removed shaper\n"); - remove_limiter(ev->ppp); + remove_limiter(ev->ses); } return; } @@ -344,13 +344,13 @@ static void ev_radius_coa(struct ev_radius_t *ev) pd->down_speed = pd->cur_tr->down_speed; pd->up_speed = pd->cur_tr->up_speed; - if (remove_limiter(ev->ppp)) { + if (remove_limiter(ev->ses)) { ev->res = -1; return; } if (pd->down_speed > 0 || pd->up_speed > 0) { - if (install_limiter(ev->ppp, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst)) { + if (install_limiter(ev->ses, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst)) { ev->res= -1; return; } else { @@ -367,7 +367,7 @@ static void ev_radius_coa(struct ev_radius_t *ev) static void ev_shaper(struct ev_shaper_t *ev) { - struct shaper_pd_t *pd = find_pd(ev->ppp, 1); + struct shaper_pd_t *pd = find_pd(ev->ses, 1); int down_speed = 0, down_burst = 0; int up_speed = 0, up_burst = 0; int tr_id = 0; @@ -402,16 +402,16 @@ static void ev_shaper(struct ev_shaper_t *ev) } if (pd->down_speed > 0 && pd->up_speed > 0) { - if (!install_limiter(ev->ppp, down_speed, down_burst, up_speed, up_burst)) { + if (!install_limiter(ev->ses, down_speed, down_burst, up_speed, up_burst)) { if (conf_verbose) log_ppp_info2("shaper: installed shaper %i/%i (Kbit)\n", down_speed, up_speed); } } } -static void ev_ppp_pre_up(struct ppp_t *ppp) +static void ev_ppp_pre_up(struct ap_session *ses) { - struct shaper_pd_t *pd = find_pd(ppp, 1); + struct shaper_pd_t *pd = find_pd(ses, 1); if (!pd) return; @@ -420,16 +420,16 @@ static void ev_ppp_pre_up(struct ppp_t *ppp) pd->temp_up_speed = temp_up_speed; pd->down_speed 
= temp_down_speed; pd->up_speed = temp_up_speed; - if (!install_limiter(ppp, temp_down_speed, 0, temp_up_speed, 0)) { + if (!install_limiter(ses, temp_down_speed, 0, temp_up_speed, 0)) { if (conf_verbose) log_ppp_info2("shaper: installed shaper %i/%i (Kbit)\n", temp_down_speed, temp_up_speed); } } } -static void ev_ppp_finishing(struct ppp_t *ppp) +static void ev_ppp_finishing(struct ap_session *ses) { - struct shaper_pd_t *pd = find_pd(ppp, 0); + struct shaper_pd_t *pd = find_pd(ses, 0); if (pd) { clear_tr_pd(pd); @@ -439,7 +439,7 @@ static void ev_ppp_finishing(struct ppp_t *ppp) list_del(&pd->pd.entry); if (pd->down_speed || pd->up_speed) - remove_limiter(ppp); + remove_limiter(ses); _free(pd); } @@ -454,16 +454,16 @@ static void shaper_change_help(char * const *f, int f_cnt, void *cli) static void shaper_change(struct shaper_pd_t *pd) { if (pd->down_speed || pd->up_speed) - remove_limiter(pd->ppp); + remove_limiter(pd->ses); if (pd->temp_down_speed || pd->temp_up_speed) { pd->down_speed = pd->temp_down_speed; pd->up_speed = pd->temp_up_speed; - install_limiter(pd->ppp, pd->temp_down_speed, 0, pd->temp_up_speed, 0); + install_limiter(pd->ses, pd->temp_down_speed, 0, pd->temp_up_speed, 0); } else if (pd->cur_tr->down_speed || pd->cur_tr->up_speed) { pd->down_speed = pd->cur_tr->down_speed; pd->up_speed = pd->cur_tr->up_speed; - install_limiter(pd->ppp, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst); + install_limiter(pd->ses, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst); } else { pd->down_speed = 0; pd->up_speed = 0; @@ -503,7 +503,7 @@ static int shaper_change_exec(const char *cmd, char * const *f, int f_cnt, void pthread_rwlock_rdlock(&shaper_lock); list_for_each_entry(pd, &shaper_list, entry) { - if (all || !strcmp(f[2], pd->ppp->ifname)) { + if (all || !strcmp(f[2], pd->ses->ifname)) { if (temp) { pd->temp_down_speed = down_speed; pd->temp_up_speed = up_speed; @@ 
-517,7 +517,7 @@ static int shaper_change_exec(const char *cmd, char * const *f, int f_cnt, void pd->cur_tr->up_speed = up_speed; pd->cur_tr->up_burst = up_burst; } - triton_context_call(pd->ppp->ctrl->ctx, (triton_event_func)shaper_change, pd); + triton_context_call(pd->ses->ctrl->ctx, (triton_event_func)shaper_change, pd); if (!all) { found = 1; break; @@ -540,12 +540,12 @@ static void shaper_restore_help(char * const *f, int f_cnt, void *cli) static void shaper_restore(struct shaper_pd_t *pd) { - remove_limiter(pd->ppp); + remove_limiter(pd->ses); if (pd->cur_tr) { pd->down_speed = pd->cur_tr->down_speed; pd->up_speed = pd->cur_tr->up_speed; - install_limiter(pd->ppp, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst); + install_limiter(pd->ses, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst); } else { pd->down_speed = 0; pd->up_speed = 0; @@ -576,10 +576,10 @@ static int shaper_restore_exec(const char *cmd, char * const *f, int f_cnt, void list_for_each_entry(pd, &shaper_list, entry) { if (!pd->temp_down_speed) continue; - if (all || !strcmp(f[2], pd->ppp->ifname)) { + if (all || !strcmp(f[2], pd->ses->ifname)) { pd->temp_down_speed = 0; pd->temp_up_speed = 0; - triton_context_call(pd->ppp->ctrl->ctx, (triton_event_func)shaper_restore, pd); + triton_context_call(pd->ses->ctrl->ctx, (triton_event_func)shaper_restore, pd); if (!all) { found = 1; break; @@ -594,9 +594,9 @@ static int shaper_restore_exec(const char *cmd, char * const *f, int f_cnt, void return CLI_CMD_OK; } -static void print_rate(const struct ppp_t *ppp, char *buf) +static void print_rate(const struct ap_session *ses, char *buf) { - struct shaper_pd_t *pd = find_pd((struct ppp_t *)ppp, 0); + struct shaper_pd_t *pd = find_pd((struct ap_session *)ses, 0); if (pd && (pd->down_speed || pd->up_speed)) sprintf(buf, "%i/%i", pd->down_speed, pd->up_speed); @@ -625,7 +625,7 @@ static void update_shaper_tr(struct 
shaper_pd_t *pd) { struct time_range_pd_t *tr; - if (pd->ppp->terminating) + if (pd->ses->terminating) return; list_for_each_entry(tr, &pd->tr_list, entry) { @@ -641,13 +641,13 @@ static void update_shaper_tr(struct shaper_pd_t *pd) if (pd->down_speed || pd->up_speed) { if (pd->cur_tr && pd->down_speed == pd->cur_tr->down_speed && pd->up_speed == pd->cur_tr->up_speed) return; - remove_limiter(pd->ppp); + remove_limiter(pd->ses); } if (pd->cur_tr && (pd->cur_tr->down_speed || pd->cur_tr->up_speed)) { pd->down_speed = pd->cur_tr->down_speed; pd->up_speed = pd->cur_tr->up_speed; - if (!install_limiter(pd->ppp, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst)) { + if (!install_limiter(pd->ses, pd->cur_tr->down_speed, pd->cur_tr->down_burst, pd->cur_tr->up_speed, pd->cur_tr->up_burst)) { if (conf_verbose) log_ppp_info2("shaper: changed shaper %i/%i (Kbit)\n", pd->cur_tr->down_speed, pd->cur_tr->up_speed); } @@ -667,7 +667,7 @@ static void time_range_begin_timer(struct triton_timer_t *t) pthread_rwlock_rdlock(&shaper_lock); list_for_each_entry(pd, &shaper_list, entry) - triton_context_call(pd->ppp->ctrl->ctx, (triton_event_func)update_shaper_tr, pd); + triton_context_call(pd->ses->ctrl->ctx, (triton_event_func)update_shaper_tr, pd); pthread_rwlock_unlock(&shaper_lock); } @@ -681,7 +681,7 @@ static void time_range_end_timer(struct triton_timer_t *t) pthread_rwlock_rdlock(&shaper_lock); list_for_each_entry(pd, &shaper_list, entry) - triton_context_call(pd->ppp->ctrl->ctx, (triton_event_func)update_shaper_tr, pd); + triton_context_call(pd->ses->ctrl->ctx, (triton_event_func)update_shaper_tr, pd); pthread_rwlock_unlock(&shaper_lock); } @@ -935,8 +935,8 @@ static void init(void) triton_event_register_handler(EV_RADIUS_COA, (triton_event_func)ev_radius_coa); } #endif - triton_event_register_handler(EV_PPP_PRE_UP, (triton_event_func)ev_ppp_pre_up); - triton_event_register_handler(EV_PPP_FINISHING, (triton_event_func)ev_ppp_finishing); 
+ triton_event_register_handler(EV_SES_PRE_UP, (triton_event_func)ev_ppp_pre_up); + triton_event_register_handler(EV_SES_FINISHING, (triton_event_func)ev_ppp_finishing); //triton_event_register_handler(EV_CTRL_FINISHED, (triton_event_func)ev_ctrl_finished); triton_event_register_handler(EV_SHAPER, (triton_event_func)ev_shaper); triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); diff --git a/accel-pppd/shaper/shaper.h b/accel-pppd/shaper/shaper.h index 41e6e437..6322a6ba 100644 --- a/accel-pppd/shaper/shaper.h +++ b/accel-pppd/shaper/shaper.h @@ -39,8 +39,8 @@ extern int conf_lq_arg1; extern int conf_lq_arg2; extern int conf_lq_arg3; -int install_limiter(struct ppp_t *ppp, int down_speed, int down_burst, int up_speed, int up_burst); -int remove_limiter(struct ppp_t *ppp); +int install_limiter(struct ap_session *ses, int down_speed, int down_burst, int up_speed, int up_burst); +int remove_limiter(struct ap_session *ses); int install_leaf_qdisc(struct rtnl_handle *rth, int ifindex, int parent, int handle); int init_ifb(const char *); diff --git a/drivers/ipoe/CMakeLists.txt b/drivers/ipoe/CMakeLists.txt new file mode 100644 index 00000000..14ac12eb --- /dev/null +++ b/drivers/ipoe/CMakeLists.txt @@ -0,0 +1,19 @@ +if (NOT DEFINED KDIR) + set(KDIR "/usr/src/linux") +endif (NOT DEFINED KDIR) + +ADD_CUSTOM_COMMAND(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/driver/ipoe.ko + COMMAND rm -rf ${CMAKE_CURRENT_BINARY_DIR}/driver + COMMAND mkdir ${CMAKE_CURRENT_BINARY_DIR}/driver + COMMAND ln -sf ${CMAKE_CURRENT_SOURCE_DIR}/* ${CMAKE_CURRENT_BINARY_DIR}/driver + COMMAND make -C ${KDIR} M=${CMAKE_CURRENT_BINARY_DIR}/driver modules + DEPENDS ipoe.c ipoe.h +) + +ADD_CUSTOM_TARGET(ipoe_drv ALL + DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/driver/ipoe.ko +) + + +INSTALL(CODE "EXECUTE_PROCESS(COMMAND make -C ${KDIR} M=${CMAKE_CURRENT_BINARY_DIR}/drivers/ipoe modules_install)") + diff --git a/drivers/ipoe/Makefile b/drivers/ipoe/Makefile new file mode 100644 index 
00000000..22ea273d --- /dev/null +++ b/drivers/ipoe/Makefile @@ -0,0 +1,4 @@ +obj-m += ipoe.o + +default: + make -C $(KDIR) M=$(PWD) modules diff --git a/drivers/ipoe/ipoe.c b/drivers/ipoe/ipoe.c new file mode 100644 index 00000000..18f0f708 --- /dev/null +++ b/drivers/ipoe/ipoe.c @@ -0,0 +1,1535 @@ +#include <linux/capability.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/in.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/if_arp.h> +#include <linux/mroute.h> +#include <linux/init.h> +#include <linux/if_ether.h> +#include <linux/semaphore.h> +#include <linux/version.h> + +#include <net/genetlink.h> +#include <net/route.h> +#include <net/sock.h> +#include <net/ip.h> +#include <net/icmp.h> +#include <net/flow.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> + +#include "ipoe.h" + +#define BEGIN_UPDATE 1 +#define UPDATE 2 +#define END_UPDATE 3 + +#define HASH_BITS 0xff + +#define IPOE_MAGIC 0x55aa + +#define IPOE_QUEUE_LEN 100 +#define IPOE_RATE_U 3000 //3s +#define IPOE_TIMEOUT_U 30 //5s + +#define IPOE_NLMSG_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN - 128) + +#ifndef DEFINE_SEMAPHORE +#define DEFINE_SEMAPHORE(name) struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) +#endif + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) +struct ipoe_stats +{ + struct u64_stats_sync sync; + u64 packets; + u64 bytes; +}; +#endif + +struct ipoe_session +{ + struct list_head entry; + struct list_head entry2; + + __be32 addr; + __be32 peer_addr; + __u8 hwaddr[ETH_ALEN]; + + struct net_device *dev; + struct net_device *link_dev; + + atomic_t refs; + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) + struct ipoe_stats __percpu *rx_stats; + struct ipoe_stats __percpu *tx_stats; +#endif +}; + +struct ipoe_network +{ + struct rcu_head rcu_head; + struct list_head entry; + + __be32 
addr; + __be32 mask; +}; + +struct ipoe_entry_u +{ + struct rcu_head rcu_head; + struct list_head entry1; + struct list_head entry2; + + __be32 addr; + unsigned long tstamp; +}; + +static struct list_head ipoe_list[HASH_BITS + 1]; +static struct list_head ipoe_list1_u[HASH_BITS + 1]; +static LIST_HEAD(ipoe_list2); +static LIST_HEAD(ipoe_list2_u); +static DEFINE_SEMAPHORE(ipoe_wlock); +static LIST_HEAD(ipoe_networks); +static struct work_struct ipoe_queue_work; +static struct sk_buff_head ipoe_queue; + +static void ipoe_start_queue_work(unsigned long); +static DEFINE_TIMER(ipoe_timer_u, ipoe_start_queue_work, 0, 0); + +static struct ipoe_session *ipoe_lookup(__be32 addr); +static int ipoe_do_nat(struct sk_buff *skb, __be32 new_addr, int to_peer); +static void ipoe_queue_u(struct sk_buff *skb, __be32 addr); +static int ipoe_lookup1_u(__be32 addr, unsigned long *ts); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) +static const struct net_device_ops ipoe_netdev_ops; +#endif + +static struct genl_family ipoe_nl_family; +static struct genl_multicast_group ipoe_nl_mcg; + +static inline int hash_addr(__be32 addr) +{ +#ifdef __LITTLE_ENDIAN + return ((addr >> 24) ^ (addr >> 16)) & HASH_BITS; +#else + return (addr ^ (addr >> 8)) & HASH_BITS; +#endif +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) +static void ipoe_update_stats(struct sk_buff *skb, struct ipoe_stats *st) +{ + u64_stats_update_begin(&st->sync); + st->packets++; + st->bytes += skb->len; + u64_stats_update_end(&st->sync); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) +static void __kfree_rcu(struct rcu_head *head) +{ + kfree(head); +} +#endif + +static int ipoe_check_network(__be32 addr) +{ + struct ipoe_network *n; + int r = 0; + + rcu_read_lock(); + + list_for_each_entry_rcu(n, &ipoe_networks, entry) { + if ((addr & n->mask) == n->addr) { + r = 1; + break; + } + } + + rcu_read_unlock(); + + return r; +} + +static int ipoe_do_nat(struct sk_buff *skb, __be32 new_addr, int to_peer) +{ + 
struct iphdr *iph; + int noff; + int ihl; + __be32 addr; + + noff = skb_network_offset(skb); + + iph = ip_hdr(skb); + + if (to_peer) + addr = iph->daddr; + else + addr = iph->saddr; + + if (skb_cloned(skb) && + !skb_clone_writable(skb, sizeof(*iph) + noff) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + return -1; + + iph = ip_hdr(skb); + + if (to_peer) + iph->daddr = new_addr; + else + iph->saddr = new_addr; + + csum_replace4(&iph->check, addr, new_addr); + + ihl = iph->ihl * 4; + + switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) { + case IPPROTO_TCP: + { + struct tcphdr *tcph; + + if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) || + (skb_cloned(skb) && + !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) + return -1; + + tcph = (void *)(skb_network_header(skb) + ihl); + inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1); + break; + } + case IPPROTO_UDP: + { + struct udphdr *udph; + + if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) || + (skb_cloned(skb) && + !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) + return -1; + + udph = (void *)(skb_network_header(skb) + ihl); + if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { + inet_proto_csum_replace4(&udph->check, skb, addr, new_addr, 1); + if (!udph->check) + udph->check = CSUM_MANGLED_0; + } + break; + } + case IPPROTO_ICMP: + { + struct icmphdr *icmph; + + if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff)) + return -1; + + icmph = (void *)(skb_network_header(skb) + ihl); + + if ((icmph->type != ICMP_DEST_UNREACH) && + (icmph->type != ICMP_TIME_EXCEEDED) && + (icmph->type != ICMP_PARAMETERPROB)) + break; + + if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) + + noff)) + return -1; + + icmph = (void *)(skb_network_header(skb) + ihl); + iph = (void *)(icmph + 1); + + if (skb_cloned(skb) && + !skb_clone_writable(skb, ihl + sizeof(*icmph) + + sizeof(*iph) + noff) && + 
pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + return -1; + + icmph = (void *)(skb_network_header(skb) + ihl); + iph = (void *)(icmph + 1); + if (to_peer) + iph->saddr = new_addr; + else + iph->daddr = new_addr; + + inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr, 0); + break; + } + default: + break; + } + + return 0; +} + +static struct net *pick_net(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_NS + const struct dst_entry *dst; + + if (skb->dev != NULL) + return dev_net(skb->dev); + dst = skb_dst(skb); + if (dst != NULL && dst->dev != NULL) + return dev_net(dst->dev); +#endif + return &init_net; +} + +static int ipoe_route4(struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct net *net = pick_net(skb); + struct rtable *rt; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,37) + struct flowi fl4; +#else + struct flowi4 fl4; +#endif + + memset(&fl4, 0, sizeof(fl4)); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,37) + fl4.fl4_dst = iph->daddr; + fl4.fl4_tos = RT_TOS(iph->tos); + fl4.fl4_scope = RT_SCOPE_UNIVERSE; + if (ip_route_output_key(net, &rt, &fl4)) + return -1; +#else + fl4.daddr = iph->daddr; + fl4.flowi4_tos = RT_TOS(iph->tos); + fl4.flowi4_scope = RT_SCOPE_UNIVERSE; + rt = ip_route_output_key(net, &fl4); + if (IS_ERR(rt)) + return -1; +#endif + + skb_dst_drop(skb); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,37) + skb_dst_set(skb, &rt->u.dst); + skb->dev = rt->u.dst.dev; +#else + skb_dst_set(skb, &rt->dst); + skb->dev = rt->dst.dev; +#endif + + return 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) +static int ipoe_xmit(struct sk_buff *skb, struct net_device *dev) +#else +static netdev_tx_t ipoe_xmit(struct sk_buff *skb, struct net_device *dev) +#endif +{ + struct ipoe_session *ses = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct iphdr *iph; + struct ethhdr *eth; + struct sk_buff *skb1; + /*struct arphdr *arp; + unsigned char *arp_ptr; + __be32 tip;*/ + int noff; + + if (!ses->peer_addr) + 
goto drop; + + noff = skb_network_offset(skb); + + if (skb->protocol == htons(ETH_P_IP)) { + if (!pskb_may_pull(skb, sizeof(*iph) + noff)) + goto drop; + + iph = ip_hdr(skb); + + //pr_info("ipoe: xmit %08x %08x\n", iph->saddr, iph->daddr); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) + ipoe_update_stats(skb, this_cpu_ptr(ses->tx_stats)); +#else + stats->tx_packets++; + stats->tx_bytes += skb->len; +#endif + + if (iph->daddr == ses->addr) { + if (skb_shared(skb)) { + skb1 = skb_clone(skb, GFP_ATOMIC); + if (!skb1) + goto drop; + skb = skb1; + } + + if (ipoe_do_nat(skb, ses->peer_addr, 1)) + goto drop; + + if (!ses->link_dev) { + iph = ip_hdr(skb); + + ip_send_check(iph); + + if (ipoe_route4(skb)) + goto drop; + + pskb_pull(skb, ETH_HLEN); + skb_reset_network_header(skb); + + ip_local_out(skb); + + return NETDEV_TX_OK; + } else { + eth = (struct ethhdr *)skb->data; + + memcpy(eth->h_dest, ses->hwaddr, ETH_ALEN); + memcpy(eth->h_source, ses->link_dev->dev_addr, ETH_ALEN); + } + } + } /*else if (skb->protocol == htons(ETH_P_ARP)) { + if (!pskb_may_pull(skb, arp_hdr_len(dev) + noff)) + goto drop; + + arp = arp_hdr(skb); + arp_ptr = (unsigned char *)(arp + 1); + + if (arp->ar_op == htons(ARPOP_REQUEST)) { + memcpy(&tip, arp_ptr + ETH_ALEN + 4 + ETH_ALEN, 4); + if (tip == ses->addr) { + if (skb_cloned(skb) && + !skb_clone_writable(skb, arp_hdr_len(dev) + noff) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto drop; + + arp = arp_hdr(skb); + arp_ptr = (unsigned char *)(arp + 1); + memcpy(arp_ptr + ETH_ALEN + 4 + ETH_ALEN, &ses->peer_addr, 4); + } + } + }*/ + + if (ses->link_dev) { + skb->dev = ses->link_dev; + //skb->skb_iif = dev->ifindex; + dev_queue_xmit(skb); + + return NETDEV_TX_OK; + } +drop: + stats->tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int ipoe_rcv_arp(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) +{ + struct ipoe_session *ses = NULL; + struct arphdr *arp; + unsigned 
char *arp_ptr; + int noff; + __be32 sip; + struct sk_buff *skb1; + unsigned char *cb_ptr; + struct net_device_stats *stats; + + //pr_info("ipoe: recv arp\n"); + + if (skb->pkt_type == PACKET_OTHERHOST) + goto drop; + + cb_ptr = skb->cb + sizeof(skb->cb) - 2; + + if (*(__u16 *)cb_ptr == IPOE_MAGIC) + goto drop; + + noff = skb_network_offset(skb); + + if (!pskb_may_pull(skb, arp_hdr_len(dev) + noff)) + goto drop; + + arp = arp_hdr(skb); + arp_ptr = (unsigned char *)(arp + 1); + + if (arp->ar_pro != htons(ETH_P_IP)) + goto drop; + + memcpy(&sip, arp_ptr + ETH_ALEN, 4); + + if (!sip) + goto drop; + //pr_info("ipoe: recv arp %08x\n", sip); + + ses = ipoe_lookup(sip); + + if (!ses) + goto drop; + + stats = &ses->dev->stats; + + if (ses->addr || skb->dev == ses->dev) { + atomic_dec(&ses->refs); + goto drop; + } + + skb1 = skb_clone(skb, GFP_ATOMIC); + if (!skb1) { + stats->rx_dropped++; + goto drop_unlock; + } + + skb1->dev = ses->dev; + skb1->skb_iif = ses->dev->ifindex; + + cb_ptr = skb1->cb + sizeof(skb1->cb) - 2; + *(__u16 *)cb_ptr = IPOE_MAGIC; + + netif_rx(skb1); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) + ipoe_update_stats(skb, this_cpu_ptr(ses->rx_stats)); +#else + stats->rx_packets++; + stats->rx_bytes += skb->len; +#endif + +drop_unlock: + atomic_dec(&ses->refs); + skb->pkt_type = PACKET_OTHERHOST; + +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static int ipoe_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) +{ + struct ipoe_session *ses = NULL; + struct iphdr *iph; + struct ethhdr *eth; + int noff; + struct sk_buff *skb1; + unsigned char *cb_ptr; + struct net_device_stats *stats; + + if (skb->pkt_type == PACKET_OTHERHOST) + goto drop; + + cb_ptr = skb->cb + sizeof(skb->cb) - 2; + + if (*(__u16 *)cb_ptr == IPOE_MAGIC) + goto drop; + + noff = skb_network_offset(skb); + + if (!pskb_may_pull(skb, sizeof(*iph) + noff)) + goto drop; + + iph = ip_hdr(skb); + + if (!iph->saddr) + goto drop; + + 
//pr_info("ipoe: recv %08x %08x\n", iph->saddr, iph->daddr); + if (!ipoe_check_network(iph->saddr)) + goto drop; + + ses = ipoe_lookup(iph->saddr); + + if (!ses) { + ipoe_queue_u(skb, iph->saddr); + goto drop; + } + + //pr_info("ipoe: recv cb=%x\n", *(__u16 *)cb_ptr); + + if (ses->link_dev) { + eth = eth_hdr(skb); + if (memcmp(eth->h_source, ses->hwaddr, ETH_ALEN)) + goto drop_unlock; + } + + stats = &ses->dev->stats; + + if (skb->dev == ses->dev) { + //pr_info("ipoe: dup\n"); + atomic_dec(&ses->refs); + goto drop; + } + + if (ses->addr && ipoe_check_network(iph->daddr)) { + atomic_dec(&ses->refs); + goto drop; + } + + skb1 = skb_clone(skb, GFP_ATOMIC); + if (!skb1) { + stats->rx_dropped++; + goto drop_unlock; + } + + if (ses->addr && ipoe_do_nat(skb1, ses->addr, 0)) { + kfree_skb(skb1); + goto drop_unlock; + } + + skb1->dev = ses->dev; + //skb1->skb_iif = ses->dev->ifindex; + + cb_ptr = skb1->cb + sizeof(skb1->cb) - 2; + *(__u16 *)cb_ptr = IPOE_MAGIC; + + netif_rx(skb1); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) + ipoe_update_stats(skb, this_cpu_ptr(ses->rx_stats)); +#else + stats->rx_packets++; + stats->rx_bytes += skb->len; +#endif + +drop_unlock: + atomic_dec(&ses->refs); + skb->pkt_type = PACKET_OTHERHOST; + +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static int ipoe_lookup1_u(__be32 addr, unsigned long *ts) +{ + struct ipoe_entry_u *e; + struct list_head *head = &ipoe_list1_u[hash_addr(addr)]; + int r = 0; + + rcu_read_lock(); + + list_for_each_entry_rcu(e, head, entry1) { + if (e->addr == addr) { + *ts = e->tstamp; + r = 1; + break; + } + } + + rcu_read_unlock(); + + return r; +} + +static struct ipoe_entry_u *ipoe_lookup2_u(__be32 addr) +{ + struct ipoe_entry_u *e; + struct list_head *head = &ipoe_list1_u[hash_addr(addr)]; + + list_for_each_entry_rcu(e, head, entry1) { + if (e->addr == addr) + return e; + } + + return NULL; +} + + +static void ipoe_queue_u(struct sk_buff *skb, __u32 addr) +{ + unsigned long ts; + + if (ipoe_lookup1_u(addr, 
&ts) && jiffies_to_msecs(jiffies - ts) < IPOE_RATE_U) { + //pr_info("not queue %08x\n", addr); + return; + } + + if (skb_queue_len(&ipoe_queue) > IPOE_QUEUE_LEN) + return; + + skb = skb_clone(skb, GFP_ATOMIC); + if (!skb) + return; + + //pr_info("queue %08x\n", addr); + + skb_queue_tail(&ipoe_queue, skb); + schedule_work(&ipoe_queue_work); +} + +static void ipoe_start_queue_work(unsigned long dummy) +{ + schedule_work(&ipoe_queue_work); +} + +static void ipoe_process_queue(struct work_struct *w) +{ + struct sk_buff *skb; + struct ipoe_entry_u *e; + struct ethhdr *eth; + struct iphdr *iph; + struct sk_buff *report_skb = NULL; + void *header = NULL; + struct nlattr *ns; + int id = 1; + + do { + while ((skb = skb_dequeue(&ipoe_queue))) { + eth = eth_hdr(skb); + iph = ip_hdr(skb); + + e = ipoe_lookup2_u(iph->saddr); + + if (!e) { + e = kmalloc(sizeof(*e), GFP_KERNEL); + e->addr = iph->saddr; + e->tstamp = jiffies; + + list_add_tail_rcu(&e->entry1, &ipoe_list1_u[hash_addr(iph->saddr)]); + list_add_tail(&e->entry2, &ipoe_list2_u); + + //pr_info("create %08x\n", e->addr); + } else if (jiffies_to_msecs(jiffies - e->tstamp) < IPOE_RATE_U) { + //pr_info("skip %08x\n", e->addr); + kfree_skb(skb); + continue; + } else { + e->tstamp = jiffies; + list_move_tail(&e->entry2, &ipoe_list2_u); + //pr_info("update %08x\n", e->addr); + } + + if (!report_skb) { + report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (report_skb) + header = genlmsg_put(report_skb, 0, ipoe_nl_mcg.id, &ipoe_nl_family, 0, IPOE_REP_PKT); + } + + if (report_skb) { + ns = nla_nest_start(report_skb, id++); + if (!ns) + goto nl_err; + + if (nla_put_u32(report_skb, IPOE_ATTR_IFINDEX, skb->dev ? 
skb->dev->ifindex : skb->skb_iif)) + goto nl_err; + + if (nla_put(report_skb, IPOE_ATTR_ETH_HDR, sizeof(*eth), eth)) + goto nl_err; + + if (nla_put(report_skb, IPOE_ATTR_IP_HDR, sizeof(*iph), iph)) + goto nl_err; + + if (nla_nest_end(report_skb, ns) >= IPOE_NLMSG_SIZE) { + genlmsg_end(report_skb, header); + genlmsg_multicast(report_skb, 0, ipoe_nl_mcg.id, GFP_KERNEL); + report_skb = NULL; + } + + kfree_skb(skb); + continue; + +nl_err: + nlmsg_free(report_skb); + report_skb = NULL; + } + + kfree_skb(skb); + } + + while (!list_empty(&ipoe_list2_u)) { + e = list_entry(ipoe_list2_u.next, typeof(*e), entry2); + if (jiffies_to_msecs(jiffies - e->tstamp) < IPOE_TIMEOUT_U * 1000) + break; + + //pr_info("free %08x\n", e->addr); + list_del(&e->entry2); + list_del_rcu(&e->entry1); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) + kfree_rcu(e, rcu_head); +#else + call_rcu(&e->rcu_head, __kfree_rcu); +#endif + } + + synchronize_rcu(); + } while (skb_queue_len(&ipoe_queue)); + + if (report_skb) { + genlmsg_end(report_skb, header); + genlmsg_multicast(report_skb, 0, ipoe_nl_mcg.id, GFP_KERNEL); + } + + if (!list_empty(&ipoe_list2_u)) + mod_timer(&ipoe_timer_u, jiffies + IPOE_TIMEOUT_U * HZ); + else + del_timer(&ipoe_timer_u); +} + +static struct ipoe_session *ipoe_lookup(__be32 addr) +{ + struct ipoe_session *ses; + struct list_head *head; + + head = &ipoe_list[hash_addr(addr)]; + + rcu_read_lock(); + + list_for_each_entry_rcu(ses, head, entry) { + if (ses->peer_addr == addr) { + atomic_inc(&ses->refs); + rcu_read_unlock(); + return ses; + } + } + + rcu_read_unlock(); + + return NULL; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) +static struct rtnl_link_stats64 *ipoe_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ipoe_session *ses = netdev_priv(dev); + struct ipoe_stats *st; + unsigned int start; + int i; + u64 packets, bytes; + u64 rx_packets = 0, rx_bytes = 0, tx_packets = 0, tx_bytes = 0; + + for_each_possible_cpu(i) { + st = 
per_cpu_ptr(ses->rx_stats, i); + + do { + start = u64_stats_fetch_begin_bh(&st->sync); + packets = st->packets; + bytes = st->bytes; + } while (u64_stats_fetch_retry_bh(&st->sync, start)); + + rx_packets += packets; + rx_bytes += bytes; + + st = per_cpu_ptr(ses->tx_stats, i); + + do { + start = u64_stats_fetch_begin_bh(&st->sync); + packets = st->packets; + bytes = st->bytes; + } while (u64_stats_fetch_retry_bh(&st->sync, start)); + + tx_packets += packets; + tx_bytes += bytes; + } + + stats->rx_packets = rx_packets; + stats->rx_bytes = rx_bytes; + stats->tx_packets = tx_packets; + stats->tx_bytes = tx_bytes; + + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; + + return stats; +} +#endif + +static void ipoe_free_netdev(struct net_device *dev) +{ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) + struct ipoe_session *ses = netdev_priv(dev); + + if (ses->rx_stats) + free_percpu(ses->rx_stats); + if (ses->tx_stats) + free_percpu(ses->tx_stats); +#endif + + free_netdev(dev); +} + +static int ipoe_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) +{ + const struct ipoe_session *ses = netdev_priv(dev); + + if (ses->link_dev) + return dev_hard_header(skb, ses->link_dev, type, daddr, + saddr, len); + else + return eth_header(skb, dev, type, daddr, saddr, len); +} + +static const struct header_ops ipoe_hard_header_ops = { + .create = ipoe_hard_header, + .rebuild = eth_rebuild_header, + .parse = eth_header_parse, + .cache = eth_header_cache, + .cache_update = eth_header_cache_update, +}; + +static void ipoe_netdev_setup(struct net_device *dev) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + dev->hard_start_xmit = ipoe_xmit; +#else + dev->netdev_ops = &ipoe_netdev_ops; +#endif + dev->destructor = ipoe_free_netdev; + + dev->type = ARPHRD_ETHER; + dev->hard_header_len = 0; + dev->mtu = ETH_DATA_LEN; + dev->flags = IFF_MULTICAST | IFF_POINTOPOINT; + 
dev->iflink = 0; + dev->addr_len = ETH_ALEN; + dev->features |= NETIF_F_NETNS_LOCAL; + dev->header_ops = &ipoe_hard_header_ops, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) + dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; +#endif +} + +static int ipoe_create(__be32 peer_addr, __be32 addr, const char *link_ifname, const __u8 *hwaddr) +{ + struct ipoe_session *ses; + struct net_device *dev, *link_dev = NULL; + char name[IFNAMSIZ]; + int r = -EINVAL; + int h = hash_addr(peer_addr); + + if (link_ifname) { + link_dev = dev_get_by_name(&init_net, link_ifname); + if (!link_dev) + return -EINVAL; + sprintf(name, "%s.ipoe%%d", link_ifname); + } else + sprintf(name, "ipoe%%d"); + + dev = alloc_netdev(sizeof(*ses), name, ipoe_netdev_setup); + if (dev == NULL) { + r = -ENOMEM; + goto failed; + } + + dev_net_set(dev, &init_net); + + r = dev_alloc_name(dev, name); + if (r < 0) { + r = -ENOMEM; + goto failed_free; + } + + ses = netdev_priv(dev); + atomic_set(&ses->refs, 0); + ses->dev = dev; + ses->addr = addr; + ses->peer_addr = peer_addr; + ses->link_dev = link_dev; + memcpy(ses->hwaddr, hwaddr, ETH_ALEN); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) + ses->rx_stats = alloc_percpu(struct ipoe_stats); + ses->tx_stats = alloc_percpu(struct ipoe_stats); + if (!ses->rx_stats || !ses->tx_stats) { + r = -ENOMEM; + goto failed_free; + } +#endif + + if (link_dev) { + dev->features = link_dev->features; + memcpy(dev->dev_addr, link_dev->dev_addr, ETH_ALEN); + memcpy(dev->broadcast, link_dev->broadcast, ETH_ALEN); + } + + if (addr) + dev->flags |= IFF_NOARP; + else + dev->flags &= ~IFF_NOARP; + + rtnl_lock(); + r = register_netdevice(dev); + rtnl_unlock(); + if (r < 0) + goto failed_free; + + down(&ipoe_wlock); + if (peer_addr) + list_add_tail_rcu(&ses->entry, &ipoe_list[h]); + list_add_tail(&ses->entry2, &ipoe_list2); + r = dev->ifindex; + up(&ipoe_wlock); + + return r; + +failed_free: + free_netdev(dev); +failed: + if (link_dev) + dev_put(link_dev); + return r; +} + +static int 
ipoe_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *msg; + void *hdr; + int ret = -ENOBUFS; + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; + goto out; + } + + hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, + &ipoe_nl_family, 0, IPOE_CMD_NOOP); + if (IS_ERR(hdr)) { + ret = PTR_ERR(hdr); + goto err_out; + } + + genlmsg_end(msg, hdr); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + return genlmsg_unicast(msg, info->snd_pid); +#else + return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); +#endif + +err_out: + nlmsg_free(msg); + +out: + return ret; +} + +static int ipoe_nl_cmd_create(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *msg; + void *hdr; + __be32 peer_addr = 0, addr = 0; + int ret = 0; + char ifname[IFNAMSIZ]; + __u8 hwaddr[ETH_ALEN]; + struct ipoe_session *ses; + //struct net *net = genl_info_net(info); + + if (info->attrs[IPOE_ATTR_PEER_ADDR]) { + peer_addr = nla_get_be32(info->attrs[IPOE_ATTR_PEER_ADDR]); + if (peer_addr) { + ses = ipoe_lookup(peer_addr); + if (ses) { + atomic_dec(&ses->refs); + return -EEXIST; + } + } + } + + if (info->attrs[IPOE_ATTR_ADDR]) + addr = nla_get_be32(info->attrs[IPOE_ATTR_ADDR]); + + if (info->attrs[IPOE_ATTR_IFNAME]) + nla_strlcpy(ifname, info->attrs[IPOE_ATTR_IFNAME], IFNAMSIZ - 1); + + if (info->attrs[IPOE_ATTR_HWADDR]) + nla_memcpy(hwaddr, info->attrs[IPOE_ATTR_HWADDR], ETH_ALEN); + else + memset(hwaddr, 0, sizeof(hwaddr)); + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; + goto out; + } + + hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, + &ipoe_nl_family, 0, IPOE_CMD_CREATE); + if (IS_ERR(hdr)) { + ret = PTR_ERR(hdr); + goto err_out; + } + + //pr_info("ipoe: create %08x %08x %s\n", peer_addr, addr, info->attrs[IPOE_ATTR_IFNAME] ? ifname : "-"); + + ret = ipoe_create(peer_addr, addr, info->attrs[IPOE_ATTR_IFNAME] ? 
ifname : NULL, hwaddr); + + if (ret < 0) { + nlmsg_free(msg); + return ret; + } + + nla_put_u32(msg, IPOE_ATTR_IFINDEX, ret); + + genlmsg_end(msg, hdr); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + return genlmsg_unicast(msg, info->snd_pid); +#else + return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); +#endif + +err_out: + nlmsg_free(msg); + +out: + return ret; +} + +static int ipoe_nl_cmd_delete(struct sk_buff *skb, struct genl_info *info) +{ + struct net_device *dev; + struct ipoe_session *ses; + int ifindex; + int r = 0; + int ret = -EINVAL; + + if (!info->attrs[IPOE_ATTR_IFINDEX]) + return -EINVAL; + + ifindex = nla_get_u32(info->attrs[IPOE_ATTR_IFINDEX]); + + down(&ipoe_wlock); + + rcu_read_lock(); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + dev = dev_get_by_index_rcu(ifindex); +#else + dev = dev_get_by_index_rcu(&init_net, ifindex); +#endif + if (!dev || dev->header_ops != &ipoe_hard_header_ops) + r = 1; + rcu_read_unlock(); + + if (r) + goto out_unlock; + + ses = netdev_priv(dev); + + //pr_info("ipoe: delete %08x\n", ses->peer_addr); + + if (ses->peer_addr) + list_del_rcu(&ses->entry); + list_del(&ses->entry2); + + up(&ipoe_wlock); + + synchronize_rcu(); + + while (atomic_read(&ses->refs)) + schedule_timeout_uninterruptible(1); + + if (ses->link_dev) + dev_put(ses->link_dev); + + unregister_netdev(ses->dev); + + ret = 0; + +out_unlock: + up(&ipoe_wlock); + return ret; +} + +static int ipoe_nl_cmd_modify(struct sk_buff *skb, struct genl_info *info) +{ + int ret = -EINVAL, r = 0; + char ifname[IFNAMSIZ]; + struct net_device *dev, *link_dev, *old_dev; + struct ipoe_session *ses, *ses1; + int ifindex; + __be32 peer_addr; + + if (!info->attrs[IPOE_ATTR_IFINDEX]) + return -EINVAL; + + down(&ipoe_wlock); + + ifindex = nla_get_u32(info->attrs[IPOE_ATTR_IFINDEX]); + + rcu_read_lock(); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + dev = dev_get_by_index_rcu(ifindex); +#else + dev = dev_get_by_index_rcu(&init_net, ifindex); +#endif + if 
(!dev || dev->header_ops != &ipoe_hard_header_ops) + r = 1; + rcu_read_unlock(); + + if (r) + goto out_unlock; + + ses = netdev_priv(dev); + + if (info->attrs[IPOE_ATTR_PEER_ADDR]) { + peer_addr = nla_get_be32(info->attrs[IPOE_ATTR_PEER_ADDR]); + if (peer_addr) { + ses1 = ipoe_lookup(peer_addr); + if (ses1) { + atomic_dec(&ses1->refs); + if (ses1 != ses) { + ret = -EEXIST; + goto out_unlock; + } + } + } + + if (ses->peer_addr) { + list_del_rcu(&ses->entry); + synchronize_rcu(); + } + + ses->peer_addr = peer_addr; + + if (peer_addr) + list_add_tail_rcu(&ses->entry, &ipoe_list[hash_addr(peer_addr)]); + } + + if (info->attrs[IPOE_ATTR_IFNAME]) { + nla_strlcpy(ifname, info->attrs[IPOE_ATTR_IFNAME], IFNAMSIZ - 1); + + if (*ifname) { + link_dev = dev_get_by_name(&init_net, ifname); + + if (!link_dev) + goto out_unlock; + } else + link_dev = NULL; + + old_dev = ses->link_dev; + ses->link_dev = link_dev; + + if (link_dev) { + ses->dev->features = link_dev->features; + memcpy(dev->dev_addr, link_dev->dev_addr, ETH_ALEN); + memcpy(dev->broadcast, link_dev->broadcast, ETH_ALEN); + } + + if (old_dev) + dev_put(old_dev); + } + + if (info->attrs[IPOE_ATTR_ADDR]) { + ses->addr = nla_get_be32(info->attrs[IPOE_ATTR_ADDR]); + if (ses->addr) + dev->flags |= IFF_NOARP; + else + dev->flags &= ~IFF_NOARP; + } + + if (info->attrs[IPOE_ATTR_HWADDR]) + nla_memcpy(ses->hwaddr, info->attrs[IPOE_ATTR_HWADDR], ETH_ALEN); + + //pr_info("ipoe: modify %08x %08x\n", ses->peer_addr, ses->addr); + + ret = 0; + +out_unlock: + up(&ipoe_wlock); + return ret; +} + +static int fill_info(struct sk_buff *skb, struct ipoe_session *ses, u32 pid, u32 seq) +{ + void *hdr; + + hdr = genlmsg_put(skb, pid, seq, &ipoe_nl_family, NLM_F_MULTI, IPOE_CMD_GET); + if (!hdr) + return -EMSGSIZE; + + NLA_PUT_U32(skb, IPOE_ATTR_IFINDEX, ses->dev->ifindex); + NLA_PUT_U32(skb, IPOE_ATTR_PEER_ADDR, ses->peer_addr); + NLA_PUT_U32(skb, IPOE_ATTR_ADDR, ses->addr); + + return genlmsg_end(skb, hdr); + +nla_put_failure: + 
genlmsg_cancel(skb, hdr); + return -EMSGSIZE; +} + +static int ipoe_nl_cmd_dump_sessions(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct ipoe_session *ses; + int idx = 0, start_idx = cb->args[0]; + + down(&ipoe_wlock); + + list_for_each_entry(ses, &ipoe_list2, entry2) { + if (idx > start_idx) + start_idx = 0; + + if (idx++ < start_idx) + continue; + + if (fill_info(skb, ses, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq) < 0) + break; + } + + up(&ipoe_wlock); + + cb->args[0] = idx; + + return skb->len; +} + +static int ipoe_nl_cmd_add_net(struct sk_buff *skb, struct genl_info *info) +{ + struct ipoe_network *n; + + if (!info->attrs[IPOE_ATTR_ADDR] || !info->attrs[IPOE_ATTR_MASK]) + return -EINVAL; + + n = kmalloc(sizeof(*n), GFP_KERNEL); + if (!n) + return -ENOMEM; + + n->addr = nla_get_u32(info->attrs[IPOE_ATTR_ADDR]); + n->mask = nla_get_u32(info->attrs[IPOE_ATTR_MASK]); + //pr_info("add net %08x/%08x\n", n->addr, n->mask); + + down(&ipoe_wlock); + list_add_tail_rcu(&n->entry, &ipoe_networks); + up(&ipoe_wlock); + + return 0; +} + +static int ipoe_nl_cmd_del_net(struct sk_buff *skb, struct genl_info *info) +{ + struct ipoe_network *n; + __be32 addr; + + if (!info->attrs[IPOE_ATTR_ADDR]) + return -EINVAL; + + addr = nla_get_u32(info->attrs[IPOE_ATTR_ADDR]); + + rcu_read_lock(); + list_for_each_entry_rcu(n, &ipoe_networks, entry) { + if (!addr || addr == n->addr) { + //pr_info("del net %08x/%08x\n", n->addr, n->mask); + list_del_rcu(&n->entry); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) + kfree_rcu(n, rcu_head); +#else + call_rcu(&n->rcu_head, __kfree_rcu); +#endif + } + } + rcu_read_unlock(); + + synchronize_rcu(); + + return 0; +} + + +static struct nla_policy ipoe_nl_policy[IPOE_ATTR_MAX + 1] = { + [IPOE_ATTR_NONE] = { .type = NLA_UNSPEC, }, + [IPOE_ATTR_ADDR] = { .type = NLA_U32, }, + [IPOE_ATTR_PEER_ADDR] = { .type = NLA_U32, }, + [IPOE_ATTR_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, + [IPOE_ATTR_HWADDR] = { .type = NLA_U64 }, + 
[IPOE_ATTR_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, + [IPOE_ATTR_MASK] = { .type = NLA_U32, }, +}; + +static struct genl_ops ipoe_nl_ops[] = { + { + .cmd = IPOE_CMD_NOOP, + .doit = ipoe_nl_cmd_noop, + .policy = ipoe_nl_policy, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = IPOE_CMD_CREATE, + .doit = ipoe_nl_cmd_create, + .policy = ipoe_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = IPOE_CMD_DELETE, + .doit = ipoe_nl_cmd_delete, + .policy = ipoe_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = IPOE_CMD_MODIFY, + .doit = ipoe_nl_cmd_modify, + .policy = ipoe_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = IPOE_CMD_GET, + .dumpit = ipoe_nl_cmd_dump_sessions, + .policy = ipoe_nl_policy, + }, + { + .cmd = IPOE_CMD_ADD_NET, + .doit = ipoe_nl_cmd_add_net, + .policy = ipoe_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = IPOE_CMD_DEL_NET, + .doit = ipoe_nl_cmd_del_net, + .policy = ipoe_nl_policy, + .flags = GENL_ADMIN_PERM, + }, +}; + +static struct genl_family ipoe_nl_family = { + .id = GENL_ID_GENERATE, + .name = IPOE_GENL_NAME, + .version = IPOE_GENL_VERSION, + .hdrsize = 0, + .maxattr = IPOE_ATTR_MAX, +}; + +static struct genl_multicast_group ipoe_nl_mcg = { + .name = IPOE_GENL_MCG_PKT, +}; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) +static const struct net_device_ops ipoe_netdev_ops = { + .ndo_start_xmit = ipoe_xmit, +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) + .ndo_get_stats64 = ipoe_stats64, +#endif +}; +#endif + +static struct packet_type ip_packet_type = { + .type = __constant_htons(ETH_P_IP), + .func = ipoe_rcv, +}; + +static struct packet_type arp_packet_type = { + .type = __constant_htons(ETH_P_ARP), + .func = ipoe_rcv_arp, +}; + +/*static struct pernet_operations ipoe_net_ops = { + .init = ipoe_init_net, + .exit = ipoe_exit_net, + .id = &ipoe_net_id, + .size = sizeof(struct ipoe_net), +};*/ + +static int __init ipoe_init(void) +{ + int err, i; + + printk("IPoE session driver 
v0.1\n"); + + /*err = register_pernet_device(&ipoe_net_ops); + if (err < 0) + return err;*/ + for (i = 0; i < HASH_BITS + 1; i++) { + INIT_LIST_HEAD(&ipoe_list[i]); + INIT_LIST_HEAD(&ipoe_list1_u[i]); + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + err = genl_register_family(&ipoe_nl_family); + if (err < 0) { + printk(KERN_INFO "ipoe: can't register netlink interface\n"); + goto out; + } + + for (i = 0; i < ARRAY_SIZE(ipoe_nl_ops); i++) { + err = genl_register_ops(&ipoe_nl_family, &ipoe_nl_ops[i]); + if (err) + break; + } + + if (err < 0) { + printk(KERN_INFO "ipoe: can't register netlink interface\n"); + goto out_unreg; + } +#else + err = genl_register_family_with_ops(&ipoe_nl_family, ipoe_nl_ops, + ARRAY_SIZE(ipoe_nl_ops)); + if (err < 0) { + printk(KERN_INFO "ipoe: can't register netlink interface\n"); + goto out; + } +#endif + + err = genl_register_mc_group(&ipoe_nl_family, &ipoe_nl_mcg); + if (err < 0) { + printk(KERN_INFO "ipoe: can't register netlink multicast group\n"); + goto out_unreg; + } + + skb_queue_head_init(&ipoe_queue); + INIT_WORK(&ipoe_queue_work, ipoe_process_queue); + + dev_add_pack(&ip_packet_type); + dev_add_pack(&arp_packet_type); + + return 0; + +out_unreg: + genl_unregister_family(&ipoe_nl_family); +out: + return err; +} + +static void __exit ipoe_fini(void) +{ + struct ipoe_network *n; + struct ipoe_entry_u *e; + struct ipoe_session *ses; + int i; + + genl_unregister_mc_group(&ipoe_nl_family, &ipoe_nl_mcg); + genl_unregister_family(&ipoe_nl_family); + + dev_remove_pack(&ip_packet_type); + dev_remove_pack(&arp_packet_type); + + flush_work(&ipoe_queue_work); + skb_queue_purge(&ipoe_queue); + + del_timer(&ipoe_timer_u); + + down(&ipoe_wlock); + up(&ipoe_wlock); + + for (i = 0; i < HASH_BITS; i++) + rcu_assign_pointer(ipoe_list[i].next, &ipoe_list[i]); + + rcu_barrier(); + + while (!list_empty(&ipoe_list2)) { + ses = list_entry(ipoe_list2.next, typeof(*ses), entry2); + list_del(&ses->entry2); + + if (ses->link_dev) + 
dev_put(ses->link_dev); + + unregister_netdev(ses->dev); + } + + while (!list_empty(&ipoe_networks)) { + n = list_entry(ipoe_networks.next, typeof(*n), entry); + list_del(&n->entry); + kfree(n); + } + + while (!list_empty(&ipoe_list2_u)) { + e = list_entry(ipoe_list2_u.next, typeof(*e), entry2); + list_del(&e->entry2); + kfree(e); + } +} + +module_init(ipoe_init); +module_exit(ipoe_fini); +MODULE_LICENSE("GPL"); diff --git a/drivers/ipoe/ipoe.h b/drivers/ipoe/ipoe.h new file mode 100644 index 00000000..d40ac21a --- /dev/null +++ b/drivers/ipoe/ipoe.h @@ -0,0 +1,43 @@ +#ifndef __LINUX_IPOE_H +#define __LINUX_IPOE_H + +#include <linux/types.h> + +enum { + IPOE_CMD_NOOP, + IPOE_CMD_CREATE, + IPOE_CMD_DELETE, + IPOE_CMD_MODIFY, + IPOE_CMD_GET, + IPOE_CMD_ADD_NET, + IPOE_CMD_DEL_NET, + IPOE_REP_PKT, + __IPOE_CMD_MAX, +}; + +#define IPOE_CMD_MAX (__IPOE_CMD_MAX - 1) + +enum { + IPOE_ATTR_NONE, /* no data */ + IPOE_ATTR_ADDR, /* u32 */ + IPOE_ATTR_PEER_ADDR, /* u32 */ + IPOE_ATTR_IFNAME, /* u32 */ + IPOE_ATTR_HWADDR, /* u32 */ + IPOE_ATTR_MASK, /* u32 */ + IPOE_ATTR_IFINDEX, /* u32 */ + IPOE_ATTR_ETH_HDR, /* u32 */ + IPOE_ATTR_IP_HDR, /* u32 */ + __IPOE_ATTR_MAX, +}; + +#define IPOE_ATTR_MAX (__IPOE_ATTR_MAX - 1) + +/* + * NETLINK_GENERIC related info + */ +#define IPOE_GENL_NAME "IPoE" +#define IPOE_GENL_MCG_PKT "Packet" +#define IPOE_GENL_VERSION 0x1 + +#endif + diff --git a/driver/CMakeLists.txt b/drivers/pptp/CMakeLists.txt index fd732e6a..fd732e6a 100644 --- a/driver/CMakeLists.txt +++ b/drivers/pptp/CMakeLists.txt diff --git a/driver/Makefile b/drivers/pptp/Makefile index 8ccbbedf..8ccbbedf 100644 --- a/driver/Makefile +++ b/drivers/pptp/Makefile diff --git a/driver/gre.c b/drivers/pptp/gre.c index 77886d5d..77886d5d 100644 --- a/driver/gre.c +++ b/drivers/pptp/gre.c diff --git a/driver/gre.h b/drivers/pptp/gre.h index 2ca7f749..2ca7f749 100644 --- a/driver/gre.h +++ b/drivers/pptp/gre.h diff --git a/driver/if_pppox.h b/drivers/pptp/if_pppox.h index 
bc05b533..bc05b533 100644 --- a/driver/if_pppox.h +++ b/drivers/pptp/if_pppox.h diff --git a/driver/pptp.c b/drivers/pptp/pptp.c index 78853fcb..78853fcb 100644 --- a/driver/pptp.c +++ b/drivers/pptp/pptp.c diff --git a/rfc/rfc2131.txt b/rfc/rfc2131.txt new file mode 100644 index 00000000..f45d9b86 --- /dev/null +++ b/rfc/rfc2131.txt @@ -0,0 +1,2523 @@ + + + + + + +Network Working Group R. Droms +Request for Comments: 2131 Bucknell University +Obsoletes: 1541 March 1997 +Category: Standards Track + + Dynamic Host Configuration Protocol + +Status of this memo + + This document specifies an Internet standards track protocol for the + Internet community, and requests discussion and suggestions for + improvements. Please refer to the current edition of the "Internet + Official Protocol Standards" (STD 1) for the standardization state + and status of this protocol. Distribution of this memo is unlimited. + +Abstract + + The Dynamic Host Configuration Protocol (DHCP) provides a framework + for passing configuration information to hosts on a TCPIP network. + DHCP is based on the Bootstrap Protocol (BOOTP) [7], adding the + capability of automatic allocation of reusable network addresses and + additional configuration options [19]. DHCP captures the behavior of + BOOTP relay agents [7, 21], and DHCP participants can interoperate + with BOOTP participants [9]. + +Table of Contents + + 1. Introduction. . . . . . . . . . . . . . . . . . . . . . . . . 2 + 1.1 Changes to RFC1541. . . . . . . . . . . . . . . . . . . . . . 3 + 1.2 Related Work. . . . . . . . . . . . . . . . . . . . . . . . . 4 + 1.3 Problem definition and issues . . . . . . . . . . . . . . . . 4 + 1.4 Requirements. . . . . . . . . . . . . . . . . . . . . . . . . 5 + 1.5 Terminology . . . . . . . . . . . . . . . . . . . . . . . . . 6 + 1.6 Design goals. . . . . . . . . . . . . . . . . . . . . . . . . 6 + 2. Protocol Summary. . . . . . . . . . . . . . . . . . . . . . . 8 + 2.1 Configuration parameters repository . 
. . . . . . . . . . . . 11 + 2.2 Dynamic allocation of network addresses . . . . . . . . . . . 12 + 3. The Client-Server Protocol. . . . . . . . . . . . . . . . . . 13 + 3.1 Client-server interaction - allocating a network address. . . 13 + 3.2 Client-server interaction - reusing a previously allocated + network address . . . . . . . . . . . . . . . . . . . . . . . 17 + 3.3 Interpretation and representation of time values. . . . . . . 20 + 3.4 Obtaining parameters with externally configured network + address . . . . . . . . . . . . . . . . . . . . . . . . . . . 20 + 3.5 Client parameters in DHCP . . . . . . . . . . . . . . . . . . 21 + 3.6 Use of DHCP in clients with multiple interfaces . . . . . . . 22 + 3.7 When clients should use DHCP. . . . . . . . . . . . . . . . . 22 + 4. Specification of the DHCP client-server protocol. . . . . . . 22 + + + +Droms Standards Track [Page 1] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + 4.1 Constructing and sending DHCP messages. . . . . . . . . . . . 22 + 4.2 DHCP server administrative controls . . . . . . . . . . . . . 25 + 4.3 DHCP server behavior. . . . . . . . . . . . . . . . . . . . . 26 + 4.4 DHCP client behavior. . . . . . . . . . . . . . . . . . . . . 34 + 5. Acknowledgments. . . . . . . . . . . . . . . . . . . . . . . .42 + 6. References . . . . . . . . . . . . . . . . . . . . . . . . . .42 + 7. Security Considerations. . . . . . . . . . . . . . . . . . . .43 + 8. Author's Address . . . . . . . . . . . . . . . . . . . . . . .44 + A. Host Configuration Parameters . . . . . . . . . . . . . . . .45 +List of Figures + 1. Format of a DHCP message . . . . . . . . . . . . . . . . . . . 9 + 2. Format of the 'flags' field. . . . . . . . . . . . . . . . . . 11 + 3. Timeline diagram of messages exchanged between DHCP client and + servers when allocating a new network address. . . . . . . . . 15 + 4. 
Timeline diagram of messages exchanged between DHCP client and + servers when reusing a previously allocated network address. . 18 + 5. State-transition diagram for DHCP clients. . . . . . . . . . . 34 +List of Tables + 1. Description of fields in a DHCP message. . . . . . . . . . . . 10 + 2. DHCP messages. . . . . . . . . . . . . . . . . . . . . . . . . 14 + 3. Fields and options used by DHCP servers. . . . . . . . . . . . 28 + 4. Client messages from various states. . . . . . . . . . . . . . 33 + 5. Fields and options used by DHCP clients. . . . . . . . . . . . 37 + +1. Introduction + + The Dynamic Host Configuration Protocol (DHCP) provides configuration + parameters to Internet hosts. DHCP consists of two components: a + protocol for delivering host-specific configuration parameters from a + DHCP server to a host and a mechanism for allocation of network + addresses to hosts. + + DHCP is built on a client-server model, where designated DHCP server + hosts allocate network addresses and deliver configuration parameters + to dynamically configured hosts. Throughout the remainder of this + document, the term "server" refers to a host providing initialization + parameters through DHCP, and the term "client" refers to a host + requesting initialization parameters from a DHCP server. + + A host should not act as a DHCP server unless explicitly configured + to do so by a system administrator. The diversity of hardware and + protocol implementations in the Internet would preclude reliable + operation if random hosts were allowed to respond to DHCP requests. + For example, IP requires the setting of many parameters within the + protocol implementation software. Because IP can be used on many + dissimilar kinds of network hardware, values for those parameters + cannot be guessed or assumed to have correct defaults. 
Also, + distributed address allocation schemes depend on a polling/defense + + + +Droms Standards Track [Page 2] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + mechanism for discovery of addresses that are already in use. IP + hosts may not always be able to defend their network addresses, so + that such a distributed address allocation scheme cannot be + guaranteed to avoid allocation of duplicate network addresses. + + DHCP supports three mechanisms for IP address allocation. In + "automatic allocation", DHCP assigns a permanent IP address to a + client. In "dynamic allocation", DHCP assigns an IP address to a + client for a limited period of time (or until the client explicitly + relinquishes the address). In "manual allocation", a client's IP + address is assigned by the network administrator, and DHCP is used + simply to convey the assigned address to the client. A particular + network will use one or more of these mechanisms, depending on the + policies of the network administrator. + + Dynamic allocation is the only one of the three mechanisms that + allows automatic reuse of an address that is no longer needed by the + client to which it was assigned. Thus, dynamic allocation is + particularly useful for assigning an address to a client that will be + connected to the network only temporarily or for sharing a limited + pool of IP addresses among a group of clients that do not need + permanent IP addresses. Dynamic allocation may also be a good choice + for assigning an IP address to a new client being permanently + connected to a network where IP addresses are sufficiently scarce + that it is important to reclaim them when old clients are retired. + Manual allocation allows DHCP to be used to eliminate the error-prone + process of manually configuring hosts with IP addresses in + environments where (for whatever reasons) it is desirable to manage + IP address assignment outside of the DHCP mechanisms. 
+ + The format of DHCP messages is based on the format of BOOTP messages, + to capture the BOOTP relay agent behavior described as part of the + BOOTP specification [7, 21] and to allow interoperability of existing + BOOTP clients with DHCP servers. Using BOOTP relay agents eliminates + the necessity of having a DHCP server on each physical network + segment. + +1.1 Changes to RFC 1541 + + This document updates the DHCP protocol specification that appears in + RFC1541. A new DHCP message type, DHCPINFORM, has been added; see + section 3.4, 4.3 and 4.4 for details. The classing mechanism for + identifying DHCP clients to DHCP servers has been extended to include + "vendor" classes as defined in sections 4.2 and 4.3. The minimum + lease time restriction has been removed. Finally, many editorial + changes have been made to clarify the text as a result of experience + gained in DHCP interoperability tests. + + + + +Droms Standards Track [Page 3] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +1.2 Related Work + + There are several Internet protocols and related mechanisms that + address some parts of the dynamic host configuration problem. The + Reverse Address Resolution Protocol (RARP) [10] (through the + extensions defined in the Dynamic RARP (DRARP) [5]) explicitly + addresses the problem of network address discovery, and includes an + automatic IP address assignment mechanism. The Trivial File Transfer + Protocol (TFTP) [20] provides for transport of a boot image from a + boot server. The Internet Control Message Protocol (ICMP) [16] + provides for informing hosts of additional routers via "ICMP + redirect" messages. ICMP also can provide subnet mask information + through the "ICMP mask request" message and other information through + the (obsolete) "ICMP information request" message. Hosts can locate + routers through the ICMP router discovery mechanism [8]. + + BOOTP is a transport mechanism for a collection of configuration + information. 
BOOTP is also extensible, and official extensions [17] + have been defined for several configuration parameters. Morgan has + proposed extensions to BOOTP for dynamic IP address assignment [15]. + The Network Information Protocol (NIP), used by the Athena project at + MIT, is a distributed mechanism for dynamic IP address assignment + [19]. The Resource Location Protocol RLP [1] provides for location + of higher level services. Sun Microsystems diskless workstations use + a boot procedure that employs RARP, TFTP and an RPC mechanism called + "bootparams" to deliver configuration information and operating + system code to diskless hosts. (Sun Microsystems, Sun Workstation + and SunOS are trademarks of Sun Microsystems, Inc.) Some Sun + networks also use DRARP and an auto-installation mechanism to + automate the configuration of new hosts in an existing network. + + In other related work, the path minimum transmission unit (MTU) + discovery algorithm can determine the MTU of an arbitrary internet + path [14]. The Address Resolution Protocol (ARP) has been proposed + as a transport protocol for resource location and selection [6]. + Finally, the Host Requirements RFCs [3, 4] mention specific + requirements for host reconfiguration and suggest a scenario for + initial configuration of diskless hosts. + +1.3 Problem definition and issues + + DHCP is designed to supply DHCP clients with the configuration + parameters defined in the Host Requirements RFCs. After obtaining + parameters via DHCP, a DHCP client should be able to exchange packets + with any other host in the Internet. The TCP/IP stack parameters + supplied by DHCP are listed in Appendix A. + + + + + +Droms Standards Track [Page 4] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + Not all of these parameters are required for a newly initialized + client. A client and server may negotiate for the transmission of + only those parameters required by the client or specific to a + particular subnet. 
+ + DHCP allows but does not require the configuration of client + parameters not directly related to the IP protocol. DHCP also does + not address registration of newly configured clients with the Domain + Name System (DNS) [12, 13]. + + DHCP is not intended for use in configuring routers. + +1.4 Requirements + + Throughout this document, the words that are used to define the + significance of particular requirements are capitalized. These words + are: + + o "MUST" + + This word or the adjective "REQUIRED" means that the + item is an absolute requirement of this specification. + + o "MUST NOT" + + This phrase means that the item is an absolute prohibition + of this specification. + + o "SHOULD" + + This word or the adjective "RECOMMENDED" means that there + may exist valid reasons in particular circumstances to ignore + this item, but the full implications should be understood and + the case carefully weighed before choosing a different course. + + o "SHOULD NOT" + + This phrase means that there may exist valid reasons in + particular circumstances when the listed behavior is acceptable + or even useful, but the full implications should be understood + and the case carefully weighed before implementing any behavior + described with this label. + + + + + + + + + +Droms Standards Track [Page 5] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + o "MAY" + + This word or the adjective "OPTIONAL" means that this item is + truly optional. One vendor may choose to include the item + because a particular marketplace requires it or because it + enhances the product, for example; another vendor may omit the + same item. + +1.5 Terminology + + This document uses the following terms: + + o "DHCP client" + + A DHCP client is an Internet host using DHCP to obtain + configuration parameters such as a network address. + + o "DHCP server" + + A DHCP server is an Internet host that returns configuration + parameters to DHCP clients. 
+ + o "BOOTP relay agent" + + A BOOTP relay agent or relay agent is an Internet host or router + that passes DHCP messages between DHCP clients and DHCP servers. + DHCP is designed to use the same relay agent behavior as specified + in the BOOTP protocol specification. + + o "binding" + + A binding is a collection of configuration parameters, including + at least an IP address, associated with or "bound to" a DHCP + client. Bindings are managed by DHCP servers. + +1.6 Design goals + + The following list gives general design goals for DHCP. + + o DHCP should be a mechanism rather than a policy. DHCP must + allow local system administrators control over configuration + parameters where desired; e.g., local system administrators + should be able to enforce local policies concerning allocation + and access to local resources where desired. + + + + + + + +Droms Standards Track [Page 6] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + o Clients should require no manual configuration. Each client + should be able to discover appropriate local configuration + parameters without user intervention and incorporate those + parameters into its own configuration. + + o Networks should require no manual configuration for individual + clients. Under normal circumstances, the network manager + should not have to enter any per-client configuration + parameters. + + o DHCP should not require a server on each subnet. To allow for + scale and economy, DHCP must work across routers or through the + intervention of BOOTP relay agents. + + o A DHCP client must be prepared to receive multiple responses + to a request for configuration parameters. Some installations + may include multiple, overlapping DHCP servers to enhance + reliability and increase performance. + + o DHCP must coexist with statically configured, non-participating + hosts and with existing network protocol implementations. 
+ + o DHCP must interoperate with the BOOTP relay agent behavior as + described by RFC 951 and by RFC 1542 [21]. + + o DHCP must provide service to existing BOOTP clients. + + The following list gives design goals specific to the transmission of + the network layer parameters. DHCP must: + + o Guarantee that any specific network address will not be in + use by more than one DHCP client at a time, + + o Retain DHCP client configuration across DHCP client reboot. A + DHCP client should, whenever possible, be assigned the same + configuration parameters (e.g., network address) in response + to each request, + + o Retain DHCP client configuration across server reboots, and, + whenever possible, a DHCP client should be assigned the same + configuration parameters despite restarts of the DHCP mechanism, + + o Allow automated assignment of configuration parameters to new + clients to avoid hand configuration for new clients, + + o Support fixed or permanent allocation of configuration + parameters to specific clients. + + + + +Droms Standards Track [Page 7] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +2. Protocol Summary + + From the client's point of view, DHCP is an extension of the BOOTP + mechanism. This behavior allows existing BOOTP clients to + interoperate with DHCP servers without requiring any change to the + clients' initialization software. RFC 1542 [2] details the + interactions between BOOTP and DHCP clients and servers [9]. There + are some new, optional transactions that optimize the interaction + between DHCP clients and servers that are described in sections 3 and + 4. + + Figure 1 gives the format of a DHCP message and table 1 describes + each of the fields in the DHCP message. The numbers in parentheses + indicate the size of each field in octets. The names for the fields + given in the figure will be used throughout this document to refer to + the fields in DHCP messages. + + There are two primary differences between DHCP and BOOTP. 
First, + DHCP defines mechanisms through which clients can be assigned a + network address for a finite lease, allowing for serial reassignment + of network addresses to different clients. Second, DHCP provides the + mechanism for a client to acquire all of the IP configuration + parameters that it needs in order to operate. + + DHCP introduces a small change in terminology intended to clarify the + meaning of one of the fields. What was the "vendor extensions" field + in BOOTP has been re-named the "options" field in DHCP. Similarly, + the tagged data items that were used inside the BOOTP "vendor + extensions" field, which were formerly referred to as "vendor + extensions," are now termed simply "options." + + + + + + + + + + + + + + + + + + + + + +Droms Standards Track [Page 8] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | op (1) | htype (1) | hlen (1) | hops (1) | + +---------------+---------------+---------------+---------------+ + | xid (4) | + +-------------------------------+-------------------------------+ + | secs (2) | flags (2) | + +-------------------------------+-------------------------------+ + | ciaddr (4) | + +---------------------------------------------------------------+ + | yiaddr (4) | + +---------------------------------------------------------------+ + | siaddr (4) | + +---------------------------------------------------------------+ + | giaddr (4) | + +---------------------------------------------------------------+ + | | + | chaddr (16) | + | | + | | + +---------------------------------------------------------------+ + | | + | sname (64) | + +---------------------------------------------------------------+ + | | + | file (128) | + +---------------------------------------------------------------+ + | | + | options (variable) | + 
+---------------------------------------------------------------+ + + Figure 1: Format of a DHCP message + + DHCP defines a new 'client identifier' option that is used to pass an + explicit client identifier to a DHCP server. This change eliminates + the overloading of the 'chaddr' field in BOOTP messages, where + 'chaddr' is used both as a hardware address for transmission of BOOTP + reply messages and as a client identifier. The 'client identifier' + is an opaque key, not to be interpreted by the server; for example, + the 'client identifier' may contain a hardware address, identical to + the contents of the 'chaddr' field, or it may contain another type of + identifier, such as a DNS name. The 'client identifier' chosen by a + DHCP client MUST be unique to that client within the subnet to which + the client is attached. If the client uses a 'client identifier' in + one message, it MUST use that same identifier in all subsequent + messages, to ensure that all servers correctly identify the client. + + + + +Droms Standards Track [Page 9] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + DHCP clarifies the interpretation of the 'siaddr' field as the + address of the server to use in the next step of the client's + bootstrap process. A DHCP server may return its own address in the + 'siaddr' field, if the server is prepared to supply the next + bootstrap service (e.g., delivery of an operating system executable + image). A DHCP server always returns its own address in the 'server + identifier' option. + + FIELD OCTETS DESCRIPTION + ----- ------ ----------- + + op 1 Message op code / message type. + 1 = BOOTREQUEST, 2 = BOOTREPLY + htype 1 Hardware address type, see ARP section in "Assigned + Numbers" RFC; e.g., '1' = 10mb ethernet. + hlen 1 Hardware address length (e.g. '6' for 10mb + ethernet). + hops 1 Client sets to zero, optionally used by relay agents + when booting via a relay agent. 
+ xid 4 Transaction ID, a random number chosen by the + client, used by the client and server to associate + messages and responses between a client and a + server. + secs 2 Filled in by client, seconds elapsed since client + began address acquisition or renewal process. + flags 2 Flags (see figure 2). + ciaddr 4 Client IP address; only filled in if client is in + BOUND, RENEW or REBINDING state and can respond + to ARP requests. + yiaddr 4 'your' (client) IP address. + siaddr 4 IP address of next server to use in bootstrap; + returned in DHCPOFFER, DHCPACK by server. + giaddr 4 Relay agent IP address, used in booting via a + relay agent. + chaddr 16 Client hardware address. + sname 64 Optional server host name, null terminated string. + file 128 Boot file name, null terminated string; "generic" + name or null in DHCPDISCOVER, fully qualified + directory-path name in DHCPOFFER. + options var Optional parameters field. See the options + documents for a list of defined options. + + Table 1: Description of fields in a DHCP message + + The 'options' field is now variable length. A DHCP client must be + prepared to receive DHCP messages with an 'options' field of at least + length 312 octets. This requirement implies that a DHCP client must + be prepared to receive a message of up to 576 octets, the minimum IP + + + +Droms Standards Track [Page 10] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + datagram size an IP host must be prepared to accept [3]. DHCP + clients may negotiate the use of larger DHCP messages through the + 'maximum DHCP message size' option. The options field may be further + extended into the 'file' and 'sname' fields. + + In the case of a client using DHCP for initial configuration (before + the client's TCP/IP software has been completely configured), DHCP + requires creative use of the client's TCP/IP software and liberal + interpretation of RFC 1122. 
The TCP/IP software SHOULD accept and + forward to the IP layer any IP packets delivered to the client's + hardware address before the IP address is configured; DHCP servers + and BOOTP relay agents may not be able to deliver DHCP messages to + clients that cannot accept hardware unicast datagrams before the + TCP/IP software is configured. + + To work around some clients that cannot accept IP unicast datagrams + before the TCP/IP software is configured as discussed in the previous + paragraph, DHCP uses the 'flags' field [21]. The leftmost bit is + defined as the BROADCAST (B) flag. The semantics of this flag are + discussed in section 4.1 of this document. The remaining bits of the + flags field are reserved for future use. They MUST be set to zero by + clients and ignored by servers and relay agents. Figure 2 gives the + format of the 'flags' field. + + 1 1 1 1 1 1 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |B| MBZ | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + B: BROADCAST flag + + MBZ: MUST BE ZERO (reserved for future use) + + Figure 2: Format of the 'flags' field + +2.1 Configuration parameters repository + + The first service provided by DHCP is to provide persistent storage + of network parameters for network clients. The model of DHCP + persistent storage is that the DHCP service stores a key-value entry + for each client, where the key is some unique identifier (for + example, an IP subnet number and a unique identifier within the + subnet) and the value contains the configuration parameters for the + client. 
+ + For example, the key might be the pair (IP-subnet-number, hardware- + address) (note that the "hardware-address" should be typed by the + + + +Droms Standards Track [Page 11] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + type of hardware to accommodate possible duplication of hardware + addresses resulting from bit-ordering problems in a mixed-media, + bridged network) allowing for serial or concurrent reuse of a + hardware address on different subnets, and for hardware addresses + that may not be globally unique. Alternately, the key might be the + pair (IP-subnet-number, hostname), allowing the server to assign + parameters intelligently to a DHCP client that has been moved to a + different subnet or has changed hardware addresses (perhaps because + the network interface failed and was replaced). The protocol defines + that the key will be (IP-subnet-number, hardware-address) unless the + client explicitly supplies an identifier using the 'client + identifier' option. A client can query the DHCP service to + retrieve its configuration parameters. The client interface to the + configuration parameters repository consists of protocol messages to + request configuration parameters and responses from the server + carrying the configuration parameters. + +2.2 Dynamic allocation of network addresses + + The second service provided by DHCP is the allocation of temporary or + permanent network (IP) addresses to clients. The basic mechanism for + the dynamic allocation of network addresses is simple: a client + requests the use of an address for some period of time. The + allocation mechanism (the collection of DHCP servers) guarantees not + to reallocate that address within the requested time and attempts to + return the same network address each time the client requests an + address. In this document, the period over which a network address + is allocated to a client is referred to as a "lease" [11]. 
The + client may extend its lease with subsequent requests. The client may + issue a message to release the address back to the server when the + client no longer needs the address. The client may ask for a + permanent assignment by asking for an infinite lease. Even when + assigning "permanent" addresses, a server may choose to give out + lengthy but non-infinite leases to allow detection of the fact that + the client has been retired. + + In some environments it will be necessary to reassign network + addresses due to exhaustion of available addresses. In such + environments, the allocation mechanism will reuse addresses whose + lease has expired. The server should use whatever information is + available in the configuration information repository to choose an + address to reuse. For example, the server may choose the least + recently assigned address. As a consistency check, the allocating + server SHOULD probe the reused address before allocating the address, + e.g., with an ICMP echo request, and the client SHOULD probe the + newly received address, e.g., with ARP. + + + + + +Droms Standards Track [Page 12] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +3. The Client-Server Protocol + + DHCP uses the BOOTP message format defined in RFC 951 and given in + table 1 and figure 1. The 'op' field of each DHCP message sent from + a client to a server contains BOOTREQUEST. BOOTREPLY is used in the + 'op' field of each DHCP message sent from a server to a client. + + The first four octets of the 'options' field of the DHCP message + contain the (decimal) values 99, 130, 83 and 99, respectively (this + is the same magic cookie as is defined in RFC 1497 [17]). The + remainder of the 'options' field consists of a list of tagged + parameters that are called "options". All of the "vendor extensions" + listed in RFC 1497 are also DHCP options. RFC 1533 gives the + complete set of options defined for use with DHCP. 
+ + Several options have been defined so far. One particular option - + the "DHCP message type" option - must be included in every DHCP + message. This option defines the "type" of the DHCP message. + Additional options may be allowed, required, or not allowed, + depending on the DHCP message type. + + Throughout this document, DHCP messages that include a 'DHCP message + type' option will be referred to by the type of the message; e.g., a + DHCP message with 'DHCP message type' option type 1 will be referred + to as a "DHCPDISCOVER" message. + +3.1 Client-server interaction - allocating a network address + + The following summary of the protocol exchanges between clients and + servers refers to the DHCP messages described in table 2. The + timeline diagram in figure 3 shows the timing relationships in a + typical client-server interaction. If the client already knows its + address, some steps may be omitted; this abbreviated interaction is + described in section 3.2. + + 1. The client broadcasts a DHCPDISCOVER message on its local physical + subnet. The DHCPDISCOVER message MAY include options that suggest + values for the network address and lease duration. BOOTP relay + agents may pass the message on to DHCP servers not on the same + physical subnet. + + 2. Each server may respond with a DHCPOFFER message that includes an + available network address in the 'yiaddr' field (and other + configuration parameters in DHCP options). Servers need not + reserve the offered network address, although the protocol will + work more efficiently if the server avoids allocating the offered + network address to another client. When allocating a new address, + servers SHOULD check that the offered network address is not + + + +Droms Standards Track [Page 13] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + already in use; e.g., the server may probe the offered address + with an ICMP Echo Request. 
Servers SHOULD be implemented so that
+   network administrators MAY choose to disable probes of newly
+   allocated addresses.  The server transmits the DHCPOFFER message
+   to the client, using the BOOTP relay agent if necessary.
+
+   Message         Use
+   -------         ---
+
+   DHCPDISCOVER -  Client broadcast to locate available servers.
+
+   DHCPOFFER    -  Server to client in response to DHCPDISCOVER with
+                   offer of configuration parameters.
+
+   DHCPREQUEST  -  Client message to servers either (a) requesting
+                   offered parameters from one server and implicitly
+                   declining offers from all others, (b) confirming
+                   correctness of previously allocated address after,
+                   e.g., system reboot, or (c) extending the lease on a
+                   particular network address.
+
+   DHCPACK      -  Server to client with configuration parameters,
+                   including committed network address.
+
+   DHCPNAK      -  Server to client indicating client's notion of network
+                   address is incorrect (e.g., client has moved to new
+                   subnet) or client's lease has expired.
+
+   DHCPDECLINE -   Client to server indicating network address is already
+                   in use.
+
+   DHCPRELEASE -   Client to server relinquishing network address and
+                   cancelling remaining lease.
+
+   DHCPINFORM   -  Client to server, asking only for local configuration
+                   parameters; client already has externally configured
+                   network address.
+ + Table 2: DHCP messages + + + + + + + + + + + + +Droms Standards Track [Page 14] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + Server Client Server + (not selected) (selected) + + v v v + | | | + | Begins initialization | + | | | + | _____________/|\____________ | + |/DHCPDISCOVER | DHCPDISCOVER \| + | | | + Determines | Determines + configuration | configuration + | | | + |\ | ____________/ | + | \________ | /DHCPOFFER | + | DHCPOFFER\ |/ | + | \ | | + | Collects replies | + | \| | + | Selects configuration | + | | | + | _____________/|\____________ | + |/ DHCPREQUEST | DHCPREQUEST\ | + | | | + | | Commits configuration + | | | + | | _____________/| + | |/ DHCPACK | + | | | + | Initialization complete | + | | | + . . . + . . . + | | | + | Graceful shutdown | + | | | + | |\ ____________ | + | | DHCPRELEASE \| + | | | + | | Discards lease + | | | + v v v + Figure 3: Timeline diagram of messages exchanged between DHCP + client and servers when allocating a new network address + + + + + + + +Droms Standards Track [Page 15] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + 3. The client receives one or more DHCPOFFER messages from one or more + servers. The client may choose to wait for multiple responses. + The client chooses one server from which to request configuration + parameters, based on the configuration parameters offered in the + DHCPOFFER messages. The client broadcasts a DHCPREQUEST message + that MUST include the 'server identifier' option to indicate which + server it has selected, and that MAY include other options + specifying desired configuration values. The 'requested IP + address' option MUST be set to the value of 'yiaddr' in the + DHCPOFFER message from the server. This DHCPREQUEST message is + broadcast and relayed through DHCP/BOOTP relay agents. 
To help + ensure that any BOOTP relay agents forward the DHCPREQUEST message + to the same set of DHCP servers that received the original + DHCPDISCOVER message, the DHCPREQUEST message MUST use the same + value in the DHCP message header's 'secs' field and be sent to the + same IP broadcast address as the original DHCPDISCOVER message. + The client times out and retransmits the DHCPDISCOVER message if + the client receives no DHCPOFFER messages. + + 4. The servers receive the DHCPREQUEST broadcast from the client. + Those servers not selected by the DHCPREQUEST message use the + message as notification that the client has declined that server's + offer. The server selected in the DHCPREQUEST message commits the + binding for the client to persistent storage and responds with a + DHCPACK message containing the configuration parameters for the + requesting client. The combination of 'client identifier' or + 'chaddr' and assigned network address constitute a unique + identifier for the client's lease and are used by both the client + and server to identify a lease referred to in any DHCP messages. + Any configuration parameters in the DHCPACK message SHOULD NOT + conflict with those in the earlier DHCPOFFER message to which the + client is responding. The server SHOULD NOT check the offered + network address at this point. The 'yiaddr' field in the DHCPACK + messages is filled in with the selected network address. + + If the selected server is unable to satisfy the DHCPREQUEST message + (e.g., the requested network address has been allocated), the + server SHOULD respond with a DHCPNAK message. + + A server MAY choose to mark addresses offered to clients in + DHCPOFFER messages as unavailable. The server SHOULD mark an + address offered to a client in a DHCPOFFER message as available if + the server receives no DHCPREQUEST message from that client. + + 5. The client receives the DHCPACK message with configuration + parameters. 
The client SHOULD perform a final check on the + parameters (e.g., ARP for allocated network address), and notes the + duration of the lease specified in the DHCPACK message. At this + + + +Droms Standards Track [Page 16] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + point, the client is configured. If the client detects that the + address is already in use (e.g., through the use of ARP), the + client MUST send a DHCPDECLINE message to the server and restarts + the configuration process. The client SHOULD wait a minimum of ten + seconds before restarting the configuration process to avoid + excessive network traffic in case of looping. + + If the client receives a DHCPNAK message, the client restarts the + configuration process. + + The client times out and retransmits the DHCPREQUEST message if the + client receives neither a DHCPACK or a DHCPNAK message. The client + retransmits the DHCPREQUEST according to the retransmission + algorithm in section 4.1. The client should choose to retransmit + the DHCPREQUEST enough times to give adequate probability of + contacting the server without causing the client (and the user of + that client) to wait overly long before giving up; e.g., a client + retransmitting as described in section 4.1 might retransmit the + DHCPREQUEST message four times, for a total delay of 60 seconds, + before restarting the initialization procedure. If the client + receives neither a DHCPACK or a DHCPNAK message after employing the + retransmission algorithm, the client reverts to INIT state and + restarts the initialization process. The client SHOULD notify the + user that the initialization process has failed and is restarting. + + 6. The client may choose to relinquish its lease on a network address + by sending a DHCPRELEASE message to the server. The client + identifies the lease to be released with its 'client identifier', + or 'chaddr' and network address in the DHCPRELEASE message. 
If the + client used a 'client identifier' when it obtained the lease, it + MUST use the same 'client identifier' in the DHCPRELEASE message. + +3.2 Client-server interaction - reusing a previously allocated network + address + + If a client remembers and wishes to reuse a previously allocated + network address, a client may choose to omit some of the steps + described in the previous section. The timeline diagram in figure 4 + shows the timing relationships in a typical client-server interaction + for a client reusing a previously allocated network address. + + + + + + + + + + + +Droms Standards Track [Page 17] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + 1. The client broadcasts a DHCPREQUEST message on its local subnet. + The message includes the client's network address in the + 'requested IP address' option. As the client has not received its + network address, it MUST NOT fill in the 'ciaddr' field. BOOTP + relay agents pass the message on to DHCP servers not on the same + subnet. If the client used a 'client identifier' to obtain its + address, the client MUST use the same 'client identifier' in the + DHCPREQUEST message. + + 2. Servers with knowledge of the client's configuration parameters + respond with a DHCPACK message to the client. Servers SHOULD NOT + check that the client's network address is already in use; the + client may respond to ICMP Echo Request messages at this point. 
+ + Server Client Server + + v v v + | | | + | Begins | + | initialization | + | | | + | /|\ | + | _________ __/ | \__________ | + | /DHCPREQU EST | DHCPREQUEST\ | + |/ | \| + | | | + Locates | Locates + configuration | configuration + | | | + |\ | /| + | \ | ___________/ | + | \ | / DHCPACK | + | \ _______ |/ | + | DHCPACK\ | | + | Initialization | + | complete | + | \| | + | | | + | (Subsequent | + | DHCPACKS | + | ignored) | + | | | + | | | + v v v + + Figure 4: Timeline diagram of messages exchanged between DHCP + client and servers when reusing a previously allocated + network address + + + +Droms Standards Track [Page 18] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + If the client's request is invalid (e.g., the client has moved + to a new subnet), servers SHOULD respond with a DHCPNAK message to + the client. Servers SHOULD NOT respond if their information is not + guaranteed to be accurate. For example, a server that identifies a + request for an expired binding that is owned by another server SHOULD + NOT respond with a DHCPNAK unless the servers are using an explicit + mechanism to maintain coherency among the servers. + + If 'giaddr' is 0x0 in the DHCPREQUEST message, the client is on + the same subnet as the server. The server MUST + broadcast the DHCPNAK message to the 0xffffffff broadcast address + because the client may not have a correct network address or subnet + mask, and the client may not be answering ARP requests. + Otherwise, the server MUST send the DHCPNAK message to the IP + address of the BOOTP relay agent, as recorded in 'giaddr'. The + relay agent will, in turn, forward the message directly to the + client's hardware address, so that the DHCPNAK can be delivered even + if the client has moved to a new network. + + 3. The client receives the DHCPACK message with configuration + parameters. 
The client performs a final check on the parameters + (as in section 3.1), and notes the duration of the lease specified + in the DHCPACK message. The specific lease is implicitly identified + by the 'client identifier' or 'chaddr' and the network address. At + this point, the client is configured. + + If the client detects that the IP address in the DHCPACK message + is already in use, the client MUST send a DHCPDECLINE message to the + server and restarts the configuration process by requesting a + new network address. This action corresponds to the client + moving to the INIT state in the DHCP state diagram, which is + described in section 4.4. + + If the client receives a DHCPNAK message, it cannot reuse its + remembered network address. It must instead request a new + address by restarting the configuration process, this time + using the (non-abbreviated) procedure described in section + 3.1. This action also corresponds to the client moving to + the INIT state in the DHCP state diagram. + + The client times out and retransmits the DHCPREQUEST message if + the client receives neither a DHCPACK nor a DHCPNAK message. The + client retransmits the DHCPREQUEST according to the retransmission + algorithm in section 4.1. The client should choose to retransmit + the DHCPREQUEST enough times to give adequate probability of + contacting the server without causing the client (and the user of + that client) to wait overly long before giving up; e.g., a client + retransmitting as described in section 4.1 might retransmit the + + + +Droms Standards Track [Page 19] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + DHCPREQUEST message four times, for a total delay of 60 seconds, + before restarting the initialization procedure. 
If the client + receives neither a DHCPACK or a DHCPNAK message after employing + the retransmission algorithm, the client MAY choose to use the + previously allocated network address and configuration parameters + for the remainder of the unexpired lease. This corresponds to + moving to BOUND state in the client state transition diagram shown + in figure 5. + + 4. The client may choose to relinquish its lease on a network + address by sending a DHCPRELEASE message to the server. The + client identifies the lease to be released with its + 'client identifier', or 'chaddr' and network address in the + DHCPRELEASE message. + + Note that in this case, where the client retains its network + address locally, the client will not normally relinquish its + lease during a graceful shutdown. Only in the case where the + client explicitly needs to relinquish its lease, e.g., the client + is about to be moved to a different subnet, will the client send + a DHCPRELEASE message. + +3.3 Interpretation and representation of time values + + A client acquires a lease for a network address for a fixed period of + time (which may be infinite). Throughout the protocol, times are to + be represented in units of seconds. The time value of 0xffffffff is + reserved to represent "infinity". + + As clients and servers may not have synchronized clocks, times are + represented in DHCP messages as relative times, to be interpreted + with respect to the client's local clock. Representing relative + times in units of seconds in an unsigned 32 bit word gives a range of + relative times from 0 to approximately 100 years, which is sufficient + for the relative times to be measured using DHCP. + + The algorithm for lease duration interpretation given in the previous + paragraph assumes that client and server clocks are stable relative + to each other. If there is drift between the two clocks, the server + may consider the lease expired before the client does. 
To + compensate, the server may return a shorter lease duration to the + client than the server commits to its local database of client + information. + +3.4 Obtaining parameters with externally configured network address + + If a client has obtained a network address through some other means + (e.g., manual configuration), it may use a DHCPINFORM request message + + + +Droms Standards Track [Page 20] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + to obtain other local configuration parameters. Servers receiving a + DHCPINFORM message construct a DHCPACK message with any local + configuration parameters appropriate for the client without: + allocating a new address, checking for an existing binding, filling + in 'yiaddr' or including lease time parameters. The servers SHOULD + unicast the DHCPACK reply to the address given in the 'ciaddr' field + of the DHCPINFORM message. + + The server SHOULD check the network address in a DHCPINFORM message + for consistency, but MUST NOT check for an existing lease. The + server forms a DHCPACK message containing the configuration + parameters for the requesting client and sends the DHCPACK message + directly to the client. + +3.5 Client parameters in DHCP + + Not all clients require initialization of all parameters listed in + Appendix A. Two techniques are used to reduce the number of + parameters transmitted from the server to the client. First, most of + the parameters have defaults defined in the Host Requirements RFCs; + if the client receives no parameters from the server that override + the defaults, a client uses those default values. Second, in its + initial DHCPDISCOVER or DHCPREQUEST message, a client may provide the + server with a list of specific parameters the client is interested + in. If the client includes a list of parameters in a DHCPDISCOVER + message, it MUST include that list in any subsequent DHCPREQUEST + messages. 
+ + The client SHOULD include the 'maximum DHCP message size' option to + let the server know how large the server may make its DHCP messages. + The parameters returned to a client may still exceed the space + allocated to options in a DHCP message. In this case, two additional + options flags (which must appear in the 'options' field of the + message) indicate that the 'file' and 'sname' fields are to be used + for options. + + The client can inform the server which configuration parameters the + client is interested in by including the 'parameter request list' + option. The data portion of this option explicitly lists the options + requested by tag number. + + In addition, the client may suggest values for the network address + and lease time in the DHCPDISCOVER message. The client may include + the 'requested IP address' option to suggest that a particular IP + address be assigned, and may include the 'IP address lease time' + option to suggest the lease time it would like. Other options + representing "hints" at configuration parameters are allowed in a + DHCPDISCOVER or DHCPREQUEST message. However, additional options may + + + +Droms Standards Track [Page 21] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + be ignored by servers, and multiple servers may, therefore, not + return identical values for some options. The 'requested IP address' + option is to be filled in only in a DHCPREQUEST message when the + client is verifying network parameters obtained previously. The + client fills in the 'ciaddr' field only when correctly configured + with an IP address in BOUND, RENEWING or REBINDING state. + + If a server receives a DHCPREQUEST message with an invalid 'requested + IP address', the server SHOULD respond to the client with a DHCPNAK + message and may choose to report the problem to the system + administrator. The server may include an error message in the + 'message' option. 
+ +3.6 Use of DHCP in clients with multiple interfaces + + A client with multiple network interfaces must use DHCP through each + interface independently to obtain configuration information + parameters for those separate interfaces. + +3.7 When clients should use DHCP + + A client SHOULD use DHCP to reacquire or verify its IP address and + network parameters whenever the local network parameters may have + changed; e.g., at system boot time or after a disconnection from the + local network, as the local network configuration may change without + the client's or user's knowledge. + + If a client has knowledge of a previous network address and is unable + to contact a local DHCP server, the client may continue to use the + previous network address until the lease for that address expires. + If the lease expires before the client can contact a DHCP server, the + client must immediately discontinue use of the previous network + address and may inform local users of the problem. + +4. Specification of the DHCP client-server protocol + + In this section, we assume that a DHCP server has a block of network + addresses from which it can satisfy requests for new addresses. Each + server also maintains a database of allocated addresses and leases in + local permanent storage. + +4.1 Constructing and sending DHCP messages + + DHCP clients and servers both construct DHCP messages by filling in + fields in the fixed format section of the message and appending + tagged data items in the variable length option area. The options + area includes first a four-octet 'magic cookie' (which was described + in section 3), followed by the options. The last option must always + + + +Droms Standards Track [Page 22] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + be the 'end' option. + + DHCP uses UDP as its transport protocol. 
DHCP messages from a client
+   to a server are sent to the 'DHCP server' port (67), and DHCP
+   messages from a server to a client are sent to the 'DHCP client' port
+   (68).  A server with multiple network addresses (e.g., a multi-homed
+   host) MAY use any of its network addresses in outgoing DHCP messages.
+
+   The 'server identifier' field is used both to identify a DHCP server
+   in a DHCP message and as a destination address from clients to
+   servers.  A server with multiple network addresses MUST be prepared
+   to accept any of its network addresses as identifying that server
+   in a DHCP message.  To accommodate potentially incomplete network
+   connectivity, a server MUST choose an address as a 'server
+   identifier' that, to the best of the server's knowledge, is reachable
+   from the client.  For example, if the DHCP server and the DHCP client
+   are connected to the same subnet (i.e., the 'giaddr' field in the
+   message from the client is zero), the server SHOULD select the IP
+   address the server is using for communication on that subnet as the
+   'server identifier'.  If the server is using multiple IP addresses on
+   that subnet, any such address may be used.  If the server has
+   received a message through a DHCP relay agent, the server SHOULD
+   choose an address from the interface on which the message was
+   received as the 'server identifier' (unless the server has other,
+   better information on which to make its choice).  DHCP clients MUST
+   use the IP address provided in the 'server identifier' option for any
+   unicast requests to the DHCP server.
+
+   DHCP messages broadcast by a client prior to that client obtaining
+   its IP address must have the source address field in the IP header
+   set to 0.
+
+   If the 'giaddr' field in a DHCP message from a client is non-zero,
+   the server sends any return messages to the 'DHCP server' port on the
+   BOOTP relay agent whose address appears in 'giaddr'.
If the 'giaddr' + field is zero and the 'ciaddr' field is nonzero, then the server + unicasts DHCPOFFER and DHCPACK messages to the address in 'ciaddr'. + If 'giaddr' is zero and 'ciaddr' is zero, and the broadcast bit is + set, then the server broadcasts DHCPOFFER and DHCPACK messages to + 0xffffffff. If the broadcast bit is not set and 'giaddr' is zero and + 'ciaddr' is zero, then the server unicasts DHCPOFFER and DHCPACK + messages to the client's hardware address and 'yiaddr' address. In + all cases, when 'giaddr' is zero, the server broadcasts any DHCPNAK + messages to 0xffffffff. + + If the options in a DHCP message extend into the 'sname' and 'file' + fields, the 'option overload' option MUST appear in the 'options' + field, with value 1, 2 or 3, as specified in RFC 1533. If the + + + +Droms Standards Track [Page 23] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + 'option overload' option is present in the 'options' field, the + options in the 'options' field MUST be terminated by an 'end' option, + and MAY contain one or more 'pad' options to fill the options field. + The options in the 'sname' and 'file' fields (if in use as indicated + by the 'options overload' option) MUST begin with the first octet of + the field, MUST be terminated by an 'end' option, and MUST be + followed by 'pad' options to fill the remainder of the field. Any + individual option in the 'options', 'sname' and 'file' fields MUST be + entirely contained in that field. The options in the 'options' field + MUST be interpreted first, so that any 'option overload' options may + be interpreted. The 'file' field MUST be interpreted next (if the + 'option overload' option indicates that the 'file' field contains + DHCP options), followed by the 'sname' field. + + The values to be passed in an 'option' tag may be too long to fit in + the 255 octets available to a single option (e.g., a list of routers + in a 'router' option [21]). 
Options may appear only once, unless + otherwise specified in the options document. The client concatenates + the values of multiple instances of the same option into a single + parameter list for configuration. + + DHCP clients are responsible for all message retransmission. The + client MUST adopt a retransmission strategy that incorporates a + randomized exponential backoff algorithm to determine the delay + between retransmissions. The delay between retransmissions SHOULD be + chosen to allow sufficient time for replies from the server to be + delivered based on the characteristics of the internetwork between + the client and the server. For example, in a 10Mb/sec Ethernet + internetwork, the delay before the first retransmission SHOULD be 4 + seconds randomized by the value of a uniform random number chosen + from the range -1 to +1. Clients with clocks that provide resolution + granularity of less than one second may choose a non-integer + randomization value. The delay before the next retransmission SHOULD + be 8 seconds randomized by the value of a uniform number chosen from + the range -1 to +1. The retransmission delay SHOULD be doubled with + subsequent retransmissions up to a maximum of 64 seconds. The client + MAY provide an indication of retransmission attempts to the user as + an indication of the progress of the configuration process. + + The 'xid' field is used by the client to match incoming DHCP messages + with pending requests. A DHCP client MUST choose 'xid's in such a + way as to minimize the chance of using an 'xid' identical to one used + by another client. For example, a client may choose a different, + random initial 'xid' each time the client is rebooted, and + subsequently use sequential 'xid's until the next reboot. Selecting + a new 'xid' for each retransmission is an implementation decision. A + client may choose to reuse the same 'xid' or select a new 'xid' for + each retransmitted message. 
+ + + +Droms Standards Track [Page 24] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + Normally, DHCP servers and BOOTP relay agents attempt to deliver + DHCPOFFER, DHCPACK and DHCPNAK messages directly to the client using + unicast delivery. The IP destination address (in the IP header) is + set to the DHCP 'yiaddr' address and the link-layer destination + address is set to the DHCP 'chaddr' address. Unfortunately, some + client implementations are unable to receive such unicast IP + datagrams until the implementation has been configured with a valid + IP address (leading to a deadlock in which the client's IP address + cannot be delivered until the client has been configured with an IP + address). + + A client that cannot receive unicast IP datagrams until its protocol + software has been configured with an IP address SHOULD set the + BROADCAST bit in the 'flags' field to 1 in any DHCPDISCOVER or + DHCPREQUEST messages that client sends. The BROADCAST bit will + provide a hint to the DHCP server and BOOTP relay agent to broadcast + any messages to the client on the client's subnet. A client that can + receive unicast IP datagrams before its protocol software has been + configured SHOULD clear the BROADCAST bit to 0. The BOOTP + clarifications document discusses the ramifications of the use of the + BROADCAST bit [21]. + + A server or relay agent sending or relaying a DHCP message directly + to a DHCP client (i.e., not to a relay agent specified in the + 'giaddr' field) SHOULD examine the BROADCAST bit in the 'flags' + field. If this bit is set to 1, the DHCP message SHOULD be sent as + an IP broadcast using an IP broadcast address (preferably 0xffffffff) + as the IP destination address and the link-layer broadcast address as + the link-layer destination address. 
If the BROADCAST bit is cleared + to 0, the message SHOULD be sent as an IP unicast to the IP address + specified in the 'yiaddr' field and the link-layer address specified + in the 'chaddr' field. If unicasting is not possible, the message + MAY be sent as an IP broadcast using an IP broadcast address + (preferably 0xffffffff) as the IP destination address and the link- + layer broadcast address as the link-layer destination address. + +4.2 DHCP server administrative controls + + DHCP servers are not required to respond to every DHCPDISCOVER and + DHCPREQUEST message they receive. For example, a network + administrator, to retain stringent control over the clients attached + to the network, may choose to configure DHCP servers to respond only + to clients that have been previously registered through some external + mechanism. The DHCP specification describes only the interactions + between clients and servers when the clients and servers choose to + interact; it is beyond the scope of the DHCP specification to + describe all of the administrative controls that system + administrators might want to use. Specific DHCP server + + + +Droms Standards Track [Page 25] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + implementations may incorporate any controls or policies desired by a + network administrator. + + In some environments, a DHCP server will have to consider the values + of the vendor class options included in DHCPDISCOVER or DHCPREQUEST + messages when determining the correct parameters for a particular + client. + + A DHCP server needs to use some unique identifier to associate a + client with its lease. The client MAY choose to explicitly provide + the identifier through the 'client identifier' option. If the client + supplies a 'client identifier', the client MUST use the same 'client + identifier' in all subsequent messages, and the server MUST use that + identifier to identify the client. 
If the client does not provide a + 'client identifier' option, the server MUST use the contents of the + 'chaddr' field to identify the client. It is crucial for a DHCP + client to use an identifier unique within the subnet to which the + client is attached in the 'client identifier' option. Use of + 'chaddr' as the client's unique identifier may cause unexpected + results, as that identifier may be associated with a hardware + interface that could be moved to a new client. Some sites may choose + to use a manufacturer's serial number as the 'client identifier', to + avoid unexpected changes in a client's network address due to transfer + of hardware interfaces among computers. Sites may also choose to use + a DNS name as the 'client identifier', causing address leases to be + associated with the DNS name rather than a specific hardware box. + + DHCP clients are free to use any strategy in selecting a DHCP server + among those from which the client receives a DHCPOFFER message. The + client implementation of DHCP SHOULD provide a mechanism for the user + to select directly the 'vendor class identifier' values. + +4.3 DHCP server behavior + + A DHCP server processes incoming DHCP messages from a client based on + the current state of the binding for that client. A DHCP server can + receive the following messages from a client: + + o DHCPDISCOVER + + o DHCPREQUEST + + o DHCPDECLINE + + o DHCPRELEASE + + o DHCPINFORM + + + + +Droms Standards Track [Page 26] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + Table 3 gives the use of the fields and options in a DHCP message by + a server. The remainder of this section describes the action of the + DHCP server for each possible incoming message. + +4.3.1 DHCPDISCOVER message + + When a server receives a DHCPDISCOVER message from a client, the + server chooses a network address for the requesting client. If no + address is available, the server may choose to report the problem to + the system administrator. 
If an address is available, the new address + SHOULD be chosen as follows: + + o The client's current address as recorded in the client's current + binding, ELSE + + o The client's previous address as recorded in the client's (now + expired or released) binding, if that address is in the server's + pool of available addresses and not already allocated, ELSE + + o The address requested in the 'Requested IP Address' option, if that + address is valid and not already allocated, ELSE + + o A new address allocated from the server's pool of available + addresses; the address is selected based on the subnet from which + the message was received (if 'giaddr' is 0) or on the address of + the relay agent that forwarded the message ('giaddr' when not 0). + + As described in section 4.2, a server MAY, for administrative + reasons, assign an address other than the one requested, or may + refuse to allocate an address to a particular client even though free + addresses are available. + + Note that, in some network architectures (e.g., internets with more + than one IP subnet assigned to a physical network segment), it may be + the case that the DHCP client should be assigned an address from a + different subnet than the address recorded in 'giaddr'. Thus, DHCP + does not require that the client be assigned an address from the + subnet in 'giaddr'. A server is free to choose some other subnet, + and it is beyond the scope of the DHCP specification to describe ways + in which the assigned IP address might be chosen. + + While not required for correct operation of DHCP, the server SHOULD + NOT reuse the selected network address before the client responds to + the server's DHCPOFFER message. The server may choose to record the + address as offered to the client. 
+ + The server must also choose an expiration time for the lease, as + follows: + + + +Droms Standards Track [Page 27] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + o IF the client has not requested a specific lease in the + DHCPDISCOVER message and the client already has an assigned network + address, the server returns the lease expiration time previously + assigned to that address (note that the client must explicitly + request a specific lease to extend the expiration time on a + previously assigned address), ELSE + + o IF the client has not requested a specific lease in the + DHCPDISCOVER message and the client does not have an assigned + network address, the server assigns a locally configured default + lease time, ELSE + + o IF the client has requested a specific lease in the DHCPDISCOVER + message (regardless of whether the client has an assigned network + address), the server may choose either to return the requested + lease (if the lease is acceptable to local policy) or select + another lease. 
+ +Field DHCPOFFER DHCPACK DHCPNAK +----- --------- ------- ------- +'op' BOOTREPLY BOOTREPLY BOOTREPLY +'htype' (From "Assigned Numbers" RFC) +'hlen' (Hardware address length in octets) +'hops' 0 0 0 +'xid' 'xid' from client 'xid' from client 'xid' from client + DHCPDISCOVER DHCPREQUEST DHCPREQUEST + message message message +'secs' 0 0 0 +'ciaddr' 0 'ciaddr' from 0 + DHCPREQUEST or 0 +'yiaddr' IP address offered IP address 0 + to client assigned to client +'siaddr' IP address of next IP address of next 0 + bootstrap server bootstrap server +'flags' 'flags' from 'flags' from 'flags' from + client DHCPDISCOVER client DHCPREQUEST client DHCPREQUEST + message message message +'giaddr' 'giaddr' from 'giaddr' from 'giaddr' from + client DHCPDISCOVER client DHCPREQUEST client DHCPREQUEST + message message message +'chaddr' 'chaddr' from 'chaddr' from 'chaddr' from + client DHCPDISCOVER client DHCPREQUEST client DHCPREQUEST + message message message +'sname' Server host name Server host name (unused) + or options or options +'file' Client boot file Client boot file (unused) + name or options name or options +'options' options options + + + +Droms Standards Track [Page 28] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +Option DHCPOFFER DHCPACK DHCPNAK +------ --------- ------- ------- +Requested IP address MUST NOT MUST NOT MUST NOT +IP address lease time MUST MUST (DHCPREQUEST) MUST NOT + MUST NOT (DHCPINFORM) +Use 'file'/'sname' fields MAY MAY MUST NOT +DHCP message type DHCPOFFER DHCPACK DHCPNAK +Parameter request list MUST NOT MUST NOT MUST NOT +Message SHOULD SHOULD SHOULD +Client identifier MUST NOT MUST NOT MAY +Vendor class identifier MAY MAY MAY +Server identifier MUST MUST MUST +Maximum message size MUST NOT MUST NOT MUST NOT +All others MAY MAY MUST NOT + + Table 3: Fields and options used by DHCP servers + + Once the network address and lease have been determined, the server + constructs a DHCPOFFER message with the offered configuration + 
parameters. It is important for all DHCP servers to return the same + parameters (with the possible exception of a newly allocated network + address) to ensure predictable client behavior regardless of which + server the client selects. The configuration parameters MUST be + selected by applying the following rules in the order given below. + The network administrator is responsible for configuring multiple + DHCP servers to ensure uniform responses from those servers. The + server MUST return to the client: + + o The client's network address, as determined by the rules given + earlier in this section, + + o The expiration time for the client's lease, as determined by the + rules given earlier in this section, + + o Parameters requested by the client, according to the following + rules: + + -- IF the server has been explicitly configured with a default + value for the parameter, the server MUST include that value + in an appropriate option in the 'option' field, ELSE + + -- IF the server recognizes the parameter as a parameter + defined in the Host Requirements Document, the server MUST + include the default value for that parameter as given in the + Host Requirements Document in an appropriate option in the + 'option' field, ELSE + + -- The server MUST NOT return a value for that parameter, + + + +Droms Standards Track [Page 29] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + The server MUST supply as many of the requested parameters as + possible and MUST omit any parameters it cannot provide. The + server MUST include each requested parameter only once unless + explicitly allowed in the DHCP Options and BOOTP Vendor + Extensions document. 
+ + o Any parameters from the existing binding that differ from the Host + Requirements Document defaults, + + o Any parameters specific to this client (as identified by + the contents of 'chaddr' or 'client identifier' in the DHCPDISCOVER + or DHCPREQUEST message), e.g., as configured by the network + administrator, + + o Any parameters specific to this client's class (as identified + by the contents of the 'vendor class identifier' + option in the DHCPDISCOVER or DHCPREQUEST message), + e.g., as configured by the network administrator; the parameters + MUST be identified by an exact match between the client's vendor + class identifiers and the client's classes identified in the + server, + + o Parameters with non-default values on the client's subnet. + + The server MAY choose to return the 'vendor class identifier' used to + determine the parameters in the DHCPOFFER message to assist the + client in selecting which DHCPOFFER to accept. The server inserts + the 'xid' field from the DHCPDISCOVER message into the 'xid' field of + the DHCPOFFER message and sends the DHCPOFFER message to the + requesting client. + +4.3.2 DHCPREQUEST message + + A DHCPREQUEST message may come from a client responding to a + DHCPOFFER message from a server, from a client verifying a previously + allocated IP address or from a client extending the lease on a + network address. If the DHCPREQUEST message contains a 'server + identifier' option, the message is in response to a DHCPOFFER + message. Otherwise, the message is a request to verify or extend an + existing lease. If the client uses a 'client identifier' in a + DHCPREQUEST message, it MUST use that same 'client identifier' in all + subsequent messages. If the client included a list of requested + parameters in a DHCPDISCOVER message, it MUST include that list in + all subsequent messages. 
+ + + + + + + +Droms Standards Track [Page 30] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + Any configuration parameters in the DHCPACK message SHOULD NOT + conflict with those in the earlier DHCPOFFER message to which the + client is responding. The client SHOULD use the parameters in the + DHCPACK message for configuration. + + Clients send DHCPREQUEST messages as follows: + + o DHCPREQUEST generated during SELECTING state: + + Client inserts the address of the selected server in 'server + identifier', 'ciaddr' MUST be zero, 'requested IP address' MUST be + filled in with the yiaddr value from the chosen DHCPOFFER. + + Note that the client may choose to collect several DHCPOFFER + messages and select the "best" offer. The client indicates its + selection by identifying the offering server in the DHCPREQUEST + message. If the client receives no acceptable offers, the client + may choose to try another DHCPDISCOVER message. Therefore, the + servers may not receive a specific DHCPREQUEST from which they can + decide whether or not the client has accepted the offer. Because + the servers have not committed any network address assignments on + the basis of a DHCPOFFER, servers are free to reuse offered + network addresses in response to subsequent requests. As an + implementation detail, servers SHOULD NOT reuse offered addresses + and may use an implementation-specific timeout mechanism to decide + when to reuse an offered address. + + o DHCPREQUEST generated during INIT-REBOOT state: + + 'server identifier' MUST NOT be filled in, 'requested IP address' + option MUST be filled in with client's notion of its previously + assigned address. 'ciaddr' MUST be zero. The client is seeking to + verify a previously allocated, cached configuration. Server SHOULD + send a DHCPNAK message to the client if the 'requested IP address' + is incorrect, or is on the wrong network. 
+ + Determining whether a client in the INIT-REBOOT state is on the + correct network is done by examining the contents of 'giaddr', the + 'requested IP address' option, and a database lookup. If the DHCP + server detects that the client is on the wrong net (i.e., the + result of applying the local subnet mask or remote subnet mask (if + 'giaddr' is not zero) to 'requested IP address' option value + doesn't match reality), then the server SHOULD send a DHCPNAK + message to the client. + + + + + + + +Droms Standards Track [Page 31] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + If the network is correct, then the DHCP server should check if + the client's notion of its IP address is correct. If not, then the + server SHOULD send a DHCPNAK message to the client. If the DHCP + server has no record of this client, then it MUST remain silent, + and MAY output a warning to the network administrator. This + behavior is necessary for peaceful coexistence of non- + communicating DHCP servers on the same wire. + + If 'giaddr' is 0x0 in the DHCPREQUEST message, the client is on + the same subnet as the server. The server MUST broadcast the + DHCPNAK message to the 0xffffffff broadcast address because the + client may not have a correct network address or subnet mask, and + the client may not be answering ARP requests. + + If 'giaddr' is set in the DHCPREQUEST message, the client is on a + different subnet. The server MUST set the broadcast bit in the + DHCPNAK, so that the relay agent will broadcast the DHCPNAK to the + client, because the client may not have a correct network address + or subnet mask, and the client may not be answering ARP requests. + + o DHCPREQUEST generated during RENEWING state: + + 'server identifier' MUST NOT be filled in, 'requested IP address' + option MUST NOT be filled in, 'ciaddr' MUST be filled in with + client's IP address. In this situation, the client is completely + configured, and is trying to extend its lease. 
This message will + be unicast, so no relay agents will be involved in its + transmission. Because 'giaddr' is therefore not filled in, the + DHCP server will trust the value in 'ciaddr', and use it when + replying to the client. + + A client MAY choose to renew or extend its lease prior to T1. The + server may choose not to extend the lease (as a policy decision by + the network administrator), but should return a DHCPACK message + regardless. + + o DHCPREQUEST generated during REBINDING state: + + 'server identifier' MUST NOT be filled in, 'requested IP address' + option MUST NOT be filled in, 'ciaddr' MUST be filled in with + client's IP address. In this situation, the client is completely + configured, and is trying to extend its lease. This message MUST + be broadcast to the 0xffffffff IP broadcast address. The DHCP + server SHOULD check 'ciaddr' for correctness before replying to + the DHCPREQUEST. + + + + + + +Droms Standards Track [Page 32] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + The DHCPREQUEST from a REBINDING client is intended to accommodate + sites that have multiple DHCP servers and a mechanism for + maintaining consistency among leases managed by multiple servers. + A DHCP server MAY extend a client's lease only if it has local + administrative authority to do so. + +4.3.3 DHCPDECLINE message + + If the server receives a DHCPDECLINE message, the client has + discovered through some other means that the suggested network + address is already in use. The server MUST mark the network address + as not available and SHOULD notify the local system administrator of + a possible configuration problem. + +4.3.4 DHCPRELEASE message + + Upon receipt of a DHCPRELEASE message, the server marks the network + address as not allocated. The server SHOULD retain a record of the + client's initialization parameters for possible reuse in response to + subsequent requests from the client. 
+ +4.3.5 DHCPINFORM message + + The server responds to a DHCPINFORM message by sending a DHCPACK + message directly to the address given in the 'ciaddr' field of the + DHCPINFORM message. The server MUST NOT send a lease expiration time + to the client and SHOULD NOT fill in 'yiaddr'. The server includes + other parameters in the DHCPACK message as defined in section 4.3.1. + +4.3.6 Client messages + + Table 4 details the differences between messages from clients in + various states. + + --------------------------------------------------------------------- + | |INIT-REBOOT |SELECTING |RENEWING |REBINDING | + --------------------------------------------------------------------- + |broad/unicast |broadcast |broadcast |unicast |broadcast | + |server-ip |MUST NOT |MUST |MUST NOT |MUST NOT | + |requested-ip |MUST |MUST |MUST NOT |MUST NOT | + |ciaddr |zero |zero |IP address |IP address| + --------------------------------------------------------------------- + + Table 4: Client messages from different states + + + + + + + +Droms Standards Track [Page 33] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +4.4 DHCP client behavior + + Figure 5 gives a state-transition diagram for a DHCP client. A + client can receive the following messages from a server: + + o DHCPOFFER + + o DHCPACK + + o DHCPNAK + + The DHCPINFORM message is not shown in figure 5. A client simply + sends the DHCPINFORM and waits for DHCPACK messages. Once the client + has selected its parameters, it has completed the configuration + process. + + Table 5 gives the use of the fields and options in a DHCP message by + a client. The remainder of this section describes the action of the + DHCP client for each possible incoming message. The description in + the following section corresponds to the full configuration procedure + previously described in section 3.1, and the text in the subsequent + section corresponds to the abbreviated configuration procedure + described in section 3.2. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +Droms Standards Track [Page 34] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + -------- ------- +| | +-------------------------->| |<-------------------+ +| INIT- | | +-------------------->| INIT | | +| REBOOT |DHCPNAK/ +---------->| |<---+ | +| |Restart| | ------- | | + -------- | DHCPNAK/ | | | + | Discard offer | -/Send DHCPDISCOVER | +-/Send DHCPREQUEST | | | + | | | DHCPACK v | | + ----------- | (not accept.)/ ----------- | | +| | | Send DHCPDECLINE | | | +| REBOOTING | | | | SELECTING |<----+ | +| | | / | | |DHCPOFFER/ | + ----------- | / ----------- | |Collect | + | | / | | | replies | +DHCPACK/ | / +----------------+ +-------+ | +Record lease, set| | v Select offer/ | +timers T1, T2 ------------ send DHCPREQUEST | | + | +----->| | DHCPNAK, Lease expired/ | + | | | REQUESTING | Halt network | + DHCPOFFER/ | | | | + Discard ------------ | | + | | | | ----------- | + | +--------+ DHCPACK/ | | | + | Record lease, set -----| REBINDING | | + | timers T1, T2 / | | | + | | DHCPACK/ ----------- | + | v Record lease, set ^ | + +----------------> ------- /timers T1,T2 | | + +----->| |<---+ | | + | | BOUND |<---+ | | + DHCPOFFER, DHCPACK, | | | T2 expires/ DHCPNAK/ + DHCPNAK/Discard ------- | Broadcast Halt network + | | | | DHCPREQUEST | + +-------+ | DHCPACK/ | | + T1 expires/ Record lease, set | | + Send DHCPREQUEST timers T1, T2 | | + to leasing server | | | + | ---------- | | + | | |------------+ | + +->| RENEWING | | + | |----------------------------+ + ---------- + Figure 5: State-transition diagram for DHCP clients + + + + + + + +Droms Standards Track [Page 35] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +4.4.1 Initialization and allocation of network address + + The client begins in INIT state and forms a DHCPDISCOVER message. + The client SHOULD wait a random time between one and ten seconds to + desynchronize the use of DHCP at startup. 
The client sets 'ciaddr' + to 0x00000000. The client MAY request specific parameters by + including the 'parameter request list' option. The client MAY + suggest a network address and/or lease time by including the + 'requested IP address' and 'IP address lease time' options. The + client MUST include its hardware address in the 'chaddr' field, if + necessary for delivery of DHCP reply messages. The client MAY + include a different unique identifier in the 'client identifier' + option, as discussed in section 4.2. If the client included a list + of requested parameters in a DHCPDISCOVER message, it MUST include + that list in all subsequent messages. + + The client generates and records a random transaction identifier and + inserts that identifier into the 'xid' field. The client records its + own local time for later use in computing the lease expiration. The + client then broadcasts the DHCPDISCOVER on the local hardware + broadcast address to the 0xffffffff IP broadcast address and 'DHCP + server' UDP port. + + If the 'xid' of an arriving DHCPOFFER message does not match the + 'xid' of the most recent DHCPDISCOVER message, the DHCPOFFER message + must be silently discarded. Any arriving DHCPACK messages must be + silently discarded. + + The client collects DHCPOFFER messages over a period of time, selects + one DHCPOFFER message from the (possibly many) incoming DHCPOFFER + messages (e.g., the first DHCPOFFER message or the DHCPOFFER message + from the previously used server) and extracts the server address from + the 'server identifier' option in the DHCPOFFER message. The time + over which the client collects messages and the mechanism used to + select one DHCPOFFER are implementation dependent. 
+ + + + + + + + + + + + + + + + +Droms Standards Track [Page 36] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +Field DHCPDISCOVER DHCPREQUEST DHCPDECLINE, + DHCPINFORM DHCPRELEASE +----- ------------ ----------- ----------- +'op' BOOTREQUEST BOOTREQUEST BOOTREQUEST +'htype' (From "Assigned Numbers" RFC) +'hlen' (Hardware address length in octets) +'hops' 0 0 0 +'xid' selected by client 'xid' from server selected by + DHCPOFFER message client +'secs' 0 or seconds since 0 or seconds since 0 + DHCP process started DHCP process started +'flags' Set 'BROADCAST' Set 'BROADCAST' 0 + flag if client flag if client + requires broadcast requires broadcast + reply reply +'ciaddr' 0 (DHCPDISCOVER) 0 or client's 0 (DHCPDECLINE) + client's network address client's network + network address (BOUND/RENEW/REBIND) address + (DHCPINFORM) (DHCPRELEASE) +'yiaddr' 0 0 0 +'siaddr' 0 0 0 +'giaddr' 0 0 0 +'chaddr' client's hardware client's hardware client's hardware + address address address +'sname' options, if options, if (unused) + indicated in indicated in + 'sname/file' 'sname/file' + option; otherwise option; otherwise + unused unused +'file' options, if options, if (unused) + indicated in indicated in + 'sname/file' 'sname/file' + option; otherwise option; otherwise + unused unused +'options' options options (unused) + + + + + + + + + + + + + + + + +Droms Standards Track [Page 37] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +Option DHCPDISCOVER DHCPREQUEST DHCPDECLINE, + DHCPINFORM DHCPRELEASE +------ ------------ ----------- ----------- +Requested IP address MAY MUST (in MUST + (DISCOVER) SELECTING or (DHCPDECLINE), + MUST NOT INIT-REBOOT) MUST NOT + (INFORM) MUST NOT (in (DHCPRELEASE) + BOUND or + RENEWING) +IP address lease time MAY MAY MUST NOT + (DISCOVER) + MUST NOT + (INFORM) +Use 'file'/'sname' fields MAY MAY MAY +DHCP message type DHCPDISCOVER/ DHCPREQUEST DHCPDECLINE/ + DHCPINFORM DHCPRELEASE +Client identifier MAY MAY MAY +Vendor class 
identifier MAY MAY MUST NOT +Server identifier MUST NOT MUST (after MUST + SELECTING) + MUST NOT (after + INIT-REBOOT, + BOUND, RENEWING + or REBINDING) +Parameter request list MAY MAY MUST NOT +Maximum message size MAY MAY MUST NOT +Message SHOULD NOT SHOULD NOT SHOULD +Site-specific MAY MAY MUST NOT +All others MAY MAY MUST NOT + + Table 5: Fields and options used by DHCP clients + + If the parameters are acceptable, the client records the address of + the server that supplied the parameters from the 'server identifier' + field and sends that address in the 'server identifier' field of a + DHCPREQUEST broadcast message. Once the DHCPACK message from the + server arrives, the client is initialized and moves to BOUND state. + The DHCPREQUEST message contains the same 'xid' as the DHCPOFFER + message. The client records the lease expiration time as the sum of + the time at which the original request was sent and the duration of + the lease from the DHCPACK message. The client SHOULD perform a + check on the suggested address to ensure that the address is not + already in use. For example, if the client is on a network that + supports ARP, the client may issue an ARP request for the suggested + address. When broadcasting an ARP request for the suggested address, + the client must fill in its own hardware address as the sender's + hardware address, and 0 as the sender's IP address, to avoid + confusing ARP caches in other hosts on the same subnet. If the + + + +Droms Standards Track [Page 38] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + network address appears to be in use, the client MUST send a + DHCPDECLINE message to the server. The client SHOULD broadcast an ARP + reply to announce the client's new IP address and clear any outdated + ARP cache entries in hosts on the client's subnet. + +4.4.2 Initialization with known network address + + The client begins in INIT-REBOOT state and sends a DHCPREQUEST + message. 
The client MUST insert its known network address as a + 'requested IP address' option in the DHCPREQUEST message. The client + may request specific configuration parameters by including the + 'parameter request list' option. The client generates and records a + random transaction identifier and inserts that identifier into the + 'xid' field. The client records its own local time for later use in + computing the lease expiration. The client MUST NOT include a + 'server identifier' in the DHCPREQUEST message. The client then + broadcasts the DHCPREQUEST on the local hardware broadcast address to + the 'DHCP server' UDP port. + + Once a DHCPACK message with an 'xid' field matching that in the + client's DHCPREQUEST message arrives from any server, the client is + initialized and moves to BOUND state. The client records the lease + expiration time as the sum of the time at which the DHCPREQUEST + message was sent and the duration of the lease from the DHCPACK + message. + +4.4.3 Initialization with an externally assigned network address + + The client sends a DHCPINFORM message. The client may request + specific configuration parameters by including the 'parameter request + list' option. The client generates and records a random transaction + identifier and inserts that identifier into the 'xid' field. The + client places its own network address in the 'ciaddr' field. The + client SHOULD NOT request lease time parameters. + + The client then unicasts the DHCPINFORM to the DHCP server if it + knows the server's address, otherwise it broadcasts the message to + the limited (all 1s) broadcast address. DHCPINFORM messages MUST be + directed to the 'DHCP server' UDP port. + + Once a DHCPACK message with an 'xid' field matching that in the + client's DHCPINFORM message arrives from any server, the client is + initialized. 
+ + If the client does not receive a DHCPACK within a reasonable period + of time (60 seconds or 4 tries if using timeout suggested in section + 4.1), then it SHOULD display a message informing the user of the + problem, and then SHOULD begin network processing using suitable + + + +Droms Standards Track [Page 39] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + defaults as per Appendix A. + +4.4.4 Use of broadcast and unicast + + The DHCP client broadcasts DHCPDISCOVER, DHCPREQUEST and DHCPINFORM + messages, unless the client knows the address of a DHCP server. The + client unicasts DHCPRELEASE messages to the server. Because the + client is declining the use of the IP address supplied by the server, + the client broadcasts DHCPDECLINE messages. + + When the DHCP client knows the address of a DHCP server, in either + INIT or REBOOTING state, the client may use that address in the + DHCPDISCOVER or DHCPREQUEST rather than the IP broadcast address. + The client may also use unicast to send DHCPINFORM messages to a + known DHCP server. If the client receives no response to DHCP + messages sent to the IP address of a known DHCP server, the DHCP + client reverts to using the IP broadcast address. + +4.4.5 Reacquisition and expiration + + The client maintains two times, T1 and T2, that specify the times at + which the client tries to extend its lease on its network address. + T1 is the time at which the client enters the RENEWING state and + attempts to contact the server that originally issued the client's + network address. T2 is the time at which the client enters the + REBINDING state and attempts to contact any server. T1 MUST be + earlier than T2, which, in turn, MUST be earlier than the time at + which the client's lease will expire. + + To avoid the need for synchronized clocks, T1 and T2 are expressed in + options as relative times [2]. 
+ + At time T1 the client moves to RENEWING state and sends (via unicast) + a DHCPREQUEST message to the server to extend its lease. The client + sets the 'ciaddr' field in the DHCPREQUEST to its current network + address. The client records the local time at which the DHCPREQUEST + message is sent for computation of the lease expiration time. The + client MUST NOT include a 'server identifier' in the DHCPREQUEST + message. + + Any DHCPACK messages that arrive with an 'xid' that does not match + the 'xid' of the client's DHCPREQUEST message are silently discarded. + When the client receives a DHCPACK from the server, the client + computes the lease expiration time as the sum of the time at which + the client sent the DHCPREQUEST message and the duration of the lease + in the DHCPACK message. The client has successfully reacquired its + network address, returns to BOUND state and may continue network + processing. + + + +Droms Standards Track [Page 40] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + If no DHCPACK arrives before time T2, the client moves to REBINDING + state and sends (via broadcast) a DHCPREQUEST message to extend its + lease. The client sets the 'ciaddr' field in the DHCPREQUEST to its + current network address. The client MUST NOT include a 'server + identifier' in the DHCPREQUEST message. + + Times T1 and T2 are configurable by the server through options. T1 + defaults to (0.5 * duration_of_lease). T2 defaults to (0.875 * + duration_of_lease). Times T1 and T2 SHOULD be chosen with some + random "fuzz" around a fixed value, to avoid synchronization of + client reacquisition. + + A client MAY choose to renew or extend its lease prior to T1. The + server MAY choose to extend the client's lease according to policy + set by the network administrator. The server SHOULD return T1 and + T2, and their values SHOULD be adjusted from their original values to + take account of the time remaining on the lease. 
+ + In both RENEWING and REBINDING states, if the client receives no + response to its DHCPREQUEST message, the client SHOULD wait one-half + of the remaining time until T2 (in RENEWING state) and one-half of + the remaining lease time (in REBINDING state), down to a minimum of + 60 seconds, before retransmitting the DHCPREQUEST message. + + If the lease expires before the client receives a DHCPACK, the client + moves to INIT state, MUST immediately stop any other network + processing and requests network initialization parameters as if the + client were uninitialized. If the client then receives a DHCPACK + allocating that client its previous network address, the client + SHOULD continue network processing. If the client is given a new + network address, it MUST NOT continue using the previous network + address and SHOULD notify the local users of the problem. + +4.4.6 DHCPRELEASE + + If the client no longer requires use of its assigned network address + (e.g., the client is gracefully shut down), the client sends a + DHCPRELEASE message to the server. Note that the correct operation + of DHCP does not depend on the transmission of DHCPRELEASE messages. + + + + + + + + + + + + +Droms Standards Track [Page 41] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +5. Acknowledgments + + The author thanks the many (and too numerous to mention!) members of + the DHC WG for their tireless and ongoing efforts in the development + of DHCP and this document. + + The efforts of J Allard, Mike Carney, Dave Lapp, Fred Lien and John + Mendonca in organizing DHCP interoperability testing sessions are + gratefully acknowledged. + + The development of this document was supported in part by grants from + the Corporation for National Research Initiatives (CNRI), Bucknell + University and Sun Microsystems. + +6. References + + [1] Acetta, M., "Resource Location Protocol", RFC 887, CMU, December + 1983. + + [2] Alexander, S., and R. 
Droms, "DHCP Options and BOOTP Vendor + Extensions", RFC 1533, Lachman Technology, Inc., Bucknell + University, October 1993. + + [3] Braden, R., Editor, "Requirements for Internet Hosts -- + Communication Layers", STD 3, RFC 1122, USC/Information Sciences + Institute, October 1989. + + [4] Braden, R., Editor, "Requirements for Internet Hosts -- + Application and Support, STD 3, RFC 1123, USC/Information + Sciences Institute, October 1989. + + [5] Brownell, D, "Dynamic Reverse Address Resolution Protocol + (DRARP)", Work in Progress. + + [6] Comer, D., and R. Droms, "Uniform Access to Internet Directory + Services", Proc. of ACM SIGCOMM '90 (Special issue of Computer + Communications Review), 20(4):50--59, 1990. + + [7] Croft, B., and J. Gilmore, "Bootstrap Protocol (BOOTP)", RFC 951, + Stanford and SUN Microsystems, September 1985. + + [8] Deering, S., "ICMP Router Discovery Messages", RFC 1256, Xerox + PARC, September 1991. + + [9] Droms, D., "Interoperation between DHCP and BOOTP", RFC 1534, + Bucknell University, October 1993. + + + + + +Droms Standards Track [Page 42] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + [10] Finlayson, R., Mann, T., Mogul, J., and M. Theimer, "A Reverse + Address Resolution Protocol", RFC 903, Stanford, June 1984. + + [11] Gray C., and D. Cheriton, "Leases: An Efficient Fault-Tolerant + Mechanism for Distributed File Cache Consistency", In Proc. of + the Twelfth ACM Symposium on Operating Systems Design, 1989. + + [12] Mockapetris, P., "Domain Names -- Concepts and Facilities", STD + 13, RFC 1034, USC/Information Sciences Institute, November 1987. + + [13] Mockapetris, P., "Domain Names -- Implementation and + Specification", STD 13, RFC 1035, USC/Information Sciences + Institute, November 1987. + + [14] Mogul J., and S. Deering, "Path MTU Discovery", RFC 1191, + November 1990. + + [15] Morgan, R., "Dynamic IP Address Assignment for Ethernet Attached + Hosts", Work in Progress. 
+ + [16] Postel, J., "Internet Control Message Protocol", STD 5, RFC 792, + USC/Information Sciences Institute, September 1981. + + [17] Reynolds, J., "BOOTP Vendor Information Extensions", RFC 1497, + USC/Information Sciences Institute, August 1993. + + [18] Reynolds, J., and J. Postel, "Assigned Numbers", STD 2, RFC 1700, + USC/Information Sciences Institute, October 1994. + + [19] Jeffrey Schiller and Mark Rosenstein. A Protocol for the Dynamic + Assignment of IP Addresses for use on an Ethernet. (Available + from the Athena Project, MIT), 1989. + + [20] Sollins, K., "The TFTP Protocol (Revision 2)", RFC 783, NIC, + June 1981. + + [21] Wimer, W., "Clarifications and Extensions for the Bootstrap + Protocol", RFC 1542, Carnegie Mellon University, October 1993. + +7. Security Considerations + + DHCP is built directly on UDP and IP which are as yet inherently + insecure. Furthermore, DHCP is generally intended to make + maintenance of remote and/or diskless hosts easier. While perhaps + not impossible, configuring such hosts with passwords or keys may be + difficult and inconvenient. Therefore, DHCP in its current form is + quite insecure. + + + + +Droms Standards Track [Page 43] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + + Unauthorized DHCP servers may be easily set up. Such servers can + then send false and potentially disruptive information to clients + such as incorrect or duplicate IP addresses, incorrect routing + information (including spoof routers, etc.), incorrect domain + nameserver addresses (such as spoof nameservers), and so on. + Clearly, once this seed information is in place, an attacker can + further compromise affected systems. + + Malicious DHCP clients could masquerade as legitimate clients and + retrieve information intended for those legitimate clients. Where + dynamic allocation of resources is used, a malicious client could + claim all resources for itself, thereby denying resources to + legitimate clients. + +8. 
Author's Address + + Ralph Droms + Computer Science Department + 323 Dana Engineering + Bucknell University + Lewisburg, PA 17837 + + Phone: (717) 524-1145 + EMail: droms@bucknell.edu + + + + + + + + + + + + + + + + + + + + + + + + + + + +Droms Standards Track [Page 44] + +RFC 2131 Dynamic Host Configuration Protocol March 1997 + + +A. Host Configuration Parameters + + IP-layer_parameters,_per_host:_ + + Be a router on/off HRC 3.1 + Non-local source routing on/off HRC 3.3.5 + Policy filters for + non-local source routing (list) HRC 3.3.5 + Maximum reassembly size integer HRC 3.3.2 + Default TTL integer HRC 3.2.1.7 + PMTU aging timeout integer MTU 6.6 + MTU plateau table (list) MTU 7 + IP-layer_parameters,_per_interface:_ + IP address (address) HRC 3.3.1.6 + Subnet mask (address mask) HRC 3.3.1.6 + MTU integer HRC 3.3.3 + All-subnets-MTU on/off HRC 3.3.3 + Broadcast address flavor 0x00000000/0xffffffff HRC 3.3.6 + Perform mask discovery on/off HRC 3.2.2.9 + Be a mask supplier on/off HRC 3.2.2.9 + Perform router discovery on/off RD 5.1 + Router solicitation address (address) RD 5.1 + Default routers, list of: + router address (address) HRC 3.3.1.6 + preference level integer HRC 3.3.1.6 + Static routes, list of: + destination (host/subnet/net) HRC 3.3.1.2 + destination mask (address mask) HRC 3.3.1.2 + type-of-service integer HRC 3.3.1.2 + first-hop router (address) HRC 3.3.1.2 + ignore redirects on/off HRC 3.3.1.2 + PMTU integer MTU 6.6 + perform PMTU discovery on/off MTU 6.6 + + Link-layer_parameters,_per_interface:_ + Trailers on/off HRC 2.3.1 + ARP cache timeout integer HRC 2.3.2.1 + Ethernet encapsulation (RFC 894/RFC 1042) HRC 2.3.3 + + TCP_parameters,_per_host:_ + TTL integer HRC 4.2.2.19 + Keep-alive interval integer HRC 4.2.3.6 + Keep-alive data size 0/1 HRC 4.2.3.6 + +Key: + + MTU = Path MTU Discovery (RFC 1191, Proposed Standard) + RD = Router Discovery (RFC 1256, Proposed Standard) + + + +Droms Standards Track [Page 45] + diff --git a/rfc/rfc2132.txt 
b/rfc/rfc2132.txt new file mode 100644 index 00000000..e9c4f4b3 --- /dev/null +++ b/rfc/rfc2132.txt @@ -0,0 +1,1907 @@ + + + + + + +Network Working Group S. Alexander +Request for Comments: 2132 Silicon Graphics, Inc. +Obsoletes: 1533 R. Droms +Category: Standards Track Bucknell University + March 1997 + + DHCP Options and BOOTP Vendor Extensions + +Status of this memo + + This document specifies an Internet standards track protocol for the + Internet community, and requests discussion and suggestions for + improvements. Please refer to the current edition of the "Internet + Official Protocol Standards" (STD 1) for the standardization state + and status of this protocol. Distribution of this memo is unlimited. + +Abstract + + The Dynamic Host Configuration Protocol (DHCP) [1] provides a + framework for passing configuration information to hosts on a TCP/IP + network. Configuration parameters and other control information are + carried in tagged data items that are stored in the 'options' field + of the DHCP message. The data items themselves are also called + "options." + + This document specifies the current set of DHCP options. Future + options will be specified in separate RFCs. The current list of + valid options is also available in ftp://ftp.isi.edu/in- + notes/iana/assignments [22]. + + All of the vendor information extensions defined in RFC 1497 [2] may + be used as DHCP options. The definitions given in RFC 1497 are + included in this document, which supersedes RFC 1497. All of the + DHCP options defined in this document, except for those specific to + DHCP as defined in section 9, may be used as BOOTP vendor information + extensions. + +Table of Contents + + 1. Introduction .............................................. 2 + 2. BOOTP Extension/DHCP Option Field Format .................. 4 + 3. RFC 1497 Vendor Extensions ................................ 5 + 4. IP Layer Parameters per Host .............................. 11 + 5. 
IP Layer Parameters per Interface ........................ 13 + 6. Link Layer Parameters per Interface ....................... 16 + 7. TCP Parameters ............................................ 17 + 8. Application and Service Parameters ........................ 18 + 9. DHCP Extensions ........................................... 25 + + + +Alexander & Droms Standards Track [Page 1] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + 10. Defining new extensions ................................... 31 + 11. Acknowledgements .......................................... 31 + 12. References ................................................ 32 + 13. Security Considerations ................................... 33 + 14. Authors' Addresses ........................................ 34 + +1. Introduction + + This document specifies options for use with both the Dynamic Host + Configuration Protocol and the Bootstrap Protocol. + + The full description of DHCP packet formats may be found in the DHCP + specification document [1], and the full description of BOOTP packet + formats may be found in the BOOTP specification document [3]. This + document defines the format of information in the last field of DHCP + packets ('options') and of BOOTP packets ('vend'). The remainder of + this section defines a generalized use of this area for giving + information useful to a wide class of machines, operating systems and + configurations. Sites with a single DHCP or BOOTP server that is + shared among heterogeneous clients may choose to define other, site- + specific formats for the use of the 'options' field. + + Section 2 of this memo describes the formats of DHCP options and + BOOTP vendor extensions. Section 3 describes options defined in + previous documents for use with BOOTP (all may also be used with + DHCP). Sections 4-8 define new options intended for use with both + DHCP and BOOTP. Section 9 defines options used only in DHCP. 
+ + References further describing most of the options defined in sections + 2-6 can be found in section 12. The use of the options defined in + section 9 is described in the DHCP specification [1]. + + Information on registering new options is contained in section 10. + + This document updates the definition of DHCP/BOOTP options that + appears in RFC1533. The classing mechanism has been extended to + include vendor classes as described in section 8.4 and 9.13. The new + procedure for defining new DHCP/BOOTP options in described in section + 10. Several new options, including NIS+ domain and servers, Mobile + IP home agent, SMTP server, TFTP server and Bootfile server, have + been added. Text giving definitions used throughout the document has + been added in section 1.1. Text emphasizing the need for uniqueness + of client-identifiers has been added to section 9.14. + + + + + + + + +Alexander & Droms Standards Track [Page 2] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + +1.1 Requirements + + Throughout this document, the words that are used to define the + significance of particular requirements are capitalized. These words + are: + + o "MUST" + + This word or the adjective "REQUIRED" means that the item is an + absolute requirement of this specification. + + o "MUST NOT" + + This phrase means that the item is an absolute prohibition of + this specification. + + o "SHOULD" + + This word or the adjective "RECOMMENDED" means that there may + exist valid reasons in particular circumstances to ignore this + item, but the full implications should be understood and the case + carefully weighed before choosing a different course. + + o "SHOULD NOT" + + This phrase means that there may exist valid reasons in + particular circumstances when the listed behavior is acceptable + or even useful, but the full implications should be understood + and the case carefully weighed before implementing any behavior + described with this label. 
+ + o "MAY" + + This word or the adjective "OPTIONAL" means that this item is + truly optional. One vendor may choose to include the item + because a particular marketplace requires it or because it + enhances the product, for example; another vendor may omit the + same item. + +1.2 Terminology + + This document uses the following terms: + + o "DHCP client" + + A DHCP client or "client" is an Internet host using DHCP to + obtain configuration parameters such as a network address. + + + + +Alexander & Droms Standards Track [Page 3] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + o "DHCP server" + + A DHCP server of "server"is an Internet host that returns + configuration parameters to DHCP clients. + + o "binding" + + A binding is a collection of configuration parameters, including + at least an IP address, associated with or "bound to" a DHCP + client. Bindings are managed by DHCP servers. + +2. BOOTP Extension/DHCP Option Field Format + + + DHCP options have the same format as the BOOTP 'vendor extensions' + defined in RFC 1497 [2]. Options may be fixed length or variable + length. All options begin with a tag octet, which uniquely + identifies the option. Fixed-length options without data consist of + only a tag octet. Only options 0 and 255 are fixed length. All + other options are variable-length with a length octet following the + tag octet. The value of the length octet does not include the two + octets specifying the tag and length. The length octet is followed + by "length" octets of data. Options containing NVT ASCII data SHOULD + NOT include a trailing NULL; however, the receiver of such options + MUST be prepared to delete trailing nulls if they exist. The + receiver MUST NOT require that a trailing null be included in the + data. In the case of some variable-length options the length field + is a constant but must still be specified. 
+ + Any options defined subsequent to this document MUST contain a length + octet even if the length is fixed or zero. + + All multi-octet quantities are in network byte-order. + + When used with BOOTP, the first four octets of the vendor information + field have been assigned to the "magic cookie" (as suggested in RFC + 951). This field identifies the mode in which the succeeding data is + to be interpreted. The value of the magic cookie is the 4 octet + dotted decimal 99.130.83.99 (or hexadecimal number 63.82.53.63) in + network byte order. + + All of the "vendor extensions" defined in RFC 1497 are also DHCP + options. + + Option codes 128 to 254 (decimal) are reserved for site-specific + options. + + + + + +Alexander & Droms Standards Track [Page 4] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + Except for the options in section 9, all options may be used with + either DHCP or BOOTP. + + Many of these options have their default values specified in other + documents. In particular, RFC 1122 [4] specifies default values for + most IP and TCP configuration parameters. + + Many options supply one or more 32-bit IP address. Use of IP + addresses rather than fully-qualified Domain Names (FQDNs) may make + future renumbering of IP hosts more difficult. Use of these + addresses is discouraged at sites that may require renumbering. + +3. RFC 1497 Vendor Extensions + + This section lists the vendor extensions as defined in RFC 1497. + They are defined here for completeness. + +3.1. Pad Option + + The pad option can be used to cause subsequent fields to align on + word boundaries. + + The code for the pad option is 0, and its length is 1 octet. + + Code + +-----+ + | 0 | + +-----+ + +3.2. End Option + + The end option marks the end of valid information in the vendor + field. Subsequent octets should be filled with pad options. + + The code for the end option is 255, and its length is 1 octet. + + Code + +-----+ + | 255 | + +-----+ + +3.3. 
Subnet Mask + + The subnet mask option specifies the client's subnet mask as per RFC + 950 [5]. + + If both the subnet mask and the router option are specified in a DHCP + reply, the subnet mask option MUST be first. + + + +Alexander & Droms Standards Track [Page 5] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The code for the subnet mask option is 1, and its length is 4 octets. + + Code Len Subnet Mask + +-----+-----+-----+-----+-----+-----+ + | 1 | 4 | m1 | m2 | m3 | m4 | + +-----+-----+-----+-----+-----+-----+ + +3.4. Time Offset + + The time offset field specifies the offset of the client's subnet in + seconds from Coordinated Universal Time (UTC). The offset is + expressed as a two's complement 32-bit integer. A positive offset + indicates a location east of the zero meridian and a negative offset + indicates a location west of the zero meridian. + + The code for the time offset option is 2, and its length is 4 octets. + + Code Len Time Offset + +-----+-----+-----+-----+-----+-----+ + | 2 | 4 | n1 | n2 | n3 | n4 | + +-----+-----+-----+-----+-----+-----+ + +3.5. Router Option + + The router option specifies a list of IP addresses for routers on the + client's subnet. Routers SHOULD be listed in order of preference. + + The code for the router option is 3. The minimum length for the + router option is 4 octets, and the length MUST always be a multiple + of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 3 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +3.6. Time Server Option + + The time server option specifies a list of RFC 868 [6] time servers + available to the client. Servers SHOULD be listed in order of + preference. + + The code for the time server option is 4. The minimum length for + this option is 4 octets, and the length MUST always be a multiple of + 4. 
+ + + + + + +Alexander & Droms Standards Track [Page 6] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 4 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +3.7. Name Server Option + + The name server option specifies a list of IEN 116 [7] name servers + available to the client. Servers SHOULD be listed in order of + preference. + + The code for the name server option is 5. The minimum length for + this option is 4 octets, and the length MUST always be a multiple of + 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 5 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +3.8. Domain Name Server Option + + The domain name server option specifies a list of Domain Name System + (STD 13, RFC 1035 [8]) name servers available to the client. Servers + SHOULD be listed in order of preference. + + The code for the domain name server option is 6. The minimum length + for this option is 4 octets, and the length MUST always be a multiple + of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 6 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +3.9. Log Server Option + + The log server option specifies a list of MIT-LCS UDP log servers + available to the client. Servers SHOULD be listed in order of + preference. + + The code for the log server option is 7. The minimum length for this + option is 4 octets, and the length MUST always be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 7 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... 
+ +-----+-----+-----+-----+-----+-----+-----+-----+-- + + + +Alexander & Droms Standards Track [Page 7] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + +3.10. Cookie Server Option + + The cookie server option specifies a list of RFC 865 [9] cookie + servers available to the client. Servers SHOULD be listed in order + of preference. + + The code for the log server option is 8. The minimum length for this + option is 4 octets, and the length MUST always be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 8 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +3.11. LPR Server Option + + The LPR server option specifies a list of RFC 1179 [10] line printer + servers available to the client. Servers SHOULD be listed in order + of preference. + + The code for the LPR server option is 9. The minimum length for this + option is 4 octets, and the length MUST always be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 9 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +3.12. Impress Server Option + + The Impress server option specifies a list of Imagen Impress servers + available to the client. Servers SHOULD be listed in order of + preference. + + The code for the Impress server option is 10. The minimum length for + this option is 4 octets, and the length MUST always be a multiple of + 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 10 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +3.13. Resource Location Server Option + + This option specifies a list of RFC 887 [11] Resource Location + servers available to the client. Servers SHOULD be listed in order + of preference. 
+ + + +Alexander & Droms Standards Track [Page 8] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The code for this option is 11. The minimum length for this option + is 4 octets, and the length MUST always be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 11 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +3.14. Host Name Option + + This option specifies the name of the client. The name may or may + not be qualified with the local domain name (see section 3.17 for the + preferred way to retrieve the domain name). See RFC 1035 for + character set restrictions. + + The code for this option is 12, and its minimum length is 1. + + Code Len Host Name + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 12 | n | h1 | h2 | h3 | h4 | h5 | h6 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +3.15. Boot File Size Option + + This option specifies the length in 512-octet blocks of the default + boot image for the client. The file length is specified as an + unsigned 16-bit integer. + + The code for this option is 13, and its length is 2. + + Code Len File Size + +-----+-----+-----+-----+ + | 13 | 2 | l1 | l2 | + +-----+-----+-----+-----+ + +3.16. Merit Dump File + + This option specifies the path-name of a file to which the client's + core image should be dumped in the event the client crashes. The + path is formatted as a character string consisting of characters from + the NVT ASCII character set. + + The code for this option is 14. Its minimum length is 1. + + Code Len Dump File Pathname + +-----+-----+-----+-----+-----+-----+--- + | 14 | n | n1 | n2 | n3 | n4 | ... + +-----+-----+-----+-----+-----+-----+--- + + + +Alexander & Droms Standards Track [Page 9] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + +3.17. 
Domain Name + + This option specifies the domain name that client should use when + resolving hostnames via the Domain Name System. + + The code for this option is 15. Its minimum length is 1. + + Code Len Domain Name + +-----+-----+-----+-----+-----+-----+-- + | 15 | n | d1 | d2 | d3 | d4 | ... + +-----+-----+-----+-----+-----+-----+-- + +3.18. Swap Server + + This specifies the IP address of the client's swap server. + + The code for this option is 16 and its length is 4. + + Code Len Swap Server Address + +-----+-----+-----+-----+-----+-----+ + | 16 | n | a1 | a2 | a3 | a4 | + +-----+-----+-----+-----+-----+-----+ + +3.19. Root Path + + This option specifies the path-name that contains the client's root + disk. The path is formatted as a character string consisting of + characters from the NVT ASCII character set. + + The code for this option is 17. Its minimum length is 1. + + Code Len Root Disk Pathname + +-----+-----+-----+-----+-----+-----+--- + | 17 | n | n1 | n2 | n3 | n4 | ... + +-----+-----+-----+-----+-----+-----+--- + +3.20. Extensions Path + + A string to specify a file, retrievable via TFTP, which contains + information which can be interpreted in the same way as the 64-octet + vendor-extension field within the BOOTP response, with the following + exceptions: + + - the length of the file is unconstrained; + - all references to Tag 18 (i.e., instances of the + BOOTP Extensions Path field) within the file are + ignored. + + + + +Alexander & Droms Standards Track [Page 10] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The code for this option is 18. Its minimum length is 1. + + Code Len Extensions Pathname + +-----+-----+-----+-----+-----+-----+--- + | 18 | n | n1 | n2 | n3 | n4 | ... + +-----+-----+-----+-----+-----+-----+--- + +4. IP Layer Parameters per Host + + This section details the options that affect the operation of the IP + layer on a per-host basis. + +4.1. 
IP Forwarding Enable/Disable Option + + This option specifies whether the client should configure its IP + layer for packet forwarding. A value of 0 means disable IP + forwarding, and a value of 1 means enable IP forwarding. + + The code for this option is 19, and its length is 1. + + Code Len Value + +-----+-----+-----+ + | 19 | 1 | 0/1 | + +-----+-----+-----+ + +4.2. Non-Local Source Routing Enable/Disable Option + + This option specifies whether the client should configure its IP + layer to allow forwarding of datagrams with non-local source routes + (see Section 3.3.5 of [4] for a discussion of this topic). A value + of 0 means disallow forwarding of such datagrams, and a value of 1 + means allow forwarding. + + The code for this option is 20, and its length is 1. + + Code Len Value + +-----+-----+-----+ + | 20 | 1 | 0/1 | + +-----+-----+-----+ + +4.3. Policy Filter Option + + This option specifies policy filters for non-local source routing. + The filters consist of a list of IP addresses and masks which specify + destination/mask pairs with which to filter incoming source routes. + + Any source routed datagram whose next-hop address does not match one + of the filters should be discarded by the client. + + + +Alexander & Droms Standards Track [Page 11] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + See [4] for further information. + + The code for this option is 21. The minimum length of this option is + 8, and the length MUST be a multiple of 8. + + Code Len Address 1 Mask 1 + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+ + | 21 | n | a1 | a2 | a3 | a4 | m1 | m2 | m3 | m4 | + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+ + Address 2 Mask 2 + +-----+-----+-----+-----+-----+-----+-----+-----+--- + | a1 | a2 | a3 | a4 | m1 | m2 | m3 | m4 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+--- + +4.4. 
Maximum Datagram Reassembly Size + + This option specifies the maximum size datagram that the client + should be prepared to reassemble. The size is specified as a 16-bit + unsigned integer. The minimum value legal value is 576. + + The code for this option is 22, and its length is 2. + + Code Len Size + +-----+-----+-----+-----+ + | 22 | 2 | s1 | s2 | + +-----+-----+-----+-----+ + +4.5. Default IP Time-to-live + + This option specifies the default time-to-live that the client should + use on outgoing datagrams. The TTL is specified as an octet with a + value between 1 and 255. + + The code for this option is 23, and its length is 1. + + Code Len TTL + +-----+-----+-----+ + | 23 | 1 | ttl | + +-----+-----+-----+ + +4.6. Path MTU Aging Timeout Option + + This option specifies the timeout (in seconds) to use when aging Path + MTU values discovered by the mechanism defined in RFC 1191 [12]. The + timeout is specified as a 32-bit unsigned integer. + + The code for this option is 24, and its length is 4. + + + + +Alexander & Droms Standards Track [Page 12] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + Code Len Timeout + +-----+-----+-----+-----+-----+-----+ + | 24 | 4 | t1 | t2 | t3 | t4 | + +-----+-----+-----+-----+-----+-----+ + +4.7. Path MTU Plateau Table Option + + This option specifies a table of MTU sizes to use when performing + Path MTU Discovery as defined in RFC 1191. The table is formatted as + a list of 16-bit unsigned integers, ordered from smallest to largest. + The minimum MTU value cannot be smaller than 68. + + The code for this option is 25. Its minimum length is 2, and the + length MUST be a multiple of 2. + + Code Len Size 1 Size 2 + +-----+-----+-----+-----+-----+-----+--- + | 25 | n | s1 | s2 | s1 | s2 | ... + +-----+-----+-----+-----+-----+-----+--- + +5. IP Layer Parameters per Interface + + This section details the options that affect the operation of the IP + layer on a per-interface basis. 
It is expected that a client can + issue multiple requests, one per interface, in order to configure + interfaces with their specific parameters. + +5.1. Interface MTU Option + + This option specifies the MTU to use on this interface. The MTU is + specified as a 16-bit unsigned integer. The minimum legal value for + the MTU is 68. + + The code for this option is 26, and its length is 2. + + Code Len MTU + +-----+-----+-----+-----+ + | 26 | 2 | m1 | m2 | + +-----+-----+-----+-----+ + +5.2. All Subnets are Local Option + + This option specifies whether or not the client may assume that all + subnets of the IP network to which the client is connected use the + same MTU as the subnet of that network to which the client is + directly connected. A value of 1 indicates that all subnets share + the same MTU. A value of 0 means that the client should assume that + some subnets of the directly connected network may have smaller MTUs. + + + +Alexander & Droms Standards Track [Page 13] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The code for this option is 27, and its length is 1. + + Code Len Value + +-----+-----+-----+ + | 27 | 1 | 0/1 | + +-----+-----+-----+ + +5.3. Broadcast Address Option + + This option specifies the broadcast address in use on the client's + subnet. Legal values for broadcast addresses are specified in + section 3.2.1.3 of [4]. + + The code for this option is 28, and its length is 4. + + Code Len Broadcast Address + +-----+-----+-----+-----+-----+-----+ + | 28 | 4 | b1 | b2 | b3 | b4 | + +-----+-----+-----+-----+-----+-----+ + +5.4. Perform Mask Discovery Option + + This option specifies whether or not the client should perform subnet + mask discovery using ICMP. A value of 0 indicates that the client + should not perform mask discovery. A value of 1 means that the + client should perform mask discovery. + + The code for this option is 29, and its length is 1. 
+ + Code Len Value + +-----+-----+-----+ + | 29 | 1 | 0/1 | + +-----+-----+-----+ + +5.5. Mask Supplier Option + + This option specifies whether or not the client should respond to + subnet mask requests using ICMP. A value of 0 indicates that the + client should not respond. A value of 1 means that the client should + respond. + + The code for this option is 30, and its length is 1. + + Code Len Value + +-----+-----+-----+ + | 30 | 1 | 0/1 | + +-----+-----+-----+ + + + + +Alexander & Droms Standards Track [Page 14] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + +5.6. Perform Router Discovery Option + + This option specifies whether or not the client should solicit + routers using the Router Discovery mechanism defined in RFC 1256 + [13]. A value of 0 indicates that the client should not perform + router discovery. A value of 1 means that the client should perform + router discovery. + + The code for this option is 31, and its length is 1. + + Code Len Value + +-----+-----+-----+ + | 31 | 1 | 0/1 | + +-----+-----+-----+ + +5.7. Router Solicitation Address Option + + This option specifies the address to which the client should transmit + router solicitation requests. + + The code for this option is 32, and its length is 4. + + Code Len Address + +-----+-----+-----+-----+-----+-----+ + | 32 | 4 | a1 | a2 | a3 | a4 | + +-----+-----+-----+-----+-----+-----+ + +5.8. Static Route Option + + This option specifies a list of static routes that the client should + install in its routing cache. If multiple routes to the same + destination are specified, they are listed in descending order of + priority. + + The routes consist of a list of IP address pairs. The first address + is the destination address, and the second address is the router for + the destination. + + The default route (0.0.0.0) is an illegal destination for a static + route. See section 3.5 for information about the router option. + + The code for this option is 33. 
The minimum length of this option is + 8, and the length MUST be a multiple of 8. + + + + + + + + +Alexander & Droms Standards Track [Page 15] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + Code Len Destination 1 Router 1 + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+ + | 33 | n | d1 | d2 | d3 | d4 | r1 | r2 | r3 | r4 | + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+ + Destination 2 Router 2 + +-----+-----+-----+-----+-----+-----+-----+-----+--- + | d1 | d2 | d3 | d4 | r1 | r2 | r3 | r4 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+--- + +6. Link Layer Parameters per Interface + + This section lists the options that affect the operation of the data + link layer on a per-interface basis. + +6.1. Trailer Encapsulation Option + + This option specifies whether or not the client should negotiate the + use of trailers (RFC 893 [14]) when using the ARP protocol. A value + of 0 indicates that the client should not attempt to use trailers. A + value of 1 means that the client should attempt to use trailers. + + The code for this option is 34, and its length is 1. + + Code Len Value + +-----+-----+-----+ + | 34 | 1 | 0/1 | + +-----+-----+-----+ + +6.2. ARP Cache Timeout Option + + This option specifies the timeout in seconds for ARP cache entries. + The time is specified as a 32-bit unsigned integer. + + The code for this option is 35, and its length is 4. + + Code Len Time + +-----+-----+-----+-----+-----+-----+ + | 35 | 4 | t1 | t2 | t3 | t4 | + +-----+-----+-----+-----+-----+-----+ + +6.3. Ethernet Encapsulation Option + + This option specifies whether or not the client should use Ethernet + Version 2 (RFC 894 [15]) or IEEE 802.3 (RFC 1042 [16]) encapsulation + if the interface is an Ethernet. A value of 0 indicates that the + client should use RFC 894 encapsulation. A value of 1 means that the + client should use RFC 1042 encapsulation. 
+ + + + +Alexander & Droms Standards Track [Page 16] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The code for this option is 36, and its length is 1. + + Code Len Value + +-----+-----+-----+ + | 36 | 1 | 0/1 | + +-----+-----+-----+ + +7. TCP Parameters + + This section lists the options that affect the operation of the TCP + layer on a per-interface basis. + +7.1. TCP Default TTL Option + + This option specifies the default TTL that the client should use when + sending TCP segments. The value is represented as an 8-bit unsigned + integer. The minimum value is 1. + + The code for this option is 37, and its length is 1. + + Code Len TTL + +-----+-----+-----+ + | 37 | 1 | n | + +-----+-----+-----+ + +7.2. TCP Keepalive Interval Option + + This option specifies the interval (in seconds) that the client TCP + should wait before sending a keepalive message on a TCP connection. + The time is specified as a 32-bit unsigned integer. A value of zero + indicates that the client should not generate keepalive messages on + connections unless specifically requested by an application. + + The code for this option is 38, and its length is 4. + + Code Len Time + +-----+-----+-----+-----+-----+-----+ + | 38 | 4 | t1 | t2 | t3 | t4 | + +-----+-----+-----+-----+-----+-----+ + +7.3. TCP Keepalive Garbage Option + + This option specifies the whether or not the client should send TCP + keepalive messages with a octet of garbage for compatibility with + older implementations. A value of 0 indicates that a garbage octet + should not be sent. A value of 1 indicates that a garbage octet + should be sent. + + + + +Alexander & Droms Standards Track [Page 17] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The code for this option is 39, and its length is 1. + + Code Len Value + +-----+-----+-----+ + | 39 | 1 | 0/1 | + +-----+-----+-----+ + +8. 
Application and Service Parameters + + This section details some miscellaneous options used to configure + miscellaneous applications and services. + +8.1. Network Information Service Domain Option + + This option specifies the name of the client's NIS [17] domain. The + domain is formatted as a character string consisting of characters + from the NVT ASCII character set. + + The code for this option is 40. Its minimum length is 1. + + Code Len NIS Domain Name + +-----+-----+-----+-----+-----+-----+--- + | 40 | n | n1 | n2 | n3 | n4 | ... + +-----+-----+-----+-----+-----+-----+--- + +8.2. Network Information Servers Option + + This option specifies a list of IP addresses indicating NIS servers + available to the client. Servers SHOULD be listed in order of + preference. + + The code for this option is 41. Its minimum length is 4, and the + length MUST be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 41 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +8.3. Network Time Protocol Servers Option + + This option specifies a list of IP addresses indicating NTP [18] + servers available to the client. Servers SHOULD be listed in order + of preference. + + The code for this option is 42. Its minimum length is 4, and the + length MUST be a multiple of 4. + + + + +Alexander & Droms Standards Track [Page 18] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 42 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +8.4. Vendor Specific Information + + This option is used by clients and servers to exchange vendor- + specific information. The information is an opaque object of n + octets, presumably interpreted by vendor-specific code on the clients + and servers. The definition of this information is vendor specific. 
+ The vendor is indicated in the vendor class identifier option. + Servers not equipped to interpret the vendor-specific information + sent by a client MUST ignore it (although it may be reported). + Clients which do not receive desired vendor-specific information + SHOULD make an attempt to operate without it, although they may do so + (and announce they are doing so) in a degraded mode. + + If a vendor potentially encodes more than one item of information in + this option, then the vendor SHOULD encode the option using + "Encapsulated vendor-specific options" as described below: + + The Encapsulated vendor-specific options field SHOULD be encoded as a + sequence of code/length/value fields of identical syntax to the DHCP + options field with the following exceptions: + + 1) There SHOULD NOT be a "magic cookie" field in the encapsulated + vendor-specific extensions field. + + 2) Codes other than 0 or 255 MAY be redefined by the vendor within + the encapsulated vendor-specific extensions field, but SHOULD + conform to the tag-length-value syntax defined in section 2. + + 3) Code 255 (END), if present, signifies the end of the + encapsulated vendor extensions, not the end of the vendor + extensions field. If no code 255 is present, then the end of + the enclosing vendor-specific information field is taken as the + end of the encapsulated vendor-specific extensions field. + + The code for this option is 43 and its minimum length is 1. + + Code Len Vendor-specific information + +-----+-----+-----+-----+--- + | 43 | n | i1 | i2 | ... + +-----+-----+-----+-----+--- + + + + + + +Alexander & Droms Standards Track [Page 19] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + When encapsulated vendor-specific extensions are used, the + information bytes 1-n have the following format: + + Code Len Data item Code Len Data item Code + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+ + | T1 | n | d1 | d2 | ... | T2 | n | D1 | D2 | ... | ... 
| + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+ + +8.5. NetBIOS over TCP/IP Name Server Option + + The NetBIOS name server (NBNS) option specifies a list of RFC + 1001/1002 [19] [20] NBNS name servers listed in order of preference. + + The code for this option is 44. The minimum length of the option is + 4 octets, and the length must always be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---- + | 44 | n | a1 | a2 | a3 | a4 | b1 | b2 | b3 | b4 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---- + +8.6. NetBIOS over TCP/IP Datagram Distribution Server Option + + The NetBIOS datagram distribution server (NBDD) option specifies a + list of RFC 1001/1002 NBDD servers listed in order of preference. The + code for this option is 45. The minimum length of the option is 4 + octets, and the length must always be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---- + | 45 | n | a1 | a2 | a3 | a4 | b1 | b2 | b3 | b4 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---- + +8.7. NetBIOS over TCP/IP Node Type Option + + The NetBIOS node type option allows NetBIOS over TCP/IP clients which + are configurable to be configured as described in RFC 1001/1002. The + value is specified as a single octet which identifies the client type + as follows: + + Value Node Type + ----- --------- + 0x1 B-node + 0x2 P-node + 0x4 M-node + 0x8 H-node + + + + + +Alexander & Droms Standards Track [Page 20] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + In the above chart, the notation '0x' indicates a number in base-16 + (hexadecimal). + + The code for this option is 46. The length of this option is always + 1. + + Code Len Node Type + +-----+-----+-----------+ + | 46 | 1 | see above | + +-----+-----+-----------+ + +8.8. 
NetBIOS over TCP/IP Scope Option + + The NetBIOS scope option specifies the NetBIOS over TCP/IP scope + parameter for the client as specified in RFC 1001/1002. See [19], + [20], and [8] for character-set restrictions. + + The code for this option is 47. The minimum length of this option is + 1. + + Code Len NetBIOS Scope + +-----+-----+-----+-----+-----+-----+---- + | 47 | n | s1 | s2 | s3 | s4 | ... + +-----+-----+-----+-----+-----+-----+---- + +8.9. X Window System Font Server Option + + This option specifies a list of X Window System [21] Font servers + available to the client. Servers SHOULD be listed in order of + preference. + + The code for this option is 48. The minimum length of this option is + 4 octets, and the length MUST be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+--- + | 48 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+--- + +8.10. X Window System Display Manager Option + + This option specifies a list of IP addresses of systems that are + running the X Window System Display Manager and are available to the + client. + + Addresses SHOULD be listed in order of preference. + + + + + +Alexander & Droms Standards Track [Page 21] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The code for the this option is 49. The minimum length of this option + is 4, and the length MUST be a multiple of 4. + + Code Len Address 1 Address 2 + + +-----+-----+-----+-----+-----+-----+-----+-----+--- + | 49 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+--- + +8.11. Network Information Service+ Domain Option + + This option specifies the name of the client's NIS+ [17] domain. The + domain is formatted as a character string consisting of characters + from the NVT ASCII character set. + + The code for this option is 64. Its minimum length is 1. 
+ + Code Len NIS Client Domain Name + +-----+-----+-----+-----+-----+-----+--- + | 64 | n | n1 | n2 | n3 | n4 | ... + +-----+-----+-----+-----+-----+-----+--- + +8.12. Network Information Service+ Servers Option + + This option specifies a list of IP addresses indicating NIS+ servers + available to the client. Servers SHOULD be listed in order of + preference. + + The code for this option is 65. Its minimum length is 4, and the + length MUST be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 65 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +8.13. Mobile IP Home Agent option + + This option specifies a list of IP addresses indicating mobile IP + home agents available to the client. Agents SHOULD be listed in + order of preference. + + The code for this option is 68. Its minimum length is 0 (indicating + no home agents are available) and the length MUST be a multiple of 4. + It is expected that the usual length will be four octets, containing + a single home agent's address. + + + + + +Alexander & Droms Standards Track [Page 22] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + Code Len Home Agent Addresses (zero or more) + +-----+-----+-----+-----+-----+-----+-- + | 68 | n | a1 | a2 | a3 | a4 | ... + +-----+-----+-----+-----+-----+-----+-- + +8.14. Simple Mail Transport Protocol (SMTP) Server Option + + The SMTP server option specifies a list of SMTP servers available to + the client. Servers SHOULD be listed in order of preference. + + The code for the SMTP server option is 69. The minimum length for + this option is 4 octets, and the length MUST always be a multiple of + 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 69 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +8.15. 
Post Office Protocol (POP3) Server Option + + The POP3 server option specifies a list of POP3 available to the + client. Servers SHOULD be listed in order of preference. + + The code for the POP3 server option is 70. The minimum length for + this option is 4 octets, and the length MUST always be a multiple of + 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 70 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +8.16. Network News Transport Protocol (NNTP) Server Option + + The NNTP server option specifies a list of NNTP available to the + client. Servers SHOULD be listed in order of preference. + + The code for the NNTP server option is 71. The minimum length for + this option is 4 octets, and the length MUST always be a multiple of + 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 71 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + + + + + +Alexander & Droms Standards Track [Page 23] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + +8.17. Default World Wide Web (WWW) Server Option + + The WWW server option specifies a list of WWW available to the + client. Servers SHOULD be listed in order of preference. + + The code for the WWW server option is 72. The minimum length for + this option is 4 octets, and the length MUST always be a multiple of + 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 72 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +8.18. Default Finger Server Option + + The Finger server option specifies a list of Finger available to the + client. Servers SHOULD be listed in order of preference. + + The code for the Finger server option is 73. The minimum length for + this option is 4 octets, and the length MUST always be a multiple of + 4. 
+ + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 73 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +8.19. Default Internet Relay Chat (IRC) Server Option + + The IRC server option specifies a list of IRC available to the + client. Servers SHOULD be listed in order of preference. + + The code for the IRC server option is 74. The minimum length for + this option is 4 octets, and the length MUST always be a multiple of + 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 74 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +8.20. StreetTalk Server Option + + The StreetTalk server option specifies a list of StreetTalk servers + available to the client. Servers SHOULD be listed in order of + preference. + + + + +Alexander & Droms Standards Track [Page 24] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The code for the StreetTalk server option is 75. The minimum length + for this option is 4 octets, and the length MUST always be a multiple + of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 75 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +8.21. StreetTalk Directory Assistance (STDA) Server Option + + The StreetTalk Directory Assistance (STDA) server option specifies a + list of STDA servers available to the client. Servers SHOULD be + listed in order of preference. + + The code for the StreetTalk Directory Assistance server option is 76. + The minimum length for this option is 4 octets, and the length MUST + always be a multiple of 4. + + Code Len Address 1 Address 2 + +-----+-----+-----+-----+-----+-----+-----+-----+-- + | 76 | n | a1 | a2 | a3 | a4 | a1 | a2 | ... + +-----+-----+-----+-----+-----+-----+-----+-----+-- + +9. 
DHCP Extensions + + This section details the options that are specific to DHCP. + +9.1. Requested IP Address + + This option is used in a client request (DHCPDISCOVER) to allow the + client to request that a particular IP address be assigned. + + The code for this option is 50, and its length is 4. + + Code Len Address + +-----+-----+-----+-----+-----+-----+ + | 50 | 4 | a1 | a2 | a3 | a4 | + +-----+-----+-----+-----+-----+-----+ + +9.2. IP Address Lease Time + + This option is used in a client request (DHCPDISCOVER or DHCPREQUEST) + to allow the client to request a lease time for the IP address. In a + server reply (DHCPOFFER), a DHCP server uses this option to specify + the lease time it is willing to offer. + + + + + +Alexander & Droms Standards Track [Page 25] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The time is in units of seconds, and is specified as a 32-bit + unsigned integer. + + The code for this option is 51, and its length is 4. + + Code Len Lease Time + +-----+-----+-----+-----+-----+-----+ + | 51 | 4 | t1 | t2 | t3 | t4 | + +-----+-----+-----+-----+-----+-----+ + +9.3. Option Overload + + This option is used to indicate that the DHCP 'sname' or 'file' + fields are being overloaded by using them to carry DHCP options. A + DHCP server inserts this option if the returned parameters will + exceed the usual space allotted for options. + + If this option is present, the client interprets the specified + additional fields after it concludes interpretation of the standard + option fields. + + The code for this option is 52, and its length is 1. 
Legal values + for this option are: + + Value Meaning + ----- -------- + 1 the 'file' field is used to hold options + 2 the 'sname' field is used to hold options + 3 both fields are used to hold options + + Code Len Value + +-----+-----+-----+ + | 52 | 1 |1/2/3| + +-----+-----+-----+ + +9.4 TFTP server name + + This option is used to identify a TFTP server when the 'sname' field + in the DHCP header has been used for DHCP options. + + The code for this option is 66, and its minimum length is 1. + + Code Len TFTP server + +-----+-----+-----+-----+-----+--- + | 66 | n | c1 | c2 | c3 | ... + +-----+-----+-----+-----+-----+--- + + + + + +Alexander & Droms Standards Track [Page 26] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + +9.5 Bootfile name + + This option is used to identify a bootfile when the 'file' field in + the DHCP header has been used for DHCP options. + + The code for this option is 67, and its minimum length is 1. + + Code Len Bootfile name + +-----+-----+-----+-----+-----+--- + | 67 | n | c1 | c2 | c3 | ... + +-----+-----+-----+-----+-----+--- + +9.6. DHCP Message Type + + This option is used to convey the type of the DHCP message. The code + for this option is 53, and its length is 1. Legal values for this + option are: + + Value Message Type + ----- ------------ + 1 DHCPDISCOVER + 2 DHCPOFFER + 3 DHCPREQUEST + 4 DHCPDECLINE + 5 DHCPACK + 6 DHCPNAK + 7 DHCPRELEASE + 8 DHCPINFORM + + Code Len Type + +-----+-----+-----+ + | 53 | 1 | 1-9 | + +-----+-----+-----+ + +9.7. Server Identifier + + This option is used in DHCPOFFER and DHCPREQUEST messages, and may + optionally be included in the DHCPACK and DHCPNAK messages. DHCP + servers include this option in the DHCPOFFER in order to allow the + client to distinguish between lease offers. DHCP clients use the + contents of the 'server identifier' field as the destination address + for any DHCP messages unicast to the DHCP server. 
DHCP clients also + indicate which of several lease offers is being accepted by including + this option in a DHCPREQUEST message. + + The identifier is the IP address of the selected server. + + The code for this option is 54, and its length is 4. + + + +Alexander & Droms Standards Track [Page 27] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + Code Len Address + +-----+-----+-----+-----+-----+-----+ + | 54 | 4 | a1 | a2 | a3 | a4 | + +-----+-----+-----+-----+-----+-----+ + +9.8. Parameter Request List + + This option is used by a DHCP client to request values for specified + configuration parameters. The list of requested parameters is + specified as n octets, where each octet is a valid DHCP option code + as defined in this document. + + The client MAY list the options in order of preference. The DHCP + server is not required to return the options in the requested order, + but MUST try to insert the requested options in the order requested + by the client. + + The code for this option is 55. Its minimum length is 1. + + Code Len Option Codes + +-----+-----+-----+-----+--- + | 55 | n | c1 | c2 | ... + +-----+-----+-----+-----+--- + +9.9. Message + + This option is used by a DHCP server to provide an error message to a + DHCP client in a DHCPNAK message in the event of a failure. A client + may use this option in a DHCPDECLINE message to indicate the why the + client declined the offered parameters. The message consists of n + octets of NVT ASCII text, which the client may display on an + available output device. + + The code for this option is 56 and its minimum length is 1. + + Code Len Text + +-----+-----+-----+-----+--- + | 56 | n | c1 | c2 | ... + +-----+-----+-----+-----+--- + +9.10. Maximum DHCP Message Size + + This option specifies the maximum length DHCP message that it is + willing to accept. The length is specified as an unsigned 16-bit + integer. 
A client may use the maximum DHCP message size option in + DHCPDISCOVER or DHCPREQUEST messages, but should not use the option + in DHCPDECLINE messages. + + + + +Alexander & Droms Standards Track [Page 28] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The code for this option is 57, and its length is 2. The minimum + legal value is 576 octets. + + Code Len Length + +-----+-----+-----+-----+ + | 57 | 2 | l1 | l2 | + +-----+-----+-----+-----+ + +9.11. Renewal (T1) Time Value + + This option specifies the time interval from address assignment until + the client transitions to the RENEWING state. + + The value is in units of seconds, and is specified as a 32-bit + unsigned integer. + + The code for this option is 58, and its length is 4. + + Code Len T1 Interval + +-----+-----+-----+-----+-----+-----+ + | 58 | 4 | t1 | t2 | t3 | t4 | + +-----+-----+-----+-----+-----+-----+ + +9.12. Rebinding (T2) Time Value + + This option specifies the time interval from address assignment until + the client transitions to the REBINDING state. + + The value is in units of seconds, and is specified as a 32-bit + unsigned integer. + + The code for this option is 59, and its length is 4. + + Code Len T2 Interval + +-----+-----+-----+-----+-----+-----+ + | 59 | 4 | t1 | t2 | t3 | t4 | + +-----+-----+-----+-----+-----+-----+ + +9.13. Vendor class identifier + + This option is used by DHCP clients to optionally identify the vendor + type and configuration of a DHCP client. The information is a string + of n octets, interpreted by servers. Vendors may choose to define + specific vendor class identifiers to convey particular configuration + or other identification information about a client. For example, the + identifier may encode the client's hardware configuration. Servers + not equipped to interpret the class-specific information sent by a + client MUST ignore it (although it may be reported). 
Servers that + + + +Alexander & Droms Standards Track [Page 29] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + respond SHOULD only use option 43 to return the vendor-specific + information to the client. + + The code for this option is 60, and its minimum length is 1. + + Code Len Vendor class Identifier + +-----+-----+-----+-----+--- + | 60 | n | i1 | i2 | ... + +-----+-----+-----+-----+--- + +9.14. Client-identifier + + This option is used by DHCP clients to specify their unique + identifier. DHCP servers use this value to index their database of + address bindings. This value is expected to be unique for all + clients in an administrative domain. + + Identifiers SHOULD be treated as opaque objects by DHCP servers. + + The client identifier MAY consist of type-value pairs similar to the + 'htype'/'chaddr' fields defined in [3]. For instance, it MAY consist + of a hardware type and hardware address. In this case the type field + SHOULD be one of the ARP hardware types defined in STD2 [22]. A + hardware type of 0 (zero) should be used when the value field + contains an identifier other than a hardware address (e.g. a fully + qualified domain name). + + For correct identification of clients, each client's client- + identifier MUST be unique among the client-identifiers used on the + subnet to which the client is attached. Vendors and system + administrators are responsible for choosing client-identifiers that + meet this requirement for uniqueness. + + The code for this option is 61, and its minimum length is 2. + + Code Len Type Client-Identifier + +-----+-----+-----+-----+-----+--- + | 61 | n | t1 | i1 | i2 | ... + +-----+-----+-----+-----+-----+--- + + + + + + + + + + + + +Alexander & Droms Standards Track [Page 30] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + +10. 
Defining new extensions + + The author of a new DHCP option will follow these steps to obtain + acceptance of the option as a part of the DHCP Internet Standard: + + 1. The author devises the new option. + 2. The author requests a number for the new option from IANA by + contacting: + Internet Assigned Numbers Authority (IANA) + USC/Information Sciences Institute + 4676 Admiralty Way + Marina del Rey, California 90292-6695 + + or by email as: iana@iana.org + + 3. The author documents the new option, using the newly obtained + option number, as an Internet Draft. + 4. The author submits the Internet Draft for review through the IETF + standards process as defined in "Internet Official Protocol + Standards" (STD 1). The new option will be submitted for eventual + acceptance as an Internet Standard. + 5. The new option progresses through the IETF standards process; the + new option will be reviewed by the Dynamic Host Configuration + Working Group (if that group still exists), or as an Internet + Draft not submitted by an IETF working group. + 6. If the new option fails to gain acceptance as an Internet + Standard, the assigned option number will be returned to IANA for + reassignment. + + This procedure for defining new extensions will ensure that: + + * allocation of new option numbers is coordinated from a single + authority, + * new options are reviewed for technical correctness and + appropriateness, and + * documentation for new options is complete and published. + +11. Acknowledgements + + The author thanks the many (and too numerous to mention!) members of + the DHC WG for their tireless and ongoing efforts in the development + of DHCP and this document. + + The efforts of J Allard, Mike Carney, Dave Lapp, Fred Lien and John + Mendonca in organizing DHCP interoperability testing sessions are + gratefully acknowledged. 
+ + + + + +Alexander & Droms Standards Track [Page 31] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + The development of this document was supported in part by grants from + the Corporation for National Research Initiatives (CNRI), Bucknell + University and Sun Microsystems. + +12. References + + [1] Droms, R., "Dynamic Host Configuration Protocol", RFC 2131, + Bucknell University, March 1997. + + [2] Reynolds, J., "BOOTP Vendor Information Extensions", RFC 1497, + USC/Information Sciences Institute, August 1993. + + [3] Croft, W., and J. Gilmore, "Bootstrap Protocol", RFC 951, + Stanford University and Sun Microsystems, September 1985. + + [4] Braden, R., Editor, "Requirements for Internet Hosts - + Communication Layers", STD 3, RFC 1122, USC/Information Sciences + Institute, October 1989. + + [5] Mogul, J., and J. Postel, "Internet Standard Subnetting + Procedure", STD 5, RFC 950, USC/Information Sciences Institute, + August 1985. + + [6] Postel, J., and K. Harrenstien, "Time Protocol", STD 26, RFC + 868, USC/Information Sciences Institute, SRI, May 1983. + + [7] Postel, J., "Name Server", IEN 116, USC/Information Sciences + Institute, August 1979. + + [8] Mockapetris, P., "Domain Names - Implementation and + Specification", STD 13, RFC 1035, USC/Information Sciences + Institute, November 1987. + + [9] Postel, J., "Quote of the Day Protocol", STD 23, RFC 865, + USC/Information Sciences Institute, May 1983. + + [10] McLaughlin, L., "Line Printer Daemon Protocol", RFC 1179, The + Wollongong Group, August 1990. + + [11] Accetta, M., "Resource Location Protocol", RFC 887, CMU, + December 1983. + + [12] Mogul, J. and S. Deering, "Path MTU Discovery", RFC 1191, + DECWRL, Stanford University, November 1990. + + [13] Deering, S., "ICMP Router Discovery Messages", RFC 1256, + Xerox PARC, September 1991. + + + + +Alexander & Droms Standards Track [Page 32] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + + [14] Leffler, S. and M. 
Karels, "Trailer Encapsulations", RFC 893, + U. C. Berkeley, April 1984. + + [15] Hornig, C., "Standard for the Transmission of IP Datagrams over + Ethernet Networks", RFC 894, Symbolics, April 1984. + + [16] Postel, J. and J. Reynolds, "Standard for the Transmission of + IP Datagrams Over IEEE 802 Networks", RFC 1042, USC/Information + Sciences Institute, February 1988. + + [17] Sun Microsystems, "System and Network Administration", March + 1990. + + [18] Mills, D., "Internet Time Synchronization: The Network Time + Protocol", RFC 1305, UDEL, March 1992. + + [19] NetBIOS Working Group, "Protocol Standard for a NetBIOS Service + on a TCP/UDP transport: Concepts and Methods", STD 19, RFC 1001, + March 1987. + + [20] NetBIOS Working Group, "Protocol Standard for a NetBIOS Service + on a TCP/UDP transport: Detailed Specifications", STD 19, RFC + 1002, March 1987. + + [21] Scheifler, R., "FYI On the X Window System", FYI 6, RFC 1198, + MIT Laboratory for Computer Science, January 1991. + + [22] Reynolds, J., and J. Postel, "Assigned Numbers", STD 2, RFC 1700, + USC/Information Sciences Institute, July 1992. + +13. Security Considerations + + Security issues are not discussed in this memo. + + + + + + + + + + + + + + + + + + +Alexander & Droms Standards Track [Page 33] + +RFC 2132 DHCP Options and BOOTP Vendor Extensions March 1997 + + +14. Authors' Addresses + + Steve Alexander + Silicon Graphics, Inc. + 2011 N. Shoreline Boulevard + Mailstop 510 + Mountain View, CA 94043-1389 + + Phone: (415) 933-6172 + EMail: sca@engr.sgi.com + + + Ralph Droms + Bucknell University + Lewisburg, PA 17837 + + Phone: (717) 524-1145 + EMail: droms@bucknell.edu + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Alexander & Droms Standards Track [Page 34] + diff --git a/rfc/rfc3046.txt b/rfc/rfc3046.txt new file mode 100644 index 00000000..18d70144 --- /dev/null +++ b/rfc/rfc3046.txt @@ -0,0 +1,787 @@ + + + + + + +Network Working Group M. 
Patrick +Request for Comments: 3046 Motorola BCS +Category: Standards Track January 2001 + + + DHCP Relay Agent Information Option + +Status of this Memo + + This document specifies an Internet standards track protocol for the + Internet community, and requests discussion and suggestions for + improvements. Please refer to the current edition of the "Internet + Official Protocol Standards" (STD 1) for the standardization state + and status of this protocol. Distribution of this memo is unlimited. + +Copyright Notice + + Copyright (C) The Internet Society (2001). All Rights Reserved. + +Abstract + + Newer high-speed public Internet access technologies call for a + high-speed modem to have a local area network (LAN) attachment to one + or more customer premise hosts. It is advantageous to use the + Dynamic Host Configuration Protocol (DHCP) as defined in RFC 2131 to + assign customer premise host IP addresses in this environment. + However, a number of security and scaling problems arise with such + "public" DHCP use. This document describes a new DHCP option to + address these issues. This option extends the set of DHCP options as + defined in RFC 2132. + + The new option is called the Relay Agent Information option and is + inserted by the DHCP relay agent when forwarding client-originated + DHCP packets to a DHCP server. Servers recognizing the Relay Agent + Information option may use the information to implement IP address or + other parameter assignment policies. The DHCP Server echoes the + option back verbatim to the relay agent in server-to-client replies, + and the relay agent strips the option before forwarding the reply to + the client. + + The "Relay Agent Information" option is organized as a single DHCP + option that contains one or more "sub-options" that convey + information known by the relay agent. The initial sub-options are + defined for a relay agent that is co-located in a public circuit + access unit. 
These include a "circuit ID" for the incoming circuit, + and a "remote ID" which provides a trusted identifier for the remote + high-speed modem. + + + + +Patrick Standards Track [Page 1] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + +Table of Contents + + 1 Introduction........................................... 2 + 1.1 High-Speed Circuit Switched Data Networks.............. 2 + 1.2 DHCP Relay Agent in the Circuit Access Equipment....... 4 + 2.0 Relay Agent Information Option......................... 5 + 2.1 Agent Operation........................................ 6 + 2.1.1 Reforwarded DHCP requests............................ 7 + 2.2 Server Operation....................................... 7 + 3.0 Relay Agent Information Suboptions..................... 8 + 3.1 Agent Circuit ID....................................... 8 + 3.2 Agent Remote ID........................................ 9 + 4.0 Issues Resolved........................................ 9 + 5.0 Security Considerations................................ 10 + 6.0 IANA Considerations.................................... 11 + 7.0 Intellectual Property Notice........................... 12 + 8.0 References............................................. 12 + 9.0 Glossary............................................... 13 + 10.0 Author's Address...................................... 13 + 11.0 Full Copyright Statement ............................. 14 + +1 Introduction + +1.1 High-Speed Circuit Switched Data Networks + + Public Access to the Internet is usually via a circuit switched data + network. Today, this is primarily implemented with dial-up modems + connecting to a Remote Access Server. But higher speed circuit + access networks also include ISDN, ATM, Frame Relay, and Cable Data + Networks. All of these networks can be characterized as a "star" + topology where multiple users connect to a "circuit access unit" via + switched or permanent circuits. 
+ + With dial-up modems, only a single host PC attempts to connect to the + central point. The PPP protocol is widely used to assign IP + addresses to be used by the single host PC. + + The newer high-speed circuit technologies, however, frequently + provide a LAN interface (especially Ethernet) to one or more host + PCs. It is desirable to support centralized assignment of the IP + addresses of host computers connecting on such circuits via DHCP. + The DHCP server can be, but usually is not, co-implemented with the + centralized circuit concentration access device. The DHCP server is + often connected as a separate server on the "Central LAN" to which + the central access device (or devices) attach. + + + + + + +Patrick Standards Track [Page 2] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + + A common physical model for high-speed Internet circuit access is + shown in Figure 1, below. + + +---------------+ | + Central | Circuit |-- ckt 1--- Modem1-- Host-|- Host A + LAN | | Access | Lan |- Host B + | | Unit 1 | |- Host C + |-----| |-- | + | |(relay agent) |... ++---------+ | +---------------+ +| DHCP |--| +| Server | | ++---------+ | + | + | +---------------+ ++---------+ | | Circuit |-- ckt 1--- Modem2-- Host--- Host D +| Other | | | Access | Lan +| Servers |--|-----| Unit 2 | +| (Web, | | | |-- ckt 2--- Modem3-- Host--- Host E +| DNS) | | |(relay agent) |... Lan +| | +---------------+ ++---------+ + + Figure 1: DHCP High Speed Circuit Access Model + + Note that in this model, the "modem" connects to a LAN at the user + site, rather than to a single host. Multiple hosts are implemented + at this site. Although it is certainly possible to implement a full + IP router at the user site, this requires a relatively expensive + piece of equipment (compared to typical modem costs). Furthermore, a + router requires an IP address not only for every host, but for the + router itself. 
Finally, a user-side router requires a dedicated + Logical IP Subnet (LIS) for each user. While this model is + appropriate for relatively small corporate networking environments, + it is not appropriate for large, public accessed networks. In this + scenario, it is advantageous to implement an IP networking model that + does not allocate an IP address for the modem (or other networking + equipment device at the user site), and especially not an entire LIS + for the user side LAN. + + Note that using this method to obtain IP addresses means that IP + addresses can only be obtained while communication to the central + site is available. Some host lan installations may use a local DHCP + server or other methods to obtain IP addresses for in-house use. + + + + + + + +Patrick Standards Track [Page 3] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + +1.2 DHCP Relay Agent in the Circuit Access Unit + + It is desirable to use DHCP to assign the IP addresses for public + high-speed circuit access. A number of circuit access units (e.g., + RAS's, cable modem termination systems, ADSL access units, etc) + connect to a LAN (or local internet) to which is attached a DHCP + server. + + For scaling and security reasons, it is advantageous to implement a + "router hop" at the circuit access unit, much like high-capacity + RAS's do today. The circuit access equipment acts as both a router + to the circuits and as the DHCP relay agent. + + The advantages of co-locating the DHCP relay agent with the circuit + access equipment are: + + DHCP broadcast replies can be routed to only the proper circuit, + avoiding, say, the replication of the DHCP reply broadcast onto + thousands of access circuits; + + The same mechanism used to identify the remote connection of the + circuit (e.g., a user ID requested by a Remote Access Server acting + as the circuit access equipment) may be used as a host identifier by + DHCP, and used for parameter assignment. 
This includes centralized + assignment of IP addresses to hosts. This provides a secure remote + ID from a trusted source -- the relay agent. + + A number of issues arise when forwarding DHCP requests from hosts + connecting publicly accessed high-speed circuits with LAN connections + at the host. Many of these are security issues arising from DHCP + client requests from untrusted sources. How does the relay agent + know to which circuit to forward replies? How does the system + prevent DHCP IP exhaustion attacks? This is when an attacker + requests all available IP addresses from a DHCP server by sending + requests with fabricated client MAC addresses. How can an IP address + or LIS be permanently assigned to a particular user or modem? How + does one prevent "spoofing" of client identifier fields used to + assign IP addresses? How does one prevent denial of service by + "spoofing" other client's MAC addresses? + + All of these issues may be addressed by having the circuit access + equipment, which is a trusted component, add information to DHCP + client requests that it forwards to the DHCP server. + + + + + + + + +Patrick Standards Track [Page 4] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + +2.0 Relay Agent Information Option + + This document defines a new DHCP Option called the Relay Agent + Information Option. It is a "container" option for specific agent- + supplied sub-options. The format of the Relay Agent Information + option is: + + Code Len Agent Information Field + +------+------+------+------+------+------+--...-+------+ + | 82 | N | i1 | i2 | i3 | i4 | | iN | + +------+------+------+------+------+------+--...-+------+ + + The length N gives the total number of octets in the Agent + Information Field. 
The Agent Information field consists of a + sequence of SubOpt/Length/Value tuples for each sub-option, encoded + in the following manner: + + SubOpt Len Sub-option Value + +------+------+------+------+------+------+--...-+------+ + | 1 | N | s1 | s2 | s3 | s4 | | sN | + +------+------+------+------+------+------+--...-+------+ + SubOpt Len Sub-option Value + +------+------+------+------+------+------+--...-+------+ + | 2 | N | i1 | i2 | i3 | i4 | | iN | + +------+------+------+------+------+------+--...-+------+ + + No "pad" sub-option is defined, and the Information field shall NOT + be terminated with a 255 sub-option. The length N of the DHCP Agent + Information Option shall include all bytes of the sub-option + code/length/value tuples. Since at least one sub-option must be + defined, the minimum Relay Agent Information length is two (2). The + length N of the sub-options shall be the number of octets in only + that sub-option's value field. A sub-option length may be zero. The + sub-options need not appear in sub-option code order. + + The initial assignment of DHCP Relay Agent Sub-options is as follows: + + DHCP Agent Sub-Option Description + Sub-option Code + --------------- ---------------------- + 1 Agent Circuit ID Sub-option + 2 Agent Remote ID Sub-option + + + + + + + + + +Patrick Standards Track [Page 5] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + +2.1 Agent Operation + + Overall adding of the DHCP relay agent option SHOULD be configurable, + and SHOULD be disabled by default. Relay agents SHOULD have separate + configurables for each sub-option to control whether it is added to + client-to-server packets. + + A DHCP relay agent adding a Relay Agent Information field SHALL add + it as the last option (but before 'End Option' 255, if present) in + the DHCP options field of any recognized BOOTP or DHCP packet + forwarded from a client to a server. 
+ + Relay agents receiving a DHCP packet from an untrusted circuit with + giaddr set to zero (indicating that they are the first-hop router) + but with a Relay Agent Information option already present in the + packet SHALL discard the packet and increment an error count. A + trusted circuit may contain a trusted downstream (closer to client) + network element (bridge) between the relay agent and the client that + MAY add a relay agent option but not set the giaddr field. In this + case, the relay agent does NOT add a "second" relay agent option, but + forwards the DHCP packet per normal DHCP relay agent operations, + setting the giaddr field as it deems appropriate. + + The mechanisms for distinguishing between "trusted" and "untrusted" + circuits are specific to the type of circuit termination equipment, + and may involve local administration. For example, a Cable Modem + Termination System may consider upstream packets from most cable + modems as "untrusted", but an ATM switch terminating VCs switched + through a DSLAM may consider such VCs as "trusted" and accept a relay + agent option added by the DSLAM. + + Relay agents MAY have a configurable for the maximum size of the DHCP + packet to be created after appending the Agent Information option. + Packets which, after appending the Relay Agent Information option, + would exceed this configured maximum size shall be forwarded WITHOUT + adding the Agent Information option. An error counter SHOULD be + incremented in this case. In the absence of this configurable, the + agent SHALL NOT increase a forwarded DHCP packet size to exceed the + MTU of the interface on which it is forwarded. + + The Relay Agent Information option echoed by a server MUST be removed + by either the relay agent or the trusted downstream network element + which added it when forwarding a server-to-client response back to + the client. 
+ + + + + + + +Patrick Standards Track [Page 6] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + + The agent SHALL NOT add an "Option Overload" option to the packet or + use the "file" or "sname" fields for adding Relay Agent Information + option. It SHALL NOT parse or remove Relay Agent Information options + that may appear in the sname or file fields of a server-to-client + packet forwarded through the agent. + + The operation of relay agents for specific sub-options is specified + with that sub-option. + + Relay agents are NOT required to monitor or modify client-originated + DHCP packets addressed to a server unicast address. This includes + the DHCP-REQUEST sent when entering the RENEWING state. + + Relay agents MUST NOT modify DHCP packets that use the IPSEC + Authentication Header or IPSEC Encapsulating Security Payload [6]. + +2.1.1 Reforwarded DHCP requests + + A DHCP relay agent may receive a client DHCP packet forwarded from a + BOOTP/DHCP relay agent closer to the client. Such a packet will have + giaddr as non-zero, and may or may not already have a DHCP Relay + Agent option in it. + + Relay agents configured to add a Relay Agent option which receive a + client DHCP packet with a nonzero giaddr SHALL discard the packet if + the giaddr spoofs a giaddr address implemented by the local agent + itself. + + Otherwise, the relay agent SHALL forward any received DHCP packet + with a valid non-zero giaddr WITHOUT adding any relay agent options. + Per RFC 2131, it shall also NOT modify the giaddr value. + +2.2 Server Operation + + DHCP servers unaware of the Relay Agent Information option will + ignore the option upon receive and will not echo it back on + responses. This is the specified server behavior for unknown + options. + + DHCP servers claiming to support the Relay Agent Information option + SHALL echo the entire contents of the Relay Agent Information option + in all replies. 
Servers SHOULD copy the Relay Agent Information + option as the last DHCP option in the response. Servers SHALL NOT + place the echoed Relay Agent Information option in the overloaded + sname or file fields. If a server is unable to copy a full Relay + Agent Information field into a response, it SHALL send the response + without the Relay Information Field, and SHOULD increment an error + counter for the situation. + + + +Patrick Standards Track [Page 7] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + + The operation of DHCP servers for specific sub-options is specified + with that sub-option. + + Note that DHCP relay agents are not required to monitor unicast DHCP + messages sent directly between the client and server (i.e., those + that aren't sent via a relay agent). However, some relay agents MAY + choose to do such monitoring and add relay agent options. + Consequently, servers SHOULD be prepared to handle relay agent + options in unicast messages, but MUST NOT expect them to always be + there. + +3.0 Relay Agent Information Sub-options + +3.1 Agent Circuit ID Sub-option + + This sub-option MAY be added by DHCP relay agents which terminate + switched or permanent circuits. It encodes an agent-local identifier + of the circuit from which a DHCP client-to-server packet was + received. It is intended for use by agents in relaying DHCP + responses back to the proper circuit. Possible uses of this field + include: + + - Router interface number + - Switching Hub port number + - Remote Access Server port number + - Frame Relay DLCI + - ATM virtual circuit number + - Cable Data virtual circuit number + + Servers MAY use the Circuit ID for IP and other parameter assignment + policies. The Circuit ID SHOULD be considered an opaque value, with + policies based on exact string match only; that is, the Circuit ID + SHOULD NOT be internally parsed by the server. 
+ + The DHCP server SHOULD report the Agent Circuit ID value of current + leases in statistical reports (including its MIB) and in logs. Since + the Circuit ID is local only to a particular relay agent, a circuit + ID should be qualified with the giaddr value that identifies the + relay agent. + + SubOpt Len Circuit ID + +------+------+------+------+------+------+------+------+-- + | 1 | n | c1 | c2 | c3 | c4 | c5 | c6 | ... + +------+------+------+------+------+------+------+------+-- + + + + + + + +Patrick Standards Track [Page 8] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + +3.2 Agent Remote ID Sub-option + + This sub-option MAY be added by DHCP relay agents which terminate + switched or permanent circuits and have mechanisms to identify the + remote host end of the circuit. The Remote ID field may be used to + encode, for instance: + + -- a "caller ID" telephone number for dial-up connection + -- a "user name" prompted for by a Remote Access Server + -- a remote caller ATM address + -- a "modem ID" of a cable data modem + -- the remote IP address of a point-to-point link + -- a remote X.25 address for X.25 connections + + The remote ID MUST be globally unique. + + DHCP servers MAY use this option to select parameters specific to + particular users, hosts, or subscriber modems. The option SHOULD be + considered an opaque value, with policies based on exact string match + only; that is, the option SHOULD NOT be internally parsed by the + server. + + The relay agent MAY use this field in addition to or instead of the + Agent Circuit ID field to select the circuit on which to forward the + DHCP reply (e.g., Offer, Ack, or Nak). DHCP servers SHOULD report + this value in any reports or MIBs associated with a particular + client. + + SubOpt Len Agent Remote ID + +------+------+------+------+------+------+------+------+-- + | 2 | n | r1 | r2 | r3 | r4 | r5 | r6 | ... 
+ +------+------+------+------+------+------+------+------+-- + +4.0 Issues Resolved + + The DHCP relay agent option resolves several issues in an environment + in which untrusted hosts access the internet via a circuit based + public network. This resolution assumes that all DHCP protocol + traffic by the public hosts traverse the DHCP relay agent and that + the IP network between the DHCP relay agent and the DHCP server is + uncompromised. + + Broadcast Forwarding + + The circuit access equipment forwards the normally broadcasted + DHCP response only on the circuit indicated in the Agent Circuit + ID. + + + + +Patrick Standards Track [Page 9] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + + DHCP Address Exhaustion + + In general, the DHCP server may be extended to maintain a database + with the "triplet" of + + (client IP address, client MAC address, client remote ID) + + The DHCP server SHOULD implement policies that restrict the number + of IP addresses to be assigned to a single remote ID. + + Static Assignment + + The DHCP server may use the remote ID to select the IP address to + be assigned. It may permit static assignment of IP addresses to + particular remote IDs, and disallow an address request from an + unauthorized remote ID. + + IP Spoofing + + The circuit access device may associate the IP address assigned by + a DHCP server in a forwarded DHCP Ack packet with the circuit to + which it was forwarded. The circuit access device MAY prevent + forwarding of IP packets with source IP addresses -other than- + those it has associated with the receiving circuit. This prevents + simple IP spoofing attacks on the Central LAN, and IP spoofing of + other hosts. + + Client Identifier Spoofing + + By using the agent-supplied Agent Remote ID option, the untrusted + and as-yet unstandardized client identifier field need not be used + by the DHCP server. 
+ + MAC Address Spoofing + + By associating a MAC address with an Agent Remote ID, the DHCP + server can prevent offering an IP address to an attacker spoofing + the same MAC address on a different remote ID. + +5.0 Security Considerations + + DHCP as currently defined provides no authentication or security + mechanisms. Potential exposures to attack are discussed in section 7 + of the DHCP protocol specification in RFC 2131 [1]. + + This document introduces mechanisms to address several security + attacks on the operation of IP address assignment, including IP + spoofing, Client ID spoofing, MAC address spoofing, and DHCP server + + + +Patrick Standards Track [Page 10] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + + address exhaustion. It relies on an implied trusted relationship + between the DHCP Relay Agent and the DHCP server, with an assumed + untrusted DHCP client. It introduces a new identifier, the "Remote + ID", that is also assumed to be trusted. The Remote ID is provided + by the access network or modem and not by client premise equipment. + Cryptographic or other techniques to authenticate the remote ID are + certainly possible and encouraged, but are beyond the scope of this + document. + + This option is targeted towards environments in which the network + infrastructure -- the relay agent, the DHCP server, and the entire + network in which those two devices reside -- is trusted and secure. + As used in this document, the word "trusted" implies that + unauthorized DHCP traffic cannot enter the trusted network except + through secured and trusted relay agents and that all devices + internal to the network are secure and trusted. Potential deployers + of this option should give careful consideration to the potential + security vulnerabilities that are present in this model before + deploying this option in actual networks. 
+ + Note that any future mechanisms for authenticating DHCP client to + server communications must take care to omit the DHCP Relay Agent + option from server authentication calculations. This was the + principal reason for organizing the DHCP Relay Agent Option as a + single option with sub-options, and for requiring the relay agent to + remove the option before forwarding to the client. + + While it is beyond the scope of this document to specify the general + forwarding algorithm of public data circuit access units, note that + automatic reforwarding of IP or ARP broadcast packets back downstream + exposes serious IP security risks. For example, if an upstream + broadcast DHCP-DISCOVER or DHCP-REQUEST were re-broadcast back + downstream, any public host may easily spoof the desired DHCP server. + +6.0 IANA Considerations + + IANA is required to maintain a new number space of "DHCP Relay Agent + Sub-options", located in the BOOTP-DHCP Parameters Registry. The + initial sub-options are described in section 2.0 of this document. + + IANA assigns future DHCP Relay Agent Sub-options with a "IETF + Consensus" policy as described in RFC 2434 [3]. Future proposed + sub-options are to be referenced symbolically in the Internet-Drafts + that describe them, and shall be assigned numeric codes by IANA when + approved for publication as an RFC. + + + + + + +Patrick Standards Track [Page 11] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + +7.0 Intellectual Property Notices + + This section contains two notices as required by [5] for standards + track documents. + + The IETF takes no position regarding the validity or scope of any + intellectual property or other rights that might be claimed to + pertain to the implementation or use of the technology described in + this document or the extent to which any license under such rights + might or might not be available; neither does it represent that it + has made any effort to identify any such rights. 
Information on the + IETF's procedures with respect to rights in standards-track and + standards-related documentation can be found in BCP-11. Copies of + claims of rights made available for publication and any assurances of + licenses to be made available, or the result of an attempt made to + obtain a general license or permission for the use of such + proprietary rights by implementors or users of this specification can + be obtained from the IETF Secretariat. + + The IETF has been notified of intellectual property rights claimed in + regard to some or all of the specification contained in this + document. For more information consult the online list of claimed + rights. + +8.0 References + + [1] Droms, R., "Dynamic Host Configuration Protocol", RFC 2131, + March 1997. + + [2] Alexander, S. and R. Droms, "DHCP Options and BOOTP Vendor + Extension", RFC 2132, March 1997. + + [3] Narten, T. and H. Alvestrand, "Guidelines for Writing an IANA + Considerations Section in RFCs", BCP 26, RFC 2434, October 1998. + + [4] Bradner, S., "Key words for use in RFCs to Indicate Requirement + Levels", BCP 14, RFC 2119, March 1997. + + [5] Bradner, S., "The Internet Standards Process -- Revision 3", BCP + 9, RFC 2026, October 1996. + + [6] Kent, S. and R. Atkinson, "Security Architecture for the + Internet Protocol", RFC 2401, November 1998. 
+ + + + + + + + +Patrick Standards Track [Page 12] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + +9.0 Glossary + + DSLAM Digital Subscriber Link Access Multiplexer + IANA Internet Assigned Numbers Authority + LIS Logical IP Subnet + MAC Message Authentication Code + RAS Remote Access Server + +10.0 Author's Address + + Michael Patrick + Motorola Broadband Communications Sector + 20 Cabot Blvd., MS M4-30 + Mansfield, MA 02048 + + Phone: (508) 261-5707 + EMail: michael.patrick@motorola.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Patrick Standards Track [Page 13] + +RFC 3046 DHCP Relay Agent Information Option January 2001 + + +11.0 Full Copyright Statement + + Copyright (C) The Internet Society (2001). All Rights Reserved. + + This document and translations of it may be copied and furnished to + others, and derivative works that comment on or otherwise explain it + or assist in its implementation may be prepared, copied, published + and distributed, in whole or in part, without restriction of any + kind, provided that the above copyright notice and this paragraph are + included on all such copies and derivative works. However, this + document itself may not be modified in any way, such as by removing + the copyright notice or references to the Internet Society or other + Internet organizations, except as needed for the purpose of + developing Internet standards in which case the procedures for + copyrights defined in the Internet Standards process must be + followed, or as required to translate it into languages other than + English. + + The limited permissions granted above are perpetual and will not be + revoked by the Internet Society or its successors or assigns. 
+ + This document and the information contained herein is provided on an + "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING + TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING + BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION + HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + +Acknowledgement + + Funding for the RFC Editor function is currently provided by the + Internet Society. + + + + + + + + + + + + + + + + + + + +Patrick Standards Track [Page 14] + |