Diffstat (limited to 'src/conf_mode')
-rwxr-xr-x | src/conf_mode/container.py | 21
-rwxr-xr-x | src/conf_mode/interfaces_openvpn.py | 8
-rwxr-xr-x | src/conf_mode/interfaces_tunnel.py | 19
-rwxr-xr-x | src/conf_mode/load-balancing_reverse-proxy.py | 86
-rwxr-xr-x | src/conf_mode/nat.py | 18
-rwxr-xr-x | src/conf_mode/nat64.py | 10
-rwxr-xr-x | src/conf_mode/nat66.py | 22
-rwxr-xr-x | src/conf_mode/nat_cgnat.py | 125
-rwxr-xr-x | src/conf_mode/protocols_bfd.py | 2
-rwxr-xr-x | src/conf_mode/qos.py | 77
-rwxr-xr-x | src/conf_mode/service_dhcpv6-server.py | 8
-rwxr-xr-x | src/conf_mode/service_dns_forwarding.py | 15
-rwxr-xr-x | src/conf_mode/service_suricata.py | 161
-rwxr-xr-x | src/conf_mode/service_upnp.py | 157 |
14 files changed, 460 insertions, 269 deletions
diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py index a73a18ffa..3efeb9b40 100755 --- a/src/conf_mode/container.py +++ b/src/conf_mode/container.py @@ -16,6 +16,7 @@ import os +from decimal import Decimal from hashlib import sha256 from ipaddress import ip_address from ipaddress import ip_network @@ -28,6 +29,7 @@ from vyos.configdict import node_changed from vyos.configdict import is_node_changed from vyos.configverify import verify_vrf from vyos.ifconfig import Interface +from vyos.cpu import get_core_count from vyos.utils.file import write_file from vyos.utils.process import call from vyos.utils.process import cmd @@ -127,6 +129,11 @@ def verify(container): f'locally. Please use "add container image {image}" to add it '\ f'to the system! Container "{name}" will not be started!') + if 'cpu_quota' in container_config: + cores = get_core_count() + if Decimal(container_config['cpu_quota']) > cores: + raise ConfigError(f'Cannot set limit to more cores than available "{name}"!') + if 'network' in container_config: if len(container_config['network']) > 1: raise ConfigError(f'Only one network can be specified for container "{name}"!') @@ -257,6 +264,7 @@ def verify(container): def generate_run_arguments(name, container_config): image = container_config['image'] + cpu_quota = container_config['cpu_quota'] memory = container_config['memory'] shared_memory = container_config['shared_memory'] restart = container_config['restart'] @@ -329,9 +337,13 @@ def generate_run_arguments(name, container_config): prop = vol_config['propagation'] volume += f' --volume {svol}:{dvol}:{mode},{prop}' - container_base_cmd = f'--detach --interactive --tty --replace {capabilities} ' \ + host_pid = '' + if 'allow_host_pid' in container_config: + host_pid = '--pid host' + + container_base_cmd = f'--detach --interactive --tty --replace {capabilities} --cpus {cpu_quota} ' \ f'--memory {memory}m --shm-size {shared_memory}m --memory-swap 0 --restart {restart} ' \ - f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label} {uid}' + f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label} {uid} {host_pid}' entrypoint = '' if 'entrypoint' in container_config: @@ -339,11 +351,6 @@ def generate_run_arguments(name, container_config): entrypoint = json_write(container_config['entrypoint'].split()).replace('"', """) entrypoint = f'--entrypoint '{entrypoint}'' - hostname = '' - if 'host_name' in container_config: - hostname = container_config['host_name'] - hostname = f'--hostname {hostname}' - command = '' if 'command' in container_config: command = container_config['command'].strip() diff --git a/src/conf_mode/interfaces_openvpn.py b/src/conf_mode/interfaces_openvpn.py index 0ecffd3be..627cc90ba 100755 --- a/src/conf_mode/interfaces_openvpn.py +++ b/src/conf_mode/interfaces_openvpn.py @@ -168,6 +168,14 @@ def verify_pki(openvpn): 'verification, consult the documentation for details.') if tls: + if mode == 'site-to-site': + # XXX: site-to-site with PSKs is the only mode that can work without TLS, + # so 'tls role' is not mandatory for it, + # but we need to check that if it uses peer certificate fingerprints rather than PSKs, + # then the TLS role is set + if ('shared_secret_key' not in tls) and ('role' not in tls): + raise ConfigError('"tls role" is required for site-to-site OpenVPN with TLS') + if (mode in ['server', 'client']) and ('ca_certificate' not in tls): raise ConfigError(f'Must specify "tls ca-certificate" on openvpn interface {interface},\ it is required in server and client 
modes') diff --git a/src/conf_mode/interfaces_tunnel.py b/src/conf_mode/interfaces_tunnel.py index 43ba72857..98ef98d12 100755 --- a/src/conf_mode/interfaces_tunnel.py +++ b/src/conf_mode/interfaces_tunnel.py @@ -145,11 +145,20 @@ def verify(tunnel): # If no IP GRE key is defined we can not have more then one GRE tunnel # bound to any one interface/IP address and the same remote. This will # result in a OS PermissionError: add tunnel "gre0" failed: File exists - if (their_address == our_address or our_source_if == their_source_if) and \ - our_remote == their_remote: - raise ConfigError(f'Missing required "ip key" parameter when '\ - 'running more then one GRE based tunnel on the '\ - 'same source-interface/source-address') + if our_remote == their_remote: + if our_address is not None and their_address == our_address: + # If set to the same values, this is always a fail + raise ConfigError(f'Missing required "ip key" parameter when '\ + 'running more then one GRE based tunnel on the '\ + 'same source-address') + + if their_source_if == our_source_if and their_address == our_address: + # Note that lack of None check on these is deliberate. + # source-if and source-ip matching while unset (all None) is a fail + # source-ifs set and matching with unset source-ips is a fail + raise ConfigError(f'Missing required "ip key" parameter when '\ + 'running more then one GRE based tunnel on the '\ + 'same source-interface') # Keys are not allowed with ipip and sit tunnels if tunnel['encapsulation'] in ['ipip', 'sit']: diff --git a/src/conf_mode/load-balancing_reverse-proxy.py b/src/conf_mode/load-balancing_reverse-proxy.py index 1569d8d71..09c68dadd 100755 --- a/src/conf_mode/load-balancing_reverse-proxy.py +++ b/src/conf_mode/load-balancing_reverse-proxy.py @@ -26,9 +26,13 @@ from vyos.utils.dict import dict_search from vyos.utils.process import call from vyos.utils.network import check_port_availability from vyos.utils.network import is_listen_port_bind_service -from vyos.pki import wrap_certificate -from vyos.pki import wrap_private_key +from vyos.pki import find_chain +from vyos.pki import load_certificate +from vyos.pki import load_private_key +from vyos.pki import encode_certificate +from vyos.pki import encode_private_key from vyos.template import render +from vyos.utils.file import write_file from vyos import ConfigError from vyos import airbag airbag.enable() @@ -75,12 +79,21 @@ def verify(lb): raise ConfigError(f'"TCP" port "{tmp_port}" is used by another service') for back, back_config in lb['backend'].items(): - if 'http-check' in back_config: - http_check = back_config['http-check'] + if 'http_check' in back_config: + http_check = back_config['http_check'] if 'expect' in http_check and 'status' in http_check['expect'] and 'string' in http_check['expect']: raise ConfigError(f'"expect status" and "expect string" can not be configured together!') + + if 'health_check' in back_config: + if 'mode' not in back_config or back_config['mode'] != 'tcp': + raise ConfigError(f'backend "{back}" can only be configured with {back_config["health_check"]} ' + + f'health-check whilst in TCP mode!') + if 'http_check' in back_config: + raise ConfigError(f'backend "{back}" cannot be configured with both http-check and health-check!') + if 'server' not in back_config: raise ConfigError(f'"{back} server" must be configured!') + for bk_server, bk_server_conf in back_config['server'].items(): if 'address' not in bk_server_conf or 'port' not in bk_server_conf: raise ConfigError(f'"backend {back} server {bk_server} 
address and port" must be configured!') @@ -92,12 +105,18 @@ def verify(lb): if {'no_verify', 'ca_certificate'} <= set(back_config['ssl']): raise ConfigError(f'backend {back} cannot have both ssl options no-verify and ca-certificate set!') + # Check if http-response-headers are configured in any frontend/backend where mode != http + for group in ['service', 'backend']: + for config_name, config in lb[group].items(): + if 'http_response_headers' in config and ('mode' not in config or config['mode'] != 'http'): + raise ConfigError(f'{group} {config_name} must be set to http mode to use http_response_headers!') + for front, front_config in lb['service'].items(): for cert in dict_search('ssl.certificate', front_config) or []: verify_pki_certificate(lb, cert) for back, back_config in lb['backend'].items(): - tmp = dict_search('ssl.ca_certificate', front_config) + tmp = dict_search('ssl.ca_certificate', back_config) if tmp: verify_pki_ca_certificate(lb, tmp) @@ -118,51 +137,54 @@ def generate(lb): if not os.path.isdir(load_balancing_dir): os.mkdir(load_balancing_dir) + loaded_ca_certs = {load_certificate(c['certificate']) + for c in lb['pki']['ca'].values()} if 'ca' in lb['pki'] else {} + # SSL Certificates for frontend for front, front_config in lb['service'].items(): - if 'ssl' in front_config: - - if 'certificate' in front_config['ssl']: - cert_names = front_config['ssl']['certificate'] + if 'ssl' not in front_config: + continue - for cert_name in cert_names: - pki_cert = lb['pki']['certificate'][cert_name] - cert_file_path = os.path.join(load_balancing_dir, f'{cert_name}.pem') - cert_key_path = os.path.join(load_balancing_dir, f'{cert_name}.pem.key') + if 'certificate' in front_config['ssl']: + cert_names = front_config['ssl']['certificate'] - with open(cert_file_path, 'w') as f: - f.write(wrap_certificate(pki_cert['certificate'])) + for cert_name in cert_names: + pki_cert = lb['pki']['certificate'][cert_name] + cert_file_path = os.path.join(load_balancing_dir, f'{cert_name}.pem') + cert_key_path = os.path.join(load_balancing_dir, f'{cert_name}.pem.key') - if 'private' in pki_cert and 'key' in pki_cert['private']: - with open(cert_key_path, 'w') as f: - f.write(wrap_private_key(pki_cert['private']['key'])) + loaded_pki_cert = load_certificate(pki_cert['certificate']) + cert_full_chain = find_chain(loaded_pki_cert, loaded_ca_certs) - if 'ca_certificate' in front_config['ssl']: - ca_name = front_config['ssl']['ca_certificate'] - pki_ca_cert = lb['pki']['ca'][ca_name] - ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem') + write_file(cert_file_path, + '\n'.join(encode_certificate(c) for c in cert_full_chain)) - with open(ca_cert_file_path, 'w') as f: - f.write(wrap_certificate(pki_ca_cert['certificate'])) + if 'private' in pki_cert and 'key' in pki_cert['private']: + loaded_key = load_private_key(pki_cert['private']['key'], passphrase=None, wrap_tags=True) + key_pem = encode_private_key(loaded_key, passphrase=None) + write_file(cert_key_path, key_pem) # SSL Certificates for backend for back, back_config in lb['backend'].items(): - if 'ssl' in back_config: + if 'ssl' not in back_config: + continue - if 'ca_certificate' in back_config['ssl']: - ca_name = back_config['ssl']['ca_certificate'] - pki_ca_cert = lb['pki']['ca'][ca_name] - ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem') + if 'ca_certificate' in back_config['ssl']: + ca_name = back_config['ssl']['ca_certificate'] + ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem') + ca_chains 
= [] - with open(ca_cert_file_path, 'w') as f: - f.write(wrap_certificate(pki_ca_cert['certificate'])) + pki_ca_cert = lb['pki']['ca'][ca_name] + loaded_ca_cert = load_certificate(pki_ca_cert['certificate']) + ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs) + ca_chains.append('\n'.join(encode_certificate(c) for c in ca_full_chain)) + write_file(ca_cert_file_path, '\n'.join(ca_chains)) render(load_balancing_conf_file, 'load-balancing/haproxy.cfg.j2', lb) render(systemd_override, 'load-balancing/override_haproxy.conf.j2', lb) return None - def apply(lb): call('systemctl daemon-reload') if not lb: diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py index 4cd9b570d..f74bb217e 100755 --- a/src/conf_mode/nat.py +++ b/src/conf_mode/nat.py @@ -17,7 +17,6 @@ import os from sys import exit -from netifaces import interfaces from vyos.base import Warning from vyos.config import Config @@ -30,6 +29,7 @@ from vyos.utils.dict import dict_search_args from vyos.utils.process import cmd from vyos.utils.process import run from vyos.utils.network import is_addr_assigned +from vyos.utils.network import interface_exists from vyos import ConfigError from vyos import airbag @@ -149,8 +149,12 @@ def verify(nat): if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']: raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"') elif 'name' in config['outbound_interface']: - if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces(): - Warning(f'NAT interface "{config["outbound_interface"]["name"]}" for source NAT rule "{rule}" does not exist!') + interface_name = config['outbound_interface']['name'] + if interface_name not in 'any': + if interface_name.startswith('!'): + interface_name = interface_name[1:] + if not interface_exists(interface_name): + Warning(f'Interface "{interface_name}" for source NAT rule "{rule}" does not exist!') else: group_name = config['outbound_interface']['group'] if group_name[0] == '!': @@ -182,8 +186,12 @@ def verify(nat): if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']: raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"') elif 'name' in config['inbound_interface']: - if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces(): - Warning(f'NAT interface "{config["inbound_interface"]["name"]}" for destination NAT rule "{rule}" does not exist!') + interface_name = config['inbound_interface']['name'] + if interface_name not in 'any': + if interface_name.startswith('!'): + interface_name = interface_name[1:] + if not interface_exists(interface_name): + Warning(f'Interface "{interface_name}" for destination NAT rule "{rule}" does not exist!') else: group_name = config['inbound_interface']['group'] if group_name[0] == '!': diff --git a/src/conf_mode/nat64.py b/src/conf_mode/nat64.py index c1e7ebf85..32a1c47d1 100755 --- a/src/conf_mode/nat64.py +++ b/src/conf_mode/nat64.py @@ -20,7 +20,7 @@ import csv import os import re -from ipaddress import IPv6Network +from ipaddress import IPv6Network, IPv6Address from json import dumps as json_write from vyos import ConfigError @@ -103,8 +103,14 @@ def verify(nat64) -> None: # Verify that source.prefix is set and is a /96 if not dict_search("source.prefix", instance): raise ConfigError(f"Source NAT64 rule {rule} missing source prefix") - if 
IPv6Network(instance["source"]["prefix"]).prefixlen != 96: + src_prefix = IPv6Network(instance["source"]["prefix"]) + if src_prefix.prefixlen != 96: raise ConfigError(f"Source NAT64 rule {rule} source prefix must be /96") + if (int(src_prefix[0]) & int(IPv6Address('0:0:0:0:ff00::'))) != 0: + raise ConfigError( + f'Source NAT64 rule {rule} source prefix is not RFC6052-compliant: ' + 'bits 64 to 71 (9th octet) must be zeroed' + ) pools = dict_search("translation.pool", instance) if pools: diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py index fe017527d..075738dad 100755 --- a/src/conf_mode/nat66.py +++ b/src/conf_mode/nat66.py @@ -17,15 +17,15 @@ import os from sys import exit -from netifaces import interfaces from vyos.base import Warning from vyos.config import Config from vyos.configdep import set_dependents, call_dependents from vyos.template import render -from vyos.utils.process import cmd -from vyos.utils.kernel import check_kmod from vyos.utils.dict import dict_search +from vyos.utils.kernel import check_kmod +from vyos.utils.network import interface_exists +from vyos.utils.process import cmd from vyos.template import is_ipv6 from vyos import ConfigError from vyos import airbag @@ -64,8 +64,12 @@ def verify(nat): if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']: raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"') elif 'name' in config['outbound_interface']: - if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces(): - Warning(f'NAT66 interface "{config["outbound_interface"]["name"]}" for source NAT66 rule "{rule}" does not exist!') + interface_name = config['outbound_interface']['name'] + if interface_name not in 'any': + if interface_name.startswith('!'): + interface_name = interface_name[1:] + if not interface_exists(interface_name): + Warning(f'Interface "{interface_name}" for source NAT66 rule "{rule}" does not exist!') addr = dict_search('translation.address', config) if addr != None: @@ -88,8 +92,12 @@ def verify(nat): if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']: raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"') elif 'name' in config['inbound_interface']: - if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces(): - Warning(f'NAT66 interface "{config["inbound_interface"]["name"]}" for destination NAT66 rule "{rule}" does not exist!') + interface_name = config['inbound_interface']['name'] + if interface_name not in 'any': + if interface_name.startswith('!'): + interface_name = interface_name[1:] + if not interface_exists(interface_name): + Warning(f'Interface "{interface_name}" for destination NAT66 rule "{rule}" does not exist!') return None diff --git a/src/conf_mode/nat_cgnat.py b/src/conf_mode/nat_cgnat.py index f41d66c66..957b12c28 100755 --- a/src/conf_mode/nat_cgnat.py +++ b/src/conf_mode/nat_cgnat.py @@ -189,11 +189,6 @@ def verify(config): if 'rule' not in config: raise ConfigError(f'Rule must be defined!') - # As PoC allow only one rule for CGNAT translations - # one internal pool and one external pool - if len(config['rule']) > 1: - raise ConfigError(f'Only one rule is allowed for translations!') - for pool in ('external', 'internal'): if pool not in config['pool']: raise ConfigError(f'{pool} pool must be defined!') @@ -203,6 +198,13 @@ 
def verify(config): f'Range for "{pool} pool {pool_name}" must be defined!' ) + external_pools_query = "keys(pool.external)" + external_pools: list = jmespath.search(external_pools_query, config) + internal_pools_query = "keys(pool.internal)" + internal_pools: list = jmespath.search(internal_pools_query, config) + + used_external_pools = {} + used_internal_pools = {} for rule, rule_config in config['rule'].items(): if 'source' not in rule_config: raise ConfigError(f'Rule "{rule}" source pool must be defined!') @@ -212,49 +214,86 @@ def verify(config): if 'translation' not in rule_config: raise ConfigError(f'Rule "{rule}" translation pool must be defined!') + # Check if pool exists + internal_pool = rule_config['source']['pool'] + if internal_pool not in internal_pools: + raise ConfigError(f'Internal pool "{internal_pool}" does not exist!') + external_pool = rule_config['translation']['pool'] + if external_pool not in external_pools: + raise ConfigError(f'External pool "{external_pool}" does not exist!') + + # Check pool duplication in different rules + if external_pool in used_external_pools: + raise ConfigError( + f'External pool "{external_pool}" is already used in rule ' + f'{used_external_pools[external_pool]} and cannot be used in ' + f'rule {rule}!' + ) + + if internal_pool in used_internal_pools: + raise ConfigError( + f'Internal pool "{internal_pool}" is already used in rule ' + f'{used_internal_pools[internal_pool]} and cannot be used in ' + f'rule {rule}!' + ) + + used_external_pools[external_pool] = rule + used_internal_pools[internal_pool] = rule + def generate(config): if not config: return None - # first external pool as we allow only one as PoC - ext_pool_name = jmespath.search("rule.*.translation | [0]", config).get('pool') - int_pool_name = jmespath.search("rule.*.source | [0]", config).get('pool') - ext_query = f"pool.external.{ext_pool_name}.range | keys(@)" - int_query = f"pool.internal.{int_pool_name}.range" - external_ranges = jmespath.search(ext_query, config) - internal_ranges = [jmespath.search(int_query, config)] - - external_list_count = [] - external_list_hosts = [] - internal_list_count = [] - internal_list_hosts = [] - for ext_range in external_ranges: - # External hosts count - e_count = IPOperations(ext_range).get_ips_count() - external_list_count.append(e_count) - # External hosts list - e_hosts = IPOperations(ext_range).convert_prefix_to_list_ips() - external_list_hosts.extend(e_hosts) - for int_range in internal_ranges: - # Internal hosts count - i_count = IPOperations(int_range).get_ips_count() - internal_list_count.append(i_count) - # Internal hosts list - i_hosts = IPOperations(int_range).convert_prefix_to_list_ips() - internal_list_hosts.extend(i_hosts) - - external_host_count = sum(external_list_count) - internal_host_count = sum(internal_list_count) - ports_per_user = int( - jmespath.search(f'pool.external.{ext_pool_name}.per_user_limit.port', config) - ) - external_port_range: str = jmespath.search( - f'pool.external.{ext_pool_name}.external_port_range', config - ) - proto_maps, other_maps = generate_port_rules( - external_list_hosts, internal_list_hosts, ports_per_user, external_port_range - ) + proto_maps = [] + other_maps = [] + + for rule, rule_config in config['rule'].items(): + ext_pool_name: str = rule_config['translation']['pool'] + int_pool_name: str = rule_config['source']['pool'] + + # Sort the external ranges by sequence + external_ranges: list = sorted( + config['pool']['external'][ext_pool_name]['range'], + key=lambda r: 
int(config['pool']['external'][ext_pool_name]['range'][r].get('seq', 999999)) + ) + internal_ranges: list = [range for range in config['pool']['internal'][int_pool_name]['range']] + external_list_hosts_count = [] + external_list_hosts = [] + internal_list_hosts_count = [] + internal_list_hosts = [] + + for ext_range in external_ranges: + # External hosts count + e_count = IPOperations(ext_range).get_ips_count() + external_list_hosts_count.append(e_count) + # External hosts list + e_hosts = IPOperations(ext_range).convert_prefix_to_list_ips() + external_list_hosts.extend(e_hosts) + + for int_range in internal_ranges: + # Internal hosts count + i_count = IPOperations(int_range).get_ips_count() + internal_list_hosts_count.append(i_count) + # Internal hosts list + i_hosts = IPOperations(int_range).convert_prefix_to_list_ips() + internal_list_hosts.extend(i_hosts) + + external_host_count = sum(external_list_hosts_count) + internal_host_count = sum(internal_list_hosts_count) + ports_per_user = int( + jmespath.search(f'pool.external."{ext_pool_name}".per_user_limit.port', config) + ) + external_port_range: str = jmespath.search( + f'pool.external."{ext_pool_name}".external_port_range', config + ) + + rule_proto_maps, rule_other_maps = generate_port_rules( + external_list_hosts, internal_list_hosts, ports_per_user, external_port_range + ) + + proto_maps.extend(rule_proto_maps) + other_maps.extend(rule_other_maps) config['proto_map_elements'] = ', '.join(proto_maps) config['other_map_elements'] = ', '.join(other_maps) diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py index 1c01a9013..1361bb1a9 100755 --- a/src/conf_mode/protocols_bfd.py +++ b/src/conf_mode/protocols_bfd.py @@ -49,7 +49,7 @@ def verify(bfd): for peer, peer_config in bfd['peer'].items(): # IPv6 link local peers require an explicit local address/interface if is_ipv6_link_local(peer): - if 'source' not in peer_config or len(peer_config['source'] < 2): + if 'source' not in peer_config or len(peer_config['source']) < 2: raise ConfigError('BFD IPv6 link-local peers require explicit local address and interface setting') # IPv6 peers require an explicit local address diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py index 8a590cbc6..45248fb4a 100755 --- a/src/conf_mode/qos.py +++ b/src/conf_mode/qos.py @@ -17,6 +17,7 @@ from sys import exit from netifaces import interfaces +from vyos.base import Warning from vyos.config import Config from vyos.configdep import set_dependents from vyos.configdep import call_dependents @@ -89,6 +90,36 @@ def _clean_conf_dict(conf): return conf +def _get_group_filters(config: dict, group_name: str, visited=None) -> dict: + filters = dict() + if not visited: + visited = [group_name, ] + else: + if group_name in visited: + return filters + visited.append(group_name) + + for filter, filter_config in config.get(group_name, {}).items(): + if filter == 'match': + for match, match_config in filter_config.items(): + filters[f'{group_name}-{match}'] = match_config + elif filter == 'match_group': + for group in filter_config: + filters.update(_get_group_filters(config, group, visited)) + + return filters + + +def _get_group_match(config:dict, group_name:str) -> dict: + match = dict() + for key, val in _get_group_filters(config, group_name).items(): + # delete duplicate matches + if val not in match.values(): + match[key] = val + + return match + + def get_config(config=None): if config: conf = config @@ -135,11 +166,27 @@ def get_config(config=None): qos = conf.merge_defaults(qos, 
recursive=True) + if 'traffic_match_group' in qos: + for group, group_config in qos['traffic_match_group'].items(): + if 'match_group' in group_config: + qos['traffic_match_group'][group]['match'] = _get_group_match(qos['traffic_match_group'], group) + for policy in qos.get('policy', []): for p_name, p_config in qos['policy'][policy].items(): # cleanup empty match config if 'class' in p_config: for cls, cls_config in p_config['class'].items(): + if 'match_group' in cls_config: + # merge group match to match + for group in cls_config['match_group']: + for match, match_conf in qos['traffic_match_group'].get(group, {'match': {}})['match'].items(): + if 'match' not in cls_config: + cls_config['match'] = dict() + if match in cls_config['match']: + cls_config['match'][f'{group}-{match}'] = match_conf + else: + cls_config['match'][match] = match_conf + if 'match' in cls_config: cls_config['match'] = _clean_conf_dict(cls_config['match']) if cls_config['match'] == {}: @@ -147,6 +194,22 @@ def get_config(config=None): return qos + +def _verify_match(cls_config: dict) -> None: + if 'match' in cls_config: + for match, match_config in cls_config['match'].items(): + if {'ip', 'ipv6'} <= set(match_config): + raise ConfigError( + f'Can not use both IPv6 and IPv4 in one match ({match})!') + + +def _verify_match_group_exist(cls_config, qos): + if 'match_group' in cls_config: + for group in cls_config['match_group']: + if 'traffic_match_group' not in qos or group not in qos['traffic_match_group']: + Warning(f'Match group "{group}" does not exist!') + + def verify(qos): if not qos or 'interface' not in qos: return None @@ -174,11 +237,8 @@ def verify(qos): # bandwidth is not mandatory for priority-queue - that is why this is on the exception list if 'bandwidth' not in cls_config and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']: raise ConfigError(f'Bandwidth must be defined for policy "{policy}" class "{cls}"!') - if 'match' in cls_config: - for match, match_config in cls_config['match'].items(): - if {'ip', 'ipv6'} <= set(match_config): - raise ConfigError(f'Can not use both IPv6 and IPv4 in one match ({match})!') - + _verify_match(cls_config) + _verify_match_group_exist(cls_config, qos) if policy_type in ['random_detect']: if 'precedence' in policy_config: for precedence, precedence_config in policy_config['precedence'].items(): @@ -216,8 +276,14 @@ def verify(qos): if direction not in tmp: raise ConfigError(f'Selected QoS policy on interface "{interface}" only supports "{tmp}"!') + if 'traffic_match_group' in qos: + for group, group_config in qos['traffic_match_group'].items(): + _verify_match(group_config) + _verify_match_group_exist(group_config, qos) + return None + def generate(qos): if not qos or 'interface' not in qos: return None @@ -254,6 +320,7 @@ def apply(qos): return None + if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/service_dhcpv6-server.py b/src/conf_mode/service_dhcpv6-server.py index c7333dd3a..7af88007c 100755 --- a/src/conf_mode/service_dhcpv6-server.py +++ b/src/conf_mode/service_dhcpv6-server.py @@ -106,14 +106,14 @@ def verify(dhcpv6): # Stop address must be greater or equal to start address if not ip_address(stop) >= ip_address(start): - raise ConfigError(f'Range stop address "{stop}" must be greater then or equal ' \ + raise ConfigError(f'Range stop address "{stop}" must be greater than or equal ' \ f'to the range start address "{start}"!') # DHCPv6 range start address must be unique - two ranges can't # start with the same 
address - makes no sense if start in range6_start: raise ConfigError(f'Conflicting DHCPv6 lease range: '\ - f'Pool start address "{start}" defined multipe times!') + f'Pool start address "{start}" defined multiple times!') range6_start.append(start) @@ -121,7 +121,7 @@ def verify(dhcpv6): # end with the same address - makes no sense if stop in range6_stop: raise ConfigError(f'Conflicting DHCPv6 lease range: '\ - f'Pool stop address "{stop}" defined multipe times!') + f'Pool stop address "{stop}" defined multiple times!') range6_stop.append(stop) @@ -180,7 +180,7 @@ def verify(dhcpv6): if 'option' in subnet_config: if 'vendor_option' in subnet_config['option']: if len(dict_search('option.vendor_option.cisco.tftp_server', subnet_config)) > 2: - raise ConfigError(f'No more then two Cisco tftp-servers should be defined for subnet "{subnet}"!') + raise ConfigError(f'No more than two Cisco tftp-servers should be defined for subnet "{subnet}"!') # Subnets must be unique if subnet in subnets: diff --git a/src/conf_mode/service_dns_forwarding.py b/src/conf_mode/service_dns_forwarding.py index 7e863073a..70686534f 100755 --- a/src/conf_mode/service_dns_forwarding.py +++ b/src/conf_mode/service_dns_forwarding.py @@ -102,7 +102,7 @@ def get_config(config=None): 'ttl': rdata['ttl'], 'value': address }) - elif rtype in ['cname', 'ptr', 'ns']: + elif rtype in ['cname', 'ptr']: if not 'target' in rdata: dns['authoritative_zone_errors'].append(f'{subnode}.{node}: target is required') continue @@ -113,6 +113,19 @@ def get_config(config=None): 'ttl': rdata['ttl'], 'value': '{}.'.format(rdata['target']) }) + elif rtype == 'ns': + if not 'target' in rdata: + dns['authoritative_zone_errors'].append(f'{subnode}.{node}: at least one target is required') + continue + + for target in rdata['target']: + zone['records'].append({ + 'name': subnode, + 'type': rtype.upper(), + 'ttl': rdata['ttl'], + 'value': f'{target}.' + }) + elif rtype == 'mx': if not 'server' in rdata: dns['authoritative_zone_errors'].append(f'{subnode}.{node}: at least one server is required') diff --git a/src/conf_mode/service_suricata.py b/src/conf_mode/service_suricata.py new file mode 100755 index 000000000..69b369e0b --- /dev/null +++ b/src/conf_mode/service_suricata.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import os + +from sys import exit + +from vyos.base import Warning +from vyos.config import Config +from vyos.template import render +from vyos.utils.process import call +from vyos import ConfigError +from vyos import airbag +airbag.enable() + +config_file = '/run/suricata/suricata.yaml' +rotate_file = '/etc/logrotate.d/suricata' + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + base = ['service', 'suricata'] + + if not conf.exists(base): + return None + + suricata = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True, with_recursive_defaults=True) + + return suricata + +# https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search +def topological_sort(source): + sorted_nodes = [] + permanent_marks = set() + temporary_marks = set() + + def visit(n, v): + if n in permanent_marks: + return + if n in temporary_marks: + raise ConfigError('At least one cycle exists in the referenced groups') + + temporary_marks.add(n) + + for m in v.get('group', []): + m = m.lstrip('!') + if m not in source: + raise ConfigError(f'Undefined referenced group "{m}"') + visit(m, source[m]) + + temporary_marks.remove(n) + permanent_marks.add(n) + sorted_nodes.append((n, v)) + + while len(permanent_marks) < len(source): + n = next(n for n in source.keys() if n not in permanent_marks) + visit(n, source[n]) + + return sorted_nodes + +def verify(suricata): + if not suricata: + return None + + if 'interface' not in suricata: + raise ConfigError('No interfaces configured!') + + if 'address_group' not in suricata: + raise ConfigError('No address-group configured!') + + if 'port_group' not in suricata: + raise ConfigError('No port-group configured!') + + try: + topological_sort(suricata['address_group']) + except (ConfigError,StopIteration) as e: + raise ConfigError(f'Invalid address-group: {e}') + + try: + topological_sort(suricata['port_group']) + except (ConfigError,StopIteration) as e: + raise ConfigError(f'Invalid port-group: {e}') + +def generate(suricata): + if not suricata: + for file in [config_file, rotate_file]: + if os.path.isfile(file): + os.unlink(file) + + return None + + # Config-related formatters + def to_var(s:str): + return s.replace('-','_').upper() + + def to_val(s:str): + return s.replace('-',':') + + def to_ref(s:str): + if s[0] == '!': + return '!$' + to_var(s[1:]) + return '$' + to_var(s) + + def to_config(kind:str): + def format_group(group): + (name, value) = group + property = [to_val(property) for property in value.get(kind,[])] + group = [to_ref(group) for group in value.get('group',[])] + return (to_var(name), property + group) + return format_group + + # Format the address group + suricata['address_group'] = map(to_config('address'), + topological_sort(suricata['address_group'])) + + # Format the port group + suricata['port_group'] = map(to_config('port'), + topological_sort(suricata['port_group'])) + + render(config_file, 'ids/suricata.j2', {'suricata': suricata}) + render(rotate_file, 'ids/suricata_logrotate.j2', suricata) + return None + +def apply(suricata): + systemd_service = 'suricata.service' + if not suricata or 'interface' not in suricata: + # Stop suricata service if removed + call(f'systemctl stop {systemd_service}') + else: + Warning('To fetch the latest rules, use "update suricata"; ' + 'To periodically fetch the latest rules, ' + 'use the task scheduler!') + call(f'systemctl restart {systemd_service}') + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + 
apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/service_upnp.py b/src/conf_mode/service_upnp.py deleted file mode 100755 index 0df8dc09e..000000000 --- a/src/conf_mode/service_upnp.py +++ /dev/null @@ -1,157 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2021-2022 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -import os - -from sys import exit -import uuid -import netifaces -from ipaddress import IPv4Network -from ipaddress import IPv6Network - -from vyos.config import Config -from vyos.utils.process import call -from vyos.template import render -from vyos.template import is_ipv4 -from vyos.template import is_ipv6 -from vyos import ConfigError -from vyos import airbag -airbag.enable() - -config_file = r'/run/upnp/miniupnp.conf' - -def get_config(config=None): - if config: - conf = config - else: - conf = Config() - - base = ['service', 'upnp'] - upnpd = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - - if not upnpd: - return None - - upnpd = conf.merge_defaults(upnpd, recursive=True) - - uuidgen = uuid.uuid1() - upnpd.update({'uuid': uuidgen}) - - return upnpd - -def get_all_interface_addr(prefix, filter_dev, filter_family): - list_addr = [] - for interface in netifaces.interfaces(): - if filter_dev and interface in filter_dev: - continue - addrs = netifaces.ifaddresses(interface) - if netifaces.AF_INET in addrs.keys(): - if netifaces.AF_INET in filter_family: - for addr in addrs[netifaces.AF_INET]: - if prefix: - # we need to manually assemble a list of IPv4 address/prefix - prefix = '/' + \ - str(IPv4Network('0.0.0.0/' + addr['netmask']).prefixlen) - list_addr.append(addr['addr'] + prefix) - else: - list_addr.append(addr['addr']) - if netifaces.AF_INET6 in addrs.keys(): - if netifaces.AF_INET6 in filter_family: - for addr in addrs[netifaces.AF_INET6]: - if prefix: - # we need to manually assemble a list of IPv4 address/prefix - bits = bin(int(addr['netmask'].replace(':', '').split('/')[0], 16)).count('1') - prefix = '/' + str(bits) - list_addr.append(addr['addr'] + prefix) - else: - list_addr.append(addr['addr']) - - return list_addr - -def verify(upnpd): - if not upnpd: - return None - - if 'wan_interface' not in upnpd: - raise ConfigError('To enable UPNP, you must have the "wan-interface" option!') - - if 'rule' in upnpd: - for rule, rule_config in upnpd['rule'].items(): - for option in ['external_port_range', 'internal_port_range', 'ip', 'action']: - if option not in rule_config: - tmp = option.replace('_', '-') - raise ConfigError(f'Every UPNP rule requires "{tmp}" to be set!') - - if 'stun' in upnpd: - for option in ['host', 'port']: - if option not in upnpd['stun']: - raise ConfigError(f'A UPNP stun support must have an "{option}" option!') - - # Check the validity of the IP address - listen_dev = [] - system_addrs_cidr = get_all_interface_addr(True, [], [netifaces.AF_INET, netifaces.AF_INET6]) - system_addrs = get_all_interface_addr(False, [], 
[netifaces.AF_INET, netifaces.AF_INET6]) - if 'listen' not in upnpd: - raise ConfigError(f'Listen address or interface is required!') - for listen_if_or_addr in upnpd['listen']: - if listen_if_or_addr not in netifaces.interfaces(): - listen_dev.append(listen_if_or_addr) - if (listen_if_or_addr not in system_addrs) and (listen_if_or_addr not in system_addrs_cidr) and \ - (listen_if_or_addr not in netifaces.interfaces()): - if is_ipv4(listen_if_or_addr) and IPv4Network(listen_if_or_addr).is_multicast: - raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed' - f'to listen on. It is not an interface address nor a multicast address!') - if is_ipv6(listen_if_or_addr) and IPv6Network(listen_if_or_addr).is_multicast: - raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed' - f'to listen on. It is not an interface address nor a multicast address!') - - system_listening_dev_addrs_cidr = get_all_interface_addr(True, listen_dev, [netifaces.AF_INET6]) - system_listening_dev_addrs = get_all_interface_addr(False, listen_dev, [netifaces.AF_INET6]) - for listen_if_or_addr in upnpd['listen']: - if listen_if_or_addr not in netifaces.interfaces() and \ - (listen_if_or_addr not in system_listening_dev_addrs_cidr) and \ - (listen_if_or_addr not in system_listening_dev_addrs) and \ - is_ipv6(listen_if_or_addr) and \ - (not IPv6Network(listen_if_or_addr).is_multicast): - raise ConfigError(f'{listen_if_or_addr} must listen on the interface of the network card') - -def generate(upnpd): - if not upnpd: - return None - - if os.path.isfile(config_file): - os.unlink(config_file) - - render(config_file, 'firewall/upnpd.conf.j2', upnpd) - -def apply(upnpd): - systemd_service_name = 'miniupnpd.service' - if not upnpd: - # Stop the UPNP service - call(f'systemctl stop {systemd_service_name}') - else: - # Start the UPNP service - call(f'systemctl restart {systemd_service_name}') - -if __name__ == '__main__': - try: - c = get_config() - verify(c) - generate(c) - apply(c) - except ConfigError as e: - print(e) - exit(1) |
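
The container.py hunk above bounds a container's cpu-quota by the host's core count using Decimal. Below is a minimal standalone sketch of that comparison, assuming only the standard library: os.cpu_count() stands in for vyos.cpu.get_core_count(), ValueError for ConfigError, and the container name is made up.

import os
from decimal import Decimal

def check_cpu_quota(name: str, cpu_quota: str) -> None:
    # Fall back to 1 if the core count cannot be determined.
    cores = os.cpu_count() or 1
    # Decimal keeps fractional quotas such as '1.25' exact and still
    # compares cleanly against the integer core count.
    if Decimal(cpu_quota) > cores:
        raise ValueError(f'Cannot set limit to more cores than available "{name}"!')

check_cpu_quota('c01', '1.25')  # passes on any host with at least 2 cores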
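
The nat64.py hunk adds an RFC 6052 compliance check on the source prefix (bits 64 to 71, the 9th octet, must be zero). This is a standalone recast of that check for illustration; the function name and example prefixes are not part of the VyOS code base.

from ipaddress import IPv6Address, IPv6Network

def nat64_prefix_ok(prefix: str) -> bool:
    net = IPv6Network(prefix)
    # A NAT64 source prefix must be a /96 ...
    if net.prefixlen != 96:
        return False
    # ... and RFC 6052 reserves bits 64-71 (the 9th octet), so masking the
    # network address with ff00 in the fifth group must yield zero.
    return int(net[0]) & int(IPv6Address('0:0:0:0:ff00::')) == 0

print(nat64_prefix_ok('64:ff9b::/96'))                # True  - well-known prefix
print(nat64_prefix_ok('2001:db8:122:344:ff00::/96'))  # False - 9th octet is 0xff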