Diffstat (limited to 'src')
-rwxr-xr-x  src/conf_mode/container.py                 395
-rwxr-xr-x  src/conf_mode/firewall_options.py           14
-rwxr-xr-x  src/conf_mode/policy-local-route.py        194
-rwxr-xr-x  src/conf_mode/service_webproxy.py          104
-rwxr-xr-x  src/conf_mode/system-option.py              17
-rwxr-xr-x  src/migration-scripts/container/0-to-1      77
-rwxr-xr-x  src/op_mode/container.py                    84
-rwxr-xr-x  src/op_mode/show_ipsec_connections.py      284
-rwxr-xr-x  src/op_mode/webproxy_update_blacklist.sh    29
9 files changed, 1131 insertions, 67 deletions
diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py new file mode 100755 index 000000000..7567444db --- /dev/null +++ b/src/conf_mode/container.py @@ -0,0 +1,395 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021-2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os + +from ipaddress import ip_address +from ipaddress import ip_network +from time import sleep +from json import dumps as json_write + +from vyos.base import Warning +from vyos.config import Config +from vyos.configdict import dict_merge +from vyos.configdict import node_changed +from vyos.util import call +from vyos.util import cmd +from vyos.util import run +from vyos.util import write_file +from vyos.template import inc_ip +from vyos.template import is_ipv4 +from vyos.template import is_ipv6 +from vyos.template import render +from vyos.xml import defaults +from vyos import ConfigError +from vyos import airbag +airbag.enable() + +config_containers_registry = '/etc/containers/registries.conf' +config_containers_storage = '/etc/containers/storage.conf' +systemd_unit_path = '/run/systemd/system' + +def _cmd(command): + if os.path.exists('/tmp/vyos.container.debug'): + print(command) + return cmd(command) + +def network_exists(name): + # Check explicit name for network, returns True if network exists + c = _cmd(f'podman network ls --quiet --filter name=^{name}$') + return bool(c) + +# Common functions +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + + base = ['container'] + container = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True, no_tag_node_value_mangle=True) + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + default_values = defaults(base) + # container base default values can not be merged here - remove and add them later + if 'name' in default_values: + del default_values['name'] + container = dict_merge(default_values, container) + + # Merge per-container default values + if 'name' in container: + default_values = defaults(base + ['name']) + if 'port' in default_values: + del default_values['port'] + for name in container['name']: + container['name'][name] = dict_merge(default_values, container['name'][name]) + + # XXX: T2665: we can not safely rely on the defaults() when there are + # tagNodes in place, it is better to blend in the defaults manually. 
+ if 'port' in container['name'][name]: + for port in container['name'][name]['port']: + default_values = defaults(base + ['name', 'port']) + container['name'][name]['port'][port] = dict_merge( + default_values, container['name'][name]['port'][port]) + + # Delete container network, delete containers + tmp = node_changed(conf, base + ['network']) + if tmp: container.update({'network_remove' : tmp}) + + tmp = node_changed(conf, base + ['name']) + if tmp: container.update({'container_remove' : tmp}) + + return container + +def verify(container): + # bail out early - looks like removal from running config + if not container: + return None + + # Add new container + if 'name' in container: + for name, container_config in container['name'].items(): + # Container image is a mandatory option + if 'image' not in container_config: + raise ConfigError(f'Container image for "{name}" is mandatory!') + + # Check if requested container image exists locally. If it does not + # exist locally - inform the user. This is required as there is a + # shared container image storage accross all VyOS images. A user can + # delete a container image from the system, boot into another version + # of VyOS and then it would fail to boot. This is to prevent any + # configuration error when container images are deleted from the + # global storage. A per image local storage would be a super waste + # of diskspace as there will be a full copy (up tu several GB/image) + # on upgrade. This is the "cheapest" and fastest solution in terms + # of image upgrade and deletion. + image = container_config['image'] + if run(f'podman image exists {image}') != 0: + Warning(f'Image "{image}" used in container "{name}" does not exist '\ + f'locally. Please use "add container image {image}" to add it '\ + f'to the system! 
Container "{name}" will not be started!') + + if 'network' in container_config: + if len(container_config['network']) > 1: + raise ConfigError(f'Only one network can be specified for container "{name}"!') + + # Check if the specified container network exists + network_name = list(container_config['network'])[0] + if network_name not in container.get('network', {}): + raise ConfigError(f'Container network "{network_name}" does not exist!') + + if 'address' in container_config['network'][network_name]: + address = container_config['network'][network_name]['address'] + network = None + if is_ipv4(address): + network = [x for x in container['network'][network_name]['prefix'] if is_ipv4(x)][0] + elif is_ipv6(address): + network = [x for x in container['network'][network_name]['prefix'] if is_ipv6(x)][0] + + # Specified container IP address must belong to network prefix + if ip_address(address) not in ip_network(network): + raise ConfigError(f'Used container address "{address}" not in network "{network}"!') + + # We can not use the first IP address of a network prefix as this is used by podman + if ip_address(address) == ip_network(network)[1]: + raise ConfigError(f'IP address "{address}" can not be used for a container, '\ + 'reserved for the container engine!') + + if 'device' in container_config: + for dev, dev_config in container_config['device'].items(): + if 'source' not in dev_config: + raise ConfigError(f'Device "{dev}" has no source path configured!') + + if 'destination' not in dev_config: + raise ConfigError(f'Device "{dev}" has no destination path configured!') + + source = dev_config['source'] + if not os.path.exists(source): + raise ConfigError(f'Device "{dev}" source path "{source}" does not exist!') + + if 'environment' in container_config: + for var, cfg in container_config['environment'].items(): + if 'value' not in cfg: + raise ConfigError(f'Environment variable {var} has no value assigned!') + + if 'volume' in container_config: + for volume, volume_config in container_config['volume'].items(): + if 'source' not in volume_config: + raise ConfigError(f'Volume "{volume}" has no source path configured!') + + if 'destination' not in volume_config: + raise ConfigError(f'Volume "{volume}" has no destination path configured!') + + source = volume_config['source'] + if not os.path.exists(source): + raise ConfigError(f'Volume "{volume}" source path "{source}" does not exist!') + + if 'port' in container_config: + for tmp in container_config['port']: + if not {'source', 'destination'} <= set(container_config['port'][tmp]): + raise ConfigError(f'Both "source" and "destination" must be specified for a port mapping!') + + # If 'allow-host-networks' or 'network' not set. 
+ if 'allow_host_networks' not in container_config and 'network' not in container_config: + raise ConfigError(f'Must either set "network" or "allow-host-networks" for container "{name}"!') + + # Can not set both allow-host-networks and network at the same time + if {'allow_host_networks', 'network'} <= set(container_config): + raise ConfigError(f'"allow-host-networks" and "network" for "{name}" cannot be both configured at the same time!') + + # Add new network + if 'network' in container: + for network, network_config in container['network'].items(): + v4_prefix = 0 + v6_prefix = 0 + # If ipv4-prefix not defined for user-defined network + if 'prefix' not in network_config: + raise ConfigError(f'prefix for network "{network}" must be defined!') + + for prefix in network_config['prefix']: + if is_ipv4(prefix): v4_prefix += 1 + elif is_ipv6(prefix): v6_prefix += 1 + + if v4_prefix > 1: + raise ConfigError(f'Only one IPv4 prefix can be defined for network "{network}"!') + if v6_prefix > 1: + raise ConfigError(f'Only one IPv6 prefix can be defined for network "{network}"!') + + + # A network attached to a container can not be deleted + if {'network_remove', 'name'} <= set(container): + for network in container['network_remove']: + for container, container_config in container['name'].items(): + if 'network' in container_config and network in container_config['network']: + raise ConfigError(f'Can not remove network "{network}", used by container "{container}"!') + + return None + +def generate_run_arguments(name, container_config): + image = container_config['image'] + memory = container_config['memory'] + shared_memory = container_config['shared_memory'] + restart = container_config['restart'] + + # Add capability options. Should be in uppercase + cap_add = '' + if 'cap_add' in container_config: + for c in container_config['cap_add']: + c = c.upper() + c = c.replace('-', '_') + cap_add += f' --cap-add={c}' + + # Add a host device to the container /dev/x:/dev/x + device = '' + if 'device' in container_config: + for dev, dev_config in container_config['device'].items(): + source_dev = dev_config['source'] + dest_dev = dev_config['destination'] + device += f' --device={source_dev}:{dest_dev}' + + # Check/set environment options "-e foo=bar" + env_opt = '' + if 'environment' in container_config: + for k, v in container_config['environment'].items(): + env_opt += f" -e \"{k}={v['value']}\"" + + # Publish ports + port = '' + if 'port' in container_config: + protocol = '' + for portmap in container_config['port']: + protocol = container_config['port'][portmap]['protocol'] + sport = container_config['port'][portmap]['source'] + dport = container_config['port'][portmap]['destination'] + port += f' -p {sport}:{dport}/{protocol}' + + # Bind volume + volume = '' + if 'volume' in container_config: + for vol, vol_config in container_config['volume'].items(): + svol = vol_config['source'] + dvol = vol_config['destination'] + volume += f' -v {svol}:{dvol}' + + container_base_cmd = f'--detach --interactive --tty --replace {cap_add} ' \ + f'--memory {memory}m --shm-size {shared_memory}m --memory-swap 0 --restart {restart} ' \ + f'--name {name} {device} {port} {volume} {env_opt}' + + if 'allow_host_networks' in container_config: + return f'{container_base_cmd} --net host {image}' + + ip_param = '' + networks = ",".join(container_config['network']) + for network in container_config['network']: + if 'address' in container_config['network'][network]: + address = container_config['network'][network]['address'] + 
ip_param = f'--ip {address}' + + return f'{container_base_cmd} --net {networks} {ip_param} {image}' + +def generate(container): + # bail out early - looks like removal from running config + if not container: + if os.path.exists(config_containers_registry): + os.unlink(config_containers_registry) + if os.path.exists(config_containers_storage): + os.unlink(config_containers_storage) + return None + + if 'network' in container: + for network, network_config in container['network'].items(): + tmp = { + 'cniVersion' : '0.4.0', + 'name' : network, + 'plugins' : [{ + 'type': 'bridge', + 'bridge': f'cni-{network}', + 'isGateway': True, + 'ipMasq': False, + 'hairpinMode': False, + 'ipam' : { + 'type': 'host-local', + 'routes': [], + 'ranges' : [], + }, + }] + } + + for prefix in network_config['prefix']: + net = [{'gateway' : inc_ip(prefix, 1), 'subnet' : prefix}] + tmp['plugins'][0]['ipam']['ranges'].append(net) + + # install per address-family default orutes + default_route = '0.0.0.0/0' + if is_ipv6(prefix): + default_route = '::/0' + tmp['plugins'][0]['ipam']['routes'].append({'dst': default_route}) + + write_file(f'/etc/cni/net.d/{network}.conflist', json_write(tmp, indent=2)) + + render(config_containers_registry, 'container/registries.conf.j2', container) + render(config_containers_storage, 'container/storage.conf.j2', container) + + if 'name' in container: + for name, container_config in container['name'].items(): + if 'disable' in container_config: + continue + + file_path = os.path.join(systemd_unit_path, f'vyos-container-{name}.service') + run_args = generate_run_arguments(name, container_config) + render(file_path, 'container/systemd-unit.j2', {'name': name, 'run_args': run_args}) + + return None + +def apply(container): + # Delete old containers if needed. 
We can't delete running container + # Option "--force" allows to delete containers with any status + if 'container_remove' in container: + for name in container['container_remove']: + file_path = os.path.join(systemd_unit_path, f'vyos-container-{name}.service') + call(f'systemctl stop vyos-container-{name}.service') + if os.path.exists(file_path): + os.unlink(file_path) + + call('systemctl daemon-reload') + + # Delete old networks if needed + if 'network_remove' in container: + for network in container['network_remove']: + call(f'podman network rm {network}') + tmp = f'/etc/cni/net.d/{network}.conflist' + if os.path.exists(tmp): + os.unlink(tmp) + + # Add container + disabled_new = False + if 'name' in container: + for name, container_config in container['name'].items(): + image = container_config['image'] + + if run(f'podman image exists {image}') != 0: + # container image does not exist locally - user already got + # informed by a WARNING in verfiy() - bail out early + continue + + if 'disable' in container_config: + # check if there is a container by that name running + tmp = _cmd('podman ps -a --format "{{.Names}}"') + if name in tmp: + file_path = os.path.join(systemd_unit_path, f'vyos-container-{name}.service') + call(f'systemctl stop vyos-container-{name}.service') + if os.path.exists(file_path): + disabled_new = True + os.unlink(file_path) + continue + + cmd(f'systemctl restart vyos-container-{name}.service') + + if disabled_new: + call('systemctl daemon-reload') + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/firewall_options.py b/src/conf_mode/firewall_options.py index 67bf5d0e2..b7f4aa82c 100755 --- a/src/conf_mode/firewall_options.py +++ b/src/conf_mode/firewall_options.py @@ -115,9 +115,12 @@ def apply(tcp): continue # adjust TCP MSS per interface - if mss: + if mss == 'clamp-mss-to-pmtu': call('iptables --table mangle --append {} --out-interface {} --protocol tcp ' - '--tcp-flags SYN,RST SYN --jump TCPMSS --set-mss {} >&/dev/null'.format(target, intf, mss)) + '--tcp-flags SYN,RST SYN --jump TCPMSS --clamp-mss-to-pmtu >&/dev/null'.format(target, intf)) + elif mss: + call('iptables --table mangle --append {} --out-interface {} --protocol tcp ' + '--tcp-flags SYN,RST SYN --jump TCPMSS --set-mss {} >&/dev/null'.format(target, intf, mss)) # Setup new ip6tables rules if tcp['new_chain6']: @@ -133,9 +136,12 @@ def apply(tcp): continue # adjust TCP MSS per interface - if mss: + if mss == 'clamp-mss-to-pmtu': + call('ip6tables --table mangle --append {} --out-interface {} --protocol tcp ' + '--tcp-flags SYN,RST SYN --jump TCPMSS --clamp-mss-to-pmtu >&/dev/null'.format(target, intf)) + elif mss: call('ip6tables --table mangle --append {} --out-interface {} --protocol tcp ' - '--tcp-flags SYN,RST SYN --jump TCPMSS --set-mss {} >&/dev/null'.format(target, intf, mss)) + '--tcp-flags SYN,RST SYN --jump TCPMSS --set-mss {} >&/dev/null'.format(target, intf, mss)) return None diff --git a/src/conf_mode/policy-local-route.py b/src/conf_mode/policy-local-route.py index 013f22665..8a92bbc76 100755 --- a/src/conf_mode/policy-local-route.py +++ b/src/conf_mode/policy-local-route.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2020 VyOS maintainers and contributors +# Copyright (C) 2020-2021 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License 
version 2 or later as @@ -18,6 +18,7 @@ import os from sys import exit +from netifaces import interfaces from vyos.config import Config from vyos.configdict import dict_merge from vyos.configdict import node_changed @@ -35,26 +36,103 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['policy', 'local-route'] + base = ['policy'] + pbr = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - # delete policy local-route - dict = {} - tmp = node_changed(conf, ['policy', 'local-route', 'rule'], key_mangling=('-', '_')) - if tmp: - for rule in (tmp or []): - src = leaf_node_changed(conf, ['policy', 'local-route', 'rule', rule, 'source']) - if src: - dict = dict_merge({'rule_remove' : {rule : {'source' : src}}}, dict) + for route in ['local_route', 'local_route6']: + dict_id = 'rule_remove' if route == 'local_route' else 'rule6_remove' + route_key = 'local-route' if route == 'local_route' else 'local-route6' + base_rule = base + [route_key, 'rule'] + + # delete policy local-route + dict = {} + tmp = node_changed(conf, base_rule, key_mangling=('-', '_')) + if tmp: + for rule in (tmp or []): + src = leaf_node_changed(conf, base_rule + [rule, 'source']) + fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark']) + iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface']) + dst = leaf_node_changed(conf, base_rule + [rule, 'destination']) + table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table']) + rule_def = {} + if src: + rule_def = dict_merge({'source' : src}, rule_def) + if fwmk: + rule_def = dict_merge({'fwmark' : fwmk}, rule_def) + if iif: + rule_def = dict_merge({'inbound_interface' : iif}, rule_def) + if dst: + rule_def = dict_merge({'destination' : dst}, rule_def) + if table: + rule_def = dict_merge({'table' : table}, rule_def) + dict = dict_merge({dict_id : {rule : rule_def}}, dict) pbr.update(dict) - # delete policy local-route rule x source x.x.x.x - if 'rule' in pbr: - for rule in pbr['rule']: - src = leaf_node_changed(conf, ['policy', 'local-route', 'rule', rule, 'source']) - if src: - dict = dict_merge({'rule_remove' : {rule : {'source' : src}}}, dict) - pbr.update(dict) + if not route in pbr: + continue + + # delete policy local-route rule x source x.x.x.x + # delete policy local-route rule x fwmark x + # delete policy local-route rule x destination x.x.x.x + if 'rule' in pbr[route]: + for rule, rule_config in pbr[route]['rule'].items(): + src = leaf_node_changed(conf, base_rule + [rule, 'source']) + fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark']) + iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface']) + dst = leaf_node_changed(conf, base_rule + [rule, 'destination']) + table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table']) + # keep track of changes in configuration + # otherwise we might remove an existing node although nothing else has changed + changed = False + + rule_def = {} + # src is None if there are no changes to src + if src is None: + # if src hasn't changed, include it in the removal selector + # if a new selector is added, we have to remove all previous rules without this selector + # to make sure we remove all previous rules with this source(s), it will be included + if 'source' in rule_config: + rule_def = dict_merge({'source': rule_config['source']}, rule_def) + else: + # if src is not None, it's previous content will be returned + # this can be an empty array if it's just being set, or the previous value + # either way, something has to be changed and we 
only want to remove previous values + changed = True + # set the old value for removal if it's not empty + if len(src) > 0: + rule_def = dict_merge({'source' : src}, rule_def) + if fwmk is None: + if 'fwmark' in rule_config: + rule_def = dict_merge({'fwmark': [rule_config['fwmark']]}, rule_def) + else: + changed = True + if len(fwmk) > 0: + rule_def = dict_merge({'fwmark' : fwmk}, rule_def) + if iif is None: + if 'inbound_interface' in rule_config: + rule_def = dict_merge({'inbound_interface': [rule_config['inbound_interface']]}, rule_def) + else: + changed = True + if len(iif) > 0: + rule_def = dict_merge({'inbound_interface' : iif}, rule_def) + if dst is None: + if 'destination' in rule_config: + rule_def = dict_merge({'destination': rule_config['destination']}, rule_def) + else: + changed = True + if len(dst) > 0: + rule_def = dict_merge({'destination' : dst}, rule_def) + if table is None: + if 'set' in rule_config and 'table' in rule_config['set']: + rule_def = dict_merge({'table': [rule_config['set']['table']]}, rule_def) + else: + changed = True + if len(table) > 0: + rule_def = dict_merge({'table' : table}, rule_def) + if changed: + dict = dict_merge({dict_id : {rule : rule_def}}, dict) + pbr.update(dict) return pbr @@ -63,13 +141,25 @@ def verify(pbr): if not pbr: return None - if 'rule' in pbr: - for rule in pbr['rule']: - if 'source' not in pbr['rule'][rule]: - raise ConfigError('Source address required!') - else: - if 'set' not in pbr['rule'][rule] or 'table' not in pbr['rule'][rule]['set']: - raise ConfigError('Table set is required!') + for route in ['local_route', 'local_route6']: + if not route in pbr: + continue + + pbr_route = pbr[route] + if 'rule' in pbr_route: + for rule in pbr_route['rule']: + if 'source' not in pbr_route['rule'][rule] \ + and 'destination' not in pbr_route['rule'][rule] \ + and 'fwmark' not in pbr_route['rule'][rule] \ + and 'inbound_interface' not in pbr_route['rule'][rule]: + raise ConfigError('Source or destination address or fwmark or inbound-interface is required!') + else: + if 'set' not in pbr_route['rule'][rule] or 'table' not in pbr_route['rule'][rule]['set']: + raise ConfigError('Table set is required!') + if 'inbound_interface' in pbr_route['rule'][rule]: + interface = pbr_route['rule'][rule]['inbound_interface'] + if interface not in interfaces(): + raise ConfigError(f'Interface "{interface}" does not exist') return None @@ -84,18 +174,54 @@ def apply(pbr): return None # Delete old rule if needed - if 'rule_remove' in pbr: - for rule in pbr['rule_remove']: - for src in pbr['rule_remove'][rule]['source']: - call(f'ip rule del prio {rule} from {src}') + for rule_rm in ['rule_remove', 'rule6_remove']: + if rule_rm in pbr: + v6 = " -6" if rule_rm == 'rule6_remove' else "" + for rule, rule_config in pbr[rule_rm].items(): + rule_config['source'] = rule_config['source'] if 'source' in rule_config else [''] + for src in rule_config['source']: + f_src = '' if src == '' else f' from {src} ' + rule_config['destination'] = rule_config['destination'] if 'destination' in rule_config else [''] + for dst in rule_config['destination']: + f_dst = '' if dst == '' else f' to {dst} ' + rule_config['fwmark'] = rule_config['fwmark'] if 'fwmark' in rule_config else [''] + for fwmk in rule_config['fwmark']: + f_fwmk = '' if fwmk == '' else f' fwmark {fwmk} ' + rule_config['inbound_interface'] = rule_config['inbound_interface'] if 'inbound_interface' in rule_config else [''] + for iif in rule_config['inbound_interface']: + f_iif = '' if iif == '' else f' iif {iif} 
' + rule_config['table'] = rule_config['table'] if 'table' in rule_config else [''] + for table in rule_config['table']: + f_table = '' if table == '' else f' lookup {table} ' + call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_fwmk}{f_iif}{f_table}') # Generate new config - if 'rule' in pbr: - for rule in pbr['rule']: - table = pbr['rule'][rule]['set']['table'] - if pbr['rule'][rule]['source']: - for src in pbr['rule'][rule]['source']: - call(f'ip rule add prio {rule} from {src} lookup {table}') + for route in ['local_route', 'local_route6']: + if not route in pbr: + continue + + v6 = " -6" if route == 'local_route6' else "" + + pbr_route = pbr[route] + if 'rule' in pbr_route: + for rule, rule_config in pbr_route['rule'].items(): + table = rule_config['set']['table'] + + rule_config['source'] = rule_config['source'] if 'source' in rule_config else ['all'] + for src in rule_config['source'] or ['all']: + f_src = '' if src == '' else f' from {src} ' + rule_config['destination'] = rule_config['destination'] if 'destination' in rule_config else ['all'] + for dst in rule_config['destination']: + f_dst = '' if dst == '' else f' to {dst} ' + f_fwmk = '' + if 'fwmark' in rule_config: + fwmk = rule_config['fwmark'] + f_fwmk = f' fwmark {fwmk} ' + f_iif = '' + if 'inbound_interface' in rule_config: + iif = rule_config['inbound_interface'] + f_iif = f' iif {iif} ' + call(f'ip{v6} rule add prio {rule} {f_src}{f_dst}{f_fwmk}{f_iif} lookup {table}') return None diff --git a/src/conf_mode/service_webproxy.py b/src/conf_mode/service_webproxy.py index cbbd2e0bc..59c087aaa 100755 --- a/src/conf_mode/service_webproxy.py +++ b/src/conf_mode/service_webproxy.py @@ -23,12 +23,15 @@ from vyos.config import Config from vyos.configdict import dict_merge from vyos.template import render from vyos.util import call +from vyos.util import chmod_755 from vyos.util import dict_search from vyos.util import write_file from vyos.validate import is_addr_assigned from vyos.xml import defaults +from vyos.base import Warning from vyos import ConfigError from vyos import airbag + airbag.enable() squid_config_file = '/etc/squid/squid.conf' @@ -36,24 +39,56 @@ squidguard_config_file = '/etc/squidguard/squidGuard.conf' squidguard_db_dir = '/opt/vyatta/etc/config/url-filtering/squidguard/db' user_group = 'proxy' -def generate_sg_localdb(category, list_type, role, proxy): + +def check_blacklist_categorydb(config_section): + if 'block_category' in config_section: + for category in config_section['block_category']: + check_categorydb(category) + if 'allow_category' in config_section: + for category in config_section['allow_category']: + check_categorydb(category) + + +def check_categorydb(category: str): + """ + Check if category's db exist + :param category: + :type str: + """ + path_to_cat: str = f'{squidguard_db_dir}/{category}' + if not os.path.exists(f'{path_to_cat}/domains.db') \ + and not os.path.exists(f'{path_to_cat}/urls.db') \ + and not os.path.exists(f'{path_to_cat}/expressions.db'): + Warning(f'DB of category {category} does not exist.\n ' + f'Use [update webproxy blacklists] ' + f'or delete undefined category!') + + +def generate_sg_rule_localdb(category, list_type, role, proxy): + if not category or not list_type or not role: + return None cat_ = category.replace('-', '_') - if isinstance(dict_search(f'url_filtering.squidguard.{cat_}', proxy), - list): + if role == 'default': + path_to_cat = f'{cat_}' + else: + path_to_cat = f'rule.{role}.{cat_}' + if isinstance( + 
dict_search(f'url_filtering.squidguard.{path_to_cat}', proxy), + list): # local block databases must be generated "on-the-fly" tmp = { - 'squidguard_db_dir' : squidguard_db_dir, - 'category' : f'{category}-default', - 'list_type' : list_type, - 'rule' : role + 'squidguard_db_dir': squidguard_db_dir, + 'category': f'{category}-{role}', + 'list_type': list_type, + 'rule': role } sg_tmp_file = '/tmp/sg.conf' - db_file = f'{category}-default/{list_type}' - domains = '\n'.join(dict_search(f'url_filtering.squidguard.{cat_}', proxy)) - + db_file = f'{category}-{role}/{list_type}' + domains = '\n'.join( + dict_search(f'url_filtering.squidguard.{path_to_cat}', proxy)) # local file - write_file(f'{squidguard_db_dir}/{category}-default/local', '', + write_file(f'{squidguard_db_dir}/{category}-{role}/local', '', user=user_group, group=user_group) # database input file write_file(f'{squidguard_db_dir}/{db_file}', domains, @@ -63,17 +98,18 @@ def generate_sg_localdb(category, list_type, role, proxy): render(sg_tmp_file, 'squid/sg_acl.conf.tmpl', tmp, user=user_group, group=user_group) - call(f'su - {user_group} -c "squidGuard -d -c {sg_tmp_file} -C {db_file}"') + call( + f'su - {user_group} -c "squidGuard -d -c {sg_tmp_file} -C {db_file}"') if os.path.exists(sg_tmp_file): os.unlink(sg_tmp_file) - else: # if category is not part of our configuration, clean out the # squidguard lists - tmp = f'{squidguard_db_dir}/{category}-default' + tmp = f'{squidguard_db_dir}/{category}-{role}' if os.path.exists(tmp): - rmtree(f'{squidguard_db_dir}/{category}-default') + rmtree(f'{squidguard_db_dir}/{category}-{role}') + def get_config(config=None): if config: @@ -84,7 +120,8 @@ def get_config(config=None): if not conf.exists(base): return None - proxy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + proxy = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True) # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. 
default_values = defaults(base) @@ -109,10 +146,11 @@ def get_config(config=None): default_values = defaults(base + ['cache-peer']) for peer in proxy['cache_peer']: proxy['cache_peer'][peer] = dict_merge(default_values, - proxy['cache_peer'][peer]) + proxy['cache_peer'][peer]) return proxy + def verify(proxy): if not proxy: return None @@ -169,17 +207,30 @@ def generate(proxy): render(squidguard_config_file, 'squid/squidGuard.conf.tmpl', proxy) cat_dict = { - 'local-block' : 'domains', - 'local-block-keyword' : 'expressions', - 'local-block-url' : 'urls', - 'local-ok' : 'domains', - 'local-ok-url' : 'urls' + 'local-block': 'domains', + 'local-block-keyword': 'expressions', + 'local-block-url': 'urls', + 'local-ok': 'domains', + 'local-ok-url': 'urls' } - for category, list_type in cat_dict.items(): - generate_sg_localdb(category, list_type, 'default', proxy) + if dict_search(f'url_filtering.squidguard', proxy) is not None: + squidgard_config_section = proxy['url_filtering']['squidguard'] + + for category, list_type in cat_dict.items(): + generate_sg_rule_localdb(category, list_type, 'default', proxy) + check_blacklist_categorydb(squidgard_config_section) + + if 'rule' in squidgard_config_section: + for rule in squidgard_config_section['rule']: + rule_config_section = squidgard_config_section['rule'][ + rule] + for category, list_type in cat_dict.items(): + generate_sg_rule_localdb(category, list_type, rule, proxy) + check_blacklist_categorydb(rule_config_section) return None + def apply(proxy): if not proxy: # proxy is removed in the commit @@ -192,9 +243,12 @@ def apply(proxy): return None - call('systemctl restart squid.service') + if os.path.exists(squidguard_db_dir): + chmod_755(squidguard_db_dir) + call('systemctl reload-or-restart squid.service') return None + if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/system-option.py b/src/conf_mode/system-option.py index ddb91aeaf..a112c2b6f 100755 --- a/src/conf_mode/system-option.py +++ b/src/conf_mode/system-option.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019-2020 VyOS maintainers and contributors +# Copyright (C) 2019-2022 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -22,16 +22,18 @@ from time import sleep from vyos.config import Config from vyos.configdict import dict_merge +from vyos.configverify import verify_source_interface from vyos.template import render from vyos.util import cmd from vyos.validate import is_addr_assigned +from vyos.validate import is_intf_addr_assigned from vyos.xml import defaults from vyos import ConfigError from vyos import airbag airbag.enable() curlrc_config = r'/etc/curlrc' -ssh_config = r'/etc/ssh/ssh_config' +ssh_config = r'/etc/ssh/ssh_config.d/91-vyos-ssh-client-options.conf' systemd_action_file = '/lib/systemd/system/ctrl-alt-del.target' def get_config(config=None): @@ -67,8 +69,17 @@ def verify(options): if 'ssh_client' in options: config = options['ssh_client'] if 'source_address' in config: + address = config['source_address'] if not is_addr_assigned(config['source_address']): - raise ConfigError('No interface with give address specified!') + raise ConfigError('No interface with address "{address}" configured!') + + if 'source_interface' in config: + verify_source_interface(config) + if 'source_address' in config: + address = config['source_address'] + interface = config['source_interface'] + if not 
is_intf_addr_assigned(interface, address): + raise ConfigError(f'Address "{address}" not assigned on interface "{interface}"!') return None diff --git a/src/migration-scripts/container/0-to-1 b/src/migration-scripts/container/0-to-1 new file mode 100755 index 000000000..d0461389b --- /dev/null +++ b/src/migration-scripts/container/0-to-1 @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +# T4870: change underlaying container filesystem from vfs to overlay + +import os +import shutil +import sys + +from vyos.configtree import ConfigTree +from vyos.util import call + +if (len(sys.argv) < 1): + print("Must specify file name!") + sys.exit(1) + +file_name = sys.argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['container', 'name'] +config = ConfigTree(config_file) + +# Check if containers exist and we need to perform image manipulation +if config.exists(base): + for container in config.list_nodes(base): + # Stop any given container first + call(f'systemctl stop vyos-container-{container}.service') + # Export container image for later re-import to new filesystem. We store + # the backup on a real disk as a tmpfs (like /tmp) could probably lack + # memory if a host has too many containers stored. + image_name = config.return_value(base + [container, 'image']) + call(f'podman image save --quiet --output /root/{container}.tar --format oci-archive {image_name}') + +# No need to adjust the strage driver online (this is only used for testing and +# debugging on a live system) - it is already overlay2 when the migration script +# is run during system update. But the specified driver in the image is actually +# overwritten by the still present VFS filesystem on disk. Thus podman still +# thinks it uses VFS until we delete the libpod directory under: +# /usr/lib/live/mount/persistence/container/storage +#call('sed -i "s/vfs/overlay2/g" /etc/containers/storage.conf /usr/share/vyos/templates/container/storage.conf.j2') + +base_path = '/usr/lib/live/mount/persistence/container/storage' +for dir in ['libpod', 'vfs', 'vfs-containers', 'vfs-images', 'vfs-layers']: + if os.path.exists(f'{base_path}/{dir}'): + shutil.rmtree(f'{base_path}/{dir}') + +# Now all remaining information about VFS is gone and we operate in overlayfs2 +# filesystem mode. Time to re-import the images. 
+if config.exists(base): + for container in config.list_nodes(base): + # Export container image for later re-import to new filesystem + image_name = config.return_value(base + [container, 'image']) + image_path = f'/root/{container}.tar' + call(f'podman image load --quiet --input {image_path}') + + # Start any given container first + call(f'systemctl start vyos-container-{container}.service') + + # Delete temporary container image + if os.path.exists(image_path): + os.unlink(image_path) + diff --git a/src/op_mode/container.py b/src/op_mode/container.py new file mode 100755 index 000000000..ecefc556e --- /dev/null +++ b/src/op_mode/container.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import json +import sys + +from sys import exit + +from vyos.util import cmd + +import vyos.opmode + +def _get_json_data(command: str) -> list: + """ + Get container command format JSON + """ + return cmd(f'{command} --format json') + + +def _get_raw_data(command: str) -> list: + json_data = _get_json_data(command) + data = json.loads(json_data) + return data + + +def show_container(raw: bool): + command = 'podman ps --all' + container_data = _get_raw_data(command) + if raw: + return container_data + else: + return cmd(command) + + +def show_image(raw: bool): + command = 'podman image ls' + container_data = _get_raw_data('podman image ls') + if raw: + return container_data + else: + return cmd(command) + + +def show_network(raw: bool): + command = 'podman network ls' + container_data = _get_raw_data(command) + if raw: + return container_data + else: + return cmd(command) + + +def restart(name: str): + from vyos.util import rc_cmd + + rc, output = rc_cmd(f'systemctl restart vyos-container-{name}.service') + if rc != 0: + print(output) + return None + print(f'Container name "{name}" restarted!') + return output + + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/op_mode/show_ipsec_connections.py b/src/op_mode/show_ipsec_connections.py new file mode 100755 index 000000000..4ca8f8e51 --- /dev/null +++ b/src/op_mode/show_ipsec_connections.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import sys + +from vyos.util import convert_data + + +def _get_vici_sas(): + from vici import Session as vici_session + + try: + session = vici_session() + except PermissionError: + print("You do not have a permission to connect to the IPsec daemon") + sys.exit(1) + except ConnectionRefusedError: + print("IPsec is not runing") + sys.exit(1) + except Exception as e: + print("An error occured: {0}".format(e)) + sys.exit(1) + sas = list(session.list_sas()) + return convert_data(sas) + + +def _get_vici_connections(): + from vici import Session as vici_session + + try: + session = vici_session() + except PermissionError: + print("You do not have a permission to connect to the IPsec daemon") + sys.exit(1) + except ConnectionRefusedError: + print("IPsec is not runing") + sys.exit(1) + except Exception as e: + print("An error occured: {0}".format(e)) + sys.exit(1) + connections = list(session.list_conns()) + return convert_data(connections) + + +def _get_parent_sa_proposal(connection_name: str, data: list) -> dict: + """Get parent SA proposals by connection name + if connections not in the 'down' state + Args: + connection_name (str): Connection name + data (list): List of current SAs from vici + Returns: + str: Parent SA connection proposal + AES_CBC/256/HMAC_SHA2_256_128/MODP_1024 + """ + if not data: + return {} + for sa in data: + # check if parent SA exist + if connection_name not in sa.keys(): + return {} + if 'encr-alg' in sa[connection_name]: + encr_alg = sa.get(connection_name, '').get('encr-alg') + cipher = encr_alg.split('_')[0] + mode = encr_alg.split('_')[1] + encr_keysize = sa.get(connection_name, '').get('encr-keysize') + integ_alg = sa.get(connection_name, '').get('integ-alg') + # prf_alg = sa.get(connection_name, '').get('prf-alg') + dh_group = sa.get(connection_name, '').get('dh-group') + proposal = { + 'cipher': cipher, + 'mode': mode, + 'key_size': encr_keysize, + 'hash': integ_alg, + 'dh': dh_group + } + return proposal + return {} + + +def _get_parent_sa_state(connection_name: str, data: list) -> str: + """Get parent SA state by connection name + Args: + connection_name (str): Connection name + data (list): List of current SAs from vici + Returns: + Parent SA connection state + """ + if not data: + return 'down' + for sa in data: + # check if parent SA exist + if connection_name not in sa.keys(): + return 'down' + if sa[connection_name]['state'].lower() == 'established': + return 'up' + else: + return 'down' + + +def _get_child_sa_state(connection_name: str, tunnel_name: str, + data: list) -> str: + """Get child SA state by connection and tunnel name + Args: + connection_name (str): Connection name + tunnel_name (str): Tunnel name + data (list): List of current SAs from vici + Returns: + str: `up` if child SA state is 'installed' otherwise `down` + """ + if not data: + return 'down' + for sa in data: + # check if parent SA exist + if connection_name not in sa.keys(): + return 'down' + child_sas = sa[connection_name]['child-sas'] + # Get all child SA states + # there can be multiple SAs per tunnel + child_sa_states = [ + v['state'] for k, v in child_sas.items() if v['name'] == tunnel_name + ] + return 'up' if 'INSTALLED' in child_sa_states else 'down' + + +def _get_child_sa_info(connection_name: str, tunnel_name: str, + data: list) -> dict: + """Get child SA installed info by connection and tunnel name + Args: + connection_name (str): Connection name + tunnel_name (str): Tunnel name + data (list): List of current SAs from vici + Returns: + dict: Info of the child SA in the 
dictionary format + """ + for sa in data: + # check if parent SA exist + if connection_name not in sa.keys(): + return {} + child_sas = sa[connection_name]['child-sas'] + # Get all child SA data + # Skip temp SA name (first key), get only SA values as dict + # {'OFFICE-B-tunnel-0-46': {'name': 'OFFICE-B-tunnel-0'}...} + # i.e get all data after 'OFFICE-B-tunnel-0-46' + child_sa_info = [ + v for k, v in child_sas.items() if 'name' in v and + v['name'] == tunnel_name and v['state'] == 'INSTALLED' + ] + return child_sa_info[-1] if child_sa_info else {} + + +def _get_child_sa_proposal(child_sa_data: dict) -> dict: + if child_sa_data and 'encr-alg' in child_sa_data: + encr_alg = child_sa_data.get('encr-alg') + cipher = encr_alg.split('_')[0] + mode = encr_alg.split('_')[1] + key_size = child_sa_data.get('encr-keysize') + integ_alg = child_sa_data.get('integ-alg') + dh_group = child_sa_data.get('dh-group') + proposal = { + 'cipher': cipher, + 'mode': mode, + 'key_size': key_size, + 'hash': integ_alg, + 'dh': dh_group + } + return proposal + return {} + + +def _get_raw_data_connections(list_connections: list, list_sas: list) -> list: + """Get configured VPN IKE connections and IPsec states + Args: + list_connections (list): List of configured connections from vici + list_sas (list): List of current SAs from vici + Returns: + list: List and status of IKE/IPsec connections/tunnels + """ + base_dict = [] + for connections in list_connections: + base_list = {} + for connection, conn_conf in connections.items(): + base_list['ike_connection_name'] = connection + base_list['ike_connection_state'] = _get_parent_sa_state( + connection, list_sas) + base_list['ike_remote_address'] = conn_conf['remote_addrs'] + base_list['ike_proposal'] = _get_parent_sa_proposal( + connection, list_sas) + base_list['local_id'] = conn_conf.get('local-1', '').get('id') + base_list['remote_id'] = conn_conf.get('remote-1', '').get('id') + base_list['version'] = conn_conf.get('version', 'IKE') + base_list['children'] = [] + children = conn_conf['children'] + for tunnel, tun_options in children.items(): + state = _get_child_sa_state(connection, tunnel, list_sas) + local_ts = tun_options.get('local-ts') + remote_ts = tun_options.get('remote-ts') + dpd_action = tun_options.get('dpd_action') + close_action = tun_options.get('close_action') + sa_info = _get_child_sa_info(connection, tunnel, list_sas) + esp_proposal = _get_child_sa_proposal(sa_info) + base_list['children'].append({ + 'name': tunnel, + 'state': state, + 'local_ts': local_ts, + 'remote_ts': remote_ts, + 'dpd_action': dpd_action, + 'close_action': close_action, + 'esp_proposal': esp_proposal + }) + base_dict.append(base_list) + return base_dict + + +def _get_formatted_output_conections(data): + from tabulate import tabulate + data_entries = '' + connections = [] + for entry in data: + tunnels = [] + ike_name = entry['ike_connection_name'] + ike_state = entry['ike_connection_state'] + conn_type = entry.get('version', 'IKE') + remote_addrs = ','.join(entry['ike_remote_address']) + local_ts, remote_ts = '-', '-' + local_id = entry['local_id'] + remote_id = entry['remote_id'] + proposal = '-' + if entry.get('ike_proposal'): + proposal = (f'{entry["ike_proposal"]["cipher"]}_' + f'{entry["ike_proposal"]["mode"]}/' + f'{entry["ike_proposal"]["key_size"]}/' + f'{entry["ike_proposal"]["hash"]}/' + f'{entry["ike_proposal"]["dh"]}') + connections.append([ + ike_name, ike_state, conn_type, remote_addrs, local_ts, remote_ts, + local_id, remote_id, proposal + ]) + for tun in 
entry['children']: + tun_name = tun.get('name') + tun_state = tun.get('state') + conn_type = 'IPsec' + local_ts = '\n'.join(tun.get('local_ts')) + remote_ts = '\n'.join(tun.get('remote_ts')) + proposal = '-' + if tun.get('esp_proposal'): + proposal = (f'{tun["esp_proposal"]["cipher"]}_' + f'{tun["esp_proposal"]["mode"]}/' + f'{tun["esp_proposal"]["key_size"]}/' + f'{tun["esp_proposal"]["hash"]}/' + f'{tun["esp_proposal"]["dh"]}') + connections.append([ + tun_name, tun_state, conn_type, remote_addrs, local_ts, + remote_ts, local_id, remote_id, proposal + ]) + connection_headers = [ + 'Connection', 'State', 'Type', 'Remote address', 'Local TS', + 'Remote TS', 'Local id', 'Remote id', 'Proposal' + ] + output = tabulate(connections, connection_headers, numalign='left') + return output + + +def main(): + list_conns = _get_vici_connections() + list_sas = _get_vici_sas() + connections = _get_raw_data_connections(list_conns, list_sas) + return _get_formatted_output_conections(connections) + + +if __name__ == '__main__': + print(main()) diff --git a/src/op_mode/webproxy_update_blacklist.sh b/src/op_mode/webproxy_update_blacklist.sh index 43a4b79fc..4fb9a54c6 100755 --- a/src/op_mode/webproxy_update_blacklist.sh +++ b/src/op_mode/webproxy_update_blacklist.sh @@ -18,6 +18,23 @@ blacklist_url='ftp://ftp.univ-tlse1.fr/pub/reseau/cache/squidguard_contrib/black data_dir="/opt/vyatta/etc/config/url-filtering" archive="${data_dir}/squidguard/archive" db_dir="${data_dir}/squidguard/db" +conf_file="/etc/squidguard/squidGuard.conf" +tmp_conf_file="/tmp/sg_update_db.conf" + +#$1-category +#$2-type +#$3-list +create_sg_db () +{ + FILE=$db_dir/$1/$2 + if test -f "$FILE"; then + rm -f ${tmp_conf_file} + printf "dbhome $db_dir\ndest $1 {\n $3 $1/$2\n}\nacl {\n default {\n pass any\n }\n}" >> ${tmp_conf_file} + /usr/bin/squidGuard -b -c ${tmp_conf_file} -C $FILE + rm -f ${tmp_conf_file} + fi + +} while [ $# -gt 0 ] do @@ -88,7 +105,17 @@ if [[ -n $update ]] && [[ $update -eq "yes" ]]; then # fix permissions chown -R proxy:proxy ${db_dir} - chmod 2770 ${db_dir} + + #create db + category_list=(`find $db_dir -type d -exec basename {} \; `) + for category in ${category_list[@]} + do + create_sg_db $category "domains" "domainlist" + create_sg_db $category "urls" "urllist" + create_sg_db $category "expressions" "expressionlist" + done + chown -R proxy:proxy ${db_dir} + chmod 755 ${db_dir} logger --priority WARNING "webproxy blacklist entries updated (${count_before}/${count_after})" |
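
Editor's note, not part of the commit: the new src/op_mode/container.py above follows a raw/formatted split, where "raw" returns structured data parsed from podman's JSON output and the default path returns podman's own human-readable table. The following is a minimal, standard-library-only sketch of that pattern, assuming podman is installed and that the wrapped subcommand accepts "--format json"; function names here are illustrative, not the module's API.

    #!/usr/bin/env python3
    # Sketch of the raw/formatted output pattern used by the op-mode script above.
    import json
    import subprocess

    def get_raw(command: str) -> list:
        # Ask podman for machine-readable output and decode it into Python objects.
        result = subprocess.run(f'{command} --format json', shell=True,
                                capture_output=True, text=True, check=True)
        return json.loads(result.stdout)

    def show_container(raw: bool = False):
        command = 'podman ps --all'
        if raw:
            # Structured data, suitable for an API or op-mode framework consumer.
            return get_raw(command)
        # Otherwise fall back to podman's own human-readable table.
        return subprocess.run(command, shell=True, capture_output=True,
                              text=True, check=True).stdout

    if __name__ == '__main__':
        print(show_container(raw=False))

In the actual commit the shelling-out is done through vyos.util.cmd and the result is dispatched by vyos.opmode.run(); the sketch only shows why the same podman command is issued twice, once for JSON and once for display.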