Diffstat (limited to 'src'): 46 files changed, 2378 insertions(+), 801 deletions(-)
diff --git a/src/completion/list_bgp_neighbors.sh b/src/completion/list_bgp_neighbors.sh index 77c626452..f74f102ef 100755 --- a/src/completion/list_bgp_neighbors.sh +++ b/src/completion/list_bgp_neighbors.sh @@ -30,8 +30,7 @@ while [[ "$#" -gt 0 ]]; do done declare -a vals -eval "bgp_as=$(cli-shell-api listActiveNodes protocols bgp)" -eval "vals=($(cli-shell-api listActiveNodes protocols bgp $bgp_as neighbor))" +eval "vals=($(cli-shell-api listActiveNodes protocols bgp neighbor))" if [ $ipv4 -eq 1 ] && [ $ipv6 -eq 1 ]; then echo -n '<x.x.x.x>' '<h:h:h:h:h:h:h:h>' ${vals[@]} diff --git a/src/conf_mode/containers.py b/src/conf_mode/containers.py new file mode 100755 index 000000000..9b7a52d26 --- /dev/null +++ b/src/conf_mode/containers.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os +import json + +from ipaddress import ip_address +from ipaddress import ip_network + +from vyos.config import Config +from vyos.configdict import dict_merge +from vyos.configdict import node_changed +from vyos.util import cmd +from vyos.util import popen +from vyos.template import render +from vyos.template import is_ipv4 +from vyos.template import is_ipv6 +from vyos.xml import defaults +from vyos import ConfigError +from vyos import airbag +airbag.enable() + +config_containers_registry = '/etc/containers/registries.conf' +config_containers_storage = '/etc/containers/storage.conf' + +def _cmd(command): + if os.path.exists('/tmp/vyos.container.debug'): + print(command) + return cmd(command) + +# Container management functions +def container_exists(name): + ''' + https://docs.podman.io/en/latest/_static/api.html#operation/ContainerExistsLibpod + Check if container exists. Response codes. + 204 - container exists + 404 - no such container + ''' + tmp = _cmd(f"curl --unix-socket /run/podman/podman.sock 'http://d/v3.0.0/libpod/containers/{name}/exists'") + # If container exists it return status code "0" - code can not be displayed + return (tmp == "") + +def container_status(name): + ''' + https://docs.podman.io/en/latest/_static/api.html#operation/ContainerInspectLibpod + ''' + tmp = _cmd(f"curl --unix-socket /run/podman/podman.sock 'http://d/v3.0.0/libpod/containers/{name}/json'") + data = json.loads(tmp) + return data['State']['Status'] + +def ctnr_network_exists(name): + # Check explicit name for network, returns True if network exists + c = _cmd(f'podman network ls --quiet --filter name=^{name}$') + return bool(c) + +# Common functions +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + + base = ['container'] + container = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True) + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. 
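The comment above describes folding the XML-defined default values into the dict returned by get_config_dict(); the defaults(base)/dict_merge() calls that follow do exactly that. A minimal sketch of the merge semantics in plain Python (this is not the actual vyos.configdict helpers, and the sample values are made up):

from copy import deepcopy

def merge_defaults(default_values: dict, config: dict) -> dict:
    # defaults go underneath, explicitly configured values win
    merged = deepcopy(default_values)
    for key, value in config.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_defaults(merged[key], value)
        else:
            merged[key] = value
    return merged

defaults_dict = {'registry': {'docker.io': {}}}          # hypothetical defaults
cli_dict      = {'name': {'c1': {'image': 'busybox'}}}   # hypothetical CLI dict
print(merge_defaults(defaults_dict, cli_dict))
# {'registry': {'docker.io': {}}, 'name': {'c1': {'image': 'busybox'}}}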
+ default_values = defaults(base) + container = dict_merge(default_values, container) + + # Delete container network, delete containers + tmp = node_changed(conf, ['container', 'network']) + if tmp: container.update({'net_remove' : tmp}) + + tmp = node_changed(conf, ['container', 'name']) + if tmp: container.update({'container_remove' : tmp}) + + return container + +def verify(container): + # bail out early - looks like removal from running config + if not container: + return None + + # Add new container + if 'name' in container: + for name, container_config in container['name'].items(): + if 'network' in container_config: + if len(container_config['network']) > 1: + raise ConfigError(f'Only one network can be specified for container "{name}"!') + + + # Check if the specified container network exists + network_name = list(container_config['network'])[0] + if network_name not in container['network']: + raise ConfigError('Container network "{network_name}" does not exist!') + + if 'address' in container_config['network'][network_name]: + if 'network' not in container_config: + raise ConfigError(f'Can not use "address" without "network" for container "{name}"!') + + address = container_config['network'][network_name]['address'] + network = None + if is_ipv4(address): + network = [x for x in container['network'][network_name]['prefix'] if is_ipv4(x)][0] + elif is_ipv6(address): + network = [x for x in container['network'][network_name]['prefix'] if is_ipv6(x)][0] + + # Specified container IP address must belong to network prefix + if ip_address(address) not in ip_network(network): + raise ConfigError(f'Used container address "{address}" not in network "{network}"!') + + # We can not use the first IP address of a network prefix as this is used by podman + if ip_address(address) == ip_network(network)[1]: + raise ConfigError(f'Address "{address}" reserved for the container engine!') + + + # Container image is a mandatory option + if 'image' not in container_config: + raise ConfigError(f'Container image for "{name}" is mandatory!') + + # If 'allow-host-networks' or 'network' not set. 
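The address validation above is plain ipaddress arithmetic: a statically assigned container address has to sit inside the prefix of the attached network, and the first host address of that prefix is reserved for the container engine. A standalone sketch of those two rules (addresses are example values):

from ipaddress import ip_address, ip_network

def check_container_address(address: str, prefix: str) -> None:
    net  = ip_network(prefix)
    addr = ip_address(address)
    if addr not in net:
        raise ValueError(f'Used container address "{address}" not in network "{prefix}"!')
    if addr == net[1]:
        raise ValueError(f'Address "{address}" reserved for the container engine!')

check_container_address('10.0.0.5', '10.0.0.0/24')    # passes
# check_container_address('10.0.0.1', '10.0.0.0/24')  # raises: first host belongs to the engine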
+ if 'allow_host_networks' not in container_config and 'network' not in container_config: + raise ConfigError(f'Must either set "network" or "allow-host-networks" for container "{name}"!') + + # Can not set both allow-host-networks and network at the same time + if {'allow_host_networks', 'network'} <= set(container_config): + raise ConfigError(f'"allow-host-networks" and "network" for "{name}" cannot be both configured at the same time!') + + # Add new network + if 'network' in container: + v4_prefix = 0 + v6_prefix = 0 + for network, network_config in container['network'].items(): + # If ipv4-prefix not defined for user-defined network + if 'prefix' not in network_config: + raise ConfigError(f'prefix for network "{net}" must be defined!') + + for prefix in network_config['prefix']: + if is_ipv4(prefix): v4_prefix += 1 + elif is_ipv6(prefix): v6_prefix += 1 + + if v4_prefix > 1: + raise ConfigError(f'Only one IPv4 prefix can be defined for network "{network}"!') + if v6_prefix > 1: + raise ConfigError(f'Only one IPv6 prefix can be defined for network "{network}"!') + + + # A network attached to a container can not be deleted + if {'net_remove', 'name'} <= set(container): + for network in container['net_remove']: + for container, container_config in container['name'].items(): + if 'network' in container_config and network in container_config['network']: + raise ConfigError(f'Can not remove network "{network}", used by container "{container}"!') + + return None + +def generate(container): + # bail out early - looks like removal from running config + if not container: + return None + + render(config_containers_registry, 'containers/registry.tmpl', container) + render(config_containers_storage, 'containers/storage.tmpl', container) + + return None + +def apply(container): + # Delete old containers if needed. We can't delete running container + # Option "--force" allows to delete containers with any status + if 'container_remove' in container: + for name in container['container_remove']: + if container_status(name) == 'running': + _cmd(f'podman stop {name}') + _cmd(f'podman rm --force {name}') + + # Delete old networks if needed + if 'net_remove' in container: + for network in container['net_remove']: + _cmd(f'podman network rm {network}') + + # Add network + if 'network' in container: + for network, network_config in container['network'].items(): + # Check if the network has already been created + if not ctnr_network_exists(network) and 'prefix' in network_config: + tmp = f'podman network create {network}' + # we can not use list comprehension here as the --ipv6 option + # must immediately follow the specified subnet!!! 
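The loop that follows builds the podman network create command; the only ordering constraint is that --ipv6 must directly follow the IPv6 subnet it belongs to, which is why a plain loop is used instead of a list comprehension. The same assembly as an argument list (network name and prefixes are example values, and ipaddress stands in for vyos.template.is_ipv6):

from ipaddress import ip_network

def network_create_args(name: str, prefixes: list) -> list:
    args = ['podman', 'network', 'create', name]
    for prefix in sorted(prefixes):
        args.append(f'--subnet={prefix}')
        if ip_network(prefix).version == 6:
            args.append('--ipv6')
    return args

print(network_create_args('NET01', ['192.0.2.0/25', '2001:db8::/64']))
# ['podman', 'network', 'create', 'NET01', '--subnet=192.0.2.0/25', '--subnet=2001:db8::/64', '--ipv6']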
+ for prefix in sorted(network_config['prefix']): + tmp += f' --subnet={prefix}' + if is_ipv6(prefix): + tmp += ' --ipv6' + _cmd(tmp) + + # Add container + if 'name' in container: + for name, container_config in container['name'].items(): + # Check if the container has already been created + if not container_exists(name): + image = container_config['image'] + # Currently the best way to run a command and immediately print stdout + print(os.system(f'podman pull {image}')) + + # Check/set environment options "-e foo=bar" + env_opt = '' + if 'environment' in container_config: + env_opt = '-e ' + env_opt += " -e ".join(f"{k}={v['value']}" for k, v in container_config['environment'].items()) + + if 'allow_host_networks' in container_config: + _cmd(f'podman run -dit --name {name} --net host {env_opt} {image}') + else: + for network in container_config['network']: + ipparam = '' + if 'address' in container_config['network'][network]: + ipparam = '--ip ' + container_config['network'][network]['address'] + _cmd(f'podman run --name {name} -dit --net {network} {ipparam} {env_opt} {image}') + + # Else container is already created. Just start it. + # It's needed after reboot. + elif container_status(name) != 'running': + _cmd(f'podman start {name}') + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/dynamic_dns.py b/src/conf_mode/dynamic_dns.py index 6d39c6644..c979feca7 100755 --- a/src/conf_mode/dynamic_dns.py +++ b/src/conf_mode/dynamic_dns.py @@ -114,7 +114,7 @@ def verify(dyndns): raise ConfigError(f'"password" {error_msg}') if 'zone' in config: - if service != 'cloudflare': + if service != 'cloudflare' and ('protocol' not in config or config['protocol'] != 'cloudflare'): raise ConfigError(f'"zone" option only supported with CloudFlare') if 'custom' in config: diff --git a/src/conf_mode/http-api.py b/src/conf_mode/http-api.py index 472eb77e4..7e4b117c8 100755 --- a/src/conf_mode/http-api.py +++ b/src/conf_mode/http-api.py @@ -19,6 +19,7 @@ import sys import os import json +import time from copy import deepcopy import vyos.defaults @@ -34,11 +35,6 @@ config_file = '/etc/vyos/http-api.conf' vyos_conf_scripts_dir=vyos.defaults.directories['conf_mode'] -# XXX: this model will need to be extended for tag nodes -dependencies = [ - 'https.py', -] - def get_config(config=None): http_api = deepcopy(vyos.defaults.api_data) x = http_api.get('api_keys') @@ -103,8 +99,10 @@ def apply(http_api): else: call('systemctl stop vyos-http-api.service') - for dep in dependencies: - cmd(f'{vyos_conf_scripts_dir}/{dep}', raising=ConfigError) + # Let uvicorn settle before restarting Nginx + time.sleep(2) + + cmd(f'{vyos_conf_scripts_dir}/https.py', raising=ConfigError) if __name__ == '__main__': try: diff --git a/src/conf_mode/interfaces-bridge.py b/src/conf_mode/interfaces-bridge.py index fd4ffed9a..4d3ebc587 100755 --- a/src/conf_mode/interfaces-bridge.py +++ b/src/conf_mode/interfaces-bridge.py @@ -18,7 +18,6 @@ import os from sys import exit from netifaces import interfaces -import re from vyos.config import Config from vyos.configdict import get_interface_dict diff --git a/src/conf_mode/interfaces-erspan.py b/src/conf_mode/interfaces-erspan.py deleted file mode 100755 index 97ae3cf55..000000000 --- a/src/conf_mode/interfaces-erspan.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2018-2020 VyOS maintainers and contributors -# -# This program is free 
software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -import os - -from sys import exit -from copy import deepcopy -from netifaces import interfaces - -from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import get_interface_dict -from vyos.configdict import node_changed -from vyos.configdict import leaf_node_changed -from vyos.configverify import verify_mtu_ipv6 -from vyos.configverify import verify_tunnel -from vyos.ifconfig import Interface -from vyos.ifconfig import ERSpanIf -from vyos.ifconfig import ER6SpanIf -from vyos.template import is_ipv4 -from vyos.template import is_ipv6 -from vyos.util import dict_search -from vyos import ConfigError -from vyos import airbag -airbag.enable() - -def get_config(config=None): - """ - Retrive CLI config as dictionary. Dictionary can never be empty, as at least - the interface name will be added or a deleted flag - """ - if config: - conf = config - else: - conf = Config() - base = ['interfaces', 'erspan'] - erspan = get_interface_dict(conf, base) - - tmp = leaf_node_changed(conf, ['encapsulation']) - if tmp: - erspan.update({'encapsulation_changed': {}}) - - return erspan - -def verify(erspan): - if 'deleted' in erspan: - return None - - if 'encapsulation' not in erspan: - raise ConfigError('Unable to detect the following ERSPAN tunnel encapsulation'\ - '{ifname}!'.format(**erspan)) - - verify_mtu_ipv6(erspan) - verify_tunnel(erspan) - - key = dict_search('parameters.ip.key',erspan) - if key == None: - raise ConfigError('parameters.ip.key is mandatory for ERSPAN tunnel') - - -def generate(erspan): - return None - -def apply(erspan): - if 'deleted' in erspan or 'encapsulation_changed' in erspan: - if erspan['ifname'] in interfaces(): - tmp = Interface(erspan['ifname']) - tmp.remove() - if 'deleted' in erspan: - return None - - dispatch = { - 'erspan': ERSpanIf, - 'ip6erspan': ER6SpanIf - } - - # We need to re-map the tunnel encapsulation proto to a valid interface class - encap = erspan['encapsulation'] - klass = dispatch[encap] - - erspan_tunnel = klass(**erspan) - erspan_tunnel.change_options() - erspan_tunnel.update(erspan) - - return None - -if __name__ == '__main__': - try: - c = get_config() - generate(c) - verify(c) - apply(c) - except ConfigError as e: - print(e) - exit(1) diff --git a/src/conf_mode/interfaces-tunnel.py b/src/conf_mode/interfaces-tunnel.py index b63312750..4e6c8a9ab 100755 --- a/src/conf_mode/interfaces-tunnel.py +++ b/src/conf_mode/interfaces-tunnel.py @@ -34,7 +34,7 @@ from vyos.ifconfig import Interface from vyos.ifconfig import TunnelIf from vyos.template import is_ipv4 from vyos.template import is_ipv6 -from vyos.util import get_json_iface_options +from vyos.util import get_interface_config from vyos.util import dict_search from vyos import ConfigError from vyos import airbag @@ -61,6 +61,9 @@ def get_config(config=None): nhrp = conf.get_config_dict([], key_mangling=('-', '_'), get_first_key=True) if nhrp: tunnel.update({'nhrp' : list(nhrp.keys())}) + if 'encapsulation' in 
tunnel and tunnel['encapsulation'] not in ['erspan', 'ip6erspan']: + del tunnel['parameters']['erspan'] + return tunnel def verify(tunnel): @@ -72,14 +75,28 @@ def verify(tunnel): return None - if 'encapsulation' not in tunnel: - error = 'Must configure encapsulation for "{ifname}"!' - raise ConfigError(error.format(**tunnel)) + verify_tunnel(tunnel) + + if tunnel['encapsulation'] in ['erspan', 'ip6erspan']: + if dict_search('parameters.ip.key', tunnel) == None: + raise ConfigError('ERSPAN requires ip key parameter!') + + # this is a default field + ver = int(tunnel['parameters']['erspan']['version']) + if ver == 1: + if 'hw_id' in tunnel['parameters']['erspan']: + raise ConfigError('ERSPAN version 1 does not support hw-id!') + if 'direction' in tunnel['parameters']['erspan']: + raise ConfigError('ERSPAN version 1 does not support direction!') + elif ver == 2: + if 'idx' in tunnel['parameters']['erspan']: + raise ConfigError('ERSPAN version 2 does not index parameter!') + if 'direction' not in tunnel['parameters']['erspan']: + raise ConfigError('ERSPAN version 2 requires direction to be set!') verify_mtu_ipv6(tunnel) verify_address(tunnel) verify_vrf(tunnel) - verify_tunnel(tunnel) if 'source_interface' in tunnel: verify_interface_exists(tunnel['source_interface']) @@ -92,7 +109,6 @@ def verify(tunnel): if tunnel['encapsulation'] in ['ipip6', 'ip6ip6', 'ip6gre']: raise ConfigError('Can not disable PMTU discovery for given encapsulation') - def generate(tunnel): return None @@ -103,13 +119,13 @@ def apply(tunnel): # There is no other solution to destroy and recreate the tunnel. encap = '' remote = '' - tmp = get_json_iface_options(interface) + tmp = get_interface_config(interface) if tmp: encap = dict_search('linkinfo.info_kind', tmp) remote = dict_search('linkinfo.info_data.remote', tmp) - if ('deleted' in tunnel or 'encapsulation_changed' in tunnel or - encap in ['gretap', 'ip6gretap'] or remote in ['any']): + if ('deleted' in tunnel or 'encapsulation_changed' in tunnel or encap in + ['gretap', 'ip6gretap', 'erspan', 'ip6erspan'] or remote in ['any']): if interface in interfaces(): tmp = Interface(interface) tmp.remove() diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py index ce1db316c..e2bd6417d 100755 --- a/src/conf_mode/nat66.py +++ b/src/conf_mode/nat66.py @@ -28,7 +28,6 @@ from vyos.util import cmd from vyos.util import check_kmod from vyos.util import dict_search from vyos.template import is_ipv6 -from vyos.template import is_ip_network from vyos.xml import defaults from vyos import ConfigError from vyos import airbag @@ -80,8 +79,10 @@ def get_config(config=None): if not conf.exists(base): nat['helper_functions'] = 'remove' + nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_HELPER') nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'NAT_CONNTRACK') - nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT','NAT_CONNTRACK') + nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_HELPER') + nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'NAT_CONNTRACK') nat['deleted'] = '' return nat @@ -91,8 +92,10 @@ def get_config(config=None): nat['helper_functions'] = 'add' # Retrieve current table handler positions + nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_IGNORE') nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_PREROUTING_HOOK') - nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT','VYATTA_CT_OUTPUT_HOOK') + 
nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_IGNORE') + nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_OUTPUT_HOOK') else: nat['helper_functions'] = 'has' diff --git a/src/conf_mode/policy.py b/src/conf_mode/policy.py new file mode 100755 index 000000000..f0348fe06 --- /dev/null +++ b/src/conf_mode/policy.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +from sys import exit + +from vyos.config import Config +from vyos.configdict import dict_merge +from vyos.template import render_to_string +from vyos.util import dict_search +from vyos import ConfigError +from vyos import frr +from vyos import airbag +airbag.enable() + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + + base = ['policy'] + policy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + return policy + +def verify(policy): + if not policy: + return None + + for policy_type in ['access_list', 'access_list6', 'as_path_list', + 'community_list', 'extcommunity_list', 'large_community_list', + 'prefix_list', 'prefix_list6', 'route_map']: + # Bail out early and continue with next policy type + if policy_type not in policy: + continue + + # instance can be an ACL name/number, prefix-list name or route-map name + for instance, instance_config in policy[policy_type].items(): + # If no rule was found within the instance ... sad, but we can leave + # early as nothing needs to be verified + if 'rule' not in instance_config: + continue + + # human readable instance name (hypen instead of underscore) + policy_hr = policy_type.replace('_', '-') + for rule, rule_config in instance_config['rule'].items(): + mandatory_error = f'must be specified for "{policy_hr} {instance} rule {rule}"!' 
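The rule checks that follow reuse the message fragment above for every mandatory field. The logic can be read as a small table of required fields per policy type; a simplified, data-driven sketch (it deliberately leaves out the numbered access-list destination rule handled separately below):

MANDATORY = {                      # simplified field table for illustration only
    'access_list':  ['action', 'source'],
    'access_list6': ['action', 'source'],
    'as_path_list': ['action', 'regex'],
    'route_map':    ['action'],
}

def check_rule(policy_type: str, instance: str, rule: str, rule_config: dict) -> None:
    policy_hr = policy_type.replace('_', '-')
    for field in MANDATORY.get(policy_type, ['action']):
        if field not in rule_config:
            raise ValueError(f'{field} must be specified for "{policy_hr} {instance} rule {rule}"!')

check_rule('access_list', '100', '10', {'action': 'permit', 'source': {'any': {}}})   # passes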
+ if 'action' not in rule_config: + raise ConfigError(f'Action {mandatory_error}') + + if policy_type == 'access_list': + if 'source' not in rule_config: + raise ConfigError(f'Source {mandatory_error}') + + if int(instance) in range(100, 200) or int(instance) in range(2000, 2700): + if 'destination' not in rule_config: + raise ConfigError(f'Destination {mandatory_error}') + + if policy_type == 'access_list6': + if 'source' not in rule_config: + raise ConfigError(f'Source {mandatory_error}') + + if policy_type in ['as_path_list', 'community_list', 'extcommunity_list', + 'large_community_list']: + if 'regex' not in rule_config: + raise ConfigError(f'Regex {mandatory_error}') + + + # route-maps tend to be a bit more complex so they get their own verify() section + if 'route_map' in policy: + for route_map, route_map_config in policy['route_map'].items(): + if 'rule' not in route_map_config: + continue + + for rule, rule_config in route_map_config['rule'].items(): + # Specified community-list must exist + tmp = dict_search('match.community.community_list', rule_config) + if tmp and tmp not in policy.get('community_list', []): + raise ConfigError(f'community-list {tmp} does not exist!') + + # Specified extended community-list must exist + tmp = dict_search('match.extcommunity', rule_config) + if tmp and tmp not in policy.get('extcommunity_list', []): + raise ConfigError(f'extcommunity-list {tmp} does not exist!') + + # Specified large-community-list must exist + tmp = dict_search('match.large_community.large_community_list', rule_config) + if tmp and tmp not in policy.get('large_community_list', []): + raise ConfigError(f'large-community-list {tmp} does not exist!') + + return None + +def generate(policy): + if not policy: + policy['new_frr_config'] = '' + return None + + policy['new_frr_config'] = render_to_string('frr/policy.frr.tmpl', policy) + return None + +def apply(policy): + bgp_daemon = 'bgpd' + zebra_daemon = 'zebra' + + # Save original configuration prior to starting any commit actions + frr_cfg = frr.FRRConfig() + + # The route-map used for the FIB (zebra) is part of the zebra daemon + frr_cfg.load_configuration(bgp_daemon) + frr_cfg.modify_section(r'^bgp as-path access-list .*') + frr_cfg.modify_section(r'^bgp community-list .*') + frr_cfg.modify_section(r'^bgp extcommunity-list .*') + frr_cfg.modify_section(r'^bgp large-community-list .*') + frr_cfg.add_before('^line vty', policy['new_frr_config']) + frr_cfg.commit_configuration(bgp_daemon) + + # The route-map used for the FIB (zebra) is part of the zebra daemon + frr_cfg.load_configuration(zebra_daemon) + frr_cfg.modify_section(r'^access-list .*') + frr_cfg.modify_section(r'^ipv6 access-list .*') + frr_cfg.modify_section(r'^ip prefix-list .*') + frr_cfg.modify_section(r'^ipv6 prefix-list .*') + frr_cfg.modify_section(r'^route-map .*') + frr_cfg.add_before('^line vty', policy['new_frr_config']) + frr_cfg.commit_configuration(zebra_daemon) + + # If FRR config is blank, rerun the blank commit x times due to frr-reload + # behavior/bug not properly clearing out on one commit. 
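The route-map cross-references earlier in this script are resolved with vyos.util.dict_search() and a dotted path such as 'match.community.community_list'. A minimal stand-in that shows the expected lookup behaviour (not the actual vyos.util implementation):

def dotted_search(path: str, data: dict):
    # walk the nested dict one path component at a time; None if anything is missing
    current = data
    for part in path.split('.'):
        if not isinstance(current, dict) or part not in current:
            return None
        current = current[part]
    return current

rule = {'match': {'community': {'community_list': 'AS100-IN'}}}
print(dotted_search('match.community.community_list', rule))   # AS100-IN
print(dotted_search('match.extcommunity', rule))                # None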
+ if policy['new_frr_config'] == '': + for a in range(5): + frr_cfg.commit_configuration(zebra_daemon) + + # Save configuration to /run/frr/config/frr.conf + frr.save_configuration() + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py index a43eed504..dd70d6bab 100755 --- a/src/conf_mode/protocols_bfd.py +++ b/src/conf_mode/protocols_bfd.py @@ -22,7 +22,6 @@ from vyos.config import Config from vyos.configdict import dict_merge from vyos.template import is_ipv6 from vyos.template import render_to_string -from vyos.util import call from vyos.validate import is_ipv6_link_local from vyos.xml import defaults from vyos import ConfigError diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py index 7dede74a1..2bdeb5bcc 100755 --- a/src/conf_mode/protocols_bgp.py +++ b/src/conf_mode/protocols_bgp.py @@ -17,12 +17,15 @@ import os from sys import exit +from sys import argv from vyos.config import Config from vyos.configdict import dict_merge +from vyos.configverify import verify_prefix_list +from vyos.configverify import verify_route_map from vyos.template import is_ip +from vyos.template import is_interface from vyos.template import render_to_string -from vyos.util import call from vyos.util import dict_search from vyos.validate import is_addr_assigned from vyos import ConfigError @@ -30,39 +33,49 @@ from vyos import frr from vyos import airbag airbag.enable() -frr_daemon = 'bgpd' - def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'bgp'] + + vrf = None + if len(argv) > 1: + vrf = argv[1] + + base_path = ['protocols', 'bgp'] + + # eqivalent of the C foo ? 'a' : 'b' statement + base = vrf and ['vrf', 'name', vrf, 'protocols', 'bgp'] or base_path bgp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - # Bail out early if configuration tree does not exist + # Assign the name of our VRF context. This MUST be done before the return + # statement below, else on deletion we will delete the default instance + # instead of the VRF instance. + if vrf: bgp.update({'vrf' : vrf}) + if not conf.exists(base): + bgp.update({'deleted' : ''}) return bgp - # We also need some additional information from the config, - # prefix-lists and route-maps for instance. - base = ['policy'] - tmp = conf.get_config_dict(base, key_mangling=('-', '_')) - # As we only support one ASN (later checked in begin of verify()) we add the - # new information only to the first AS number - asn = next(iter(bgp)) - # Merge policy dict into bgp dict - bgp[asn] = dict_merge(tmp, bgp[asn]) + # We also need some additional information from the config, prefix-lists + # and route-maps for instance. They will be used in verify(). + # + # XXX: one MUST always call this without the key_mangling() option! See + # vyos.configverify.verify_common_route_maps() for more information. 
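The base-path selection above uses the and/or shortcut noted as the equivalent of C's ternary operator. Python's conditional expression says the same thing more directly and avoids the classic and/or pitfall of a falsy middle operand (harmless here, since the list is never empty); a sketch:

def bgp_base_path(vrf=None):
    return ['vrf', 'name', vrf, 'protocols', 'bgp'] if vrf else ['protocols', 'bgp']

print(bgp_base_path())        # ['protocols', 'bgp']
print(bgp_base_path('red'))   # ['vrf', 'name', 'red', 'protocols', 'bgp']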
+ tmp = conf.get_config_dict(['policy']) + # Merge policy dict into "regular" config dict + bgp = dict_merge(tmp, bgp) return bgp -def verify_remote_as(peer_config, asn_config): +def verify_remote_as(peer_config, bgp_config): if 'remote_as' in peer_config: return peer_config['remote_as'] if 'peer_group' in peer_config: peer_group_name = peer_config['peer_group'] - tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', asn_config) + tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config) if tmp: return tmp if 'interface' in peer_config: @@ -71,135 +84,192 @@ def verify_remote_as(peer_config, asn_config): if 'peer_group' in peer_config['interface']: peer_group_name = peer_config['interface']['peer_group'] - tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', asn_config) + tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config) if tmp: return tmp return None def verify(bgp): - if not bgp: + if not bgp or 'deleted' in bgp: return None - # Check if declared more than one ASN - if len(bgp) > 1: - raise ConfigError('Only one BGP AS number can be defined!') - - for asn, asn_config in bgp.items(): - # Common verification for both peer-group and neighbor statements - for neighbor in ['neighbor', 'peer_group']: - # bail out early if there is no neighbor or peer-group statement - # this also saves one indention level - if neighbor not in asn_config: - continue - - for peer, peer_config in asn_config[neighbor].items(): - # Only regular "neighbor" statement can have a peer-group set - # Check if the configure peer-group exists - if 'peer_group' in peer_config: - peer_group = peer_config['peer_group'] - if 'peer_group' not in asn_config or peer_group not in asn_config['peer_group']: - raise ConfigError(f'Specified peer-group "{peer_group}" for '\ - f'neighbor "{neighbor}" does not exist!') - - # ttl-security and ebgp-multihop can't be used in the same configration - if 'ebgp_multihop' in peer_config and 'ttl_security' in peer_config: - raise ConfigError('You can\'t set both ebgp-multihop and ttl-security hops') - - # Check spaces in the password - if 'password' in peer_config and ' ' in peer_config['password']: - raise ConfigError('You can\'t use spaces in the password') - - # Some checks can/must only be done on a neighbor and not a peer-group - if neighbor == 'neighbor': - # remote-as must be either set explicitly for the neighbor - # or for the entire peer-group - if not verify_remote_as(peer_config, asn_config): - raise ConfigError(f'Neighbor "{peer}" remote-as must be set!') - - # Only checks for ipv4 and ipv6 neighbors - # Check if neighbor address is assigned as system interface address - if is_ip(peer) and is_addr_assigned(peer): - raise ConfigError(f'Can\'t configure local address as neighbor "{peer}"') - - for afi in ['ipv4_unicast', 'ipv6_unicast', 'l2vpn_evpn']: - # Bail out early if address family is not configured - if 'address_family' not in peer_config or afi not in peer_config['address_family']: - continue - - afi_config = peer_config['address_family'][afi] - # Validate if configured Prefix list exists - if 'prefix_list' in afi_config: - for tmp in ['import', 'export']: - if tmp not in afi_config['prefix_list']: - # bail out early - continue - # get_config_dict() mangles all '-' characters to '_' this is legitimate, thus all our - # compares will run on '_' as also '_' is a valid name for a prefix-list - prefix_list = afi_config['prefix_list'][tmp].replace('-', '_') - if afi == 'ipv4_unicast': - if 
dict_search(f'policy.prefix_list.{prefix_list}', asn_config) == None: - raise ConfigError(f'prefix-list "{prefix_list}" used for "{tmp}" does not exist!') - elif afi == 'ipv6_unicast': - if dict_search(f'policy.prefix_list6.{prefix_list}', asn_config) == None: - raise ConfigError(f'prefix-list6 "{prefix_list}" used for "{tmp}" does not exist!') - - if 'route_map' in afi_config: - for tmp in ['import', 'export']: - if tmp in afi_config['route_map']: - # get_config_dict() mangles all '-' characters to '_' this is legitim, thus all our - # compares will run on '_' as also '_' is a valid name for a route-map - route_map = afi_config['route_map'][tmp].replace('-', '_') - if dict_search(f'policy.route_map.{route_map}', asn_config) == None: - raise ConfigError(f'route-map "{route_map}" used for "{tmp}" does not exist!') - - if 'route_reflector_client' in afi_config: - if 'remote_as' in peer_config and asn != peer_config['remote_as']: - raise ConfigError('route-reflector-client only supported for iBGP peers') - else: - if 'peer_group' in peer_config: - peer_group_as = dict_search(f'peer_group.{peer_group}.remote_as', asn_config) - if peer_group_as != None and peer_group_as != asn: - raise ConfigError('route-reflector-client only supported for iBGP peers') - - # Throw an error if a peer group is not configured for allow range - for prefix in dict_search('listen.range', asn_config) or []: - # we can not use dict_search() here as prefix contains dots ... - if 'peer_group' not in asn_config['listen']['range'][prefix]: - raise ConfigError(f'Listen range for prefix "{prefix}" has no peer group configured.') + if 'local_as' not in bgp: + raise ConfigError('BGP local-as number must be defined!') + + # Common verification for both peer-group and neighbor statements + for neighbor in ['neighbor', 'peer_group']: + # bail out early if there is no neighbor or peer-group statement + # this also saves one indention level + if neighbor not in bgp: + continue + + for peer, peer_config in bgp[neighbor].items(): + # Only regular "neighbor" statement can have a peer-group set + # Check if the configure peer-group exists + if 'peer_group' in peer_config: + peer_group = peer_config['peer_group'] + if 'peer_group' not in bgp or peer_group not in bgp['peer_group']: + raise ConfigError(f'Specified peer-group "{peer_group}" for '\ + f'neighbor "{neighbor}" does not exist!') + + if 'local_as' in peer_config: + if len(peer_config['local_as']) > 1: + raise ConfigError('Only one local-as number may be specified!') + + # Neighbor local-as override can not be the same as the local-as + # we use for this BGP instane! + asn = list(peer_config['local_as'].keys())[0] + if asn == bgp['local_as']: + raise ConfigError('Cannot have local-as same as BGP AS number') + + # ttl-security and ebgp-multihop can't be used in the same configration + if 'ebgp_multihop' in peer_config and 'ttl_security' in peer_config: + raise ConfigError('You can\'t set both ebgp-multihop and ttl-security hops') + + # Check if neighbor has both override capability and strict capability match configured at the same time. 
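On the CLI, local-as is a tag node, so get_config_dict() hands it back as a dict keyed by the AS number; that is why the override check above counts the keys and compares the first one against the instance's own local-as. A standalone sketch with made-up ASNs:

peer = {'remote_as': '65020', 'local_as': {'65010': {}}}   # hypothetical neighbor config
bgp  = {'local_as': '65001'}

if len(peer['local_as']) > 1:
    raise ValueError('Only one local-as number may be specified!')

asn = next(iter(peer['local_as']))
if asn == bgp['local_as']:     # would fire if the override were also 65001
    raise ValueError('Cannot have local-as same as BGP AS number')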
+ if 'override_capability' in peer_config and 'strict_capability_match' in peer_config: + raise ConfigError(f'Neighbor "{peer}" cannot have both override-capability and strict-capability-match configured at the same time!') + + # Check spaces in the password + if 'password' in peer_config and ' ' in peer_config['password']: + raise ConfigError('You can\'t use spaces in the password') + + # Some checks can/must only be done on a neighbor and not a peer-group + if neighbor == 'neighbor': + # remote-as must be either set explicitly for the neighbor + # or for the entire peer-group + if not verify_remote_as(peer_config, bgp): + raise ConfigError(f'Neighbor "{peer}" remote-as must be set!') + + # Only checks for ipv4 and ipv6 neighbors + # Check if neighbor address is assigned as system interface address + if is_ip(peer) and is_addr_assigned(peer): + raise ConfigError(f'Can not configure a local address as neighbor "{peer}"') + elif is_interface(peer): + if 'peer_group' in peer_config: + raise ConfigError(f'peer-group must be set under the interface node of "{peer}"') + if 'remote_as' in peer_config: + raise ConfigError(f'remote-as must be set under the interface node of "{peer}"') + + for afi in ['ipv4_unicast', 'ipv4_multicast', 'ipv4_labeled_unicast', 'ipv4_flowspec', + 'ipv6_unicast', 'ipv6_multicast', 'ipv6_labeled_unicast', 'ipv6_flowspec', + 'l2vpn_evpn']: + # Bail out early if address family is not configured + if 'address_family' not in peer_config or afi not in peer_config['address_family']: + continue + + # Check if neighbor has both ipv4 unicast and ipv4 labeled unicast configured at the same time. + if 'ipv4_unicast' in peer_config['address_family'] and 'ipv4_labeled_unicast' in peer_config['address_family']: + raise ConfigError(f'Neighbor "{peer}" cannot have both ipv4-unicast and ipv4-labeled-unicast configured at the same time!') + + # Check if neighbor has both ipv6 unicast and ipv6 labeled unicast configured at the same time. + if 'ipv6_unicast' in peer_config['address_family'] and 'ipv6_labeled_unicast' in peer_config['address_family']: + raise ConfigError(f'Neighbor "{peer}" cannot have both ipv6-unicast and ipv6-labeled-unicast configured at the same time!') + + afi_config = peer_config['address_family'][afi] + # Validate if configured Prefix list exists + if 'prefix_list' in afi_config: + for tmp in ['import', 'export']: + if tmp not in afi_config['prefix_list']: + # bail out early + continue + if afi == 'ipv4_unicast': + verify_prefix_list(afi_config['prefix_list'][tmp], bgp) + elif afi == 'ipv6_unicast': + verify_prefix_list(afi_config['prefix_list'][tmp], bgp, version='6') + + if 'route_map' in afi_config: + for tmp in ['import', 'export']: + if tmp in afi_config['route_map']: + verify_route_map(afi_config['route_map'][tmp], bgp) + + if 'route_reflector_client' in afi_config: + if 'remote_as' in peer_config and bgp['local_as'] != peer_config['remote_as']: + raise ConfigError('route-reflector-client only supported for iBGP peers') + else: + if 'peer_group' in peer_config: + peer_group_as = dict_search(f'peer_group.{peer_group}.remote_as', bgp) + if peer_group_as != None and peer_group_as != bgp['local_as']: + raise ConfigError('route-reflector-client only supported for iBGP peers') + + # Throw an error if a peer group is not configured for allow range + for prefix in dict_search('listen.range', bgp) or []: + # we can not use dict_search() here as prefix contains dots ... 
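The comment above points at a real limitation: a listen-range prefix such as 192.0.2.0/24 itself contains dots, so a dotted-path helper would split the key in the wrong places; plain indexing, as in the check that follows, keeps the key intact. Illustration (prefix and peer-group name are example values):

listen = {'range': {'192.0.2.0/24': {'peer_group': 'INTERNAL'}}}

# A dotted path like 'range.192.0.2.0/24.peer_group' would be split into
# ['range', '192', '0', '2', '0/24', 'peer_group'] and never match.
# Direct indexing works:
print(listen['range']['192.0.2.0/24']['peer_group'])   # INTERNAL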
+ if 'peer_group' not in bgp['listen']['range'][prefix]: + raise ConfigError(f'Listen range for prefix "{prefix}" has no peer group configured.') + + peer_group = bgp['listen']['range'][prefix]['peer_group'] + if 'peer_group' not in bgp or peer_group not in bgp['peer_group']: + raise ConfigError(f'Peer-group "{peer_group}" for listen range "{prefix}" does not exist!') + + if not verify_remote_as(bgp['listen']['range'][prefix], bgp): + raise ConfigError(f'Peer-group "{peer_group}" requires remote-as to be set!') + + # Throw an error if the global administrative distance parameters aren't all filled out. + if dict_search('parameters.distance', bgp) == None: + pass + else: + if dict_search('parameters.distance.global', bgp): + for key in ['external', 'internal', 'local']: + if dict_search(f'parameters.distance.global.{key}', bgp) == None: + raise ConfigError('Missing mandatory configuration option for '\ + f'global administrative distance {key}!') + + # Throw an error if the address family specific administrative distance parameters aren't all filled out. + if dict_search('address_family', bgp) == None: + pass + else: + for address_family_name in dict_search('address_family', bgp): + if dict_search(f'address_family.{address_family_name}.distance', bgp) == None: + pass else: - peer_group = asn_config['listen']['range'][prefix]['peer_group'] - # the peer group must also exist - if not dict_search(f'peer_group.{peer_group}', asn_config): - raise ConfigError(f'Peer-group "{peer_group}" for listen range "{prefix}" does not exist!') + for key in ['external', 'internal', 'local']: + if dict_search(f'address_family.{address_family_name}.distance.{key}', bgp) == None: + address_family_name = address_family_name.replace('_', '-') + raise ConfigError('Missing mandatory configuration option for '\ + f'{address_family_name} administrative distance {key}!') return None def generate(bgp): - if not bgp: + if not bgp or 'deleted' in bgp: bgp['new_frr_config'] = '' return None - # only one BGP AS is supported, so we can directly send the first key - # of the config dict - asn = list(bgp.keys())[0] - bgp[asn]['asn'] = asn - - bgp['new_frr_config'] = render_to_string('frr/bgp.frr.tmpl', bgp[asn]) + bgp['new_frr_config'] = render_to_string('frr/bgp.frr.tmpl', bgp) return None def apply(bgp): + bgp_daemon = 'bgpd' + zebra_daemon = 'zebra' + # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr_daemon) - frr_cfg.modify_section(f'^router bgp \d+$', '') + + # The route-map used for the FIB (zebra) is part of the zebra daemon + frr_cfg.load_configuration(zebra_daemon) + frr_cfg.modify_section(r'^ip protocol bgp route-map [-a-zA-Z0-9.]+$', '') + frr_cfg.commit_configuration(zebra_daemon) + + # Generate empty helper string which can be ammended to FRR commands, it + # will be either empty (default VRF) or contain the "vrf <name" statement + vrf = '' + if 'vrf' in bgp: + vrf = ' vrf ' + bgp['vrf'] + + frr_cfg.load_configuration(bgp_daemon) + frr_cfg.modify_section(f'^router bgp \d+{vrf}$', '') frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', bgp['new_frr_config']) - frr_cfg.commit_configuration(frr_daemon) + frr_cfg.commit_configuration(bgp_daemon) # If FRR config is blank, rerun the blank commit x times due to frr-reload # behavior/bug not properly clearing out on one commit. 
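apply() above drops the previous 'router bgp' block by matching its section header, which may or may not carry a VRF suffix. A small sketch of how that pattern behaves, using only the re module (AS numbers and VRF name are made up):

import re

def bgp_section_regex(vrf_name: str = '') -> str:
    # same shape as the pattern handed to modify_section() above
    suffix = f' vrf {vrf_name}' if vrf_name else ''
    return r'^router bgp \d+' + suffix + '$'

print(bool(re.match(bgp_section_regex(),      'router bgp 65001')))           # True
print(bool(re.match(bgp_section_regex('red'), 'router bgp 65001 vrf red')))   # True
print(bool(re.match(bgp_section_regex(),      'router bgp 65001 vrf red')))   # False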
if bgp['new_frr_config'] == '': for a in range(5): - frr_cfg.commit_configuration(frr_daemon) + frr_cfg.commit_configuration(bgp_daemon) + + # Save configuration to /run/frr/config/frr.conf + frr.save_configuration() return None diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py index b7afad473..4aea59bfd 100755 --- a/src/conf_mode/protocols_isis.py +++ b/src/conf_mode/protocols_isis.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2020 VyOS maintainers and contributors +# Copyright (C) 2020-2021 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -17,14 +17,17 @@ import os from sys import exit +from sys import argv from vyos.config import Config +from vyos.configdict import dict_merge from vyos.configdict import node_changed -from vyos import ConfigError -from vyos.util import call +from vyos.configverify import verify_common_route_maps +from vyos.configverify import verify_interface_exists from vyos.util import dict_search -from vyos.template import render +from vyos.util import get_interface_config from vyos.template import render_to_string +from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() @@ -34,126 +37,185 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['protocols', 'isis'] - isis = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + vrf = None + if len(argv) > 1: + vrf = argv[1] + + base_path = ['protocols', 'isis'] + + # eqivalent of the C foo ? 'a' : 'b' statement + base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path + isis = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True) + + # Assign the name of our VRF context. This MUST be done before the return + # statement below, else on deletion we will delete the default instance + # instead of the VRF instance. + if vrf: isis['vrf'] = vrf + + # As we no re-use this Python handler for both VRF and non VRF instances for + # IS-IS we need to find out if any interfaces changed so properly adjust + # the FRR configuration and not by acctident change interfaces from a + # different VRF. + interfaces_removed = node_changed(conf, base + ['interface']) + if interfaces_removed: + isis['interface_removed'] = list(interfaces_removed) + + # Bail out early if configuration tree does not exist + if not conf.exists(base): + isis.update({'deleted' : ''}) + return isis + + # We also need some additional information from the config, prefix-lists + # and route-maps for instance. They will be used in verify(). + # + # XXX: one MUST always call this without the key_mangling() option! See + # vyos.configverify.verify_common_route_maps() for more information. 
+ tmp = conf.get_config_dict(['policy']) + # Merge policy dict into "regular" config dict + isis = dict_merge(tmp, isis) return isis def verify(isis): # bail out early - looks like removal from running config - if not isis: + if not isis or 'deleted' in isis: return None - for process, isis_config in isis.items(): - # If more then one isis process is defined (Frr only supports one) - # http://docs.frrouting.org/en/latest/isisd.html#isis-router - if len(isis) > 1: - raise ConfigError('Only one isis process can be defined') - - # If network entity title (net) not defined - if 'net' not in isis_config: - raise ConfigError('ISIS net format iso is mandatory!') - - # If interface not set - if 'interface' not in isis_config: - raise ConfigError('ISIS interface is mandatory!') - - # If md5 and plaintext-password set at the same time - if 'area_password' in isis_config: - if {'md5', 'plaintext_password'} <= set(isis_config['encryption']): - raise ConfigError('Can not use both md5 and plaintext-password for ISIS area-password!') - - # If one param from delay set, but not set others - if 'spf_delay_ietf' in isis_config: - required_timers = ['holddown', 'init_delay', 'long_delay', 'short_delay', 'time_to_learn'] - exist_timers = [] - for elm_timer in required_timers: - if elm_timer in isis_config['spf_delay_ietf']: - exist_timers.append(elm_timer) - - exist_timers = set(required_timers).difference(set(exist_timers)) - if len(exist_timers) > 0: - raise ConfigError('All types of delay must be specified: ' + ', '.join(exist_timers).replace('_', '-')) - - # If Redistribute set, but level don't set - if 'redistribute' in isis_config: - proc_level = isis_config.get('level','').replace('-','_') - for proto, proto_config in isis_config.get('redistribute', {}).get('ipv4', {}).items(): + if 'net' not in isis: + raise ConfigError('Network entity is mandatory!') + + # last byte in IS-IS area address must be 0 + tmp = isis['net'].split('.') + if int(tmp[-1]) != 0: + raise ConfigError('Last byte of IS-IS network entity title must always be 0!') + + verify_common_route_maps(isis) + + # If interface not set + if 'interface' not in isis: + raise ConfigError('Interface used for routing updates is mandatory!') + + for interface in isis['interface']: + verify_interface_exists(interface) + if 'vrf' in isis: + # If interface specific options are set, we must ensure that the + # interface is bound to our requesting VRF. Due to the VyOS + # priorities the interface is bound to the VRF after creation of + # the VRF itself, and before any routing protocol is configured. 
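verify() above splits the network entity title on '.' and requires the last byte, the NSEL/selector, to be zero. A standalone sketch with a typical NET value (the value itself is only an example):

def check_net(net: str) -> None:
    last = net.split('.')[-1]
    if int(last) != 0:
        raise ValueError('Last byte of IS-IS network entity title must always be 0!')

check_net('49.0001.1921.6800.1002.00')     # passes
# check_net('49.0001.1921.6800.1002.01')   # would raise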
+ vrf = isis['vrf'] + tmp = get_interface_config(interface) + if 'master' not in tmp or tmp['master'] != vrf: + raise ConfigError(f'Interface {interface} is not a member of VRF {vrf}!') + + # If md5 and plaintext-password set at the same time + if 'area_password' in isis: + if {'md5', 'plaintext_password'} <= set(isis['encryption']): + raise ConfigError('Can not use both md5 and plaintext-password for ISIS area-password!') + + # If one param from delay set, but not set others + if 'spf_delay_ietf' in isis: + required_timers = ['holddown', 'init_delay', 'long_delay', 'short_delay', 'time_to_learn'] + exist_timers = [] + for elm_timer in required_timers: + if elm_timer in isis['spf_delay_ietf']: + exist_timers.append(elm_timer) + + exist_timers = set(required_timers).difference(set(exist_timers)) + if len(exist_timers) > 0: + raise ConfigError('All types of delay must be specified: ' + ', '.join(exist_timers).replace('_', '-')) + + # If Redistribute set, but level don't set + if 'redistribute' in isis: + proc_level = isis.get('level','').replace('-','_') + for afi in ['ipv4']: + if afi not in isis['redistribute']: + continue + + for proto, proto_config in isis['redistribute'][afi].items(): if 'level_1' not in proto_config and 'level_2' not in proto_config: - raise ConfigError('Redistribute level-1 or level-2 should be specified in \"protocols isis {} redistribute ipv4 {}\"'.format(process, proto)) - for redistribute_level in proto_config.keys(): - if proc_level and proc_level != 'level_1_2' and proc_level != redistribute_level: - raise ConfigError('\"protocols isis {0} redistribute ipv4 {2} {3}\" cannot be used with \"protocols isis {0} level {1}\"'.format(process, proc_level, proto, redistribute_level)) - - # Segment routing checks - if dict_search('segment_routing', isis_config): - if dict_search('segment_routing.global_block', isis_config): - high_label_value = dict_search('segment_routing.global_block.high_label_value', isis_config) - low_label_value = dict_search('segment_routing.global_block.low_label_value', isis_config) - # If segment routing global block high value is blank, throw error - if low_label_value and not high_label_value: - raise ConfigError('Segment routing global block high value must not be left blank') - # If segment routing global block low value is blank, throw error - if high_label_value and not low_label_value: - raise ConfigError('Segment routing global block low value must not be left blank') - # If segment routing global block low value is higher than the high value, throw error - if int(low_label_value) > int(high_label_value): - raise ConfigError('Segment routing global block low value must be lower than high value') - - if dict_search('segment_routing.local_block', isis_config): - high_label_value = dict_search('segment_routing.local_block.high_label_value', isis_config) - low_label_value = dict_search('segment_routing.local_block.low_label_value', isis_config) - # If segment routing local block high value is blank, throw error - if low_label_value and not high_label_value: - raise ConfigError('Segment routing local block high value must not be left blank') - # If segment routing local block low value is blank, throw error - if high_label_value and not low_label_value: - raise ConfigError('Segment routing local block low value must not be left blank') - # If segment routing local block low value is higher than the high value, throw error - if int(low_label_value) > int(high_label_value): - raise ConfigError('Segment routing local block low value must be 
lower than high value') + raise ConfigError(f'Redistribute level-1 or level-2 should be specified in ' \ + f'"protocols isis {process} redistribute {afi} {proto}"!') + + for redistr_level, redistr_config in proto_config.items(): + if proc_level and proc_level != 'level_1_2' and proc_level != redistr_level: + raise ConfigError(f'"protocols isis {process} redistribute {afi} {proto} {redistr_level}" ' \ + f'can not be used with \"protocols isis {process} level {proc_level}\"') + + # Segment routing checks + if dict_search('segment_routing.global_block', isis): + high_label_value = dict_search('segment_routing.global_block.high_label_value', isis) + low_label_value = dict_search('segment_routing.global_block.low_label_value', isis) + + # If segment routing global block high value is blank, throw error + if (low_label_value and not high_label_value) or (high_label_value and not low_label_value): + raise ConfigError('Segment routing global block requires both low and high value!') + + # If segment routing global block low value is higher than the high value, throw error + if int(low_label_value) > int(high_label_value): + raise ConfigError('Segment routing global block low value must be lower than high value') + + if dict_search('segment_routing.local_block', isis): + high_label_value = dict_search('segment_routing.local_block.high_label_value', isis) + low_label_value = dict_search('segment_routing.local_block.low_label_value', isis) + + # If segment routing local block high value is blank, throw error + if (low_label_value and not high_label_value) or (high_label_value and not low_label_value): + raise ConfigError('Segment routing local block requires both high and low value!') + + # If segment routing local block low value is higher than the high value, throw error + if int(low_label_value) > int(high_label_value): + raise ConfigError('Segment routing local block low value must be lower than high value') return None def generate(isis): - if not isis: + if not isis or 'deleted' in isis: isis['new_frr_config'] = '' return None - # only one ISIS process is supported, so we can directly send the first key - # of the config dict - process = list(isis.keys())[0] - isis[process]['process'] = process - - isis['new_frr_config'] = render_to_string('frr/isis.frr.tmpl', - isis[process]) - + isis['new_frr_config'] = render_to_string('frr/isis.frr.tmpl', isis) return None def apply(isis): + isis_daemon = 'isisd' + zebra_daemon = 'zebra' + # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(daemon='isisd') - frr_cfg.modify_section(r'interface \S+', '') - frr_cfg.modify_section(f'router isis \S+', '') + + # The route-map used for the FIB (zebra) is part of the zebra daemon + frr_cfg.load_configuration(zebra_daemon) + frr_cfg.modify_section(r'^ip protocol isis route-map [-a-zA-Z0-9.]+$', '') + frr_cfg.commit_configuration(zebra_daemon) + + # Generate empty helper string which can be ammended to FRR commands, it + # will be either empty (default VRF) or contain the "vrf <name" statement + vrf = '' + if 'vrf' in isis: + vrf = ' vrf ' + isis['vrf'] + + frr_cfg.load_configuration(isis_daemon) + frr_cfg.modify_section(f'^router isis VyOS{vrf}$', '') + + for key in ['interface', 'interface_removed']: + if key not in isis: + continue + for interface in isis[key]: + frr_cfg.modify_section(f'^interface {interface}{vrf}$', '') + frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', isis['new_frr_config']) - 
frr_cfg.commit_configuration(daemon='isisd') + frr_cfg.commit_configuration(isis_daemon) # If FRR config is blank, rerun the blank commit x times due to frr-reload # behavior/bug not properly clearing out on one commit. if isis['new_frr_config'] == '': for a in range(5): - frr_cfg.commit_configuration(daemon='isisd') - - # Debugging - ''' - print('') - print('--------- DEBUGGING ----------') - print(f'Existing config:\n{frr_cfg["original_config"]}\n\n') - print(f'Replacement config:\n{isis["new_frr_config"]}\n\n') - print(f'Modified config:\n{frr_cfg["modified_config"]}\n\n') - ''' + frr_cfg.commit_configuration(isis_daemon) + + # Save configuration to /run/frr/config/frr.conf + frr.save_configuration() return None diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py index aefe7c23e..a6cd5c9db 100755 --- a/src/conf_mode/protocols_ospf.py +++ b/src/conf_mode/protocols_ospf.py @@ -17,37 +17,64 @@ import os from sys import exit +from sys import argv from vyos.config import Config from vyos.configdict import dict_merge -from vyos.configverify import verify_route_maps +from vyos.configdict import node_changed +from vyos.configverify import verify_common_route_maps +from vyos.configverify import verify_route_map from vyos.configverify import verify_interface_exists from vyos.template import render_to_string -from vyos.util import call from vyos.util import dict_search +from vyos.util import get_interface_config from vyos.xml import defaults from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() -frr_daemon = 'ospfd' - def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'ospf'] - ospf = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + + vrf = None + if len(argv) > 1: + vrf = argv[1] + + base_path = ['protocols', 'ospf'] + + # eqivalent of the C foo ? 'a' : 'b' statement + base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospf'] or base_path + ospf = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True) + + # Assign the name of our VRF context. This MUST be done before the return + # statement below, else on deletion we will delete the default instance + # instead of the VRF instance. + if vrf: ospf['vrf'] = vrf + + # As we no re-use this Python handler for both VRF and non VRF instances for + # OSPF we need to find out if any interfaces changed so properly adjust + # the FRR configuration and not by acctident change interfaces from a + # different VRF. + interfaces_removed = node_changed(conf, base + ['interface']) + if interfaces_removed: + ospf['interface_removed'] = list(interfaces_removed) # Bail out early if configuration tree does not exist if not conf.exists(base): + ospf.update({'deleted' : ''}) return ospf # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. - default_values = defaults(base) + # XXX: Note that we can not call defaults(base), as defaults does not work + # on an instance of a tag node. As we use the exact same CLI definition for + # both the non-vrf and vrf version this is absolutely safe! + default_values = defaults(base_path) # We have to cleanup the default dict, as default values could enable features # which are not explicitly enabled on the CLI. 
Example: default-information @@ -63,6 +90,7 @@ def get_config(config=None): for protocol in ['bgp', 'connected', 'isis', 'kernel', 'rip', 'static']: if dict_search(f'redistribute.{protocol}', ospf) is None: del default_values['redistribute'][protocol] + # XXX: T2665: we currently have no nice way for defaults under tag nodes, # clean them out and add them manually :( del default_values['neighbor'] @@ -100,10 +128,12 @@ def get_config(config=None): ospf['interface'][interface]) # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify() - base = ['policy'] - tmp = conf.get_config_dict(base, key_mangling=('-', '_')) - # Merge policy dict into OSPF dict + # and route-maps for instance. They will be used in verify(). + # + # XXX: one MUST always call this without the key_mangling() option! See + # vyos.configverify.verify_common_route_maps() for more information. + tmp = conf.get_config_dict(['policy']) + # Merge policy dict into "regular" config dict ospf = dict_merge(tmp, ospf) return ospf @@ -112,7 +142,11 @@ def verify(ospf): if not ospf: return None - verify_route_maps(ospf) + verify_common_route_maps(ospf) + + # As we can have a default-information route-map, we need to validate it! + route_map_name = dict_search('default_information.originate.route_map', ospf) + if route_map_name: verify_route_map(route_map_name, ospf) if 'interface' in ospf: for interface in ospf['interface']: @@ -121,12 +155,22 @@ def verify(ospf): # time. FRR will only activate the last option set via CLI. if {'hello_multiplier', 'dead_interval'} <= set(ospf['interface'][interface]): raise ConfigError(f'Can not use hello-multiplier and dead-interval ' \ - f'concurrently for "{interface}"!') + f'concurrently for {interface}!') + + if 'vrf' in ospf: + # If interface specific options are set, we must ensure that the + # interface is bound to our requesting VRF. Due to the VyOS + # priorities the interface is bound to the VRF after creation of + # the VRF itself, and before any routing protocol is configured. 
+ vrf = ospf['vrf'] + tmp = get_interface_config(interface) + if 'master' not in tmp or tmp['master'] != vrf: + raise ConfigError(f'Interface {interface} is not a member of VRF {vrf}!') return None def generate(ospf): - if not ospf: + if not ospf or 'deleted' in ospf: ospf['new_frr_config'] = '' return None @@ -134,19 +178,43 @@ def generate(ospf): return None def apply(ospf): + ospf_daemon = 'ospfd' + zebra_daemon = 'zebra' + # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr_daemon) - frr_cfg.modify_section(r'^interface \S+', '') - frr_cfg.modify_section('^router ospf$', '') + + # The route-map used for the FIB (zebra) is part of the zebra daemon + frr_cfg.load_configuration(zebra_daemon) + frr_cfg.modify_section(r'^ip protocol ospf route-map [-a-zA-Z0-9.]+$', '') + frr_cfg.commit_configuration(zebra_daemon) + + # Generate empty helper string which can be ammended to FRR commands, it + # will be either empty (default VRF) or contain the "vrf <name" statement + vrf = '' + if 'vrf' in ospf: + vrf = ' vrf ' + ospf['vrf'] + + frr_cfg.load_configuration(ospf_daemon) + frr_cfg.modify_section(f'^router ospf{vrf}$', '') + + for key in ['interface', 'interface_removed']: + if key not in ospf: + continue + for interface in ospf[key]: + frr_cfg.modify_section(f'^interface {interface}{vrf}$', '') + frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', ospf['new_frr_config']) - frr_cfg.commit_configuration(frr_daemon) + frr_cfg.commit_configuration(ospf_daemon) # If FRR config is blank, rerun the blank commit x times due to frr-reload # behavior/bug not properly clearing out on one commit. if ospf['new_frr_config'] == '': for a in range(5): - frr_cfg.commit_configuration(frr_daemon) + frr_cfg.commit_configuration(ospf_daemon) + + # Save configuration to /run/frr/config/frr.conf + frr.save_configuration() return None diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py index 6f068b196..1964e9d34 100755 --- a/src/conf_mode/protocols_ospfv3.py +++ b/src/conf_mode/protocols_ospfv3.py @@ -20,9 +20,8 @@ from sys import exit from vyos.config import Config from vyos.configdict import dict_merge -from vyos.configverify import verify_route_maps +from vyos.configverify import verify_common_route_maps from vyos.template import render_to_string -from vyos.util import call from vyos.ifconfig import Interface from vyos.xml import defaults from vyos import ConfigError @@ -45,10 +44,12 @@ def get_config(config=None): return ospfv3 # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify() - base = ['policy'] - tmp = conf.get_config_dict(base, key_mangling=('-', '_')) - # Merge policy dict into OSPF dict + # and route-maps for instance. They will be used in verify(). + # + # XXX: one MUST always call this without the key_mangling() option! See + # vyos.configverify.verify_common_route_maps() for more information. 
+ tmp = conf.get_config_dict(['policy']) + # Merge policy dict into "regular" config dict ospfv3 = dict_merge(tmp, ospfv3) return ospfv3 @@ -57,7 +58,7 @@ def verify(ospfv3): if not ospfv3: return None - verify_route_maps(ospfv3) + verify_common_route_maps(ospfv3) if 'interface' in ospfv3: for ifname, if_config in ospfv3['interface'].items(): @@ -91,6 +92,9 @@ def apply(ospfv3): for a in range(5): frr_cfg.commit_configuration(frr_daemon) + # Save configuration to /run/frr/config/frr.conf + frr.save_configuration() + return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_rip.py b/src/conf_mode/protocols_rip.py index 6db5143c5..907ac54ac 100755 --- a/src/conf_mode/protocols_rip.py +++ b/src/conf_mode/protocols_rip.py @@ -20,8 +20,9 @@ from sys import exit from vyos.config import Config from vyos.configdict import dict_merge -from vyos.configverify import verify_route_maps -from vyos.util import call +from vyos.configverify import verify_common_route_maps +from vyos.configverify import verify_access_list +from vyos.configverify import verify_prefix_list from vyos.util import dict_search from vyos.xml import defaults from vyos.template import render_to_string @@ -30,8 +31,6 @@ from vyos import frr from vyos import airbag airbag.enable() -frr_daemon = 'ripd' - def get_config(config=None): if config: conf = config @@ -51,10 +50,12 @@ def get_config(config=None): rip = dict_merge(default_values, rip) # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify() - base = ['policy'] - tmp = conf.get_config_dict(base, key_mangling=('-', '_')) - # Merge policy dict into OSPF dict + # and route-maps for instance. They will be used in verify(). + # + # XXX: one MUST always call this without the key_mangling() option! See + # vyos.configverify.verify_common_route_maps() for more information. 
+ tmp = conf.get_config_dict(['policy']) + # Merge policy dict into "regular" config dict rip = dict_merge(tmp, rip) return rip @@ -63,21 +64,19 @@ def verify(rip): if not rip: return None + verify_common_route_maps(rip) + acl_in = dict_search('distribute_list.access_list.in', rip) - if acl_in and acl_in not in (dict_search('policy.access_list', rip) or []): - raise ConfigError(f'Inbound ACL "{acl_in}" does not exist!') + if acl_in: verify_access_list(acl_in, rip) acl_out = dict_search('distribute_list.access_list.out', rip) - if acl_out and acl_out not in (dict_search('policy.access_list', rip) or []): - raise ConfigError(f'Outbound ACL "{acl_out}" does not exist!') + if acl_out: verify_access_list(acl_out, rip) - prefix_list_in = dict_search('distribute_list.prefix_list.in', rip) - if prefix_list_in and prefix_list_in.replace('-','_') not in (dict_search('policy.prefix_list', rip) or []): - raise ConfigError(f'Inbound prefix-list "{prefix_list_in}" does not exist!') + prefix_list_in = dict_search('distribute_list.prefix-list.in', rip) + if prefix_list_in: verify_prefix_list(prefix_list_in, rip) prefix_list_out = dict_search('distribute_list.prefix_list.out', rip) - if prefix_list_out and prefix_list_out.replace('-','_') not in (dict_search('policy.prefix_list', rip) or []): - raise ConfigError(f'Outbound prefix-list "{prefix_list_out}" does not exist!') + if prefix_list_out: verify_prefix_list(prefix_list_out, rip) if 'interface' in rip: for interface, interface_options in rip['interface'].items(): @@ -89,8 +88,6 @@ def verify(rip): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') - verify_route_maps(rip) - def generate(rip): if not rip: rip['new_frr_config'] = '' @@ -101,20 +98,33 @@ def generate(rip): return None def apply(rip): + rip_daemon = 'ripd' + zebra_daemon = 'zebra' + # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr_daemon) + + # The route-map used for the FIB (zebra) is part of the zebra daemon + frr_cfg.load_configuration(zebra_daemon) + frr_cfg.modify_section(r'^ip protocol rip route-map [-a-zA-Z0-9.]+$', '') + frr_cfg.commit_configuration(zebra_daemon) + + frr_cfg.load_configuration(rip_daemon) frr_cfg.modify_section(r'key chain \S+', '') frr_cfg.modify_section(r'interface \S+', '') - frr_cfg.modify_section('router rip', '') + frr_cfg.modify_section('^router rip$', '') + frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', rip['new_frr_config']) - frr_cfg.commit_configuration(frr_daemon) + frr_cfg.commit_configuration(rip_daemon) # If FRR config is blank, rerun the blank commit x times due to frr-reload # behavior/bug not properly clearing out on one commit. 
if rip['new_frr_config'] == '': for a in range(5): - frr_cfg.commit_configuration(frr_daemon) + frr_cfg.commit_configuration(rip_daemon) + + # Save configuration to /run/frr/config/frr.conf + frr.save_configuration() return None diff --git a/src/conf_mode/protocols_ripng.py b/src/conf_mode/protocols_ripng.py index 8cc5de64a..44c080546 100755 --- a/src/conf_mode/protocols_ripng.py +++ b/src/conf_mode/protocols_ripng.py @@ -20,8 +20,9 @@ from sys import exit from vyos.config import Config from vyos.configdict import dict_merge -from vyos.configverify import verify_route_maps -from vyos.util import call +from vyos.configverify import verify_common_route_maps +from vyos.configverify import verify_access_list +from vyos.configverify import verify_prefix_list from vyos.util import dict_search from vyos.xml import defaults from vyos.template import render_to_string @@ -51,35 +52,33 @@ def get_config(config=None): ripng = dict_merge(default_values, ripng) # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify() - base = ['policy'] - tmp = conf.get_config_dict(base, key_mangling=('-', '_')) - # Merge policy dict into OSPF dict + # and route-maps for instance. They will be used in verify(). + # + # XXX: one MUST always call this without the key_mangling() option! See + # vyos.configverify.verify_common_route_maps() for more information. + tmp = conf.get_config_dict(['policy']) + # Merge policy dict into "regular" config dict ripng = dict_merge(tmp, ripng) - import pprint - pprint.pprint(ripng) return ripng def verify(ripng): if not ripng: return None + verify_common_route_maps(ripng) + acl_in = dict_search('distribute_list.access_list.in', ripng) - if acl_in and acl_in not in (dict_search('policy.access_list6', ripng) or []): - raise ConfigError(f'Inbound access-list6 "{acl_in}" does not exist!') + if acl_in: verify_access_list(acl_in, ripng, version='6') acl_out = dict_search('distribute_list.access_list.out', ripng) - if acl_out and acl_out not in (dict_search('policy.access_list6', ripng) or []): - raise ConfigError(f'Outbound access-list6 "{acl_out}" does not exist!') + if acl_out: verify_access_list(acl_out, ripng, version='6') prefix_list_in = dict_search('distribute_list.prefix_list.in', ripng) - if prefix_list_in and prefix_list_in.replace('-','_') not in (dict_search('policy.prefix_list6', ripng) or []): - raise ConfigError(f'Inbound prefix-list6 "{prefix_list_in}" does not exist!') + if prefix_list_in: verify_prefix_list(prefix_list_in, ripng, version='6') prefix_list_out = dict_search('distribute_list.prefix_list.out', ripng) - if prefix_list_out and prefix_list_out.replace('-','_') not in (dict_search('policy.prefix_list6', ripng) or []): - raise ConfigError(f'Outbound prefix-list6 "{prefix_list_out}" does not exist!') + if prefix_list_out: verify_prefix_list(prefix_list_out, ripng, version='6') if 'interface' in ripng: for interface, interface_options in ripng['interface'].items(): @@ -91,17 +90,12 @@ def verify(ripng): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') - verify_route_maps(ripng) - def generate(ripng): if not ripng: ripng['new_frr_config'] = '' return None ripng['new_frr_config'] = render_to_string('frr/ripng.frr.tmpl', ripng) - import pprint - pprint.pprint(ripng['new_frr_config']) - return None def apply(ripng): @@ -120,6 +114,9 @@ def apply(ripng): for a in range(5): frr_cfg.commit_configuration(frr_daemon) + 
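The RIP and RIPng handlers above drop their hand-rolled existence checks in favour of the shared verifiers from vyos.configverify; RIPng passes version='6' so the access-list6/prefix-list6 policy nodes are consulted instead of the IPv4 ones. The lookup those verifiers perform boils down to the following self-contained sketch (helper name and ValueError are illustrative; the dict layout mirrors the merged policy/protocol dict built in get_config()):

    def verify_access_list_ref(acl, config, version=''):
        # A referenced access-list(6) must exist under the merged "policy" tree
        lists = config.get('policy', {}).get(f'access_list{version}', {})
        if acl not in lists:
            raise ValueError(f'access-list{version} "{acl}" does not exist!')

    ripng_like = {
        'policy': {'access_list6': {'100': {}}},
        'distribute_list': {'access_list': {'in': '100'}},
    }
    acl_in = ripng_like['distribute_list']['access_list']['in']
    verify_access_list_ref(acl_in, ripng_like, version='6')  # passes; an unknown list raises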
# Save configuration to /run/frr/config/frr.conf + frr.save_configuration() + return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_rpki.py b/src/conf_mode/protocols_rpki.py index 75b870b05..d8f99efb8 100755 --- a/src/conf_mode/protocols_rpki.py +++ b/src/conf_mode/protocols_rpki.py @@ -21,7 +21,6 @@ from sys import exit from vyos.config import Config from vyos.configdict import dict_merge from vyos.template import render_to_string -from vyos.util import call from vyos.util import dict_search from vyos.xml import defaults from vyos import ConfigError diff --git a/src/conf_mode/protocols_static.py b/src/conf_mode/protocols_static.py index 5d101b33e..1d45cb71c 100755 --- a/src/conf_mode/protocols_static.py +++ b/src/conf_mode/protocols_static.py @@ -17,29 +17,66 @@ import os from sys import exit +from sys import argv from vyos.config import Config +from vyos.configdict import dict_merge +from vyos.configverify import verify_common_route_maps +from vyos.configverify import verify_vrf from vyos.template import render_to_string -from vyos.util import call -from vyos.configverify import verify_route_maps from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() -frr_daemon = 'staticd' - def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'static'] + + vrf = None + if len(argv) > 1: + vrf = argv[1] + + base_path = ['protocols', 'static'] + # eqivalent of the C foo ? 'a' : 'b' statement + base = vrf and ['vrf', 'name', vrf, 'protocols', 'static'] or base_path static = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + + # Assign the name of our VRF context + if vrf: static['vrf'] = vrf + + # We also need some additional information from the config, prefix-lists + # and route-maps for instance. They will be used in verify(). + # + # XXX: one MUST always call this without the key_mangling() option! See + # vyos.configverify.verify_common_route_maps() for more information. 
+ tmp = conf.get_config_dict(['policy']) + # Merge policy dict into "regular" config dict + static = dict_merge(tmp, static) + return static def verify(static): - verify_route_maps(static) + verify_common_route_maps(static) + + for route in ['route', 'route6']: + # if there is no route(6) key in the dictionary we can immediately + # bail out early + if route not in static: + continue + + # When leaking routes to other VRFs we must ensure that the destination + # VRF exists + for prefix, prefix_options in static[route].items(): + # both the interface and next-hop CLI node can have a VRF subnode, + # thus we check this using a for loop + for type in ['interface', 'next_hop']: + if type in prefix_options: + for interface, interface_config in prefix_options[type].items(): + verify_vrf(interface_config) + return None def generate(static): @@ -47,19 +84,37 @@ def generate(static): return None def apply(static): + static_daemon = 'staticd' + zebra_daemon = 'zebra' + # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr_daemon) - frr_cfg.modify_section(r'^ip route .*', '') - frr_cfg.modify_section(r'^ipv6 route .*', '') + + # The route-map used for the FIB (zebra) is part of the zebra daemon + frr_cfg.load_configuration(zebra_daemon) + frr_cfg.modify_section(r'^ip protocol static route-map [-a-zA-Z0-9.]+$', '') + frr_cfg.commit_configuration(zebra_daemon) + + frr_cfg.load_configuration(static_daemon) + + if 'vrf' in static: + vrf = static['vrf'] + frr_cfg.modify_section(f'^vrf {vrf}$', '') + else: + frr_cfg.modify_section(r'^ip route .*', '') + frr_cfg.modify_section(r'^ipv6 route .*', '') + frr_cfg.add_before(r'(interface .*|line vty)', static['new_frr_config']) - frr_cfg.commit_configuration(frr_daemon) + frr_cfg.commit_configuration(static_daemon) # If FRR config is blank, rerun the blank commit x times due to frr-reload # behavior/bug not properly clearing out on one commit. if static['new_frr_config'] == '': for a in range(5): - frr_cfg.commit_configuration(frr_daemon) + frr_cfg.commit_configuration(static_daemon) + + # Save configuration to /run/frr/config/frr.conf + frr.save_configuration() return None diff --git a/src/conf_mode/protocols_vrf.py b/src/conf_mode/protocols_vrf.py deleted file mode 100755 index 227e7d5e1..000000000 --- a/src/conf_mode/protocols_vrf.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2021 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
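The protocols_static.py verify() above walks every route/route6 prefix and, for both the interface and next-hop variants, hands any VRF leak target to verify_vrf(). The traversal itself is three nested loops over the mangled config dict; a self-contained sketch (the existence check against a fixed set of VRF names is an illustrative stand-in for verify_vrf()):

    def check_leak_target(fragment, known_vrfs):
        # fragment is an interface/next-hop sub-dict that may carry a "vrf" leak target
        vrf = fragment.get('vrf')
        if vrf and vrf not in known_vrfs:
            raise ValueError(f'VRF "{vrf}" does not exist!')

    static = {'route': {'10.0.0.0/8': {'next_hop': {'192.0.2.1': {'vrf': 'blue'}}}}}
    for route in ('route', 'route6'):
        for prefix, prefix_options in static.get(route, {}).items():
            for kind in ('interface', 'next_hop'):
                for _, fragment in prefix_options.get(kind, {}).items():
                    check_leak_target(fragment, known_vrfs={'blue', 'red'})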
- -import os - -from sys import exit - -from vyos.config import Config -from vyos.template import render_to_string -from vyos.util import call -from vyos import ConfigError -from vyos import frr -from vyos import airbag -airbag.enable() - -frr_daemon = 'staticd' - -def get_config(config=None): - if config: - conf = config - else: - conf = Config() - base = ['protocols', 'vrf'] - vrf = conf.get_config_dict(base, key_mangling=('-', '_')) - return vrf - -def verify(vrf): - - return None - -def generate(vrf): - vrf['new_frr_config'] = render_to_string('frr/vrf.frr.tmpl', vrf) - return None - -def apply(vrf): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr_daemon) - frr_cfg.modify_section(r'vrf \S+', '') - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', vrf['new_frr_config']) - frr_cfg.commit_configuration(frr_daemon) - - # If FRR config is blank, rerun the blank commit x times due to frr-reload - # behavior/bug not properly clearing out on one commit. - if vrf['new_frr_config'] == '': - for a in range(5): - frr_cfg.commit_configuration(frr_daemon) - - return None - -if __name__ == '__main__': - try: - c = get_config() - verify(c) - generate(c) - apply(c) - except ConfigError as e: - print(e) - exit(1) diff --git a/src/conf_mode/service_console-server.py b/src/conf_mode/service_console-server.py index 6e94a19ae..51050e702 100755 --- a/src/conf_mode/service_console-server.py +++ b/src/conf_mode/service_console-server.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2018-2020 VyOS maintainers and contributors +# Copyright (C) 2018-2021 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -17,6 +17,7 @@ import os from sys import exit +from psutil import process_iter from vyos.config import Config from vyos.configdict import dict_merge @@ -60,14 +61,19 @@ def verify(proxy): if not proxy: return None + processes = process_iter(['name', 'cmdline']) if 'device' in proxy: - for device in proxy['device']: - if 'speed' not in proxy['device'][device]: - raise ConfigError(f'Serial port speed must be defined for "{device}"!') + for device, device_config in proxy['device'].items(): + for process in processes: + if 'agetty' in process.name() and device in process.cmdline(): + raise ConfigError(f'Port "{device}" already provides a '\ + 'console used by "system console"!') + + if 'speed' not in device_config: + raise ConfigError(f'Port "{device}" requires speed to be set!') - if 'ssh' in proxy['device'][device]: - if 'port' not in proxy['device'][device]['ssh']: - raise ConfigError(f'SSH port must be defined for "{device}"!') + if 'ssh' in device_config and 'port' not in device_config['ssh']: + raise ConfigError(f'Port "{device}" requires SSH port to be set!') return None @@ -77,13 +83,13 @@ def generate(proxy): render(config_file, 'conserver/conserver.conf.tmpl', proxy) if 'device' in proxy: - for device in proxy['device']: - if 'ssh' not in proxy['device'][device]: + for device, device_config in proxy['device'].items(): + if 'ssh' not in device_config: continue tmp = { 'device' : device, - 'port' : proxy['device'][device]['ssh']['port'], + 'port' : device_config['ssh']['port'], } render(dropbear_systemd_file.format(**tmp), 'conserver/dropbear@.service.tmpl', tmp) @@ -102,10 +108,10 @@ def apply(proxy): call('systemctl restart conserver-server.service') if 'device' in 
proxy: - for device in proxy['device']: - if 'ssh' not in proxy['device'][device]: + for device, device_config in proxy['device'].items(): + if 'ssh' not in device_config: continue - port = proxy['device'][device]['ssh']['port'] + port = device_config['ssh']['port'] call(f'systemctl restart dropbear@{port}.service') return None diff --git a/src/conf_mode/system-login.py b/src/conf_mode/system-login.py index 99af5c757..c8b81d80a 100755 --- a/src/conf_mode/system-login.py +++ b/src/conf_mode/system-login.py @@ -158,11 +158,29 @@ def generate(login): env = os.environ.copy() env['vyos_libexec_dir'] = '/usr/libexec/vyos' - call(f"/opt/vyatta/sbin/my_delete system login user '{user}' " \ - f"authentication plaintext-password", env=env) - - call(f"/opt/vyatta/sbin/my_set system login user '{user}' " \ - f"authentication encrypted-password '{encrypted_password}'", env=env) + # Set default commands for re-adding user with encrypted password + del_user_plain = f"system login user '{user}' authentication plaintext-password" + add_user_encrypt = f"system login user '{user}' authentication encrypted-password '{encrypted_password}'" + + lvl = env['VYATTA_EDIT_LEVEL'] + # We're in config edit level, for example "edit system login" + # Change default commands for re-adding user with encrypted password + if lvl != '/': + # Replace '/system/login' to 'system login' + lvl = lvl.strip('/').split('/') + # Convert command str to list + del_user_plain = del_user_plain.split() + # New command exclude level, for example "edit system login" + del_user_plain = del_user_plain[len(lvl):] + # Convert string to list + del_user_plain = " ".join(del_user_plain) + + add_user_encrypt = add_user_encrypt.split() + add_user_encrypt = add_user_encrypt[len(lvl):] + add_user_encrypt = " ".join(add_user_encrypt) + + call(f"/opt/vyatta/sbin/my_delete {del_user_plain}", env=env) + call(f"/opt/vyatta/sbin/my_set {add_user_encrypt}", env=env) else: try: if getspnam(user).sp_pwdp == dict_search('authentication.encrypted_password', user_config): diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py index 6c6e219a5..a39da8991 100755 --- a/src/conf_mode/vrf.py +++ b/src/conf_mode/vrf.py @@ -23,16 +23,19 @@ from vyos.config import Config from vyos.configdict import node_changed from vyos.ifconfig import Interface from vyos.template import render +from vyos.template import render_to_string +from vyos.util import call from vyos.util import cmd from vyos.util import dict_search +from vyos.util import get_interface_config from vyos import ConfigError +from vyos import frr from vyos import airbag airbag.enable() -config_file = r'/etc/iproute2/rt_tables.d/vyos-vrf.conf' +frr_daemon = 'zebra' -def _cmd(command): - cmd(command, raising=ConfigError, message='Error changing VRF') +config_file = r'/etc/iproute2/rt_tables.d/vyos-vrf.conf' def list_rules(): command = 'ip -j -4 rule show' @@ -111,8 +114,7 @@ def verify(vrf): # routing table id can't be changed - OS restriction if os.path.isdir(f'/sys/class/net/{name}'): - tmp = loads(cmd(f'ip -j -d link show {name}'))[0] - tmp = str(dict_search('linkinfo.info_data.table', tmp)) + tmp = str(dict_search('linkinfo.info_data.table', get_interface_config(name))) if tmp and tmp != config['table']: raise ConfigError(f'VRF "{name}" table id modification not possible!') @@ -125,6 +127,7 @@ def verify(vrf): def generate(vrf): render(config_file, 'vrf/vrf.conf.tmpl', vrf) + vrf['new_frr_config'] = render_to_string('frr/vrf.frr.tmpl', vrf) return None def apply(vrf): @@ -140,14 +143,14 @@ def apply(vrf): 
bind_all = '0' if 'bind_to_all' in vrf: bind_all = '1' - _cmd(f'sysctl -wq net.ipv4.tcp_l3mdev_accept={bind_all}') - _cmd(f'sysctl -wq net.ipv4.udp_l3mdev_accept={bind_all}') + call(f'sysctl -wq net.ipv4.tcp_l3mdev_accept={bind_all}') + call(f'sysctl -wq net.ipv4.udp_l3mdev_accept={bind_all}') for tmp in (dict_search('vrf_remove', vrf) or []): if os.path.isdir(f'/sys/class/net/{tmp}'): - _cmd(f'ip -4 route del vrf {tmp} unreachable default metric 4278198272') - _cmd(f'ip -6 route del vrf {tmp} unreachable default metric 4278198272') - _cmd(f'ip link delete dev {tmp}') + call(f'ip -4 route del vrf {tmp} unreachable default metric 4278198272') + call(f'ip -6 route del vrf {tmp} unreachable default metric 4278198272') + call(f'ip link delete dev {tmp}') if 'name' in vrf: for name, config in vrf['name'].items(): @@ -156,16 +159,16 @@ def apply(vrf): if not os.path.isdir(f'/sys/class/net/{name}'): # For each VRF apart from your default context create a VRF # interface with a separate routing table - _cmd(f'ip link add {name} type vrf table {table}') + call(f'ip link add {name} type vrf table {table}') # The kernel Documentation/networking/vrf.txt also recommends # adding unreachable routes to the VRF routing tables so that routes # afterwards are taken. - _cmd(f'ip -4 route add vrf {name} unreachable default metric 4278198272') - _cmd(f'ip -6 route add vrf {name} unreachable default metric 4278198272') + call(f'ip -4 route add vrf {name} unreachable default metric 4278198272') + call(f'ip -6 route add vrf {name} unreachable default metric 4278198272') # We also should add proper loopback IP addresses to the newly # created VRFs for services bound to the loopback address (SNMP, NTP) - _cmd(f'ip -4 addr add 127.0.0.1/8 dev {name}') - _cmd(f'ip -6 addr add ::1/128 dev {name}') + call(f'ip -4 addr add 127.0.0.1/8 dev {name}') + call(f'ip -6 addr add ::1/128 dev {name}') # set VRF description for e.g. SNMP monitoring vrf_if = Interface(name) @@ -199,18 +202,34 @@ def apply(vrf): # change preference when VRFs are enabled and local lookup table is default if not local_pref and 'name' in vrf: for af in ['-4', '-6']: - _cmd(f'ip {af} rule add pref 32765 table local') - _cmd(f'ip {af} rule del pref 0') + call(f'ip {af} rule add pref 32765 table local') + call(f'ip {af} rule del pref 0') # return to default lookup preference when no VRF is configured if 'name' not in vrf: for af in ['-4', '-6']: - _cmd(f'ip {af} rule add pref 0 table local') - _cmd(f'ip {af} rule del pref 32765') + call(f'ip {af} rule add pref 0 table local') + call(f'ip {af} rule del pref 32765') # clean out l3mdev-table rule if present if 1000 in [r.get('priority') for r in list_rules() if r.get('priority') == 1000]: - _cmd(f'ip {af} rule del pref 1000') + call(f'ip {af} rule del pref 1000') + + # add configuration to FRR + frr_cfg = frr.FRRConfig() + frr_cfg.load_configuration(frr_daemon) + frr_cfg.modify_section(f'^vrf [a-zA-Z-]*$', '') + frr_cfg.add_before(r'(interface .*|line vty)', vrf['new_frr_config']) + frr_cfg.commit_configuration(frr_daemon) + + # If FRR config is blank, rerun the blank commit x times due to frr-reload + # behavior/bug not properly clearing out on one commit. 
+ if vrf['new_frr_config'] == '': + for a in range(5): + frr_cfg.commit_configuration(frr_daemon) + + # Save configuration to /run/frr/config/frr.conf + frr.save_configuration() return None diff --git a/src/etc/dhcp/dhclient-enter-hooks.d/02-vyos-stopdhclient b/src/etc/dhcp/dhclient-enter-hooks.d/02-vyos-stopdhclient index d5d90632c..f737148dc 100644 --- a/src/etc/dhcp/dhclient-enter-hooks.d/02-vyos-stopdhclient +++ b/src/etc/dhcp/dhclient-enter-hooks.d/02-vyos-stopdhclient @@ -2,26 +2,35 @@ if [ -z ${CONTROLLED_STOP} ] ; then # stop dhclient for this interface, if it is not current one # get PID for current dhclient - current_dhclient=`ps --no-headers --format ppid --pid $$ | awk '{ print $1 }'` + current_dhclient=`ps --no-headers --format ppid --pid $$ | awk '{ print \$1 }'` # get PID for master process (current can be a fork) - master_dhclient=`ps --no-headers --format ppid --pid $current_dhclient | awk '{ print $1 }'` + master_dhclient=`ps --no-headers --format ppid --pid $current_dhclient | awk '{ print \$1 }'` # get IP version for current dhclient - ipversion_arg=`ps --no-headers --format args --pid $current_dhclient | awk '{ print $2 }'` + ipversion_arg=`ps --no-headers --format args --pid $current_dhclient | awk 'match(\$0, /\s-(4|6)\s/, IPV) { printf("%s", IPV[1]) }'` # get list of all dhclient running for current interface - dhclients_pids=(`ps --no-headers --format pid,args -C dhclient | awk -v IFACE="/sbin/dhclient $ipversion_arg .*$interface$" '$0 ~ IFACE { print $1 }'`) + if [[ $ipversion_arg == "6" ]]; then + dhclients_pids=(`pgrep -f "dhclient.*\s-6\s.*\s$interface(\s|$)"`) + else + dhclients_pids=(`ps --no-headers --format pid,args -C dhclient | awk "{ if(match(\\$0, /\s${interface}(\s|$)/) && !match(\\$0, /\s-6\s/)) printf(\"%s\n\", \\$1) }"`) + fi logmsg info "Current dhclient PID: $current_dhclient, Parent PID: $master_dhclient, IP version: $ipversion_arg, All dhclients for interface $interface: ${dhclients_pids[@]}" # stop all dhclients for current interface, except current one for dhclient in ${dhclients_pids[@]}; do if ([ $dhclient -ne $current_dhclient ] && [ $dhclient -ne $master_dhclient ]); then - logmsg info "Stopping dhclient with PID: ${dhclient}" # get path to PID-file of dhclient process - local dhclient_pidfile=`ps --no-headers --format args --pid $dhclient | awk 'match($0, ".*-pf (/.*pid) .*", PF) { print PF[1] }'` + local dhclient_pidfile=`ps --no-headers --format args --pid $dhclient | awk 'match(\$0, ".*-pf (/.*pid) .*", PF) { print PF[1] }'` # stop dhclient with native command - this will run dhclient-script with correct reason unlike simple kill - dhclient -e CONTROLLED_STOP=yes -x -pf $dhclient_pidfile + logmsg info "Stopping dhclient with PID: ${dhclient}, PID file: $dhclient_pidfile" + if [[ -e $dhclient_pidfile ]]; then + dhclient -e CONTROLLED_STOP=yes -x -pf $dhclient_pidfile + else + logmsg error "PID file $dhclient_pidfile does not exists, killing dhclient with SIGTERM signal" + kill -s 15 ${dhclient} + fi fi done fi diff --git a/src/etc/dhcp/dhclient-enter-hooks.d/03-vyos-ipwrapper b/src/etc/dhcp/dhclient-enter-hooks.d/03-vyos-ipwrapper index fc035766b..74a7e83bf 100644 --- a/src/etc/dhcp/dhclient-enter-hooks.d/03-vyos-ipwrapper +++ b/src/etc/dhcp/dhclient-enter-hooks.d/03-vyos-ipwrapper @@ -3,6 +3,9 @@ # default route distance IF_METRIC=${IF_METRIC:-210} +# Check if interface is inside a VRF +VRF_OPTION=$(/usr/sbin/ip -j -d link show ${interface} | awk '{if(match($0, /.*"master":"(\w+)".*"info_slave_kind":"vrf"/, IFACE_DETAILS)) printf("vrf 
%s", IFACE_DETAILS[1])}') + # get status of FRR function frr_alive () { /usr/lib/frr/watchfrr.sh all_status @@ -51,12 +54,7 @@ function iptovtysh () { shift 2 fi - # Add route to VRF routing table - local VTYSH_VRF_NAME=$(/usr/sbin/ip link show dev $VTYSH_DEV | sed -nre '1s/.* master ([^ ]*) .*/\1/p') - if /usr/sbin/ip -d link show dev $VTYSH_DEV | grep -q "vrf_slave"; then - VTYSH_VRF="vrf $VTYSH_VRF_NAME" - fi - VTYSH_CMD="ip route $VTYSH_NETADDR $VTYSH_GATEWAY $VTYSH_DEV tag $VTYSH_TAG $VTYSH_DISTANCE $VTYSH_VRF" + VTYSH_CMD="ip route $VTYSH_NETADDR $VTYSH_GATEWAY $VTYSH_DEV tag $VTYSH_TAG $VTYSH_DISTANCE $VRF_OPTION" # delete route if the command is "del" if [ "$VTYSH_ACTION" == "del" ] ; then @@ -67,10 +65,10 @@ function iptovtysh () { # delete the same route from kernel before adding new one function delroute () { - logmsg info "Checking if the route presented in kernel: $@" - if /usr/sbin/ip route show $@ | grep -qx "$1 " ; then - logmsg info "Deleting IP route: \"/usr/sbin/ip route del $@\"" - /usr/sbin/ip route del $@ + logmsg info "Checking if the route presented in kernel: $@ $VRF_OPTION" + if /usr/sbin/ip route show $@ $VRF_OPTION | grep -qx "$1 " ; then + logmsg info "Deleting IP route: \"/usr/sbin/ip route del $@ $VRF_OPTION\"" + /usr/sbin/ip route del $@ $VRF_OPTION fi } @@ -90,8 +88,7 @@ function ip () { else # add ip route to kernel logmsg info "Modifying routes in kernel: \"/usr/sbin/ip $@\"" - /usr/sbin/ip $@ + /usr/sbin/ip $@ $VRF_OPTION fi fi } - diff --git a/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup b/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup index edb7c7b27..694d53b6b 100644 --- a/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup +++ b/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup @@ -17,14 +17,8 @@ if [[ $reason =~ (EXPIRE|FAIL|RELEASE|STOP) ]]; then # try to delete default ip route for router in $old_routers; do - # check if we are bound to a VRF - local vrf_name=$(basename /sys/class/net/${interface}/upper_* | sed -e 's/upper_//') - if [ -n $vrf_name ]; then - vrf="vrf $vrf_name" - fi - - logmsg info "Deleting default route: via $router dev ${interface} ${if_metric:+metric $if_metric} ${vrf}" - ip -4 route del default via $router dev ${interface} ${if_metric:+metric $if_metric} ${vrf} + logmsg info "Deleting default route: via $router dev ${interface} ${if_metric:+metric $if_metric}" + ip -4 route del default via $router dev ${interface} ${if_metric:+metric $if_metric} if_metric=$((if_metric+1)) done diff --git a/src/etc/udev/rules.d/99-vyos-wwan.rules b/src/etc/udev/rules.d/99-vyos-wwan.rules new file mode 100644 index 000000000..67f30a3dd --- /dev/null +++ b/src/etc/udev/rules.d/99-vyos-wwan.rules @@ -0,0 +1,11 @@ +ACTION!="add|change", GOTO="mbim_to_qmi_rules_end" + +SUBSYSTEM!="usb", GOTO="mbim_to_qmi_rules_end" + +# ignore any device with only one configuration +ATTR{bNumConfigurations}=="1", GOTO="mbim_to_qmi_rules_end" + +# force Sierra Wireless MC7710 to configuration #1 +ATTR{idVendor}=="1199",ATTR{idProduct}=="68a2",ATTR{bConfigurationValue}="1" + +LABEL="mbim_to_qmi_rules_end" diff --git a/src/migration-scripts/bgp/0-to-1 b/src/migration-scripts/bgp/0-to-1 new file mode 100755 index 000000000..b1d5a6514 --- /dev/null +++ b/src/migration-scripts/bgp/0-to-1 @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free 
Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +# T3417: migrate IS-IS tagNode to node as we can only have one IS-IS process + +from sys import argv +from sys import exit + +from vyos.configtree import ConfigTree + +if (len(argv) < 1): + print("Must specify file name!") + exit(1) + +file_name = argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['protocols', 'bgp'] +config = ConfigTree(config_file) + +if not config.exists(base): + # Nothing to do + exit(0) + +# Only one BGP process is supported, thus this operation is savea +asn = config.list_nodes(base) +bgp_base = base + asn + +# We need a temporary copy of the config +tmp_base = ['protocols', 'bgp2'] +config.copy(bgp_base, tmp_base) + +# Now it's save to delete the old configuration +config.delete(base) + +# Rename temporary copy to new final config and set new "local-as" option +config.rename(tmp_base, 'bgp') +config.set(base + ['local-as'], value=asn[0]) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print(f'Failed to save the modified config: {e}') + exit(1) diff --git a/src/migration-scripts/isis/0-to-1 b/src/migration-scripts/isis/0-to-1 new file mode 100755 index 000000000..93cbbbed5 --- /dev/null +++ b/src/migration-scripts/isis/0-to-1 @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
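The bgp/0-to-1 and isis/0-to-1 migrators above share one pattern: copy the single tag-node instance to a temporary path, delete the old subtree, rename the copy back to the plain node, and (for BGP) carry the ASN over as local-as. A condensed sketch of that pattern, assuming a ConfigTree loaded exactly as in the scripts above (the helper name is mine; only ConfigTree calls already used in these migrators are assumed):

    from vyos.configtree import ConfigTree

    def collapse_tagnode(config: ConfigTree, base, tmp_name, keep_asn=False):
        if not config.exists(base):
            return                                    # nothing to migrate
        instance = config.list_nodes(base)            # e.g. ['65000'] for BGP
        config.copy(base + instance, base[:-1] + [tmp_name])
        config.delete(base)                           # drop the old tag node
        config.rename(base[:-1] + [tmp_name], base[-1])
        if keep_asn:                                  # preserve the ASN as "local-as"
            config.set(base + ['local-as'], value=instance[0])

    # collapse_tagnode(config, ['protocols', 'bgp'], 'bgp2', keep_asn=True)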
+ +# T3417: migrate IS-IS tagNode to node as we can only have one IS-IS process + +from sys import argv +from sys import exit + +from vyos.configtree import ConfigTree + +if (len(argv) < 1): + print("Must specify file name!") + exit(1) + +file_name = argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['protocols', 'isis'] +config = ConfigTree(config_file) + +if not config.exists(base): + # Nothing to do + exit(0) + +# Only one IS-IS process is supported, thus this operation is save +isis_base = base + config.list_nodes(base) + +# We need a temporary copy of the config +tmp_base = ['protocols', 'isis2'] +config.copy(isis_base, tmp_base) + +# Now it's save to delete the old configuration +config.delete(base) + +# Rename temporary copy to new final config (IS-IS domain key is static and no +# longer required to be set via CLI) +config.rename(tmp_base, 'isis') + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print(f'Failed to save the modified config: {e}') + exit(1) diff --git a/src/migration-scripts/vrf/1-to-2 b/src/migration-scripts/vrf/1-to-2 new file mode 100755 index 000000000..20128e957 --- /dev/null +++ b/src/migration-scripts/vrf/1-to-2 @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +# - T3344: migrate routing options from "protocols vrf" to "vrf <name> protocols" + +from sys import argv +from sys import exit +from vyos.configtree import ConfigTree + +if (len(argv) < 2): + print("Must specify file name!") + exit(1) + +file_name = argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['protocols', 'vrf'] +config = ConfigTree(config_file) + +if not config.exists(base): + # Nothing to do + exit(0) + +vrf_base = ['vrf', 'name'] +config.set(vrf_base) +config.set_tag(vrf_base) + +# Copy all existing static routes to the new base node under "vrf name <name> protocols static" +for vrf in config.list_nodes(base): + static_base = base + [vrf, 'static'] + if not config.exists(static_base): + continue + + new_static_base = vrf_base + [vrf, 'protocols'] + config.set(new_static_base) + config.copy(static_base, new_static_base + ['static']) + +# Now delete the old configuration +config.delete(base) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print("Failed to save the modified config: {}".format(e)) + exit(1) diff --git a/src/op_mode/containers_op.py b/src/op_mode/containers_op.py new file mode 100755 index 000000000..1e3fc3a8f --- /dev/null +++ b/src/op_mode/containers_op.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import argparse +from vyos.configquery import query_context, ConfigQueryError +from vyos.util import cmd + +config, op = query_context() + +parser = argparse.ArgumentParser() +parser.add_argument("-a", "--all", action="store_true", help="Show all containers") +parser.add_argument("-i", "--image", action="store_true", help="Show container images") +parser.add_argument("-n", "--networks", action="store_true", help="Show container images") +parser.add_argument("-p", "--pull", action="store", help="Pull image for container") +parser.add_argument("-d", "--remove", action="store", help="Delete container image") + +if not config.exists(['container']): + print('Containers not configured') + exit(0) + +if __name__ == '__main__': + args = parser.parse_args() + + if args.all: + print(cmd('podman ps --all')) + exit(0) + if args.image: + print(cmd('podman image ls')) + exit(0) + if args.networks: + print(cmd('podman network ls')) + exit(0) + if args.pull: + image = args.pull + try: + print(cmd(f'sudo podman image pull {image}')) + except: + print(f'Can\'t find or download image "{image}"') + exit(0) + if args.remove: + image = args.remove + try: + print(cmd(f'sudo podman image rm {image}')) + except: + print(f'Can\'t delete image "{image}"') + exit(0) diff --git a/src/op_mode/ppp-server-ctrl.py b/src/op_mode/ppp-server-ctrl.py index 171107b4a..670cdf879 100755 --- a/src/op_mode/ppp-server-ctrl.py +++ b/src/op_mode/ppp-server-ctrl.py @@ -59,7 +59,10 @@ def main(): output, err = popen(cmd_dict['cmd_base'].format(cmd_dict['vpn_types'][args.proto]) + args.action + ses_pattern, stderr=DEVNULL, decode='utf-8') if not err: - print(output) + try: + print(output) + except: + sys.exit(0) else: print("{} server is not running".format(args.proto)) diff --git a/src/op_mode/show_ipsec_sa.py b/src/op_mode/show_ipsec_sa.py index b7927fcc2..c98ced158 100755 --- a/src/op_mode/show_ipsec_sa.py +++ b/src/op_mode/show_ipsec_sa.py @@ -43,8 +43,11 @@ for sa in sas: # list_sas() returns a list of single-item dicts for peer in sa: parent_sa = sa[peer] + child_sas = parent_sa["child-sas"] + installed_sas = {k: v for k, v in child_sas.items() if v["state"] == b"INSTALLED"} - if parent_sa["state"] == b"ESTABLISHED": + # parent_sa["state"] = IKE state, child_sas["state"] = ESP state + if parent_sa["state"] == b"ESTABLISHED" and installed_sas: state = "up" else: state = "down" @@ -61,15 +64,13 @@ for sa in sas: remote_id = "N/A" # The counters can only be obtained from the child SAs - child_sas = parent_sa["child-sas"] - installed_sas = {k: v for k, v in child_sas.items() if v["state"] == b"INSTALLED"} - if not installed_sas: data = [peer, state, "N/A", "N/A", "N/A", "N/A", "N/A", "N/A"] sa_data.append(data) else: for csa in installed_sas: isa = installed_sas[csa] + csa_name = isa['name'] bytes_in = hurry.filesize.size(int(isa["bytes-in"].decode())) bytes_out = hurry.filesize.size(int(isa["bytes-out"].decode())) @@ -103,7 +104,7 @@ for sa in sas: if dh_group: proposal = "{0}/{1}".format(proposal, dh_group) - data = [peer, state, uptime, bytes_str, pkts_str, remote_host, remote_id, proposal] + data = [csa_name, state, uptime, bytes_str, 
pkts_str, remote_host, remote_id, proposal] sa_data.append(data) headers = ["Connection", "State", "Uptime", "Bytes In/Out", "Packets In/Out", "Remote address", "Remote ID", "Proposal"] diff --git a/src/op_mode/show_nat66_rules.py b/src/op_mode/show_nat66_rules.py new file mode 100755 index 000000000..a25e146a7 --- /dev/null +++ b/src/op_mode/show_nat66_rules.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import jmespath +import json + +from argparse import ArgumentParser +from jinja2 import Template +from sys import exit +from vyos.util import cmd +from vyos.util import dict_search + +parser = ArgumentParser() +group = parser.add_mutually_exclusive_group() +group.add_argument("--source", help="Show statistics for configured source NAT rules", action="store_true") +group.add_argument("--destination", help="Show statistics for configured destination NAT rules", action="store_true") +args = parser.parse_args() + +if args.source or args.destination: + tmp = cmd('sudo nft -j list table ip6 nat') + tmp = json.loads(tmp) + + format_nat66_rule = '{0: <10} {1: <50} {2: <50} {3: <10}' + print(format_nat66_rule.format("Rule", "Source" if args.source else "Destination", "Translation", "Outbound Interface" if args.source else "Inbound Interface")) + print(format_nat66_rule.format("----", "------" if args.source else "-----------", "-----------", "------------------" if args.source else "-----------------")) + + data_json = jmespath.search('nftables[?rule].rule[?chain]', tmp) + for idx in range(0, len(data_json)): + data = data_json[idx] + + # The following key values must exist + # When the rule JSON does not have some keys, this is not a rule we can work with + continue_rule = False + for key in ['comment', 'chain', 'expr']: + if key not in data: + continue_rule = True + continue + if continue_rule: + continue + + comment = data['comment'] + + # Check the annotation to see if the annotation format is created by VYOS + continue_rule = True + for comment_prefix in ['SRC-NAT66-', 'DST-NAT66-']: + if comment_prefix in comment: + continue_rule = False + if continue_rule: + continue + + # When log is detected from the second index of expr, then this rule should be ignored + if 'log' in data['expr'][2]: + continue + + rule = comment.replace('SRC-NAT66-','') + rule = rule.replace('DST-NAT66-','') + chain = data['chain'] + if not (args.source and chain == 'POSTROUTING') or (not args.source and chain == 'PREROUTING'): + continue + interface = dict_search('match.right', data['expr'][0]) + srcdest = dict_search('match.right.prefix.addr', data['expr'][2]) + if srcdest: + addr_tmp = dict_search('match.right.prefix.len', data['expr'][2]) + if addr_tmp: + srcdest = srcdest + '/' + str(addr_tmp) + else: + srcdest = dict_search('match.right', data['expr'][2]) + + tran_addr = dict_search('snat.addr.prefix.addr' if args.source else 'dnat.addr.prefix.addr', data['expr'][3]) + if 
tran_addr: + addr_tmp = dict_search('snat.addr.prefix.len' if args.source else 'dnat.addr.prefix.len', data['expr'][3]) + if addr_tmp: + srcdest = srcdest + '/' + str(addr_tmp) + else: + if 'masquerade' in data['expr'][3]: + tran_addr = 'masquerade' + else: + tran_addr = dict_search('snat.addr' if args.source else 'dnat.addr', data['expr'][3]) + + print(format_nat66_rule.format(rule, srcdest, tran_addr, interface)) + + exit(0) +else: + parser.print_help() + exit(1) + diff --git a/src/op_mode/show_nat66_statistics.py b/src/op_mode/show_nat66_statistics.py new file mode 100755 index 000000000..bc81692ae --- /dev/null +++ b/src/op_mode/show_nat66_statistics.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2018 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import jmespath +import json + +from argparse import ArgumentParser +from jinja2 import Template +from sys import exit +from vyos.util import cmd + +OUT_TMPL_SRC=""" +rule pkts bytes interface +---- ---- ----- --------- +{% for r in output %} +{% if r.comment %} +{% set packets = r.counter.packets %} +{% set bytes = r.counter.bytes %} +{% set interface = r.interface %} +{# remove rule comment prefix #} +{% set comment = r.comment | replace('SRC-NAT66-', '') | replace('DST-NAT66-', '') %} +{{ "%-4s" | format(comment) }} {{ "%9s" | format(packets) }} {{ "%12s" | format(bytes) }} {{ interface }} +{% endif %} +{% endfor %} +""" + +parser = ArgumentParser() +group = parser.add_mutually_exclusive_group() +group.add_argument("--source", help="Show statistics for configured source NAT rules", action="store_true") +group.add_argument("--destination", help="Show statistics for configured destination NAT rules", action="store_true") +args = parser.parse_args() + +if args.source or args.destination: + tmp = cmd('sudo nft -j list table ip6 nat') + tmp = json.loads(tmp) + + source = r"nftables[?rule.chain=='POSTROUTING'].rule.{chain: chain, handle: handle, comment: comment, counter: expr[].counter | [0], interface: expr[].match.right | [0] }" + destination = r"nftables[?rule.chain=='PREROUTING'].rule.{chain: chain, handle: handle, comment: comment, counter: expr[].counter | [0], interface: expr[].match.right | [0] }" + data = { + 'output' : jmespath.search(source if args.source else destination, tmp), + 'direction' : 'source' if args.source else 'destination' + } + + tmpl = Template(OUT_TMPL_SRC, lstrip_blocks=True) + print(tmpl.render(data)) + exit(0) +else: + parser.print_help() + exit(1) + diff --git a/src/op_mode/show_nat66_translations.py b/src/op_mode/show_nat66_translations.py new file mode 100755 index 000000000..045d64065 --- /dev/null +++ b/src/op_mode/show_nat66_translations.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2020 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free 
Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +''' +show nat translations +''' + +import os +import sys +import ipaddress +import argparse +import xmltodict + +from vyos.util import popen +from vyos.util import DEVNULL + +conntrack = '/usr/sbin/conntrack' + +verbose_format = "%-20s %-18s %-20s %-18s" +normal_format = "%-20s %-20s %-4s %-8s %s" + + +def headers(verbose, pipe): + if verbose: + return verbose_format % ('Pre-NAT src', 'Pre-NAT dst', 'Post-NAT src', 'Post-NAT dst') + return normal_format % ('Pre-NAT', 'Post-NAT', 'Prot', 'Timeout', 'Type' if pipe else '') + + +def command(srcdest, proto, ipaddr): + command = f'{conntrack} -o xml -L -f ipv6' + + if proto: + command += f' -p {proto}' + + if srcdest == 'source': + command += ' -n' + if ipaddr: + command += f' --orig-src {ipaddr}' + if srcdest == 'destination': + command += ' -g' + if ipaddr: + command += f' --orig-dst {ipaddr}' + + return command + + +def run(command): + xml, code = popen(command,stderr=DEVNULL) + if code: + sys.exit('conntrack failed') + return xml + + +def content(xmlfile): + xml = '' + with open(xmlfile,'r') as r: + xml += r.read() + return xml + + +def pipe(): + xml = '' + while True: + line = sys.stdin.readline() + xml += line + if '</conntrack>' in line: + break + + sys.stdin = open('/dev/tty') + return xml + + +def process(data, stats, protocol, pipe, verbose, flowtype=''): + if not data: + return + + parsed = xmltodict.parse(data) + + print(headers(verbose, pipe)) + + # to help the linter to detect typos + ORIGINAL = 'original' + REPLY = 'reply' + INDEPENDANT = 'independent' + SPORT = 'sport' + DPORT = 'dport' + SRC = 'src' + DST = 'dst' + + for rule in parsed['conntrack']['flow']: + src, dst, sport, dport, proto = {}, {}, {}, {}, {} + packet_count, byte_count = {}, {} + timeout, use = 0, 0 + + rule_type = rule.get('type', '') + + for meta in rule['meta']: + # print(meta) + direction = meta['@direction'] + + if direction in (ORIGINAL, REPLY): + if 'layer3' in meta: + l3 = meta['layer3'] + src[direction] = l3[SRC] + dst[direction] = l3[DST] + + if 'layer4' in meta: + l4 = meta['layer4'] + sp = l4.get(SPORT, '') + dp = l4.get(DPORT, '') + if sp: + sport[direction] = sp + if dp: + dport[direction] = dp + proto[direction] = l4.get('@protoname','') + + if stats and 'counters' in meta: + packet_count[direction] = meta['packets'] + byte_count[direction] = meta['bytes'] + continue + + if direction == INDEPENDANT: + timeout = meta['timeout'] + use = meta['use'] + continue + + in_src = '%s:%s' % (src[ORIGINAL], sport[ORIGINAL]) if ORIGINAL in sport else src[ORIGINAL] + in_dst = '%s:%s' % (dst[ORIGINAL], dport[ORIGINAL]) if ORIGINAL in dport else dst[ORIGINAL] + + # inverted the the perl code !!? 
+ out_dst = '%s:%s' % (dst[REPLY], dport[REPLY]) if REPLY in dport else dst[REPLY] + out_src = '%s:%s' % (src[REPLY], sport[REPLY]) if REPLY in sport else src[REPLY] + + if flowtype == 'source': + v = ORIGINAL in sport and REPLY in dport + f = '%s:%s' % (src[ORIGINAL], sport[ORIGINAL]) if v else src[ORIGINAL] + t = '%s:%s' % (dst[REPLY], dport[REPLY]) if v else dst[REPLY] + else: + v = ORIGINAL in dport and REPLY in sport + f = '%s:%s' % (dst[ORIGINAL], dport[ORIGINAL]) if v else dst[ORIGINAL] + t = '%s:%s' % (src[REPLY], sport[REPLY]) if v else src[REPLY] + + # Thomas: I do not believe proto should be an option + p = proto.get('original', '') + if protocol and p != protocol: + continue + + if verbose: + msg = verbose_format % (in_src, in_dst, out_dst, out_src) + p = f'{p}: ' if p else '' + msg += f'\n {p}{f} ==> {t}' + msg += f' timeout: {timeout}' if timeout else '' + msg += f' use: {use} ' if use else '' + msg += f' type: {rule_type}' if rule_type else '' + print(msg) + else: + print(normal_format % (f, t, p, timeout, rule_type if rule_type else '')) + + if stats: + for direction in ('original', 'reply'): + if direction in packet_count: + print(' %-8s: packets %s, bytes %s' % direction, packet_count[direction], byte_count[direction]) + + +def main(): + parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__) + parser.add_argument('--verbose', help='provide more details about the flows', action='store_true') + parser.add_argument('--proto', help='filter by protocol', default='', type=str) + parser.add_argument('--file', help='read the conntrack xml from a file', type=str) + parser.add_argument('--stats', help='add usage statistics', action='store_true') + parser.add_argument('--type', help='NAT type (source, destination)', required=True, type=str) + parser.add_argument('--ipaddr', help='source ip address to filter on', type=ipaddress.ip_address) + parser.add_argument('--pipe', help='read conntrack xml data from stdin', action='store_true') + + arg = parser.parse_args() + + if arg.type not in ('source', 'destination'): + sys.exit('Unknown NAT type!') + + if arg.pipe: + process(pipe(), arg.stats, arg.proto, arg.pipe, arg.verbose, arg.type) + elif arg.file: + process(content(arg.file), arg.stats, arg.proto, arg.pipe, arg.verbose, arg.type) + else: + try: + process(run(command(arg.type, arg.proto, arg.ipaddr)), arg.stats, arg.proto, arg.pipe, arg.verbose, arg.type) + except: + pass + +if __name__ == '__main__': + main() diff --git a/src/op_mode/show_nat_rules.py b/src/op_mode/show_nat_rules.py new file mode 100755 index 000000000..68cff61c8 --- /dev/null +++ b/src/op_mode/show_nat_rules.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
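show_nat66_translations.py above leans on xmltodict: the XML produced by 'conntrack -o xml -L -f ipv6' becomes nested dicts, each flow carries a list of meta entries keyed by their @direction attribute, and the original/reply directions supply the pre- and post-NAT addresses. A minimal sketch of that parsing step on a canned snippet (the XML shape is inferred from the fields the script reads, not captured from a live system):

    import xmltodict

    SAMPLE = """<conntrack><flow>
      <meta direction="original">
        <layer3><src>2001:db8::1</src><dst>2001:db8::2</dst></layer3>
        <layer4 protoname="tcp"><sport>51000</sport><dport>443</dport></layer4>
      </meta>
      <meta direction="reply">
        <layer3><src>2001:db8::2</src><dst>2001:db8:ffff::1</dst></layer3>
        <layer4 protoname="tcp"><sport>443</sport><dport>51000</dport></layer4>
      </meta>
      <meta direction="independent"><timeout>110</timeout><use>1</use></meta>
    </flow></conntrack>"""

    flows = xmltodict.parse(SAMPLE)['conntrack']['flow']
    if not isinstance(flows, list):   # xmltodict returns a dict when only one <flow> is present
        flows = [flows]
    for flow in flows:
        for meta in flow['meta']:
            if meta['@direction'] in ('original', 'reply') and 'layer3' in meta:
                l3, l4 = meta['layer3'], meta.get('layer4', {})
                print(meta['@direction'],
                      f"{l3['src']}:{l4.get('sport', '')} -> {l3['dst']}:{l4.get('dport', '')}")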
+ +import jmespath +import json + +from argparse import ArgumentParser +from jinja2 import Template +from sys import exit +from vyos.util import cmd +from vyos.util import dict_search + +parser = ArgumentParser() +group = parser.add_mutually_exclusive_group() +group.add_argument("--source", help="Show statistics for configured source NAT rules", action="store_true") +group.add_argument("--destination", help="Show statistics for configured destination NAT rules", action="store_true") +args = parser.parse_args() + +if args.source or args.destination: + tmp = cmd('sudo nft -j list table ip nat') + tmp = json.loads(tmp) + + format_nat66_rule = '{0: <10} {1: <50} {2: <50} {3: <10}' + print(format_nat66_rule.format("Rule", "Source" if args.source else "Destination", "Translation", "Outbound Interface" if args.source else "Inbound Interface")) + print(format_nat66_rule.format("----", "------" if args.source else "-----------", "-----------", "------------------" if args.source else "-----------------")) + + data_json = jmespath.search('nftables[?rule].rule[?chain]', tmp) + for idx in range(0, len(data_json)): + data = data_json[idx] + + # The following key values must exist + # When the rule JSON does not have some keys, this is not a rule we can work with + continue_rule = False + for key in ['comment', 'chain', 'expr']: + if key not in data: + continue_rule = True + continue + if continue_rule: + continue + + comment = data['comment'] + + # Check the annotation to see if the annotation format is created by VYOS + continue_rule = True + for comment_prefix in ['SRC-NAT-', 'DST-NAT-']: + if comment_prefix in comment: + continue_rule = False + if continue_rule: + continue + + rule = int(''.join(list(filter(str.isdigit, comment)))) + chain = data['chain'] + if not (args.source and chain == 'POSTROUTING') or (not args.source and chain == 'PREROUTING'): + continue + interface = dict_search('match.right', data['expr'][0]) + srcdest = dict_search('match.right.prefix.addr', data['expr'][1]) + if srcdest: + addr_tmp = dict_search('match.right.prefix.len', data['expr'][1]) + if addr_tmp: + srcdest = srcdest + '/' + str(addr_tmp) + else: + srcdest = dict_search('match.right', data['expr'][1]) + tran_addr = dict_search('snat.addr.prefix.addr' if args.source else 'dnat.addr.prefix.addr', data['expr'][3]) + if tran_addr: + addr_tmp = dict_search('snat.addr.prefix.len' if args.source else 'dnat.addr.prefix.len', data['expr'][3]) + if addr_tmp: + srcdest = srcdest + '/' + str(addr_tmp) + else: + if 'masquerade' in data['expr'][3]: + tran_addr = 'masquerade' + elif 'log' in data['expr'][3]: + continue + else: + tran_addr = dict_search('snat.addr' if args.source else 'dnat.addr', data['expr'][3]) + + print(format_nat66_rule.format(rule, srcdest, tran_addr, interface)) + + exit(0) +else: + parser.print_help() + exit(1) + diff --git a/src/op_mode/show_nat_statistics.py b/src/op_mode/show_nat_statistics.py index 482993d06..c568c8305 100755 --- a/src/op_mode/show_nat_statistics.py +++ b/src/op_mode/show_nat_statistics.py @@ -44,7 +44,7 @@ group.add_argument("--destination", help="Show statistics for configured destina args = parser.parse_args() if args.source or args.destination: - tmp = cmd('sudo nft -j list table nat') + tmp = cmd('sudo nft -j list table ip nat') tmp = json.loads(tmp) source = r"nftables[?rule.chain=='POSTROUTING'].rule.{chain: chain, handle: handle, comment: comment, counter: expr[].counter | [0], interface: expr[].match.right | [0] }" diff --git a/src/op_mode/show_ntp.sh b/src/op_mode/show_ntp.sh 
new file mode 100755 index 000000000..e9dd6c5c9 --- /dev/null +++ b/src/op_mode/show_ntp.sh @@ -0,0 +1,39 @@ +#!/bin/sh + +basic=0 +info=0 + +while [[ "$#" -gt 0 ]]; do + case $1 in + --info) info=1 ;; + --basic) basic=1 ;; + --server) server=$2; shift ;; + *) echo "Unknown parameter passed: $1" ;; + esac + shift +done + +if ! ps -C ntpd &>/dev/null; then + echo NTP daemon disabled + exit 1 +fi + +PID=$(pgrep ntpd) +VRF_NAME=$(ip vrf identify ${PID}) + +if [ ! -z ${VRF_NAME} ]; then + VRF_CMD="sudo ip vrf exec ${VRF_NAME}" +fi + +if [ $basic -eq 1 ]; then + $VRF_CMD ntpq -n -c peers +elif [ $info -eq 1 ]; then + echo "=== sysingo ===" + $VRF_CMD ntpq -n -c sysinfo + echo + echo "=== kerninfo ===" + $VRF_CMD ntpq -n -c kerninfo +elif [ ! -z $server ]; then + $VRF_CMD /usr/sbin/ntpdate -q $server +fi + diff --git a/src/services/vyos-configd b/src/services/vyos-configd index 1e60e53df..6f770b696 100755 --- a/src/services/vyos-configd +++ b/src/services/vyos-configd @@ -25,7 +25,7 @@ import logging import signal import importlib.util import zmq -from contextlib import redirect_stdout, redirect_stderr +from contextlib import contextmanager from vyos.defaults import directories from vyos.configsource import ConfigSourceString, ConfigSourceError @@ -108,20 +108,42 @@ conf_mode_scripts = dict(zip(imports, modules)) exclude_set = {key_name_from_file_name(f) for f in filenames if f not in include} include_set = {key_name_from_file_name(f) for f in filenames if f in include} +@contextmanager +def stdout_redirected(filename, mode): + saved_stdout_fd = None + destination_file = None + try: + sys.stdout.flush() + saved_stdout_fd = os.dup(sys.stdout.fileno()) + destination_file = open(filename, mode) + os.dup2(destination_file.fileno(), sys.stdout.fileno()) + yield + finally: + if saved_stdout_fd is not None: + os.dup2(saved_stdout_fd, sys.stdout.fileno()) + os.close(saved_stdout_fd) + if destination_file is not None: + destination_file.close() + +def explicit_print(path, mode, msg): + try: + with open(path, mode) as f: + f.write(f"\n{msg}\n\n") + except OSError: + logger.critical("error explicit_print") -def run_script(script, config) -> int: +def run_script(script, config, args) -> int: + if args: + script.argv = args config.set_level([]) try: - with open(session_out, session_mode) as f, redirect_stdout(f): - with redirect_stderr(f): - c = script.get_config(config) - script.verify(c) - script.generate(c) - script.apply(c) + c = script.get_config(config) + script.verify(c) + script.generate(c) + script.apply(c) except ConfigError as e: logger.critical(e) - with open(session_out, session_mode) as f, redirect_stdout(f): - print(f"{e}\n") + explicit_print(session_out, session_mode, str(e)) return R_ERROR_COMMIT except Exception as e: logger.critical(e) @@ -165,7 +187,7 @@ def initialization(socket): session_out = None # if not a 'live' session, for example on boot, write to file - if not session_out or '/dev/pts' not in session_out: + if not session_out or not os.path.isfile('/tmp/vyos-config-status'): session_out = script_stdout_log session_mode = 'a' @@ -186,22 +208,26 @@ def process_node_data(config, data) -> int: return R_ERROR_DAEMON script_name = None + args = None - res = re.match(r'^.+\/([^/].+).py(VYOS_TAGNODE_VALUE=.+)?', data) + res = re.match(r'^(VYOS_TAGNODE_VALUE=[^/]+)?.*\/([^/]+).py(.*)', data) if res.group(1): - script_name = res.group(1) - if res.group(2): - env = res.group(2).split('=') + env = res.group(1).split('=') os.environ[env[0]] = env[1] - + if res.group(2): + script_name = 
res.group(2) if not script_name: logger.critical(f"Missing script_name") return R_ERROR_DAEMON + if res.group(3): + args = res.group(3).split() + args.insert(0, f'{script_name}.py') - if script_name in exclude_set: + if script_name not in include_set: return R_PASS - result = run_script(conf_mode_scripts[script_name], config) + with stdout_redirected(session_out, session_mode): + result = run_script(conf_mode_scripts[script_name], config, args) return result diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server index 703628558..8069d7146 100755 --- a/src/services/vyos-http-api-server +++ b/src/services/vyos-http-api-server @@ -1,6 +1,6 @@ -#!/usr/bin/env python3 +#!/usr/share/vyos-http-api-tools/bin/python3 # -# Copyright (C) 2019 VyOS maintainers and contributors +# Copyright (C) 2019-2021 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -19,25 +19,37 @@ import os import sys import grp +import copy import json +import logging import traceback import threading -import signal +from typing import List, Union, Callable, Dict -import vyos.config - -from flask import Flask, request -from waitress import serve +import uvicorn +from fastapi import FastAPI, Depends, Request, Response, HTTPException +from fastapi.responses import HTMLResponse +from fastapi.exceptions import RequestValidationError +from fastapi.routing import APIRoute +from pydantic import BaseModel, StrictStr, validator -from functools import wraps +import vyos.config from vyos.configsession import ConfigSession, ConfigSessionError - DEFAULT_CONFIG_FILE = '/etc/vyos/http-api.conf' CFG_GROUP = 'vyattacfg' -app = Flask(__name__) +debug = True + +logger = logging.getLogger(__name__) +logs_handler = logging.StreamHandler() +logger.addHandler(logs_handler) + +if debug: + logger.setLevel(logging.DEBUG) +else: + logger.setLevel(logging.INFO) # Giant lock! 
lock = threading.Lock() @@ -56,55 +68,310 @@ def check_auth(key_list, key): def error(code, msg): resp = {"success": False, "error": msg, "data": None} - return json.dumps(resp), code + resp = json.dumps(resp) + return HTMLResponse(resp, status_code=code) def success(data): resp = {"success": True, "data": data, "error": None} - return json.dumps(resp) - -def get_command(f): - @wraps(f) - def decorated_function(*args, **kwargs): - cmd = request.form.get("data") - if not cmd: - return error(400, "Non-empty data field is required") - try: - cmd = json.loads(cmd) - except Exception as e: - return error(400, "Failed to parse JSON: {0}".format(e)) - return f(cmd, *args, **kwargs) - - return decorated_function - -def auth_required(f): - @wraps(f) - def decorated_function(*args, **kwargs): - key = request.form.get("key") - api_keys = app.config['vyos_keys'] - id = check_auth(api_keys, key) - if not id: - return error(401, "Valid API key is required") - return f(*args, **kwargs) - - return decorated_function - -@app.route('/configure', methods=['POST']) -@get_command -@auth_required -def configure_op(commands): - session = app.config['vyos_session'] + resp = json.dumps(resp) + return HTMLResponse(resp) + +# Pydantic models for validation +# Pydantic will cast when possible, so use StrictStr +# validators added as needed for additional constraints +# schema_extra adds anotations to OpenAPI, to add examples + +class ApiModel(BaseModel): + key: StrictStr + +class BaseConfigureModel(BaseModel): + op: StrictStr + path: List[StrictStr] + value: StrictStr = None + + @validator("path", pre=True, always=True) + def check_non_empty(cls, path): + assert len(path) > 0 + return path + +class ConfigureModel(ApiModel): + op: StrictStr + path: List[StrictStr] + value: StrictStr = None + + @validator("path", pre=True, always=True) + def check_non_empty(cls, path): + assert len(path) > 0 + return path + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "set | delete | comment", + "path": ['config', 'mode', 'path'], + } + } + +class ConfigureListModel(ApiModel): + commands: List[BaseConfigureModel] + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "commands": "list of commands", + } + } + +class RetrieveModel(ApiModel): + op: StrictStr + path: List[StrictStr] + configFormat: StrictStr = None + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "returnValue | returnValues | exists | showConfig", + "path": ['config', 'mode', 'path'], + "configFormat": "json (default) | json_ast | raw", + + } + } + +class ConfigFileModel(ApiModel): + op: StrictStr + file: StrictStr = None + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "save | load", + "file": "filename", + } + } + +class ImageModel(ApiModel): + op: StrictStr + url: StrictStr = None + name: StrictStr = None + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "add | delete", + "url": "imagelocation", + "name": "imagename", + } + } + +class GenerateModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "generate", + "path": ["op", "mode", "path"], + } + } + +class ShowModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "show", + "path": ["op", "mode", "path"], + } + } + +class Success(BaseModel): + success: bool + data: Union[str, bool, Dict] + error: str + +class 
Error(BaseModel): + success: bool = False + data: Union[str, bool, Dict] + error: str + +responses = { + 200: {'model': Success}, + 400: {'model': Error}, + 422: {'model': Error, 'description': 'Validation Error'}, + 500: {'model': Error} +} + +def auth_required(data: ApiModel): + key = data.key + api_keys = app.state.vyos_keys + id = check_auth(api_keys, key) + if not id: + raise HTTPException(status_code=401, detail="Valid API key is required") + app.state.vyos_id = id + +# override Request and APIRoute classes in order to convert form request to json; +# do all explicit validation here, for backwards compatability of error messages; +# the explicit validation may be dropped, if desired, in favor of native +# validation by FastAPI/Pydantic, as is used for application/json requests +class MultipartRequest(Request): + ERR_MISSING_KEY = False + ERR_MISSING_DATA = False + ERR_NOT_JSON = False + ERR_NOT_DICT = False + ERR_NO_OP = False + ERR_NO_PATH = False + ERR_EMPTY_PATH = False + ERR_PATH_NOT_LIST = False + ERR_VALUE_NOT_STRING = False + ERR_PATH_NOT_LIST_OF_STR = False + offending_command = {} + exception = None + async def body(self) -> bytes: + if not hasattr(self, "_body"): + forms = {} + merge = {} + body = await super().body() + self._body = body + + form_data = await self.form() + if form_data: + logger.debug("processing form data") + for k, v in form_data.multi_items(): + forms[k] = v + + if 'data' not in forms: + self.ERR_MISSING_DATA = True + else: + try: + tmp = json.loads(forms['data']) + except json.JSONDecodeError as e: + self.ERR_NOT_JSON = True + self.exception = e + tmp = {} + if isinstance(tmp, list): + merge['commands'] = tmp + else: + merge = tmp + + if 'commands' in merge: + cmds = merge['commands'] + else: + cmds = copy.deepcopy(merge) + cmds = [cmds] + + for c in cmds: + if not isinstance(c, dict): + self.ERR_NOT_DICT = True + self.offending_command = c + elif 'op' not in c: + self.ERR_NO_OP = True + self.offending_command = c + elif 'path' not in c: + self.ERR_NO_PATH = True + self.offending_command = c + elif not c['path']: + self.ERR_EMPTY_PATH = True + self.offending_command = c + elif not isinstance(c['path'], list): + self.ERR_PATH_NOT_LIST = True + self.offending_command = c + elif not all(isinstance(el, str) for el in c['path']): + self.ERR_PATH_NOT_LIST_OF_STR = True + self.offending_command = c + elif 'value' in c and not isinstance(c['value'], str): + self.ERR_VALUE_NOT_STRING = True + self.offending_command = c + + if 'key' not in forms and 'key' not in merge: + self.ERR_MISSING_KEY = True + if 'key' in forms and 'key' not in merge: + merge['key'] = forms['key'] + + new_body = json.dumps(merge) + new_body = new_body.encode() + self._body = new_body + + return self._body + +class MultipartRoute(APIRoute): + def get_route_handler(self) -> Callable: + original_route_handler = super().get_route_handler() + + async def custom_route_handler(request: Request) -> Response: + request = MultipartRequest(request.scope, request.receive) + endpoint = request.url.path + try: + response: Response = await original_route_handler(request) + except HTTPException as e: + return error(e.status_code, e.detail) + except Exception as e: + if request.ERR_MISSING_KEY: + return error(422, "Valid API key is required") + if request.ERR_MISSING_DATA: + return error(422, "Non-empty data field is required") + if request.ERR_NOT_JSON: + return error(400, "Failed to parse JSON: {0}".format(request.exception)) + if endpoint == '/configure': + if request.ERR_NOT_DICT: + return error(400, 
"Malformed command \"{0}\": any command must be a dict".format(json.dumps(request.offending_command))) + if request.ERR_NO_OP: + return error(400, "Malformed command \"{0}\": missing \"op\" field".format(json.dumps(request.offending_command))) + if request.ERR_NO_PATH: + return error(400, "Malformed command \"{0}\": missing \"path\" field".format(json.dumps(request.offending_command))) + if request.ERR_EMPTY_PATH: + return error(400, "Malformed command \"{0}\": empty path".format(json.dumps(request.offending_command))) + if request.ERR_PATH_NOT_LIST: + return error(400, "Malformed command \"{0}\": \"path\" field must be a list".format(json.dumps(request.offending_command))) + if request.ERR_VALUE_NOT_STRING: + return error(400, "Malformed command \"{0}\": \"value\" field must be a string".format(json.dumps(request.offending_command))) + if request.ERR_PATH_NOT_LIST_OF_STR: + return error(400, "Malformed command \"{0}\": \"path\" field must be a list of strings".format(json.dumps(request.offending_command))) + if endpoint in ('/retrieve','/generate','/show'): + if request.ERR_NO_OP or request.ERR_NO_PATH: + return error(400, "Missing required field. \"op\" and \"path\" fields are required") + if endpoint in ('/config-file', '/image'): + if request.ERR_NO_OP: + return error(400, "Missing required field \"op\"") + + raise e + + return response + + return custom_route_handler + +app = FastAPI(debug=True, + title="VyOS API", + version="0.1.0", + responses={**responses}, + dependencies=[Depends(auth_required)]) + +app.router.route_class = MultipartRoute + +@app.exception_handler(RequestValidationError) +async def validation_exception_handler(request, exc): + return error(400, str(exc.errors()[0])) + +@app.post('/configure') +def configure_op(data: Union[ConfigureModel, ConfigureListModel]): + session = app.state.vyos_session env = session.get_session_env() config = vyos.config.Config(session_env=env) - strict_field = request.form.get("strict") - if strict_field == "true": - strict = True - else: - strict = False - # Allow users to pass just one command - if not isinstance(commands, list): - commands = [commands] + if not isinstance(data, ConfigureListModel): + data = [data] + else: + data = data.commands # We don't want multiple people/apps to be able to commit at once, # or modify the shared session while someone else is doing the same, @@ -114,53 +381,25 @@ def configure_op(commands): status = 200 error_msg = None try: - for c in commands: - # What we've got may not even be a dict - if not isinstance(c, dict): - raise ConfigSessionError("Malformed command \"{0}\": any command must be a dict".format(json.dumps(c))) - - # Missing op or path is a show stopper - if not ('op' in c): - raise ConfigSessionError("Malformed command \"{0}\": missing \"op\" field".format(json.dumps(c))) - if not ('path' in c): - raise ConfigSessionError("Malformed command \"{0}\": missing \"path\" field".format(json.dumps(c))) - - # Missing value is fine, substitute for empty string - if 'value' in c: - value = c['value'] - else: - value = "" - - op = c['op'] - path = c['path'] - - if not path: - raise ConfigSessionError("Malformed command \"{0}\": empty path".format(json.dumps(c))) - - # Type checking - if not isinstance(path, list): - raise ConfigSessionError("Malformed command \"{0}\": \"path\" field must be a list".format(json.dumps(c))) + for c in data: + op = c.op + path = c.path - if not isinstance(value, str): - raise ConfigSessionError("Malformed command \"{0}\": \"value\" field must be a 
string".format(json.dumps(c))) - - # Account for the case when value field is present and set to null - if not value: + if c.value: + value = c.value + else: value = "" - # For vyos.configsessios calls that have no separate value arguments, + # For vyos.configsession calls that have no separate value arguments, # and for type checking too - try: - cfg_path = " ".join(path + [value]).strip() - except TypeError: - raise ConfigSessionError("Malformed command \"{0}\": \"path\" field must be a list of strings".format(json.dumps(c))) + cfg_path = " ".join(path + [value]).strip() if op == 'set': # XXX: it would be nice to do a strict check for "path already exists", # but there's probably no way to do that session.set(path, value=value) elif op == 'delete': - if strict and not config.exists(cfg_path): + if app.state.vyos_strict and not config.exists(cfg_path): raise ConfigSessionError("Cannot delete [{0}]: path/value does not exist".format(cfg_path)) session.delete(path, value=value) elif op == 'comment': @@ -169,16 +408,16 @@ def configure_op(commands): raise ConfigSessionError("\"{0}\" is not a valid operation".format(op)) # end for session.commit() - print("Configuration modified via HTTP API using key \"{0}\"".format(id)) + logger.info(f"Configuration modified via HTTP API using key '{app.state.vyos_id}'") except ConfigSessionError as e: session.discard() status = 400 - if app.config['vyos_debug']: - print(traceback.format_exc(), file=sys.stderr) + if app.state.vyos_debug: + logger.critical(f"ConfigSessionError:\n {traceback.format_exc()}") error_msg = str(e) except Exception as e: session.discard() - print(traceback.format_exc(), file=sys.stderr) + logger.critical(traceback.format_exc()) status = 500 # Don't give the details away to the outer world @@ -188,22 +427,17 @@ def configure_op(commands): if status != 200: return error(status, error_msg) - else: - return success(None) -@app.route('/retrieve', methods=['POST']) -@get_command -@auth_required -def retrieve_op(command): - session = app.config['vyos_session'] + return success(None) + +@app.post("/retrieve") +def retrieve_op(data: RetrieveModel): + session = app.state.vyos_session env = session.get_session_env() config = vyos.config.Config(session_env=env) - try: - op = command['op'] - path = " ".join(command['path']) - except KeyError: - return error(400, "Missing required field. \"op\" and \"path\" fields are required") + op = data.op + path = " ".join(data.path) try: if op == 'returnValue': @@ -214,10 +448,10 @@ def retrieve_op(command): res = config.exists(path) elif op == 'showConfig': config_format = 'json' - if 'configFormat' in command: - config_format = command['configFormat'] + if data.configFormat: + config_format = data.configFormat - res = session.show_config(path=command['path']) + res = session.show_config(path=data.path) if config_format == 'json': config_tree = vyos.configtree.ConfigTree(res) res = json.loads(config_tree.to_json()) @@ -233,33 +467,28 @@ def retrieve_op(command): except ConfigSessionError as e: return error(400, str(e)) except Exception as e: - print(traceback.format_exc(), file=sys.stderr) + logger.critical(traceback.format_exc()) return error(500, "An internal error occured. 
Check the logs for details.") return success(res) -@app.route('/config-file', methods=['POST']) -@get_command -@auth_required -def config_file_op(command): - session = app.config['vyos_session'] +@app.post('/config-file') +def config_file_op(data: ConfigFileModel): + session = app.state.vyos_session - try: - op = command['op'] - except KeyError: - return error(400, "Missing required field \"op\"") + op = data.op try: if op == 'save': - try: - path = command['file'] - except KeyError: + if data.file: + path = data.file + else: path = '/config/config.boot' res = session.save_config(path) elif op == 'load': - try: - path = command['file'] - except KeyError: + if data.file: + path = data.file + else: return error(400, "Missing required field \"file\"") res = session.migrate_and_load_config(path) res = session.commit() @@ -268,33 +497,28 @@ def config_file_op(command): except ConfigSessionError as e: return error(400, str(e)) except Exception as e: - print(traceback.format_exc(), file=sys.stderr) + logger.critical(traceback.format_exc()) return error(500, "An internal error occured. Check the logs for details.") return success(res) -@app.route('/image', methods=['POST']) -@get_command -@auth_required -def image_op(command): - session = app.config['vyos_session'] +@app.post('/image') +def image_op(data: ImageModel): + session = app.state.vyos_session - try: - op = command['op'] - except KeyError: - return error(400, "Missing required field \"op\"") + op = data.op try: if op == 'add': - try: - url = command['url'] - except KeyError: + if data.url: + url = data.url + else: return error(400, "Missing required field \"url\"") res = session.install_image(url) elif op == 'delete': - try: - name = command['name'] - except KeyError: + if data.name: + name = data.name + else: return error(400, "Missing required field \"name\"") res = session.remove_image(name) else: @@ -302,26 +526,17 @@ def image_op(command): except ConfigSessionError as e: return error(400, str(e)) except Exception as e: - print(traceback.format_exc(), file=sys.stderr) + logger.critical(traceback.format_exc()) return error(500, "An internal error occured. Check the logs for details.") return success(res) +@app.post('/generate') +def generate_op(data: GenerateModel): + session = app.state.vyos_session -@app.route('/generate', methods=['POST']) -@get_command -@auth_required -def generate_op(command): - session = app.config['vyos_session'] - - try: - op = command['op'] - path = command['path'] - except KeyError: - return error(400, "Missing required field. \"op\" and \"path\" fields are required") - - if not isinstance(path, list): - return error(400, "Malformed command: \"path\" field must be a list of strings") + op = data.op + path = data.path try: if op == 'generate': @@ -331,25 +546,17 @@ def generate_op(command): except ConfigSessionError as e: return error(400, str(e)) except Exception as e: - print(traceback.format_exc(), file=sys.stderr) + logger.critical(traceback.format_exc()) return error(500, "An internal error occured. Check the logs for details.") return success(res) -@app.route('/show', methods=['POST']) -@get_command -@auth_required -def show_op(command): - session = app.config['vyos_session'] +@app.post('/show') +def show_op(data: ShowModel): + session = app.state.vyos_session - try: - op = command['op'] - path = command['path'] - except KeyError: - return error(400, "Missing required field. 
\"op\" and \"path\" fields are required") - - if not isinstance(path, list): - return error(400, "Malformed command: \"path\" field must be a list of strings") + op = data.op + path = data.path try: if op == 'show': @@ -359,14 +566,11 @@ def show_op(command): except ConfigSessionError as e: return error(400, str(e)) except Exception as e: - print(traceback.format_exc(), file=sys.stderr) + logger.critical(traceback.format_exc()) return error(500, "An internal error occured. Check the logs for details.") return success(res) -def shutdown(): - raise KeyboardInterrupt - if __name__ == '__main__': # systemd's user and group options don't work, do it by hand here, # else no one else will be able to commit @@ -380,21 +584,20 @@ if __name__ == '__main__': try: server_config = load_server_config() except Exception as e: - print("Failed to load the HTTP API server config: {0}".format(e)) + logger.critical("Failed to load the HTTP API server config: {0}".format(e)) session = ConfigSession(os.getpid()) - app.config['vyos_session'] = session - app.config['vyos_keys'] = server_config['api_keys'] - app.config['vyos_debug'] = server_config['debug'] - - def sig_handler(signum, frame): - shutdown() + app.state.vyos_session = session + app.state.vyos_keys = server_config['api_keys'] - signal.signal(signal.SIGTERM, sig_handler) + app.state.vyos_debug = True if server_config['debug'] == 'true' else False + app.state.vyos_strict = True if server_config['strict'] == 'true' else False try: - serve(app, host=server_config["listen_address"], - port=server_config["port"]) + uvicorn.run(app, host=server_config["listen_address"], + port=int(server_config["port"]), + proxy_headers=True) except OSError as e: - print(f"OSError {e}") + logger.critical(f"OSError {e}") + sys.exit(1) diff --git a/src/shim/vyshim.c b/src/shim/vyshim.c index 196e3221e..cae8b6152 100644 --- a/src/shim/vyshim.c +++ b/src/shim/vyshim.c @@ -75,28 +75,32 @@ int main(int argc, char* argv[]) void *context = zmq_ctx_new(); void *requester = zmq_socket(context, ZMQ_REQ); + int ex_index; int init_timeout = 0; debug_print("Connecting to vyos-configd ...\n"); zmq_connect(requester, SOCKET_PATH); + for (int i = 1; i < argc ; i++) { + strncat(&string_node_data[0], argv[i], 127); + } + + debug_print("data to send: %s\n", string_node_data); + + char *test = strstr(string_node_data, "VYOS_TAGNODE_VALUE"); + ex_index = test ? 2 : 1; + if (access(COMMIT_MARKER, F_OK) != -1) { init_timeout = initialization(requester); if (!init_timeout) remove(COMMIT_MARKER); } - int end = argc > 3 ? 
2 : argc - 1; - // if initial communication failed, pass through execution of script if (init_timeout) { - int ret = pass_through(argv, end); + int ret = pass_through(argv, ex_index); return ret; } - for (int i = end; i > 0 ; i--) { - strncat(&string_node_data[0], argv[i], 127); - } - char error_code[1]; debug_print("Sending node data ...\n"); char *string_node_data_msg = mkjson(MKJSON_OBJ, 2, @@ -116,13 +120,13 @@ int main(int argc, char* argv[]) if (err & PASS) { debug_print("Received PASS\n"); - int ret = pass_through(argv, end); + int ret = pass_through(argv, ex_index); return ret; } if (err & ERROR_DAEMON) { debug_print("Received ERROR_DAEMON\n"); - int ret = pass_through(argv, end); + int ret = pass_through(argv, ex_index); return ret; } @@ -232,14 +236,14 @@ int initialization(void* Requester) return 0; } -int pass_through(char **argv, int end) +int pass_through(char **argv, int ex_index) { - char *newargv[] = { NULL, NULL }; + char **newargv = NULL; pid_t child_pid; - newargv[0] = argv[end]; - if (end > 1) { - putenv(argv[end - 1]); + newargv = &argv[ex_index]; + if (ex_index > 1) { + putenv(argv[ex_index - 1]); } debug_print("pass-through invoked\n"); @@ -248,9 +252,9 @@ int pass_through(char **argv, int end) debug_print("fork() failed\n"); return -1; } else if (child_pid == 0) { - if (-1 == execv(argv[end], newargv)) { + if (-1 == execv(argv[ex_index], newargv)) { debug_print("pass_through execve failed %s: %s\n", - argv[end], strerror(errno)); + argv[ex_index], strerror(errno)); return -1; } } else if (child_pid > 0) { diff --git a/src/systemd/vyos-http-api.service b/src/systemd/vyos-http-api.service index 4fa68b4ff..ba5df5984 100644 --- a/src/systemd/vyos-http-api.service +++ b/src/systemd/vyos-http-api.service @@ -5,9 +5,8 @@ Requires=vyos-router.service [Service] ExecStartPre=/usr/libexec/vyos/init/vyos-config -ExecStart=/usr/bin/python3 -u /usr/libexec/vyos/services/vyos-http-api-server +ExecStart=/usr/libexec/vyos/services/vyos-http-api-server Type=idle -KillMode=process SyslogIdentifier=vyos-http-api SyslogFacility=daemon diff --git a/src/tests/test_dict_search.py b/src/tests/test_dict_search.py index 6a0fc74ad..991722f0f 100644 --- a/src/tests/test_dict_search.py +++ b/src/tests/test_dict_search.py @@ -20,6 +20,7 @@ from vyos.util import dict_search data = { 'string': 'fooo', 'nested': {'string': 'bar', 'empty': '', 'list': ['foo', 'bar']}, + 'non': {}, 'list': ['bar', 'baz'], 'dict': {'key_1': {}, 'key_2': 'vyos'} } @@ -30,7 +31,8 @@ class TestDictSearch(TestCase): def test_non_existing_keys(self): # TestDictSearch: Return False when querying for non-existent key - self.assertFalse(dict_search('non_existing', data)) + self.assertEqual(dict_search('non_existing', data), None) + self.assertEqual(dict_search('non.existing.fancy.key', data), None) def test_string(self): # TestDictSearch: Return value when querying string @@ -50,8 +52,14 @@ class TestDictSearch(TestCase): def test_nested_dict_key_empty(self): # TestDictSearch: Return False when querying for a nested string whose last key is empty + self.assertEqual(dict_search('nested.empty', data), '') self.assertFalse(dict_search('nested.empty', data)) def test_nested_list(self): # TestDictSearch: Return list items when querying nested list self.assertEqual(dict_search('nested.list', data), data['nested']['list']) + + def test_invalid_input(self): + # TestDictSearch: Return list items when querying nested list + self.assertEqual(dict_search('nested.list', None), None) + self.assertEqual(dict_search(None, data), None) diff 
--git a/src/tests/test_template.py b/src/tests/test_template.py index 7800d007f..67c0fe84a 100644 --- a/src/tests/test_template.py +++ b/src/tests/test_template.py @@ -14,13 +14,23 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +import os import vyos.template + from unittest import TestCase class TestVyOSTemplate(TestCase): def setUp(self): pass + def test_is_interface(self): + for interface in ['lo', 'eth0']: + if os.path.exists(f'/sys/class/net/{interface}'): + self.assertTrue(vyos.template.is_interface(interface)) + else: + self.assertFalse(vyos.template.is_interface(interface)) + self.assertFalse(vyos.template.is_interface('non-existent')) + def test_is_ip(self): self.assertTrue(vyos.template.is_ip('192.0.2.1')) self.assertTrue(vyos.template.is_ip('2001:db8::1')) diff --git a/src/validators/interface-name b/src/validators/interface-name index 72e9fd54a..5bac671b1 100755 --- a/src/validators/interface-name +++ b/src/validators/interface-name @@ -14,14 +14,21 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +import os import re -import sys + +from sys import argv +from sys import exit pattern = '^(bond|br|dum|en|ersp|eth|gnv|lan|l2tp|l2tpeth|macsec|peth|ppp|pppoe|pptp|sstp|tun|vti|vtun|vxlan|wg|wlan|wlm)[0-9]+(.\d+)?|lo$' if __name__ == '__main__': - if len(sys.argv) != 2: - sys.exit(1) - if not re.match(pattern, sys.argv[1]): - sys.exit(1) - sys.exit(0) + if len(argv) != 2: + exit(1) + interface = argv[1] + + if re.match(pattern, interface): + exit(0) + if os.path.exists(f'/sys/class/net/{interface}'): + exit(0) + exit(1) diff --git a/src/validators/ipv6-eui64-prefix b/src/validators/ipv6-eui64-prefix new file mode 100755 index 000000000..d7f262633 --- /dev/null +++ b/src/validators/ipv6-eui64-prefix @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 + +# Validator used to check if given IPv6 prefix is of size /64 required by EUI64 + +from sys import argv +from sys import exit + +if __name__ == '__main__': + if len(argv) != 2: + exit(1) + + prefix = argv[1] + if prefix.split('/')[1] == '64': + exit(0) + + exit(1) |
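
Note: the ipv6-eui64-prefix validator above assumes its argument always contains a '/'; a bare address makes prefix.split('/')[1] raise an uncaught IndexError instead of failing cleanly. Below is a minimal sketch of a stricter check that leans on the standard-library ipaddress module instead of string splitting. It is an illustrative alternative, not the validator shipped in this change.

#!/usr/bin/env python3
# Sketch: stricter /64 check for EUI-64, using the standard library.
# Illustrative alternative to the string-split validator above.

from ipaddress import IPv6Network
from sys import argv, exit

if __name__ == '__main__':
    if len(argv) != 2:
        exit(1)
    try:
        # strict=False tolerates host bits, e.g. 2001:db8::1/64;
        # a bare address parses as a /128 and is rejected below.
        prefix = IPv6Network(argv[1], strict=False)
    except ValueError:
        # not a parseable IPv6 prefix at all
        exit(1)
    # EUI-64 interface identifiers require exactly a /64 prefix
    exit(0 if prefix.prefixlen == 64 else 1)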
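
For the reworked HTTP API, the MultipartRequest/MultipartRoute shims above keep the original multipart/form-data interface (JSON command in the 'data' field, API key in 'key') working alongside native application/json requests. A minimal client sketch under assumed values follows: the host 192.0.2.1 and the key MY-API-KEY are placeholders, and the third-party requests library stands in for any HTTP client.

#!/usr/bin/env python3
# Illustrative form-data client for the reworked API; host and key are placeholders.
import json
import requests  # third-party HTTP client, used here only for brevity

payload = {
    'data': json.dumps({'op': 'showConfig', 'path': ['interfaces']}),
    'key': 'MY-API-KEY',  # placeholder API key
}
# verify=False assumes the router still uses a self-signed certificate
r = requests.post('https://192.0.2.1/retrieve', data=payload, verify=False)
print(r.json())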