Diffstat (limited to 'src')
144 files changed, 5999 insertions, 2158 deletions
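A recurring pattern in this changeset (conntrack.py, firewall-interface.py, flow_accounting_conf.py) is the replacement of "iptables --check/--delete" calls with nftables rule lookups: list the chain with handles, match the rule of interest, then delete it by handle. The sketch below shows that flow in isolation, assuming root privileges and an existing chain; the function names and the use of subprocess are illustrative only and do not reproduce the exact vyos.firewall helpers (find_nftables_rule / remove_nftables_rule) introduced by this change.

#!/usr/bin/env python3
# Standalone illustration of the lookup-by-handle / delete-by-handle pattern.
# Not part of VyOS; assumes nft is available and the chain already exists.

import re
import subprocess

def find_rule_handle(family, table, chain, search_terms):
    # 'nft -a' prints each rule followed by "# handle N"
    out = subprocess.run(['nft', '-a', 'list', 'chain', family, table, chain],
                         capture_output=True, text=True, check=True).stdout
    for line in out.splitlines():
        if all(term in line for term in search_terms):
            match = re.search(r'handle (\d+)', line)
            if match:
                return match.group(1)
    return None

def delete_rule(family, table, chain, handle):
    # Deleting by handle avoids having to re-specify the full rule text
    subprocess.run(['nft', 'delete', 'rule', family, table, chain,
                    'handle', handle], check=True)

if __name__ == '__main__':
    # Example: drop the rpc_tcp conntrack helper rule if it is currently installed
    handle = find_rule_handle('ip', 'raw', 'VYOS_CT_HELPER',
                              ['ct helper set "rpc_tcp"'])
    if handle:
        delete_rule('ip', 'raw', 'VYOS_CT_HELPER', handle)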
diff --git a/src/completion/list_openvpn_users.py b/src/completion/list_openvpn_users.py new file mode 100755 index 000000000..c472dbeab --- /dev/null +++ b/src/completion/list_openvpn_users.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2019-2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os +import sys +import argparse + +from vyos.config import Config +from vyos.util import dict_search + +def get_user_from_interface(interface): + config = Config() + base = ['interfaces', 'openvpn', interface] + openvpn = config.get_config_dict(base, effective=True, key_mangling=('-', '_')) + users = [] + + try: + for user in (dict_search('server.client', openvpn[interface]) or []): + users.append(user.split(',')[0]) + except: + pass + + return users + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--interface", type=str, help="List users per interface") + args = parser.parse_args() + + users = [] + + users = get_user_from_interface(args.interface) + + print(" ".join(users)) + diff --git a/src/conf_mode/conntrack.py b/src/conf_mode/conntrack.py index 68877f794..c65ef9540 100755 --- a/src/conf_mode/conntrack.py +++ b/src/conf_mode/conntrack.py @@ -15,11 +15,14 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import os +import re from sys import exit from vyos.config import Config from vyos.configdict import dict_merge +from vyos.firewall import find_nftables_rule +from vyos.firewall import remove_nftables_rule from vyos.util import cmd from vyos.util import run from vyos.util import process_named_running @@ -43,8 +46,8 @@ module_map = { 'ko' : ['nf_nat_h323', 'nf_conntrack_h323'], }, 'nfs' : { - 'iptables' : ['VYATTA_CT_HELPER --table raw --proto tcp --dport 111 --jump CT --helper rpc', - 'VYATTA_CT_HELPER --table raw --proto udp --dport 111 --jump CT --helper rpc'], + 'nftables' : ['ct helper set "rpc_tcp" tcp dport "{111}" return', + 'ct helper set "rpc_udp" udp dport "{111}" return'] }, 'pptp' : { 'ko' : ['nf_nat_pptp', 'nf_conntrack_pptp'], @@ -53,9 +56,7 @@ module_map = { 'ko' : ['nf_nat_sip', 'nf_conntrack_sip'], }, 'sqlnet' : { - 'iptables' : ['VYATTA_CT_HELPER --table raw --proto tcp --dport 1521 --jump CT --helper tns', - 'VYATTA_CT_HELPER --table raw --proto tcp --dport 1525 --jump CT --helper tns', - 'VYATTA_CT_HELPER --table raw --proto tcp --dport 1536 --jump CT --helper tns'], + 'nftables' : ['ct helper set "tns_tcp" tcp dport "{1521,1525,1536}" return'] }, 'tftp' : { 'ko' : ['nf_nat_tftp', 'nf_conntrack_tftp'], @@ -93,6 +94,17 @@ def generate(conntrack): return None +def find_nftables_ct_rule(rule): + helper_search = re.search('ct helper set "(\w+)"', rule) + if helper_search: + rule = helper_search[1] + return find_nftables_rule('raw', 'VYOS_CT_HELPER', [rule]) + +def find_remove_rule(rule): + handle = find_nftables_ct_rule(rule) + if handle: + remove_nftables_rule('raw', 'VYOS_CT_HELPER', handle) + def apply(conntrack): # Depending on the enable/disable state of the ALG (Application Layer Gateway) # modules we need to either insmod or rmmod the helpers. 
@@ -103,20 +115,17 @@ def apply(conntrack): # Only remove the module if it's loaded if os.path.exists(f'/sys/module/{mod}'): cmd(f'rmmod {mod}') - if 'iptables' in module_config: - for rule in module_config['iptables']: - # Only install iptables rule if it does not exist - tmp = run(f'iptables --check {rule}') - if tmp == 0: cmd(f'iptables --delete {rule}') + if 'nftables' in module_config: + for rule in module_config['nftables']: + find_remove_rule(rule) else: if 'ko' in module_config: for mod in module_config['ko']: cmd(f'modprobe {mod}') - if 'iptables' in module_config: - for rule in module_config['iptables']: - # Only install iptables rule if it does not exist - tmp = run(f'iptables --check {rule}') - if tmp > 0: cmd(f'iptables --insert {rule}') + if 'nftables' in module_config: + for rule in module_config['nftables']: + if not find_nftables_ct_rule(rule): + cmd(f'nft insert rule ip raw VYOS_CT_HELPER {rule}') if process_named_running('conntrackd'): # Reload conntrack-sync daemon to fetch new sysctl values diff --git a/src/conf_mode/containers.py b/src/conf_mode/containers.py index 1e0197a13..2e14e0b25 100755 --- a/src/conf_mode/containers.py +++ b/src/conf_mode/containers.py @@ -30,8 +30,6 @@ from vyos.util import cmd from vyos.util import run from vyos.util import read_file from vyos.util import write_file -from vyos.util import is_systemd_service_active -from vyos.util import is_systemd_service_running from vyos.template import inc_ip from vyos.template import is_ipv4 from vyos.template import is_ipv6 @@ -102,7 +100,7 @@ def verify(container): # Check if the specified container network exists network_name = list(container_config['network'])[0] if network_name not in container['network']: - raise ConfigError('Container network "{network_name}" does not exist!') + raise ConfigError(f'Container network "{network_name}" does not exist!') if 'address' in container_config['network'][network_name]: if 'network' not in container_config: @@ -160,7 +158,7 @@ def verify(container): v6_prefix = 0 # If ipv4-prefix not defined for user-defined network if 'prefix' not in network_config: - raise ConfigError(f'prefix for network "{net}" must be defined!') + raise ConfigError(f'prefix for network "{network}" must be defined!') for prefix in network_config['prefix']: if is_ipv4(prefix): v4_prefix += 1 @@ -237,17 +235,6 @@ def apply(container): if os.path.exists(tmp): os.unlink(tmp) - service_name = 'podman.service' - if 'network' in container or 'name' in container: - # Start podman if it's required and not yet running - if not is_systemd_service_active(service_name): - _cmd(f'systemctl start {service_name}') - # Wait for podman to be running - while not is_systemd_service_running(service_name): - sleep(0.250) - else: - _cmd(f'systemctl stop {service_name}') - # Add container if 'name' in container: for name, container_config in container['name'].items(): @@ -271,6 +258,14 @@ def apply(container): tmp = run(f'podman image exists {image}') if tmp != 0: print(os.system(f'podman pull {image}')) + # Add capability options. 
Should be in uppercase + cap_add = '' + if 'cap_add' in container_config: + for c in container_config['cap_add']: + c = c.upper() + c = c.replace('-', '_') + cap_add += f' --cap-add={c}' + # Check/set environment options "-e foo=bar" env_opt = '' if 'environment' in container_config: @@ -299,7 +294,7 @@ def apply(container): dvol = vol_config['destination'] volume += f' -v {svol}:{dvol}' - container_base_cmd = f'podman run --detach --interactive --tty --replace ' \ + container_base_cmd = f'podman run --detach --interactive --tty --replace {cap_add} ' \ f'--memory {memory}m --memory-swap 0 --restart {restart} ' \ f'--name {name} {port} {volume} {env_opt}' if 'allow_host_networks' in container_config: @@ -310,7 +305,17 @@ def apply(container): if 'address' in container_config['network'][network]: address = container_config['network'][network]['address'] ipparam = f'--ip {address}' - _cmd(f'{container_base_cmd} --net {network} {ipparam} {image}') + + counter = 0 + while True: + if counter >= 10: + break + try: + _cmd(f'{container_base_cmd} --net {network} {ipparam} {image}') + break + except: + counter = counter +1 + sleep(0.5) return None diff --git a/src/conf_mode/dhcp_server.py b/src/conf_mode/dhcp_server.py index 28f2a4ca5..a8cef5ebf 100755 --- a/src/conf_mode/dhcp_server.py +++ b/src/conf_mode/dhcp_server.py @@ -151,9 +151,15 @@ def verify(dhcp): listen_ok = False subnets = [] failover_ok = False + shared_networks = len(dhcp['shared_network_name']) + disabled_shared_networks = 0 + # A shared-network requires a subnet definition for network, network_config in dhcp['shared_network_name'].items(): + if 'disable' in network_config: + disabled_shared_networks += 1 + if 'subnet' not in network_config: raise ConfigError(f'No subnets defined for {network}. At least one\n' \ 'lease subnet must be configured.') @@ -226,7 +232,7 @@ def verify(dhcp): # There must be one subnet connected to a listen interface. # This only counts if the network itself is not disabled! if 'disable' not in network_config: - if is_subnet_connected(subnet, primary=True): + if is_subnet_connected(subnet, primary=False): listen_ok = True # Subnets must be non overlapping @@ -243,6 +249,10 @@ def verify(dhcp): if net.overlaps(net2): raise ConfigError('Conflicting subnet ranges: "{net}" overlaps "{net2}"!') + # Prevent 'disable' for shared-network if only one network is configured + if (shared_networks - disabled_shared_networks) < 1: + raise ConfigError(f'At least one shared network must be active!') + if 'failover' in dhcp: if not failover_ok: raise ConfigError('DHCP failover must be enabled for at least one subnet!') diff --git a/src/conf_mode/dhcpv6_server.py b/src/conf_mode/dhcpv6_server.py index 175300bb0..e6a2e4486 100755 --- a/src/conf_mode/dhcpv6_server.py +++ b/src/conf_mode/dhcpv6_server.py @@ -128,7 +128,7 @@ def verify(dhcpv6): # Subnets must be unique if subnet in subnets: - raise ConfigError('DHCPv6 subnets must be unique! Subnet {0} defined multiple times!'.format(subnet['network'])) + raise ConfigError(f'DHCPv6 subnets must be unique! 
Subnet {subnet} defined multiple times!') subnets.append(subnet) # DHCPv6 requires at least one configured address range or one static mapping diff --git a/src/conf_mode/dns_forwarding.py b/src/conf_mode/dns_forwarding.py index c44e6c974..23a16df63 100755 --- a/src/conf_mode/dns_forwarding.py +++ b/src/conf_mode/dns_forwarding.py @@ -17,6 +17,7 @@ import os from sys import exit +from glob import glob from vyos.config import Config from vyos.configdict import dict_merge @@ -50,10 +51,12 @@ def get_config(config=None): if not conf.exists(base): return None - dns = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + dns = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. + # options which we need to update into the dictionary retrieved. default_values = defaults(base) + # T2665 due to how defaults under tag nodes work, we must clear these out before we merge + del default_values['authoritative_domain'] dns = dict_merge(default_values, dns) # some additions to the default dictionary @@ -66,20 +69,182 @@ def get_config(config=None): if conf.exists(base_nameservers_dhcp): dns.update({'system_name_server_dhcp': conf.return_values(base_nameservers_dhcp)}) - # Split the source_address property into separate IPv4 and IPv6 lists - # NOTE: In future versions of pdns-recursor (> 4.4.0), this logic can be removed - # as both IPv4 and IPv6 addresses can be specified in a single setting. - source_address_v4 = [] - source_address_v6 = [] - - for source_address in dns['source_address']: - if is_ipv6(source_address): - source_address_v6.append(source_address) - else: - source_address_v4.append(source_address) - - dns.update({'source_address_v4': source_address_v4}) - dns.update({'source_address_v6': source_address_v6}) + if 'authoritative_domain' in dns: + dns['authoritative_zones'] = [] + dns['authoritative_zone_errors'] = [] + for node in dns['authoritative_domain']: + zonedata = dns['authoritative_domain'][node] + if ('disable' in zonedata) or (not 'records' in zonedata): + continue + zone = { + 'name': node, + 'file': "{}/zone.{}.conf".format(pdns_rec_run_dir, node), + 'records': [], + } + + recorddata = zonedata['records'] + + for rtype in [ 'a', 'aaaa', 'cname', 'mx', 'ptr', 'txt', 'spf', 'srv', 'naptr' ]: + if rtype not in recorddata: + continue + for subnode in recorddata[rtype]: + if 'disable' in recorddata[rtype][subnode]: + continue + + rdata = recorddata[rtype][subnode] + + if rtype in [ 'a', 'aaaa' ]: + rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665 + rdata = dict_merge(rdefaults, rdata) + + if not 'address' in rdata: + dns['authoritative_zone_errors'].append('{}.{}: at least one address is required'.format(subnode, node)) + continue + + for address in rdata['address']: + zone['records'].append({ + 'name': subnode, + 'type': rtype.upper(), + 'ttl': rdata['ttl'], + 'value': address + }) + elif rtype in ['cname', 'ptr']: + rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665 + rdata = dict_merge(rdefaults, rdata) + + if not 'target' in rdata: + dns['authoritative_zone_errors'].append('{}.{}: target is required'.format(subnode, node)) + continue + + zone['records'].append({ + 'name': subnode, + 'type': rtype.upper(), + 'ttl': rdata['ttl'], + 'value': '{}.'.format(rdata['target']) + }) + elif rtype == 'mx': + rdefaults = 
defaults(base + ['authoritative-domain', 'records', rtype]) # T2665 + del rdefaults['server'] + rdata = dict_merge(rdefaults, rdata) + + if not 'server' in rdata: + dns['authoritative_zone_errors'].append('{}.{}: at least one server is required'.format(subnode, node)) + continue + + for servername in rdata['server']: + serverdata = rdata['server'][servername] + serverdefaults = defaults(base + ['authoritative-domain', 'records', rtype, 'server']) # T2665 + serverdata = dict_merge(serverdefaults, serverdata) + zone['records'].append({ + 'name': subnode, + 'type': rtype.upper(), + 'ttl': rdata['ttl'], + 'value': '{} {}.'.format(serverdata['priority'], servername) + }) + elif rtype == 'txt': + rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665 + rdata = dict_merge(rdefaults, rdata) + + if not 'value' in rdata: + dns['authoritative_zone_errors'].append('{}.{}: at least one value is required'.format(subnode, node)) + continue + + for value in rdata['value']: + zone['records'].append({ + 'name': subnode, + 'type': rtype.upper(), + 'ttl': rdata['ttl'], + 'value': "\"{}\"".format(value.replace("\"", "\\\"")) + }) + elif rtype == 'spf': + rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665 + rdata = dict_merge(rdefaults, rdata) + + if not 'value' in rdata: + dns['authoritative_zone_errors'].append('{}.{}: value is required'.format(subnode, node)) + continue + + zone['records'].append({ + 'name': subnode, + 'type': rtype.upper(), + 'ttl': rdata['ttl'], + 'value': '"{}"'.format(rdata['value'].replace("\"", "\\\"")) + }) + elif rtype == 'srv': + rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665 + del rdefaults['entry'] + rdata = dict_merge(rdefaults, rdata) + + if not 'entry' in rdata: + dns['authoritative_zone_errors'].append('{}.{}: at least one entry is required'.format(subnode, node)) + continue + + for entryno in rdata['entry']: + entrydata = rdata['entry'][entryno] + entrydefaults = defaults(base + ['authoritative-domain', 'records', rtype, 'entry']) # T2665 + entrydata = dict_merge(entrydefaults, entrydata) + + if not 'hostname' in entrydata: + dns['authoritative_zone_errors'].append('{}.{}: hostname is required for entry {}'.format(subnode, node, entryno)) + continue + + if not 'port' in entrydata: + dns['authoritative_zone_errors'].append('{}.{}: port is required for entry {}'.format(subnode, node, entryno)) + continue + + zone['records'].append({ + 'name': subnode, + 'type': rtype.upper(), + 'ttl': rdata['ttl'], + 'value': '{} {} {} {}.'.format(entrydata['priority'], entrydata['weight'], entrydata['port'], entrydata['hostname']) + }) + elif rtype == 'naptr': + rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665 + del rdefaults['rule'] + rdata = dict_merge(rdefaults, rdata) + + + if not 'rule' in rdata: + dns['authoritative_zone_errors'].append('{}.{}: at least one rule is required'.format(subnode, node)) + continue + + for ruleno in rdata['rule']: + ruledata = rdata['rule'][ruleno] + ruledefaults = defaults(base + ['authoritative-domain', 'records', rtype, 'rule']) # T2665 + ruledata = dict_merge(ruledefaults, ruledata) + flags = "" + if 'lookup-srv' in ruledata: + flags += "S" + if 'lookup-a' in ruledata: + flags += "A" + if 'resolve-uri' in ruledata: + flags += "U" + if 'protocol-specific' in ruledata: + flags += "P" + + if 'order' in ruledata: + order = ruledata['order'] + else: + order = ruleno + + if 'regexp' in ruledata: + regexp= ruledata['regexp'].replace("\"", "\\\"") + 
else: + regexp = '' + + if ruledata['replacement']: + replacement = '{}.'.format(ruledata['replacement']) + else: + replacement = '' + + zone['records'].append({ + 'name': subnode, + 'type': rtype.upper(), + 'ttl': rdata['ttl'], + 'value': '{} {} "{}" "{}" "{}" {}'.format(order, ruledata['preference'], flags, ruledata['service'], regexp, replacement) + }) + + dns['authoritative_zones'].append(zone) return dns @@ -101,6 +266,11 @@ def verify(dns): if 'server' not in dns['domain'][domain]: raise ConfigError(f'No server configured for domain {domain}!') + if ('authoritative_zone_errors' in dns) and dns['authoritative_zone_errors']: + for error in dns['authoritative_zone_errors']: + print(error) + raise ConfigError('Invalid authoritative records have been defined') + if 'system' in dns: if not ('system_name_server' in dns or 'system_name_server_dhcp' in dns): print("Warning: No 'system name-server' or 'system " \ @@ -119,6 +289,15 @@ def generate(dns): render(pdns_rec_lua_conf_file, 'dns-forwarding/recursor.conf.lua.tmpl', dns, user=pdns_rec_user, group=pdns_rec_group) + for zone_filename in glob(f'{pdns_rec_run_dir}/zone.*.conf'): + os.unlink(zone_filename) + + if 'authoritative_zones' in dns: + for zone in dns['authoritative_zones']: + render(zone['file'], 'dns-forwarding/recursor.zone.conf.tmpl', + zone, user=pdns_rec_user, group=pdns_rec_group) + + # if vyos-hostsd didn't create its files yet, create them (empty) for file in [pdns_rec_hostsd_lua_conf_file, pdns_rec_hostsd_zones_file]: with open(file, 'a'): @@ -134,6 +313,9 @@ def apply(dns): if os.path.isfile(pdns_rec_config_file): os.unlink(pdns_rec_config_file) + + for zone_filename in glob(f'{pdns_rec_run_dir}/zone.*.conf'): + os.unlink(zone_filename) else: ### first apply vyos-hostsd config hc = hostsd_client() @@ -168,6 +350,12 @@ def apply(dns): if 'domain' in dns: hc.add_forward_zones(dns['domain']) + # hostsd generates NTAs for the authoritative zones + # the list and keys() are required as get returns a dict, not list + hc.delete_authoritative_zones(list(hc.get_authoritative_zones())) + if 'authoritative_zones' in dns: + hc.add_authoritative_zones(list(map(lambda zone: zone['name'], dns['authoritative_zones']))) + # call hostsd to generate forward-zones and its lua-config-file hc.apply() diff --git a/src/conf_mode/dynamic_dns.py b/src/conf_mode/dynamic_dns.py index c979feca7..a31e5ed75 100755 --- a/src/conf_mode/dynamic_dns.py +++ b/src/conf_mode/dynamic_dns.py @@ -131,9 +131,7 @@ def generate(dyndns): if not dyndns: return None - render(config_file, 'dynamic-dns/ddclient.conf.tmpl', dyndns, - permission=0o600) - + render(config_file, 'dynamic-dns/ddclient.conf.tmpl', dyndns) return None def apply(dyndns): diff --git a/src/conf_mode/firewall-interface.py b/src/conf_mode/firewall-interface.py new file mode 100755 index 000000000..3a17dc5a4 --- /dev/null +++ b/src/conf_mode/firewall-interface.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. 
If not, see <http://www.gnu.org/licenses/>. + +import os +import re + +from sys import argv +from sys import exit + +from vyos.config import Config +from vyos.configdict import leaf_node_changed +from vyos.ifconfig import Section +from vyos.template import render +from vyos.util import cmd +from vyos.util import dict_search_args +from vyos.util import run +from vyos import ConfigError +from vyos import airbag +airbag.enable() + +NFT_CHAINS = { + 'in': 'VYOS_FW_IN', + 'out': 'VYOS_FW_OUT', + 'local': 'VYOS_FW_LOCAL' +} +NFT6_CHAINS = { + 'in': 'VYOS_FW6_IN', + 'out': 'VYOS_FW6_OUT', + 'local': 'VYOS_FW6_LOCAL' +} + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + + ifname = argv[1] + ifpath = Section.get_config_path(ifname) + if_firewall_path = f'interfaces {ifpath} firewall' + + if_firewall = conf.get_config_dict(if_firewall_path, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + if_firewall['ifname'] = ifname + if_firewall['firewall'] = conf.get_config_dict(['firewall'], key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + return if_firewall + +def verify(if_firewall): + # bail out early - looks like removal from running config + if not if_firewall: + return None + + for direction in ['in', 'out', 'local']: + if direction in if_firewall: + if 'name' in if_firewall[direction]: + name = if_firewall[direction]['name'] + + if 'name' not in if_firewall['firewall']: + raise ConfigError('Firewall name not configured') + + if name not in if_firewall['firewall']['name']: + raise ConfigError(f'Invalid firewall name "{name}"') + + if 'ipv6_name' in if_firewall[direction]: + name = if_firewall[direction]['ipv6_name'] + + if 'ipv6_name' not in if_firewall['firewall']: + raise ConfigError('Firewall ipv6-name not configured') + + if name not in if_firewall['firewall']['ipv6_name']: + raise ConfigError(f'Invalid firewall ipv6-name "{name}"') + + return None + +def generate(if_firewall): + return None + +def cleanup_rule(table, chain, ifname, new_name=None): + results = cmd(f'nft -a list chain {table} {chain}').split("\n") + retval = None + for line in results: + if f'ifname "{ifname}"' in line: + if new_name and f'jump {new_name}' in line: + # new_name is used to clear rules for any previously referenced chains + # returns true when rule exists and doesn't need to be created + retval = True + continue + + handle_search = re.search('handle (\d+)', line) + if handle_search: + run(f'nft delete rule {table} {chain} handle {handle_search[1]}') + return retval + +def apply(if_firewall): + ifname = if_firewall['ifname'] + + for direction in ['in', 'out', 'local']: + chain = NFT_CHAINS[direction] + ipv6_chain = NFT6_CHAINS[direction] + if_prefix = 'i' if direction in ['in', 'local'] else 'o' + + name = dict_search_args(if_firewall, direction, 'name') + if name: + rule_exists = cleanup_rule('ip filter', chain, ifname, name) + + if not rule_exists: + run(f'nft insert rule ip filter {chain} {if_prefix}ifname {ifname} counter jump {name}') + else: + cleanup_rule('ip filter', chain, ifname) + + ipv6_name = dict_search_args(if_firewall, direction, 'ipv6_name') + if ipv6_name: + rule_exists = cleanup_rule('ip6 filter', ipv6_chain, ifname, ipv6_name) + + if not rule_exists: + run(f'nft insert rule ip6 filter {ipv6_chain} {if_prefix}ifname {ifname} counter jump {ipv6_name}') + else: + cleanup_rule('ip6 filter', ipv6_chain, ifname) + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + 
generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py index 8e6ce5b14..5ac48c9ba 100755 --- a/src/conf_mode/firewall.py +++ b/src/conf_mode/firewall.py @@ -16,50 +16,295 @@ import os +from glob import glob +from json import loads from sys import exit from vyos.config import Config from vyos.configdict import dict_merge -from vyos.configdict import node_changed -from vyos.configdict import leaf_node_changed +from vyos.configdiff import get_config_diff, Diff from vyos.template import render -from vyos.util import call +from vyos.util import cmd +from vyos.util import dict_search_args +from vyos.util import process_named_running +from vyos.util import run +from vyos.xml import defaults from vyos import ConfigError from vyos import airbag -from pprint import pprint airbag.enable() +nftables_conf = '/run/nftables.conf' -def get_config(config=None): +sysfs_config = { + 'all_ping': {'sysfs': '/proc/sys/net/ipv4/icmp_echo_ignore_all', 'enable': '0', 'disable': '1'}, + 'broadcast_ping': {'sysfs': '/proc/sys/net/ipv4/icmp_echo_ignore_broadcasts', 'enable': '0', 'disable': '1'}, + 'ip_src_route': {'sysfs': '/proc/sys/net/ipv4/conf/*/accept_source_route'}, + 'ipv6_receive_redirects': {'sysfs': '/proc/sys/net/ipv6/conf/*/accept_redirects'}, + 'ipv6_src_route': {'sysfs': '/proc/sys/net/ipv6/conf/*/accept_source_route', 'enable': '0', 'disable': '-1'}, + 'log_martians': {'sysfs': '/proc/sys/net/ipv4/conf/all/log_martians'}, + 'receive_redirects': {'sysfs': '/proc/sys/net/ipv4/conf/*/accept_redirects'}, + 'send_redirects': {'sysfs': '/proc/sys/net/ipv4/conf/*/send_redirects'}, + 'source_validation': {'sysfs': '/proc/sys/net/ipv4/conf/*/rp_filter', 'disable': '0', 'strict': '1', 'loose': '2'}, + 'syn_cookies': {'sysfs': '/proc/sys/net/ipv4/tcp_syncookies'}, + 'twa_hazards_protection': {'sysfs': '/proc/sys/net/ipv4/tcp_rfc1337'} +} + +preserve_chains = [ + 'INPUT', + 'FORWARD', + 'OUTPUT', + 'VYOS_FW_IN', + 'VYOS_FW_OUT', + 'VYOS_FW_LOCAL', + 'VYOS_FW_OUTPUT', + 'VYOS_POST_FW', + 'VYOS_FRAG_MARK', + 'VYOS_FW6_IN', + 'VYOS_FW6_OUT', + 'VYOS_FW6_LOCAL', + 'VYOS_FW6_OUTPUT', + 'VYOS_POST_FW6', + 'VYOS_FRAG6_MARK' +] + +valid_groups = [ + 'address_group', + 'network_group', + 'port_group' +] + +snmp_change_type = { + 'unknown': 0, + 'add': 1, + 'delete': 2, + 'change': 3 +} +snmp_event_source = 1 +snmp_trap_mib = 'VYATTA-TRAP-MIB' +snmp_trap_name = 'mgmtEventTrap' +def get_firewall_interfaces(conf): + out = {} + interfaces = conf.get_config_dict(['interfaces'], key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + def find_interfaces(iftype_conf, output={}, prefix=''): + for ifname, if_conf in iftype_conf.items(): + if 'firewall' in if_conf: + output[prefix + ifname] = if_conf['firewall'] + for vif in ['vif', 'vif_s', 'vif_c']: + if vif in if_conf: + output.update(find_interfaces(if_conf[vif], output, f'{prefix}{ifname}.')) + return output + for iftype, iftype_conf in interfaces.items(): + out.update(find_interfaces(iftype_conf)) + return out + +def get_config(config=None): if config: conf = config else: conf = Config() - base = ['nfirewall'] + base = ['firewall'] + + if not conf.exists(base): + return {} + firewall = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) - pprint(firewall) + default_values = defaults(base) + firewall = dict_merge(default_values, firewall) + + firewall['interfaces'] = get_firewall_interfaces(conf) + + if 'config_trap' in 
firewall and firewall['config_trap'] == 'enable': + diff = get_config_diff(conf) + firewall['trap_diff'] = diff.get_child_nodes_diff_str(base) + firewall['trap_targets'] = conf.get_config_dict(['service', 'snmp', 'trap-target'], + key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) return firewall +def verify_rule(firewall, rule_conf, ipv6): + if 'action' not in rule_conf: + raise ConfigError('Rule action must be defined') + + if 'fragment' in rule_conf: + if {'match_frag', 'match_non_frag'} <= set(rule_conf['fragment']): + raise ConfigError('Cannot specify both "match-frag" and "match-non-frag"') + + if 'ipsec' in rule_conf: + if {'match_ipsec', 'match_non_ipsec'} <= set(rule_conf['ipsec']): + raise ConfigError('Cannot specify both "match-ipsec" and "match-non-ipsec"') + + if 'recent' in rule_conf: + if not {'count', 'time'} <= set(rule_conf['recent']): + raise ConfigError('Recent "count" and "time" values must be defined') + + for side in ['destination', 'source']: + if side in rule_conf: + side_conf = rule_conf[side] + + if 'group' in side_conf: + if {'address_group', 'network_group'} <= set(side_conf['group']): + raise ConfigError('Only one address-group or network-group can be specified') + + for group in valid_groups: + if group in side_conf['group']: + group_name = side_conf['group'][group] + + fw_group = f'ipv6_{group}' if ipv6 and group in ['address_group', 'network_group'] else group + + if not dict_search_args(firewall, 'group', fw_group): + error_group = fw_group.replace("_", "-") + raise ConfigError(f'Group defined in rule but {error_group} is not configured') + + if group_name not in firewall['group'][fw_group]: + error_group = group.replace("_", "-") + raise ConfigError(f'Invalid {error_group} "{group_name}" on firewall rule') + + if 'port' in side_conf or dict_search_args(side_conf, 'group', 'port_group'): + if 'protocol' not in rule_conf: + raise ConfigError('Protocol must be defined if specifying a port or port-group') + + if rule_conf['protocol'] not in ['tcp', 'udp', 'tcp_udp']: + raise ConfigError('Protocol must be tcp, udp, or tcp_udp when specifying a port or port-group') + def verify(firewall): # bail out early - looks like removal from running config if not firewall: return None + if 'config_trap' in firewall and firewall['config_trap'] == 'enable': + if not firewall['trap_targets']: + raise ConfigError(f'Firewall config-trap enabled but "service snmp trap-target" is not defined') + + for name in ['name', 'ipv6_name']: + if name in firewall: + for name_id, name_conf in firewall[name].items(): + if name_id in preserve_chains: + raise ConfigError(f'Firewall name "{name_id}" is reserved for VyOS') + + if 'rule' in name_conf: + for rule_id, rule_conf in name_conf['rule'].items(): + verify_rule(firewall, rule_conf, name == 'ipv6_name') + + for ifname, if_firewall in firewall['interfaces'].items(): + for direction in ['in', 'out', 'local']: + name = dict_search_args(if_firewall, direction, 'name') + ipv6_name = dict_search_args(if_firewall, direction, 'ipv6_name') + + if name and not dict_search_args(firewall, 'name', name): + raise ConfigError(f'Firewall name "{name}" is still referenced on interface {ifname}') + + if ipv6_name and not dict_search_args(firewall, 'ipv6_name', ipv6_name): + raise ConfigError(f'Firewall ipv6-name "{ipv6_name}" is still referenced on interface {ifname}') + return None +def cleanup_commands(firewall): + commands = [] + for table in ['ip filter', 'ip6 filter']: + json_str = cmd(f'nft -j list table {table}') + obj = 
loads(json_str) + if 'nftables' not in obj: + continue + for item in obj['nftables']: + if 'chain' in item: + if item['chain']['name'] not in preserve_chains: + chain = item['chain']['name'] + if table == 'ip filter' and dict_search_args(firewall, 'name', chain): + commands.append(f'flush chain {table} {chain}') + elif table == 'ip6 filter' and dict_search_args(firewall, 'ipv6_name', chain): + commands.append(f'flush chain {table} {chain}') + else: + commands.append(f'delete chain {table} {chain}') + return commands + def generate(firewall): - if not firewall: - return None + if not os.path.exists(nftables_conf): + firewall['first_install'] = True + else: + firewall['cleanup_commands'] = cleanup_commands(firewall) + render(nftables_conf, 'firewall/nftables.tmpl', firewall) return None -def apply(firewall): - if not firewall: +def apply_sysfs(firewall): + for name, conf in sysfs_config.items(): + paths = glob(conf['sysfs']) + value = None + + if name in firewall: + conf_value = firewall[name] + + if conf_value in conf: + value = conf[conf_value] + elif conf_value == 'enable': + value = '1' + elif conf_value == 'disable': + value = '0' + + if value: + for path in paths: + with open(path, 'w') as f: + f.write(value) + +def post_apply_trap(firewall): + if 'first_install' in firewall: return None + if 'config_trap' not in firewall or firewall['config_trap'] != 'enable': + return None + + if not process_named_running('snmpd'): + return None + + trap_username = os.getlogin() + + for host, target_conf in firewall['trap_targets'].items(): + community = target_conf['community'] if 'community' in target_conf else 'public' + port = int(target_conf['port']) if 'port' in target_conf else 162 + + base_cmd = f'snmptrap -v2c -c {community} {host}:{port} 0 {snmp_trap_mib}::{snmp_trap_name} ' + + for change_type, changes in firewall['trap_diff'].items(): + for path_str, value in changes.items(): + objects = [ + f'mgmtEventUser s "{trap_username}"', + f'mgmtEventSource i {snmp_event_source}', + f'mgmtEventType i {snmp_change_type[change_type]}' + ] + + if change_type == 'add': + objects.append(f'mgmtEventCurrCfg s "{path_str} {value}"') + elif change_type == 'delete': + objects.append(f'mgmtEventPrevCfg s "{path_str} {value}"') + elif change_type == 'change': + objects.append(f'mgmtEventPrevCfg s "{path_str} {value[0]}"') + objects.append(f'mgmtEventCurrCfg s "{path_str} {value[1]}"') + + cmd(base_cmd + ' '.join(objects)) + +def apply(firewall): + if 'first_install' in firewall: + run('nfct helper add rpc inet tcp') + run('nfct helper add rpc inet udp') + run('nfct helper add tns inet tcp') + + install_result = run(f'nft -f {nftables_conf}') + if install_result == 1: + raise ConfigError('Failed to apply firewall') + + if 'state_policy' in firewall: + for chain in ['INPUT', 'OUTPUT', 'FORWARD']: + cmd(f'nft insert rule ip filter {chain} jump VYOS_STATE_POLICY') + cmd(f'nft insert rule ip6 filter {chain} jump VYOS_STATE_POLICY6') + + apply_sysfs(firewall) + + post_apply_trap(firewall) + return None if __name__ == '__main__': diff --git a/src/conf_mode/flow_accounting_conf.py b/src/conf_mode/flow_accounting_conf.py index 9cae29481..975f19acf 100755 --- a/src/conf_mode/flow_accounting_conf.py +++ b/src/conf_mode/flow_accounting_conf.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2018-2020 VyOS maintainers and contributors +# Copyright (C) 2018-2021 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public 
License version 2 or later as @@ -16,121 +16,83 @@ import os import re + from sys import exit import ipaddress from ipaddress import ip_address -from jinja2 import FileSystemLoader, Environment +from vyos.config import Config +from vyos.configdict import dict_merge from vyos.ifconfig import Section from vyos.ifconfig import Interface -from vyos.config import Config -from vyos import ConfigError -from vyos.util import cmd from vyos.template import render - +from vyos.util import cmd +from vyos.validate import is_addr_assigned +from vyos.xml import defaults +from vyos import ConfigError from vyos import airbag airbag.enable() -# default values -default_sflow_server_port = 6343 -default_netflow_server_port = 2055 -default_plugin_pipe_size = 10 -default_captured_packet_size = 128 -default_netflow_version = '9' -default_sflow_agentip = 'auto' -uacctd_conf_path = '/etc/pmacct/uacctd.conf' -iptables_nflog_table = 'raw' -iptables_nflog_chain = 'VYATTA_CT_PREROUTING_HOOK' -egress_iptables_nflog_table = 'mangle' -egress_iptables_nflog_chain = 'FORWARD' - -# helper functions -# check if node exists and return True if this is true -def _node_exists(path): - vyos_config = Config() - if vyos_config.exists(path): - return True - -# get sFlow agent-ip if agent-address is "auto" (default behaviour) -def _sflow_default_agentip(config): - # check if any of BGP, OSPF, OSPFv3 protocols are configured and use router-id from there - if config.exists('protocols bgp'): - bgp_router_id = config.return_value("protocols bgp {} parameters router-id".format(config.list_nodes('protocols bgp')[0])) - if bgp_router_id: - return bgp_router_id - if config.return_value('protocols ospf parameters router-id'): - return config.return_value('protocols ospf parameters router-id') - if config.return_value('protocols ospfv3 parameters router-id'): - return config.return_value('protocols ospfv3 parameters router-id') - - # if router-id was not found, use first available ip of any interface - for iface in Section.interfaces(): - for address in Interface(iface).get_addr(): - # return an IP, if this is not loopback - regex_filter = re.compile('^(?!(127)|(::1)|(fe80))(?P<ipaddr>[a-f\d\.:]+)/\d+$') - if regex_filter.search(address): - return regex_filter.search(address).group('ipaddr') - - # return nothing by default - return None - -# get iptables rule dict for chain in table -def _iptables_get_nflog(chain, table): +uacctd_conf_path = '/run/pmacct/uacctd.conf' +nftables_nflog_table = 'raw' +nftables_nflog_chain = 'VYOS_CT_PREROUTING_HOOK' +egress_nftables_nflog_table = 'inet mangle' +egress_nftables_nflog_chain = 'FORWARD' + +# get nftables rule dict for chain in table +def _nftables_get_nflog(chain, table): # define list with rules rules = [] # prepare regex for parsing rules - rule_pattern = "^-A (?P<rule_definition>{0} (\-i|\-o) (?P<interface>[\w\.\*\-]+).*--comment FLOW_ACCOUNTING_RULE.* -j NFLOG.*$)".format(chain) + rule_pattern = '[io]ifname "(?P<interface>[\w\.\*\-]+)".*handle (?P<handle>[\d]+)' rule_re = re.compile(rule_pattern) - for iptables_variant in ['iptables', 'ip6tables']: - # run iptables, save output and split it by lines - iptables_command = f'{iptables_variant} -t {table} -S {chain}' - tmp = cmd(iptables_command, message='Failed to get flows list') - - # parse each line and add information to list - for current_rule in tmp.splitlines(): - current_rule_parsed = rule_re.search(current_rule) - if current_rule_parsed: - rules.append({ 'interface': current_rule_parsed.groupdict()["interface"], 'iptables_variant': 
iptables_variant, 'table': table, 'rule_definition': current_rule_parsed.groupdict()["rule_definition"] }) + # run nftables, save output and split it by lines + nftables_command = f'nft -a list chain {table} {chain}' + tmp = cmd(nftables_command, message='Failed to get flows list') + # parse each line and add information to list + for current_rule in tmp.splitlines(): + if 'FLOW_ACCOUNTING_RULE' not in current_rule: + continue + current_rule_parsed = rule_re.search(current_rule) + if current_rule_parsed: + groups = current_rule_parsed.groupdict() + rules.append({ 'interface': groups["interface"], 'table': table, 'handle': groups["handle"] }) # return list with rules return rules -# modify iptables rules -def _iptables_config(configured_ifaces, direction): - # define list of iptables commands to modify settings - iptable_commands = [] - iptables_chain = iptables_nflog_chain - iptables_table = iptables_nflog_table +def _nftables_config(configured_ifaces, direction, length=None): + # define list of nftables commands to modify settings + nftable_commands = [] + nftables_chain = nftables_nflog_chain + nftables_table = nftables_nflog_table if direction == "egress": - iptables_chain = egress_iptables_nflog_chain - iptables_table = egress_iptables_nflog_table + nftables_chain = egress_nftables_nflog_chain + nftables_table = egress_nftables_nflog_table # prepare extended list with configured interfaces configured_ifaces_extended = [] for iface in configured_ifaces: - configured_ifaces_extended.append({ 'iface': iface, 'iptables_variant': 'iptables' }) - configured_ifaces_extended.append({ 'iface': iface, 'iptables_variant': 'ip6tables' }) + configured_ifaces_extended.append({ 'iface': iface }) - # get currently configured interfaces with iptables rules - active_nflog_rules = _iptables_get_nflog(iptables_chain, iptables_table) + # get currently configured interfaces with nftables rules + active_nflog_rules = _nftables_get_nflog(nftables_chain, nftables_table) # compare current active list with configured one and delete excessive interfaces, add missed active_nflog_ifaces = [] for rule in active_nflog_rules: - iptables = rule['iptables_variant'] interface = rule['interface'] if interface not in configured_ifaces: table = rule['table'] - rule = rule['rule_definition'] - iptable_commands.append(f'{iptables} -t {table} -D {rule}') + handle = rule['handle'] + nftable_commands.append(f'nft delete rule {table} {nftables_chain} handle {handle}') else: active_nflog_ifaces.append({ 'iface': interface, - 'iptables_variant': iptables, }) # do not create new rules for already configured interfaces @@ -141,244 +103,166 @@ def _iptables_config(configured_ifaces, direction): # create missed rules for iface_extended in configured_ifaces_extended: iface = iface_extended['iface'] - iptables = iface_extended['iptables_variant'] - iptables_op = "-i" - if direction == "egress": - iptables_op = "-o" - - rule_definition = f'{iptables_chain} {iptables_op} {iface} -m comment --comment FLOW_ACCOUNTING_RULE -j NFLOG --nflog-group 2 --nflog-size {default_captured_packet_size} --nflog-threshold 100' - iptable_commands.append(f'{iptables} -t {iptables_table} -I {rule_definition}') + iface_prefix = "o" if direction == "egress" else "i" + rule_definition = f'{iface_prefix}ifname "{iface}" counter log group 2 snaplen {length} queue-threshold 100 comment "FLOW_ACCOUNTING_RULE"' + nftable_commands.append(f'nft insert rule {nftables_table} {nftables_chain} {rule_definition}') - # change iptables - for command in iptable_commands: + # 
change nftables + for command in nftable_commands: cmd(command, raising=ConfigError) -def get_config(): - vc = Config() - vc.set_level('') - # Convert the VyOS config to an abstract internal representation - flow_config = { - 'flow-accounting-configured': vc.exists('system flow-accounting'), - 'buffer-size': vc.return_value('system flow-accounting buffer-size'), - 'enable-egress': _node_exists('system flow-accounting enable-egress'), - 'disable-imt': _node_exists('system flow-accounting disable-imt'), - 'syslog-facility': vc.return_value('system flow-accounting syslog-facility'), - 'interfaces': None, - 'sflow': { - 'configured': vc.exists('system flow-accounting sflow'), - 'agent-address': vc.return_value('system flow-accounting sflow agent-address'), - 'sampling-rate': vc.return_value('system flow-accounting sflow sampling-rate'), - 'servers': None - }, - 'netflow': { - 'configured': vc.exists('system flow-accounting netflow'), - 'engine-id': vc.return_value('system flow-accounting netflow engine-id'), - 'max-flows': vc.return_value('system flow-accounting netflow max-flows'), - 'sampling-rate': vc.return_value('system flow-accounting netflow sampling-rate'), - 'source-ip': vc.return_value('system flow-accounting netflow source-ip'), - 'version': vc.return_value('system flow-accounting netflow version'), - 'timeout': { - 'expint': vc.return_value('system flow-accounting netflow timeout expiry-interval'), - 'general': vc.return_value('system flow-accounting netflow timeout flow-generic'), - 'icmp': vc.return_value('system flow-accounting netflow timeout icmp'), - 'maxlife': vc.return_value('system flow-accounting netflow timeout max-active-life'), - 'tcp.fin': vc.return_value('system flow-accounting netflow timeout tcp-fin'), - 'tcp': vc.return_value('system flow-accounting netflow timeout tcp-generic'), - 'tcp.rst': vc.return_value('system flow-accounting netflow timeout tcp-rst'), - 'udp': vc.return_value('system flow-accounting netflow timeout udp') - }, - 'servers': None - } - } - - # get interfaces list - if vc.exists('system flow-accounting interface'): - flow_config['interfaces'] = vc.return_values('system flow-accounting interface') - - # get sFlow collectors list - if vc.exists('system flow-accounting sflow server'): - flow_config['sflow']['servers'] = [] - sflow_collectors = vc.list_nodes('system flow-accounting sflow server') - for collector in sflow_collectors: - port = default_sflow_server_port - if vc.return_value("system flow-accounting sflow server {} port".format(collector)): - port = vc.return_value("system flow-accounting sflow server {} port".format(collector)) - flow_config['sflow']['servers'].append({ 'address': collector, 'port': port }) - - # get NetFlow collectors list - if vc.exists('system flow-accounting netflow server'): - flow_config['netflow']['servers'] = [] - netflow_collectors = vc.list_nodes('system flow-accounting netflow server') - for collector in netflow_collectors: - port = default_netflow_server_port - if vc.return_value("system flow-accounting netflow server {} port".format(collector)): - port = vc.return_value("system flow-accounting netflow server {} port".format(collector)) - flow_config['netflow']['servers'].append({ 'address': collector, 'port': port }) - - # get sflow agent-id - if flow_config['sflow']['agent-address'] == None or flow_config['sflow']['agent-address'] == 'auto': - flow_config['sflow']['agent-address'] = _sflow_default_agentip(vc) - - # get NetFlow version - if not flow_config['netflow']['version']: - 
flow_config['netflow']['version'] = default_netflow_version - - # convert NetFlow engine-id format, if this is necessary - if flow_config['netflow']['engine-id'] and flow_config['netflow']['version'] == '5': - regex_filter = re.compile('^\d+$') - if regex_filter.search(flow_config['netflow']['engine-id']): - flow_config['netflow']['engine-id'] = "{}:0".format(flow_config['netflow']['engine-id']) - - # return dict with flow-accounting configuration - return flow_config - -def verify(config): - # Verify that configuration is valid - # skip all checks if flow-accounting was removed - if not config['flow-accounting-configured']: - return True +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + base = ['system', 'flow-accounting'] + if not conf.exists(base): + return None + + flow_accounting = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + default_values = defaults(base) + + # delete individual flow type default - should only be added if user uses + # this feature + for flow_type in ['sflow', 'netflow']: + if flow_type in default_values: + del default_values[flow_type] + flow_accounting = dict_merge(default_values, flow_accounting) + + for flow_type in ['sflow', 'netflow']: + if flow_type in flow_accounting: + default_values = defaults(base + [flow_type]) + # we need to merge individual server configurations + if 'server' in default_values: + del default_values['server'] + flow_accounting[flow_type] = dict_merge(default_values, flow_accounting[flow_type]) + + if 'server' in flow_accounting[flow_type]: + default_values = defaults(base + [flow_type, 'server']) + for server in flow_accounting[flow_type]['server']: + flow_accounting[flow_type]['server'][server] = dict_merge( + default_values,flow_accounting[flow_type]['server'][server]) + + return flow_accounting + +def verify(flow_config): + if not flow_config: + return None # check if at least one collector is enabled - if not (config['sflow']['configured'] or config['netflow']['configured'] or not config['disable-imt']): - raise ConfigError("You need to configure at least one sFlow or NetFlow protocol, or not set \"disable-imt\" for flow-accounting") + if 'sflow' not in flow_config and 'netflow' not in flow_config and 'disable_imt' in flow_config: + raise ConfigError('You need to configure at least sFlow or NetFlow, ' \ + 'or not set "disable-imt" for flow-accounting!') # Check if at least one interface is configured - if not config['interfaces']: - raise ConfigError("You need to configure at least one interface for flow-accounting") + if 'interface' not in flow_config: + raise ConfigError('Flow accounting requires at least one interface to ' \ + 'be configured!') # check that all configured interfaces exists in the system - for iface in config['interfaces']: - if not iface in Section.interfaces(): - # chnged from error to warning to allow adding dynamic interfaces and interface templates - # raise ConfigError("The {} interface is not presented in the system".format(iface)) - print("Warning: the {} interface is not presented in the system".format(iface)) + for interface in flow_config['interface']: + if interface not in Section.interfaces(): + # Changed from error to warning to allow adding dynamic interfaces + # and interface templates + print(f'Warning: Interface "{interface}" is not presented in the system') # check sFlow configuration - 
if config['sflow']['configured']: - # check if at least one sFlow collector is configured if sFlow configuration is presented - if not config['sflow']['servers']: - raise ConfigError("You need to configure at least one sFlow server") + if 'sflow' in flow_config: + # check if at least one sFlow collector is configured + if 'server' not in flow_config['sflow']: + raise ConfigError('You need to configure at least one sFlow server!') # check that all sFlow collectors use the same IP protocol version sflow_collector_ipver = None - for sflow_collector in config['sflow']['servers']: + for server in flow_config['sflow']['server']: if sflow_collector_ipver: - if sflow_collector_ipver != ip_address(sflow_collector['address']).version: + if sflow_collector_ipver != ip_address(server).version: raise ConfigError("All sFlow servers must use the same IP protocol") else: - sflow_collector_ipver = ip_address(sflow_collector['address']).version - + sflow_collector_ipver = ip_address(server).version # check agent-id for sFlow: we should avoid mixing IPv4 agent-id with IPv6 collectors and vice-versa - for sflow_collector in config['sflow']['servers']: - if ip_address(sflow_collector['address']).version != ip_address(config['sflow']['agent-address']).version: - raise ConfigError("Different IP address versions cannot be mixed in \"sflow agent-address\" and \"sflow server\". You need to set manually the same IP version for \"agent-address\" as for all sFlow servers") - - # check if configured sFlow agent-id exist in the system - agent_id_presented = None - for iface in Section.interfaces(): - for address in Interface(iface).get_addr(): - # check an IP, if this is not loopback - regex_filter = re.compile('^(?!(127)|(::1)|(fe80))(?P<ipaddr>[a-f\d\.:]+)/\d+$') - if regex_filter.search(address): - if regex_filter.search(address).group('ipaddr') == config['sflow']['agent-address']: - agent_id_presented = True - break - if not agent_id_presented: - raise ConfigError("Your \"sflow agent-address\" does not exist in the system") + for server in flow_config['sflow']['server']: + if 'agent_address' in flow_config['sflow']: + if ip_address(server).version != ip_address(flow_config['sflow']['agent_address']).version: + raise ConfigError('IPv4 and IPv6 addresses can not be mixed in "sflow agent-address" and "sflow '\ + 'server". 
You need to set the same IP version for both "agent-address" and '\ + 'all sFlow servers') + + if 'agent_address' in flow_config['sflow']: + tmp = flow_config['sflow']['agent_address'] + if not is_addr_assigned(tmp): + print(f'Warning: Configured "sflow agent-address {tmp}" does not exist in the system!') # check NetFlow configuration - if config['netflow']['configured']: + if 'netflow' in flow_config: # check if at least one NetFlow collector is configured if NetFlow configuration is presented - if not config['netflow']['servers']: - raise ConfigError("You need to configure at least one NetFlow server") - - # check if configured netflow source-ip exist in the system - if config['netflow']['source-ip']: - source_ip_presented = None - for iface in Section.interfaces(): - for address in Interface(iface).get_addr(): - # check an IP - regex_filter = re.compile('^(?!(127)|(::1)|(fe80))(?P<ipaddr>[a-f\d\.:]+)/\d+$') - if regex_filter.search(address): - if regex_filter.search(address).group('ipaddr') == config['netflow']['source-ip']: - source_ip_presented = True - break - if not source_ip_presented: - raise ConfigError("Your \"netflow source-ip\" does not exist in the system") - - # check if engine-id compatible with selected protocol version - if config['netflow']['engine-id']: + if 'server' not in flow_config['netflow']: + raise ConfigError('You need to configure at least one NetFlow server!') + + # Check if configured netflow source-address exist in the system + if 'source_address' in flow_config['netflow']: + if not is_addr_assigned(flow_config['netflow']['source_address']): + tmp = flow_config['netflow']['source_address'] + print(f'Warning: Configured "netflow source-address {tmp}" does not exist on the system!') + + # Check if engine-id compatible with selected protocol version + if 'engine_id' in flow_config['netflow']: v5_filter = '^(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]):(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$' v9v10_filter = '^(\d|[1-9]\d{1,8}|[1-3]\d{9}|4[01]\d{8}|42[0-8]\d{7}|429[0-3]\d{6}|4294[0-8]\d{5}|42949[0-5]\d{4}|429496[0-6]\d{3}|4294967[01]\d{2}|42949672[0-8]\d|429496729[0-5])$' - if config['netflow']['version'] == '5': + engine_id = flow_config['netflow']['engine_id'] + version = flow_config['netflow']['version'] + + if flow_config['netflow']['version'] == '5': regex_filter = re.compile(v5_filter) - if not regex_filter.search(config['netflow']['engine-id']): - raise ConfigError("You cannot use NetFlow engine-id {} together with NetFlow protocol version {}".format(config['netflow']['engine-id'], config['netflow']['version'])) + if not regex_filter.search(engine_id): + raise ConfigError(f'You cannot use NetFlow engine-id "{engine_id}" '\ + f'together with NetFlow protocol version "{version}"!') else: regex_filter = re.compile(v9v10_filter) - if not regex_filter.search(config['netflow']['engine-id']): - raise ConfigError("You cannot use NetFlow engine-id {} together with NetFlow protocol version {}".format(config['netflow']['engine-id'], config['netflow']['version'])) + if not regex_filter.search(flow_config['netflow']['engine_id']): + raise ConfigError(f'Can not use NetFlow engine-id "{engine_id}" together '\ + f'with NetFlow protocol version "{version}"!') # return True if all checks were passed return True -def generate(config): - # skip all checks if flow-accounting was removed - if not config['flow-accounting-configured']: - return True +def generate(flow_config): + if not flow_config: + return None - # Calculate all necessary values - if config['buffer-size']: - # circular 
queue size - config['plugin_pipe_size'] = int(config['buffer-size']) * 1024**2 - else: - config['plugin_pipe_size'] = default_plugin_pipe_size * 1024**2 - # transfer buffer size - # recommended value from pmacct developers 1/1000 of pipe size - config['plugin_buffer_size'] = int(config['plugin_pipe_size'] / 1000) - - # Prepare a timeouts string - timeout_string = '' - for timeout_type, timeout_value in config['netflow']['timeout'].items(): - if timeout_value: - if timeout_string == '': - timeout_string = "{}{}={}".format(timeout_string, timeout_type, timeout_value) - else: - timeout_string = "{}:{}={}".format(timeout_string, timeout_type, timeout_value) - config['netflow']['timeout_string'] = timeout_string + render(uacctd_conf_path, 'netflow/uacctd.conf.tmpl', flow_config) - render(uacctd_conf_path, 'netflow/uacctd.conf.tmpl', { - 'templatecfg': config, - 'snaplen': default_captured_packet_size, - }) - - -def apply(config): - # define variables - command = None +def apply(flow_config): + action = 'restart' # Check if flow-accounting was removed and define command - if not config['flow-accounting-configured']: - command = 'systemctl stop uacctd.service' - else: - command = 'systemctl restart uacctd.service' + if not flow_config: + _nftables_config([], 'ingress') + _nftables_config([], 'egress') - # run command to start or stop flow-accounting - cmd(command, raising=ConfigError, message='Failed to start/stop flow-accounting') + # Stop flow-accounting daemon and remove configuration file + cmd('systemctl stop uacctd.service') + if os.path.exists(uacctd_conf_path): + os.unlink(uacctd_conf_path) + return - # configure iptables rules for defined interfaces - if config['interfaces']: - _iptables_config(config['interfaces'], 'ingress') + # Start/reload flow-accounting daemon + cmd(f'systemctl restart uacctd.service') + + # configure nftables rules for defined interfaces + if 'interface' in flow_config: + _nftables_config(flow_config['interface'], 'ingress', flow_config['packet_length']) # configure egress the same way if configured otherwise remove it - if config['enable-egress']: - _iptables_config(config['interfaces'], 'egress') + if 'enable_egress' in flow_config: + _nftables_config(flow_config['interface'], 'egress', flow_config['packet_length']) else: - _iptables_config([], 'egress') - else: - _iptables_config([], 'ingress') - _iptables_config([], 'egress') + _nftables_config([], 'egress') if __name__ == '__main__': try: diff --git a/src/conf_mode/host_name.py b/src/conf_mode/host_name.py index a7135911d..87bad0dc6 100755 --- a/src/conf_mode/host_name.py +++ b/src/conf_mode/host_name.py @@ -79,7 +79,7 @@ def get_config(config=None): # system static-host-mapping for hn in conf.list_nodes(['system', 'static-host-mapping', 'host-name']): hosts['static_host_mapping'][hn] = {} - hosts['static_host_mapping'][hn]['address'] = conf.return_value(['system', 'static-host-mapping', 'host-name', hn, 'inet']) + hosts['static_host_mapping'][hn]['address'] = conf.return_values(['system', 'static-host-mapping', 'host-name', hn, 'inet']) hosts['static_host_mapping'][hn]['aliases'] = conf.return_values(['system', 'static-host-mapping', 'host-name', hn, 'alias']) return hosts diff --git a/src/conf_mode/http-api.py b/src/conf_mode/http-api.py index 7e4b117c8..b5f5e919f 100755 --- a/src/conf_mode/http-api.py +++ b/src/conf_mode/http-api.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019 VyOS maintainers and contributors +# Copyright (C) 2019-2021 VyOS maintainers and contributors # # This program 
is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -13,25 +13,26 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -# -# import sys import os import json -import time + +from time import sleep from copy import deepcopy import vyos.defaults + from vyos.config import Config -from vyos import ConfigError +from vyos.template import render from vyos.util import cmd from vyos.util import call - +from vyos import ConfigError from vyos import airbag airbag.enable() -config_file = '/etc/vyos/http-api.conf' +api_conf_file = '/etc/vyos/http-api.conf' +systemd_service = '/run/systemd/system/vyos-http-api.service' vyos_conf_scripts_dir=vyos.defaults.directories['conf_mode'] @@ -49,21 +50,35 @@ def get_config(config=None): else: conf = Config() - if not conf.exists('service https api'): + base = ['service', 'https', 'api'] + if not conf.exists(base): return None - else: - conf.set_level('service https api') + # Do we run inside a VRF context? + vrf_path = ['service', 'https', 'vrf'] + if conf.exists(vrf_path): + http_api['vrf'] = conf.return_value(vrf_path) + + conf.set_level('service https api') if conf.exists('strict'): - http_api['strict'] = 'true' + http_api['strict'] = True if conf.exists('debug'): - http_api['debug'] = 'true' + http_api['debug'] = True + + if conf.exists('socket'): + http_api['socket'] = True if conf.exists('port'): port = conf.return_value('port') http_api['port'] = port + if conf.exists('cors'): + http_api['cors'] = {} + if conf.exists('cors allow-origin'): + origins = conf.return_values('cors allow-origin') + http_api['cors']['origins'] = origins[:] + if conf.exists('keys'): for name in conf.list_nodes('keys id'): if conf.exists('keys id {0} key'.format(name)): @@ -83,24 +98,31 @@ def verify(http_api): def generate(http_api): if http_api is None: + if os.path.exists(systemd_service): + os.unlink(systemd_service) return None if not os.path.exists('/etc/vyos'): os.mkdir('/etc/vyos') - with open(config_file, 'w') as f: + with open(api_conf_file, 'w') as f: json.dump(http_api, f, indent=2) + render(systemd_service, 'https/vyos-http-api.service.tmpl', http_api) return None def apply(http_api): + # Reload systemd manager configuration + call('systemctl daemon-reload') + service_name = 'vyos-http-api.service' + if http_api is not None: - call('systemctl restart vyos-http-api.service') + call(f'systemctl restart {service_name}') else: - call('systemctl stop vyos-http-api.service') + call(f'systemctl stop {service_name}') # Let uvicorn settle before restarting Nginx - time.sleep(2) + sleep(1) cmd(f'{vyos_conf_scripts_dir}/https.py', raising=ConfigError) diff --git a/src/conf_mode/https.py b/src/conf_mode/https.py index be4380462..37fa36797 100755 --- a/src/conf_mode/https.py +++ b/src/conf_mode/https.py @@ -23,16 +23,19 @@ import vyos.defaults import vyos.certbot_util from vyos.config import Config +from vyos.configverify import verify_vrf from vyos import ConfigError from vyos.pki import wrap_certificate from vyos.pki import wrap_private_key from vyos.template import render from vyos.util import call +from vyos.util import write_file from vyos import airbag airbag.enable() config_file = '/etc/nginx/sites-available/default' +systemd_override = r'/etc/systemd/system/nginx.service.d/override.conf' cert_dir = '/etc/ssl/certs' key_dir = '/etc/ssl/private' certbot_dir = vyos.defaults.directories['certbot'] @@ -58,10 
+61,11 @@ def get_config(config=None): else: conf = Config() - if not conf.exists('service https'): + base = ['service', 'https'] + if not conf.exists(base): return None - https = conf.get_config_dict('service https', get_first_key=True) + https = conf.get_config_dict(base, get_first_key=True) if https: https['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'), @@ -102,6 +106,8 @@ def verify(https): if not domains_found: raise ConfigError("At least one 'virtual-host <id> server-name' " "matching the 'certbot domain-name' is required.") + + verify_vrf(https) return None def generate(https): @@ -139,15 +145,17 @@ def generate(https): cert_path = os.path.join(cert_dir, f'{cert_name}.pem') key_path = os.path.join(key_dir, f'{cert_name}.pem') - with open(cert_path, 'w') as f: - f.write(wrap_certificate(pki_cert['certificate'])) + server_cert = str(wrap_certificate(pki_cert['certificate'])) + if 'ca-certificate' in cert_dict: + ca_cert = cert_dict['ca-certificate'] + server_cert += '\n' + str(wrap_certificate(https['pki']['ca'][ca_cert]['certificate'])) - with open(key_path, 'w') as f: - f.write(wrap_private_key(pki_cert['private']['key'])) + write_file(cert_path, server_cert) + write_file(key_path, wrap_private_key(pki_cert['private']['key'])) vyos_cert_data = { - "crt": cert_path, - "key": key_path + 'crt': cert_path, + 'key': key_path } for block in server_block_list: @@ -184,6 +192,8 @@ def generate(https): vhosts = https.get('api-restrict', {}).get('virtual-host', []) if vhosts: api_data['vhost'] = vhosts[:] + if 'socket' in list(api_settings): + api_data['socket'] = True if api_data: vhost_list = api_data.get('vhost', []) @@ -205,10 +215,12 @@ def generate(https): } render(config_file, 'https/nginx.default.tmpl', data) - + render(systemd_override, 'https/override.conf.tmpl', https) return None def apply(https): + # Reload systemd manager configuration + call('systemctl daemon-reload') if https is not None: call('systemctl restart nginx.service') else: diff --git a/src/conf_mode/interfaces-openvpn.py b/src/conf_mode/interfaces-openvpn.py index 02b7f83bf..3b8fae710 100755 --- a/src/conf_mode/interfaces-openvpn.py +++ b/src/conf_mode/interfaces-openvpn.py @@ -16,6 +16,7 @@ import os import re +import tempfile from cryptography.hazmat.primitives.asymmetric import ec from glob import glob @@ -26,6 +27,7 @@ from ipaddress import IPv6Address from ipaddress import IPv6Network from ipaddress import summarize_address_range from netifaces import interfaces +from secrets import SystemRandom from shutil import rmtree from vyos.config import Config @@ -48,6 +50,7 @@ from vyos.util import chown from vyos.util import dict_search from vyos.util import dict_search_args from vyos.util import makedir +from vyos.util import read_file from vyos.util import write_file from vyos.validate import is_addr_assigned @@ -60,6 +63,10 @@ group = 'openvpn' cfg_dir = '/run/openvpn' cfg_file = '/run/openvpn/{ifname}.conf' +otp_path = '/config/auth/openvpn' +otp_file = '/config/auth/openvpn/{ifname}-otp-secrets' +secret_chars = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567') +service_file = '/run/systemd/system/openvpn@{ifname}.service.d/20-override.conf' def get_config(config=None): """ @@ -80,7 +87,20 @@ def get_config(config=None): if 'deleted' not in openvpn: openvpn['pki'] = tmp_pki + # We have to get the dict using 'get_config_dict' instead of 'get_interface_dict' + # as 'get_interface_dict' merges the defaults in, so we can not check for defaults in there. 
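The check described in the comment above boils down to a nested-key lookup on the un-defaulted dict: only if "server mfa totp" was really set on the CLI do the merged-in defaults for it survive. A minimal sketch of that idea, with made-up values and a plain helper standing in for vyos.util.dict_search:

def nested_get(d, dotted_path):
    # walk a plain dict along 'a.b.c'; None means the node was never set
    node = d
    for key in dotted_path.split('.'):
        if not isinstance(node, dict) or key not in node:
            return None
        node = node[key]
    return node

# cli_only: what get_config_dict() returns without defaults merged in (made up)
cli_only = {'server': {'subnet': {'10.0.0.0/24': {}}}}
# merged: the same tree after defaults were merged, which pulled in 'mfa'
merged   = {'server': {'subnet': {'10.0.0.0/24': {}},
                       'mfa': {'totp': {'digits': '6'}}}}

if nested_get(cli_only, 'server.mfa.totp') is None:
    del merged['server']['mfa']   # defaults-only subtree, drop it

The real handler performs exactly this test with dict_search() on the raw dict, as the code that follows shows.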
+ tmp = conf.get_config_dict(base + [openvpn['ifname']], get_first_key=True) + + # We have to cleanup the config dict, as default values could enable features + # which are not explicitly enabled on the CLI. Example: server mfa totp + # originate comes with defaults, which will enable the + # totp plugin, even when not set via CLI so we + # need to check this first and drop those keys + if dict_search('server.mfa.totp', tmp) == None: + del openvpn['server']['mfa'] + openvpn['auth_user_pass_file'] = '/run/openvpn/{ifname}.pw'.format(**openvpn) + return openvpn def is_ec_private_key(pki, cert_name): @@ -134,7 +154,7 @@ def verify_pki(openvpn): if tls['certificate'] not in pki['certificate']: raise ConfigError(f'Invalid certificate on openvpn interface {interface}') - if dict_search_args(pki, 'certificate', tls['certificate'], 'private', 'password_protected'): + if dict_search_args(pki, 'certificate', tls['certificate'], 'private', 'password_protected') is not None: raise ConfigError(f'Cannot use encrypted private key on openvpn interface {interface}') if mode == 'server' and 'dh_params' not in tls and not is_ec_private_key(pki, tls['certificate']): @@ -169,6 +189,10 @@ def verify_pki(openvpn): def verify(openvpn): if 'deleted' in openvpn: + # remove totp secrets file if totp is not configured + if os.path.isfile(otp_file.format(**openvpn)): + os.remove(otp_file.format(**openvpn)) + verify_bridge_delete(openvpn) return None @@ -309,10 +333,10 @@ def verify(openvpn): if 'is_bridge_member' not in openvpn: raise ConfigError('Must specify "server subnet" or add interface to bridge in server mode') - - for client in (dict_search('client', openvpn) or []): - if len(client['ip']) > 1 or len(client['ipv6_ip']) > 1: - raise ConfigError(f'Server client "{client["name"]}": cannot specify more than 1 IPv4 and 1 IPv6 IP') + if hasattr(dict_search('server.client', openvpn), '__iter__'): + for client_k, client_v in dict_search('server.client', openvpn).items(): + if (client_v.get('ip') and len(client_v['ip']) > 1) or (client_v.get('ipv6_ip') and len(client_v['ipv6_ip']) > 1): + raise ConfigError(f'Server client "{client_k}": cannot specify more than 1 IPv4 and 1 IPv6 IP') if dict_search('server.client_ip_pool', openvpn): if not (dict_search('server.client_ip_pool.start', openvpn) and dict_search('server.client_ip_pool.stop', openvpn)): @@ -360,6 +384,29 @@ def verify(openvpn): if IPv6Address(client['ipv6_ip'][0]) in v6PoolNet: print(f'Warning: Client "{client["name"]}" IP {client["ipv6_ip"][0]} is in server IP pool, it is not reserved for this client.') + # add mfa users to the file the mfa plugin uses + if dict_search('server.mfa.totp', openvpn): + user_data = '' + if not os.path.isfile(otp_file.format(**openvpn)): + write_file(otp_file.format(**openvpn), user_data, + user=user, group=group, mode=0o644) + + ovpn_users = read_file(otp_file.format(**openvpn)) + for client in (dict_search('server.client', openvpn) or []): + exists = None + for ovpn_user in ovpn_users.split('\n'): + if re.search('^' + client + ' ', ovpn_user): + user_data += f'{ovpn_user}\n' + exists = 'true' + + if not exists: + random = SystemRandom() + totp_secret = ''.join(random.choice(secret_chars) for _ in range(16)) + user_data += f'{client} otp totp:sha1:base32:{totp_secret}::xxx *\n' + + write_file(otp_file.format(**openvpn), user_data, + user=user, group=group, mode=0o644) + else: # checks for both client and site-to-site go here if dict_search('server.reject_unconfigured_clients', openvpn): @@ -525,6 +572,7 @@ def 
generate_pki_files(openvpn): def generate(openvpn): interface = openvpn['ifname'] directory = os.path.dirname(cfg_file.format(**openvpn)) + plugin_dir = '/usr/lib/openvpn' # create base config directory on demand makedir(directory, user, group) # enforce proper permissions on /run/openvpn @@ -536,6 +584,11 @@ def generate(openvpn): if os.path.isdir(ccd_dir): rmtree(ccd_dir, ignore_errors=True) + # Remove systemd directories with overrides + service_dir = os.path.dirname(service_file.format(**openvpn)) + if os.path.isdir(service_dir): + rmtree(service_dir, ignore_errors=True) + if 'deleted' in openvpn or 'disable' in openvpn: return None @@ -571,14 +624,20 @@ def generate(openvpn): render(cfg_file.format(**openvpn), 'openvpn/server.conf.tmpl', openvpn, formater=lambda _: _.replace(""", '"'), user=user, group=group) + # Render 20-override.conf for OpenVPN service + render(service_file.format(**openvpn), 'openvpn/service-override.conf.tmpl', openvpn, + formater=lambda _: _.replace(""", '"'), user=user, group=group) + # Reload systemd services config to apply an override + call(f'systemctl daemon-reload') + return None def apply(openvpn): interface = openvpn['ifname'] - call(f'systemctl stop openvpn@{interface}.service') # Do some cleanup when OpenVPN is disabled/deleted if 'deleted' in openvpn or 'disable' in openvpn: + call(f'systemctl stop openvpn@{interface}.service') for cleanup_file in glob(f'/run/openvpn/{interface}.*'): if os.path.isfile(cleanup_file): os.unlink(cleanup_file) @@ -590,7 +649,7 @@ def apply(openvpn): # No matching OpenVPN process running - maybe it got killed or none # existed - nevertheless, spawn new OpenVPN process - call(f'systemctl start openvpn@{interface}.service') + call(f'systemctl reload-or-restart openvpn@{interface}.service') o = VTunIf(**openvpn) o.update(openvpn) diff --git a/src/conf_mode/interfaces-tunnel.py b/src/conf_mode/interfaces-tunnel.py index ef385d2e7..30f57ec0c 100755 --- a/src/conf_mode/interfaces-tunnel.py +++ b/src/conf_mode/interfaces-tunnel.py @@ -98,7 +98,7 @@ def verify(tunnel): # If tunnel source address any and key not set if tunnel['encapsulation'] in ['gre'] and \ - tunnel['source_address'] == '0.0.0.0' and \ + dict_search('source_address', tunnel) == '0.0.0.0' and \ dict_search('parameters.ip.key', tunnel) == None: raise ConfigError('Tunnel parameters ip key must be set!') @@ -107,19 +107,22 @@ def verify(tunnel): # Check pairs tunnel source-address/encapsulation/key with exists tunnels. # Prevent the same key for 2 tunnels with same source-address/encap. 
T2920 for tunnel_if in Section.interfaces('tunnel'): + # It makes no sense to run the test for re-used GRE keys on our + # own interface we are currently working on + if tunnel['ifname'] == tunnel_if: + continue tunnel_cfg = get_interface_config(tunnel_if) - exist_encap = tunnel_cfg['linkinfo']['info_kind'] - exist_source_address = tunnel_cfg['address'] - exist_key = tunnel_cfg['linkinfo']['info_data']['ikey'] - new_source_address = tunnel['source_address'] + # no match on encapsulation - bail out + if dict_search('linkinfo.info_kind', tunnel_cfg) != tunnel['encapsulation']: + continue + new_source_address = dict_search('source_address', tunnel) # Convert tunnel key to ip key, format "ip -j link show" # 1 => 0.0.0.1, 999 => 0.0.3.231 - orig_new_key = int(tunnel['parameters']['ip']['key']) - new_key = IPv4Address(orig_new_key) + orig_new_key = dict_search('parameters.ip.key', tunnel) + new_key = IPv4Address(int(orig_new_key)) new_key = str(new_key) - if tunnel['encapsulation'] == exist_encap and \ - new_source_address == exist_source_address and \ - new_key == exist_key: + if dict_search('address', tunnel_cfg) == new_source_address and \ + dict_search('linkinfo.info_data.ikey', tunnel_cfg) == new_key: raise ConfigError(f'Key "{orig_new_key}" for source-address "{new_source_address}" ' \ f'is already used for tunnel "{tunnel_if}"!') diff --git a/src/conf_mode/interfaces-vxlan.py b/src/conf_mode/interfaces-vxlan.py index 804f2d14f..1f097c4e3 100755 --- a/src/conf_mode/interfaces-vxlan.py +++ b/src/conf_mode/interfaces-vxlan.py @@ -44,6 +44,20 @@ def get_config(config=None): base = ['interfaces', 'vxlan'] vxlan = get_interface_dict(conf, base) + # We need to verify that no other VXLAN tunnel is configured when external + # mode is in use - Linux Kernel limitation + conf.set_level(base) + vxlan['other_tunnels'] = conf.get_config_dict([], key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + + # This if-clause is just to be sure - it will always evaluate to true + ifname = vxlan['ifname'] + if ifname in vxlan['other_tunnels']: + del vxlan['other_tunnels'][ifname] + if len(vxlan['other_tunnels']) == 0: + del vxlan['other_tunnels'] + return vxlan def verify(vxlan): @@ -63,8 +77,21 @@ def verify(vxlan): if not any(tmp in ['group', 'remote', 'source_address'] for tmp in vxlan): raise ConfigError('Group, remote or source-address must be configured') - if 'vni' not in vxlan: - raise ConfigError('Must configure VNI for VXLAN') + if 'vni' not in vxlan and 'external' not in vxlan: + raise ConfigError( + 'Must either configure VXLAN "vni" or use "external" CLI option!') + + if {'external', 'vni'} <= set(vxlan): + raise ConfigError('Can not specify both "external" and "VNI"!') + + if {'external', 'other_tunnels'} <= set(vxlan): + other_tunnels = ', '.join(vxlan['other_tunnels']) + raise ConfigError(f'Only one VXLAN tunnel is supported when "external" '\ + f'CLI option is used. 
Additional tunnels: {other_tunnels}') + + if 'gpe' in vxlan and 'external' not in vxlan: + raise ConfigError(f'VXLAN-GPE is only supported when "external" '\ + f'CLI option is used.') if 'source_interface' in vxlan: # VXLAN adds at least an overhead of 50 byte - we need to check the diff --git a/src/conf_mode/interfaces-wireless.py b/src/conf_mode/interfaces-wireless.py index 7b3de6e8a..af35b5f03 100755 --- a/src/conf_mode/interfaces-wireless.py +++ b/src/conf_mode/interfaces-wireless.py @@ -82,11 +82,12 @@ def get_config(config=None): tmp = conf.get_config_dict([], key_mangling=('-', '_'), get_first_key=True) if not (dict_search('security.wpa.passphrase', tmp) or dict_search('security.wpa.radius', tmp)): - del wifi['security']['wpa'] + if 'deleted' not in wifi: + del wifi['security']['wpa'] # defaults include RADIUS server specifics per TAG node which need to be # added to individual RADIUS servers instead - so we can simply delete them - if dict_search('security.wpa.radius.server.port', wifi): + if dict_search('security.wpa.radius.server.port', wifi) != None: del wifi['security']['wpa']['radius']['server']['port'] if not len(wifi['security']['wpa']['radius']['server']): del wifi['security']['wpa']['radius'] diff --git a/src/conf_mode/interfaces-wwan.py b/src/conf_mode/interfaces-wwan.py index faa5eb628..a4b033374 100755 --- a/src/conf_mode/interfaces-wwan.py +++ b/src/conf_mode/interfaces-wwan.py @@ -17,6 +17,7 @@ import os from sys import exit +from time import sleep from vyos.config import Config from vyos.configdict import get_interface_dict @@ -25,11 +26,18 @@ from vyos.configverify import verify_interface_exists from vyos.configverify import verify_vrf from vyos.ifconfig import WWANIf from vyos.util import cmd +from vyos.util import call from vyos.util import dict_search +from vyos.util import DEVNULL +from vyos.util import is_systemd_service_active +from vyos.util import write_file from vyos import ConfigError from vyos import airbag airbag.enable() +service_name = 'ModemManager.service' +cron_script = '/etc/cron.d/wwan' + def get_config(config=None): """ Retrive CLI config as dictionary. Dictionary can never be empty, as at least the @@ -42,6 +50,20 @@ def get_config(config=None): base = ['interfaces', 'wwan'] wwan = get_interface_dict(conf, base) + # We need to know the amount of other WWAN interfaces as ModemManager needs + # to be started or stopped. + conf.set_level(base) + wwan['other_interfaces'] = conf.get_config_dict([], key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + + # This if-clause is just to be sure - it will always evaluate to true + ifname = wwan['ifname'] + if ifname in wwan['other_interfaces']: + del wwan['other_interfaces'][ifname] + if len(wwan['other_interfaces']) == 0: + del wwan['other_interfaces'] + return wwan def verify(wwan): @@ -59,9 +81,26 @@ def verify(wwan): return None def generate(wwan): + if 'deleted' in wwan: + return None + + if not os.path.exists(cron_script): + write_file(cron_script, '*/5 * * * * root /usr/libexec/vyos/vyos-check-wwan.py') return None def apply(wwan): + if not is_systemd_service_active(service_name): + cmd(f'systemctl start {service_name}') + + counter = 100 + # Wait until a modem is detected and then we can continue + while counter > 0: + counter -= 1 + tmp = cmd('mmcli -L') + if tmp != 'No modems were found': + break + sleep(0.250) + # we only need the modem number. 
wwan0 -> 0, wwan1 -> 1 modem = wwan['ifname'].lstrip('wwan') base_cmd = f'mmcli --modem {modem}' @@ -71,6 +110,15 @@ def apply(wwan): w = WWANIf(wwan['ifname']) if 'deleted' in wwan or 'disable' in wwan: w.remove() + + # There are no other WWAN interfaces - stop the daemon + if 'other_interfaces' not in wwan: + cmd(f'systemctl stop {service_name}') + # Clean CRON helper script which is used for to re-connect when + # RF signal is lost + if os.path.exists(cron_script): + os.unlink(cron_script) + return None ip_type = 'ipv4' @@ -88,9 +136,12 @@ def apply(wwan): options += ',user={user},password={password}'.format(**wwan['authentication']) command = f'{base_cmd} --simple-connect="{options}"' - cmd(command) + call(command, stdout=DEVNULL) w.update(wwan) + if 'other_interfaces' not in wwan and 'deleted' in wwan: + cmd(f'systemctl start {service_name}') + return None if __name__ == '__main__': diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py index 59939d0fb..96f8f6fb6 100755 --- a/src/conf_mode/nat.py +++ b/src/conf_mode/nat.py @@ -42,7 +42,7 @@ if LooseVersion(kernel_version()) > LooseVersion('5.1'): else: k_mod = ['nft_nat', 'nft_chain_nat_ipv4'] -iptables_nat_config = '/tmp/vyos-nat-rules.nft' +nftables_nat_config = '/tmp/vyos-nat-rules.nft' def get_handler(json, chain, target): """ Get nftable rule handler number of given chain/target combination. @@ -93,7 +93,6 @@ def get_config(config=None): nat[direction]['rule'][rule] = dict_merge(default_values, nat[direction]['rule'][rule]) - # read in current nftable (once) for further processing tmp = cmd('nft -j list table raw') nftable_json = json.loads(tmp) @@ -106,9 +105,9 @@ def get_config(config=None): nat['helper_functions'] = 'remove' # Retrieve current table handler positions - nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_HELPER') + nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_HELPER') nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'NAT_CONNTRACK') - nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_HELPER') + nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_HELPER') nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'NAT_CONNTRACK') nat['deleted'] = '' return nat @@ -119,10 +118,10 @@ def get_config(config=None): nat['helper_functions'] = 'add' # Retrieve current table handler positions - nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_IGNORE') - nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_PREROUTING_HOOK') - nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_IGNORE') - nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_OUTPUT_HOOK') + nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_IGNORE') + nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_PREROUTING_HOOK') + nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_IGNORE') + nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_OUTPUT_HOOK') return nat @@ -180,14 +179,14 @@ def verify(nat): return None def generate(nat): - render(iptables_nat_config, 'firewall/nftables-nat.tmpl', nat, + render(nftables_nat_config, 'firewall/nftables-nat.tmpl', nat, permission=0o755) return None def apply(nat): - cmd(f'{iptables_nat_config}') - if os.path.isfile(iptables_nat_config): - os.unlink(iptables_nat_config) + cmd(f'{nftables_nat_config}') + if 
os.path.isfile(nftables_nat_config): + os.unlink(nftables_nat_config) return None diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py index f8bc073bb..8bf2e8073 100755 --- a/src/conf_mode/nat66.py +++ b/src/conf_mode/nat66.py @@ -35,7 +35,7 @@ airbag.enable() k_mod = ['nft_nat', 'nft_chain_nat'] -iptables_nat_config = '/tmp/vyos-nat66-rules.nft' +nftables_nat66_config = '/tmp/vyos-nat66-rules.nft' ndppd_config = '/run/ndppd/ndppd.conf' def get_handler(json, chain, target): @@ -79,9 +79,9 @@ def get_config(config=None): if not conf.exists(base): nat['helper_functions'] = 'remove' - nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_HELPER') + nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_HELPER') nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'NAT_CONNTRACK') - nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_HELPER') + nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_HELPER') nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'NAT_CONNTRACK') nat['deleted'] = '' return nat @@ -92,10 +92,10 @@ def get_config(config=None): nat['helper_functions'] = 'add' # Retrieve current table handler positions - nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_IGNORE') - nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_PREROUTING_HOOK') - nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_IGNORE') - nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_OUTPUT_HOOK') + nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_IGNORE') + nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_PREROUTING_HOOK') + nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_IGNORE') + nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_OUTPUT_HOOK') else: nat['helper_functions'] = 'has' @@ -117,7 +117,7 @@ def verify(nat): raise ConfigError(f'{err_msg} outbound-interface not specified') if config['outbound_interface'] not in interfaces(): - print(f'WARNING: rule "{rule}" interface "{config["outbound_interface"]}" does not exist on this system') + raise ConfigError(f'WARNING: rule "{rule}" interface "{config["outbound_interface"]}" does not exist on this system') addr = dict_search('translation.address', config) if addr != None: @@ -145,22 +145,22 @@ def verify(nat): return None def generate(nat): - render(iptables_nat_config, 'firewall/nftables-nat66.tmpl', nat, permission=0o755) + render(nftables_nat66_config, 'firewall/nftables-nat66.tmpl', nat, permission=0o755) render(ndppd_config, 'ndppd/ndppd.conf.tmpl', nat, permission=0o755) return None def apply(nat): if not nat: return None - cmd(f'{iptables_nat_config}') + cmd(f'{nftables_nat66_config}') if 'deleted' in nat or not dict_search('source.rule', nat): cmd('systemctl stop ndppd') if os.path.isfile(ndppd_config): os.unlink(ndppd_config) else: cmd('systemctl restart ndppd') - if os.path.isfile(iptables_nat_config): - os.unlink(iptables_nat_config) + if os.path.isfile(nftables_nat66_config): + os.unlink(nftables_nat66_config) return None diff --git a/src/conf_mode/netns.py b/src/conf_mode/netns.py new file mode 100755 index 000000000..0924eb616 --- /dev/null +++ b/src/conf_mode/netns.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or 
modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os + +from sys import exit +from tempfile import NamedTemporaryFile + +from vyos.config import Config +from vyos.configdict import node_changed +from vyos.ifconfig import Interface +from vyos.util import call +from vyos.util import dict_search +from vyos.util import get_interface_config +from vyos import ConfigError +from vyos import airbag +airbag.enable() + + +def netns_interfaces(c, match): + """ + get NETNS bound interfaces + """ + matched = [] + old_level = c.get_level() + c.set_level(['interfaces']) + section = c.get_config_dict([], get_first_key=True) + for type in section: + interfaces = section[type] + for name in interfaces: + interface = interfaces[name] + if 'netns' in interface: + v = interface.get('netns', '') + if v == match: + matched.append(name) + + c.set_level(old_level) + return matched + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + + base = ['netns'] + netns = conf.get_config_dict(base, get_first_key=True, + no_tag_node_value_mangle=True) + + # determine which NETNS has been removed + for name in node_changed(conf, base + ['name']): + if 'netns_remove' not in netns: + netns.update({'netns_remove' : {}}) + + netns['netns_remove'][name] = {} + # get NETNS bound interfaces + interfaces = netns_interfaces(conf, name) + if interfaces: netns['netns_remove'][name]['interface'] = interfaces + + return netns + +def verify(netns): + # ensure NETNS is not assigned to any interface + if 'netns_remove' in netns: + for name, config in netns['netns_remove'].items(): + if 'interface' in config: + raise ConfigError(f'Can not remove NETNS "{name}", it still has '\ + f'member interfaces!') + + if 'name' in netns: + for name, config in netns['name'].items(): + print(name) + + return None + + +def generate(netns): + if not netns: + return None + + return None + + +def apply(netns): + + for tmp in (dict_search('netns_remove', netns) or []): + if os.path.isfile(f'/run/netns/{tmp}'): + call(f'ip netns del {tmp}') + + if 'name' in netns: + for name, config in netns['name'].items(): + if not os.path.isfile(f'/run/netns/{name}'): + call(f'ip netns add {name}') + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/policy-route-interface.py b/src/conf_mode/policy-route-interface.py new file mode 100755 index 000000000..e81135a74 --- /dev/null +++ b/src/conf_mode/policy-route-interface.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os +import re + +from sys import argv +from sys import exit + +from vyos.config import Config +from vyos.ifconfig import Section +from vyos.template import render +from vyos.util import cmd +from vyos import ConfigError +from vyos import airbag +airbag.enable() + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + + ifname = argv[1] + ifpath = Section.get_config_path(ifname) + if_policy_path = f'interfaces {ifpath} policy' + + if_policy = conf.get_config_dict(if_policy_path, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + if_policy['ifname'] = ifname + if_policy['policy'] = conf.get_config_dict(['policy'], key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + return if_policy + +def verify(if_policy): + # bail out early - looks like removal from running config + if not if_policy: + return None + + for route in ['route', 'ipv6_route']: + if route in if_policy: + if route not in if_policy['policy']: + raise ConfigError('Policy route not configured') + + route_name = if_policy[route] + + if route_name not in if_policy['policy'][route]: + raise ConfigError(f'Invalid policy route name "{name}"') + + return None + +def generate(if_policy): + return None + +def cleanup_rule(table, chain, ifname, new_name=None): + results = cmd(f'nft -a list chain {table} {chain}').split("\n") + retval = None + for line in results: + if f'oifname "{ifname}"' in line: + if new_name and f'jump {new_name}' in line: + # new_name is used to clear rules for any previously referenced chains + # returns true when rule exists and doesn't need to be created + retval = True + continue + + handle_search = re.search('handle (\d+)', line) + if handle_search: + cmd(f'nft delete rule {table} {chain} handle {handle_search[1]}') + return retval + +def apply(if_policy): + ifname = if_policy['ifname'] + + route_chain = 'VYOS_PBR_PREROUTING' + ipv6_route_chain = 'VYOS_PBR6_PREROUTING' + + if 'route' in if_policy: + name = 'VYOS_PBR_' + if_policy['route'] + rule_exists = cleanup_rule('ip mangle', route_chain, ifname, name) + + if not rule_exists: + cmd(f'nft insert rule ip mangle {route_chain} iifname {ifname} counter jump {name}') + else: + cleanup_rule('ip mangle', route_chain, ifname) + + if 'ipv6_route' in if_policy: + name = 'VYOS_PBR6_' + if_policy['ipv6_route'] + rule_exists = cleanup_rule('ip6 mangle', ipv6_route_chain, ifname, name) + + if not rule_exists: + cmd(f'nft insert rule ip6 mangle {ipv6_route_chain} iifname {ifname} counter jump {name}') + else: + cleanup_rule('ip6 mangle', ipv6_route_chain, ifname) + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/policy-route.py b/src/conf_mode/policy-route.py new file mode 100755 index 000000000..d098be68d --- /dev/null +++ b/src/conf_mode/policy-route.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os + +from json import loads +from sys import exit + +from vyos.config import Config +from vyos.template import render +from vyos.util import cmd +from vyos.util import dict_search_args +from vyos.util import run +from vyos import ConfigError +from vyos import airbag +airbag.enable() + +mark_offset = 0x7FFFFFFF +nftables_conf = '/run/nftables_policy.conf' + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + base = ['policy'] + + if not conf.exists(base + ['route']) and not conf.exists(base + ['ipv6-route']): + return None + + policy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + return policy + +def verify(policy): + # bail out early - looks like removal from running config + if not policy: + return None + + for route in ['route', 'ipv6_route']: + if route in policy: + for name, pol_conf in policy[route].items(): + if 'rule' in pol_conf: + for rule_id, rule_conf in pol_conf.items(): + icmp = 'icmp' if route == 'route' else 'icmpv6' + if icmp in rule_conf: + icmp_defined = False + if 'type_name' in rule_conf[icmp]: + icmp_defined = True + if 'code' in rule_conf[icmp] or 'type' in rule_conf[icmp]: + raise ConfigError(f'{name} rule {rule_id}: Cannot use ICMP type/code with ICMP type-name') + if 'code' in rule_conf[icmp]: + icmp_defined = True + if 'type' not in rule_conf[icmp]: + raise ConfigError(f'{name} rule {rule_id}: ICMP code can only be defined if ICMP type is defined') + if 'type' in rule_conf[icmp]: + icmp_defined = True + + if icmp_defined and 'protocol' not in rule_conf or rule_conf['protocol'] != icmp: + raise ConfigError(f'{name} rule {rule_id}: ICMP type/code or type-name can only be defined if protocol is ICMP') + if 'set' in rule_conf: + if 'tcp_mss' in rule_conf['set']: + tcp_flags = dict_search_args(rule_conf, 'tcp', 'flags') + if not tcp_flags or 'SYN' not in tcp_flags.split(","): + raise ConfigError(f'{name} rule {rule_id}: TCP SYN flag must be set to modify TCP-MSS') + if 'tcp' in rule_conf: + if 'flags' in rule_conf['tcp']: + if 'protocol' not in rule_conf or rule_conf['protocol'] != 'tcp': + raise ConfigError(f'{name} rule {rule_id}: TCP flags can only be set if protocol is set to TCP') + + + return None + +def generate(policy): + if not policy: + if os.path.exists(nftables_conf): + os.unlink(nftables_conf) + return None + + if not os.path.exists(nftables_conf): + policy['first_install'] = True + + render(nftables_conf, 'firewall/nftables-policy.tmpl', policy) + return None + +def apply_table_marks(policy): + for route in ['route', 'ipv6_route']: + if route in policy: + for name, pol_conf in policy[route].items(): + if 'rule' in pol_conf: + for rule_id, rule_conf in pol_conf['rule'].items(): + set_table = dict_search_args(rule_conf, 'set', 'table') + if set_table: + if set_table == 'main': + set_table = '254' + table_mark = mark_offset - int(set_table) + cmd(f'ip rule add fwmark {table_mark} table {set_table}') + +def cleanup_table_marks(): + json_rules = cmd('ip -j -N rule list') + rules = loads(json_rules) + for rule in rules: + if 'fwmark' not in rule or 'table' not in rule: 
+ continue + fwmark = rule['fwmark'] + table = int(rule['table']) + if fwmark[:2] == '0x': + fwmark = int(fwmark, 16) + if (int(fwmark) == (mark_offset - table)): + cmd(f'ip rule del fwmark {fwmark} table {table}') + +def apply(policy): + if not policy or 'first_install' not in policy: + run(f'nft flush table ip mangle') + run(f'nft flush table ip6 mangle') + + if not policy: + cleanup_table_marks() + return None + + install_result = run(f'nft -f {nftables_conf}') + if install_result == 1: + raise ConfigError('Failed to apply policy based routing') + + if 'first_install' not in policy: + cleanup_table_marks() + + apply_table_marks(policy) + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/policy.py b/src/conf_mode/policy.py index 1a03d520b..e251396c7 100755 --- a/src/conf_mode/policy.py +++ b/src/conf_mode/policy.py @@ -171,9 +171,7 @@ def verify(policy): def generate(policy): if not policy: - policy['new_frr_config'] = '' return None - policy['new_frr_config'] = render_to_string('frr/policy.frr.tmpl', policy) return None @@ -190,8 +188,9 @@ def apply(policy): frr_cfg.modify_section(r'^bgp community-list .*') frr_cfg.modify_section(r'^bgp extcommunity-list .*') frr_cfg.modify_section(r'^bgp large-community-list .*') - frr_cfg.modify_section(r'^route-map .*') - frr_cfg.add_before('^line vty', policy['new_frr_config']) + frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', remove_stop_mark=True) + if 'new_frr_config' in policy: + frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) frr_cfg.commit_configuration(bgp_daemon) # The route-map used for the FIB (zebra) is part of the zebra daemon @@ -200,19 +199,11 @@ def apply(policy): frr_cfg.modify_section(r'^ipv6 access-list .*') frr_cfg.modify_section(r'^ip prefix-list .*') frr_cfg.modify_section(r'^ipv6 prefix-list .*') - frr_cfg.modify_section(r'^route-map .*') - frr_cfg.add_before('^line vty', policy['new_frr_config']) + frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', remove_stop_mark=True) + if 'new_frr_config' in policy: + frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) frr_cfg.commit_configuration(zebra_daemon) - # If FRR config is blank, rerun the blank commit x times due to frr-reload - # behavior/bug not properly clearing out on one commit. 
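The rewritten apply() above drops that workaround in favour of per-daemon commits where modify_section() strips a whole block, from the matching header through its terminating "exit" marker, before the freshly rendered config is added back. A standalone sketch of that stripping step on plain config text (an illustration of the idea, not the vyos.frr implementation):

import re

def drop_section(lines, start_re, stop_re='^exit', remove_stop_mark=True):
    # remove every block that starts at a line matching start_re and runs
    # through the next line matching stop_re
    out, skipping = [], False
    for line in lines:
        if not skipping and re.match(start_re, line):
            skipping = True
            continue
        if skipping:
            if re.match(stop_re, line):
                skipping = False
                if remove_stop_mark:
                    continue
            else:
                continue
        out.append(line)
    return out

# e.g. drop_section(running_cfg.splitlines(), r'^route-map .*') strips the old
# "route-map ... exit" blocks before the newly rendered ones are appended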
- if policy['new_frr_config'] == '': - for a in range(5): - frr_cfg.commit_configuration(zebra_daemon) - - # Save configuration to /run/frr/config/frr.conf - frr.save_configuration() - return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py index 348bae59f..4ebc0989c 100755 --- a/src/conf_mode/protocols_bfd.py +++ b/src/conf_mode/protocols_bfd.py @@ -16,10 +16,9 @@ import os -from sys import exit - from vyos.config import Config from vyos.configdict import dict_merge +from vyos.configverify import verify_vrf from vyos.template import is_ipv6 from vyos.template import render_to_string from vyos.validate import is_ipv6_link_local @@ -35,8 +34,9 @@ def get_config(config=None): else: conf = Config() base = ['protocols', 'bfd'] - bfd = conf.get_config_dict(base, get_first_key=True) - + bfd = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) # Bail out early if configuration tree does not exist if not conf.exists(base): return bfd @@ -79,28 +79,37 @@ def verify(bfd): # multihop and echo-mode cannot be used together if 'echo_mode' in peer_config: - raise ConfigError('Multihop and echo-mode cannot be used together') + raise ConfigError('BFD multihop and echo-mode cannot be used together') # multihop doesn't accept interface names if 'source' in peer_config and 'interface' in peer_config['source']: - raise ConfigError('Multihop and source interface cannot be used together') + raise ConfigError('BFD multihop and source interface cannot be used together') + + if 'profile' in peer_config: + profile_name = peer_config['profile'] + if 'profile' not in bfd or profile_name not in bfd['profile']: + raise ConfigError(f'BFD profile "{profile_name}" does not exist!') + + if 'vrf' in peer_config: + verify_vrf(peer_config) return None def generate(bfd): if not bfd: - bfd['new_frr_config'] = '' return None - - bfd['new_frr_config'] = render_to_string('frr/bfd.frr.tmpl', bfd) + bfd['new_frr_config'] = render_to_string('frr/bfdd.frr.tmpl', bfd) def apply(bfd): + bfd_daemon = 'bfdd' + # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration() - frr_cfg.modify_section('^bfd', '') - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', bfd['new_frr_config']) - frr_cfg.commit_configuration() + frr_cfg.load_configuration(bfd_daemon) + frr_cfg.modify_section('^bfd', stop_pattern='^exit', remove_stop_mark=True) + if 'new_frr_config' in bfd: + frr_cfg.add_before(frr.default_add_before, bfd['new_frr_config']) + frr_cfg.commit_configuration(bfd_daemon) return None diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py index 68284e0f9..d8704727c 100755 --- a/src/conf_mode/protocols_bgp.py +++ b/src/conf_mode/protocols_bgp.py @@ -183,6 +183,28 @@ def verify(bgp): raise ConfigError(f'Neighbor "{peer}" cannot have both ipv6-unicast and ipv6-labeled-unicast configured at the same time!') afi_config = peer_config['address_family'][afi] + + if 'conditionally_advertise' in afi_config: + if 'advertise_map' not in afi_config['conditionally_advertise']: + raise ConfigError('Must speficy advertise-map when conditionally-advertise is in use!') + # Verify advertise-map (which is a route-map) exists + verify_route_map(afi_config['conditionally_advertise']['advertise_map'], bgp) + + if ('exist_map' not in afi_config['conditionally_advertise'] and + 'non_exist_map' not in afi_config['conditionally_advertise']): + raise 
ConfigError('Must either speficy exist-map or non-exist-map when ' \ + 'conditionally-advertise is in use!') + + if {'exist_map', 'non_exist_map'} <= set(afi_config['conditionally_advertise']): + raise ConfigError('Can not specify both exist-map and non-exist-map for ' \ + 'conditionally-advertise!') + + if 'exist_map' in afi_config['conditionally_advertise']: + verify_route_map(afi_config['conditionally_advertise']['exist_map'], bgp) + + if 'non_exist_map' in afi_config['conditionally_advertise']: + verify_route_map(afi_config['conditionally_advertise']['non_exist_map'], bgp) + # Validate if configured Prefix list exists if 'prefix_list' in afi_config: for tmp in ['import', 'export']: @@ -255,21 +277,11 @@ def verify(bgp): tmp = dict_search(f'route_map.vpn.{export_import}', afi_config) if tmp: verify_route_map(tmp, bgp) - if afi in ['l2vpn_evpn'] and 'vrf' not in bgp: - # Some L2VPN EVPN AFI options are only supported under VRF - if 'vni' in afi_config: - for vni, vni_config in afi_config['vni'].items(): - if 'rd' in vni_config: - raise ConfigError('VNI route-distinguisher is only supported under EVPN VRF') - if 'route_target' in vni_config: - raise ConfigError('VNI route-target is only supported under EVPN VRF') return None def generate(bgp): if not bgp or 'deleted' in bgp: - bgp['frr_bgpd_config'] = '' - bgp['frr_zebra_config'] = '' return None bgp['protocol'] = 'bgp' # required for frr/vrf.route-map.frr.tmpl @@ -287,8 +299,9 @@ def apply(bgp): # The route-map used for the FIB (zebra) is part of the zebra daemon frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'(\s+)?ip protocol bgp route-map [-a-zA-Z0-9.]+$', '', '(\s|!)') - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', bgp['frr_zebra_config']) + frr_cfg.modify_section(r'(\s+)?ip protocol bgp route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') + if 'frr_zebra_config' in bgp: + frr_cfg.add_before(frr.default_add_before, bgp['frr_zebra_config']) frr_cfg.commit_configuration(zebra_daemon) # Generate empty helper string which can be ammended to FRR commands, it @@ -298,13 +311,11 @@ def apply(bgp): vrf = ' vrf ' + bgp['vrf'] frr_cfg.load_configuration(bgp_daemon) - frr_cfg.modify_section(f'^router bgp \d+{vrf}$', '') - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', bgp['frr_bgpd_config']) + frr_cfg.modify_section(f'^router bgp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True) + if 'frr_bgpd_config' in bgp: + frr_cfg.add_before(frr.default_add_before, bgp['frr_bgpd_config']) frr_cfg.commit_configuration(bgp_daemon) - # Save configuration to /run/frr/config/frr.conf - frr.save_configuration() - return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py index 4505e2496..9b4b215de 100755 --- a/src/conf_mode/protocols_isis.py +++ b/src/conf_mode/protocols_isis.py @@ -56,10 +56,10 @@ def get_config(config=None): # instead of the VRF instance. if vrf: isis['vrf'] = vrf - # As we no re-use this Python handler for both VRF and non VRF instances for - # IS-IS we need to find out if any interfaces changed so properly adjust - # the FRR configuration and not by acctident change interfaces from a - # different VRF. + # FRR has VRF support for different routing daemons. As interfaces belong + # to VRFs - or the global VRF, we need to check for changed interfaces so + # that they will be properly rendered for the FRR config. Also this eases + # removal of interfaces from the running configuration. 
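The node_changed() call that follows is what finds those removed interface nodes. Roughly, with made-up data in place of the live config session:

# rough, self-contained illustration only; the real helper inspects the
# running vs. proposed config session, not plain dicts
running  = {'interface': {'eth1': {}, 'eth2': {}}}
proposed = {'interface': {'eth1': {}}}

interfaces_removed = set(running['interface']) - set(proposed['interface'])
# -> {'eth2'}; the handler stores this as isis['interface_removed'] so the
#    matching "interface eth2" section can be wiped from the FRR config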
interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: isis['interface_removed'] = list(interfaces_removed) @@ -196,8 +196,6 @@ def verify(isis): def generate(isis): if not isis or 'deleted' in isis: - isis['frr_isisd_config'] = '' - isis['frr_zebra_config'] = '' return None isis['protocol'] = 'isis' # required for frr/vrf.route-map.frr.tmpl @@ -214,8 +212,9 @@ def apply(isis): # The route-map used for the FIB (zebra) is part of the zebra daemon frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'(\s+)?ip protocol isis route-map [-a-zA-Z0-9.]+$', '', '(\s|!)') - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', isis['frr_zebra_config']) + frr_cfg.modify_section('(\s+)?ip protocol isis route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') + if 'frr_zebra_config' in isis: + frr_cfg.add_before(frr.default_add_before, isis['frr_zebra_config']) frr_cfg.commit_configuration(zebra_daemon) # Generate empty helper string which can be ammended to FRR commands, it @@ -225,19 +224,18 @@ def apply(isis): vrf = ' vrf ' + isis['vrf'] frr_cfg.load_configuration(isis_daemon) - frr_cfg.modify_section(f'^router isis VyOS{vrf}$', '') + frr_cfg.modify_section(f'^router isis VyOS{vrf}', stop_pattern='^exit', remove_stop_mark=True) for key in ['interface', 'interface_removed']: if key not in isis: continue for interface in isis[key]: - frr_cfg.modify_section(f'^interface {interface}{vrf}$', '') + frr_cfg.modify_section(f'^interface {interface}{vrf}', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', isis['frr_isisd_config']) - frr_cfg.commit_configuration(isis_daemon) + if 'frr_isisd_config' in isis: + frr_cfg.add_before(frr.default_add_before, isis['frr_isisd_config']) - # Save configuration to /run/frr/config/frr.conf - frr.save_configuration() + frr_cfg.commit_configuration(isis_daemon) return None diff --git a/src/conf_mode/protocols_mpls.py b/src/conf_mode/protocols_mpls.py index 3b27608da..0b0c7d07b 100755 --- a/src/conf_mode/protocols_mpls.py +++ b/src/conf_mode/protocols_mpls.py @@ -66,36 +66,24 @@ def verify(mpls): def generate(mpls): # If there's no MPLS config generated, create dictionary key with no value. - if not mpls: - mpls['new_frr_config'] = '' + if not mpls or 'deleted' in mpls: return None - mpls['new_frr_config'] = render_to_string('frr/ldpd.frr.tmpl', mpls) + mpls['frr_ldpd_config'] = render_to_string('frr/ldpd.frr.tmpl', mpls) return None def apply(mpls): - # Define dictionary that will load FRR config - frr_cfg = {} + ldpd_damon = 'ldpd' + # Save original configuration prior to starting any commit actions - frr_cfg['original_config'] = frr.get_configuration(daemon='ldpd') - frr_cfg['modified_config'] = frr.replace_section(frr_cfg['original_config'], mpls['new_frr_config'], from_re='mpls.*') - - # If FRR config is blank, rerun the blank commit three times due to frr-reload - # behavior/bug not properly clearing out on one commit. 
- if mpls['new_frr_config'] == '': - for x in range(3): - frr.reload_configuration(frr_cfg['modified_config'], daemon='ldpd') - elif not 'ldp' in mpls: - for x in range(3): - frr.reload_configuration(frr_cfg['modified_config'], daemon='ldpd') - else: - # FRR mark configuration will test for syntax errors and throws an - # exception if any syntax errors is detected - frr.mark_configuration(frr_cfg['modified_config']) + frr_cfg = frr.FRRConfig() + + frr_cfg.load_configuration(ldpd_damon) + frr_cfg.modify_section(f'^mpls ldp', stop_pattern='^exit', remove_stop_mark=True) - # Commit resulting configuration to FRR, this will throw CommitError - # on failure - frr.reload_configuration(frr_cfg['modified_config'], daemon='ldpd') + if 'frr_ldpd_config' in mpls: + frr_cfg.add_before(frr.default_add_before, mpls['frr_ldpd_config']) + frr_cfg.commit_configuration(ldpd_damon) # Set number of entries in the platform label tables labels = '0' @@ -122,7 +110,7 @@ def apply(mpls): system_interfaces = [] # Populate system interfaces list with local MPLS capable interfaces for interface in glob('/proc/sys/net/mpls/conf/*'): - system_interfaces.append(os.path.basename(interface)) + system_interfaces.append(os.path.basename(interface)) # This is where the comparison is done on if an interface needs to be enabled/disabled. for system_interface in system_interfaces: interface_state = read_file(f'/proc/sys/net/mpls/conf/{system_interface}/input') @@ -138,7 +126,7 @@ def apply(mpls): system_interfaces = [] # If MPLS interfaces are not configured, set MPLS processing disabled for interface in glob('/proc/sys/net/mpls/conf/*'): - system_interfaces.append(os.path.basename(interface)) + system_interfaces.append(os.path.basename(interface)) for system_interface in system_interfaces: system_interface = system_interface.replace('.', '/') call(f'sysctl -wq net.mpls.conf.{system_interface}.input=0') diff --git a/src/conf_mode/protocols_nhrp.py b/src/conf_mode/protocols_nhrp.py index 12dacdba0..7eeb5cd30 100755 --- a/src/conf_mode/protocols_nhrp.py +++ b/src/conf_mode/protocols_nhrp.py @@ -16,6 +16,8 @@ from vyos.config import Config from vyos.configdict import node_changed +from vyos.firewall import find_nftables_rule +from vyos.firewall import remove_nftables_rule from vyos.template import render from vyos.util import process_named_running from vyos.util import run @@ -88,24 +90,19 @@ def generate(nhrp): def apply(nhrp): if 'tunnel' in nhrp: for tunnel, tunnel_conf in nhrp['tunnel'].items(): - if 'source_address' in tunnel_conf: - chain = f'VYOS_NHRP_{tunnel}_OUT_HOOK' - source_address = tunnel_conf['source_address'] + if 'source_address' in nhrp['if_tunnel'][tunnel]: + comment = f'VYOS_NHRP_{tunnel}' + source_address = nhrp['if_tunnel'][tunnel]['source_address'] - chain_exists = run(f'sudo iptables --check {chain} -j RETURN') == 0 - if not chain_exists: - run(f'sudo iptables --new {chain}') - run(f'sudo iptables --append {chain} -p gre -s {source_address} -d 224.0.0.0/4 -j DROP') - run(f'sudo iptables --append {chain} -j RETURN') - run(f'sudo iptables --insert OUTPUT 2 -j {chain}') + rule_handle = find_nftables_rule('ip filter', 'VYOS_FW_OUTPUT', ['ip protocol gre', f'ip saddr {source_address}', 'ip daddr 224.0.0.0/4']) + if not rule_handle: + run(f'sudo nft insert rule ip filter VYOS_FW_OUTPUT ip protocol gre ip saddr {source_address} ip daddr 224.0.0.0/4 counter drop comment "{comment}"') for tunnel in nhrp['del_tunnels']: - chain = f'VYOS_NHRP_{tunnel}_OUT_HOOK' - chain_exists = run(f'sudo iptables --check {chain} -j 
RETURN') == 0 - if chain_exists: - run(f'sudo iptables --delete OUTPUT -j {chain}') - run(f'sudo iptables --flush {chain}') - run(f'sudo iptables --delete-chain {chain}') + comment = f'VYOS_NHRP_{tunnel}' + rule_handle = find_nftables_rule('ip filter', 'VYOS_FW_OUTPUT', [f'comment "{comment}"']) + if rule_handle: + remove_nftables_rule('ip filter', 'VYOS_FW_OUTPUT', rule_handle) action = 'restart' if nhrp and 'tunnel' in nhrp else 'stop' run(f'systemctl {action} opennhrp') diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py index 6ccda2e5a..4895cde6f 100755 --- a/src/conf_mode/protocols_ospf.py +++ b/src/conf_mode/protocols_ospf.py @@ -56,10 +56,10 @@ def get_config(config=None): # instead of the VRF instance. if vrf: ospf['vrf'] = vrf - # As we no re-use this Python handler for both VRF and non VRF instances for - # OSPF we need to find out if any interfaces changed so properly adjust - # the FRR configuration and not by acctident change interfaces from a - # different VRF. + # FRR has VRF support for different routing daemons. As interfaces belong + # to VRFs - or the global VRF, we need to check for changed interfaces so + # that they will be properly rendered for the FRR config. Also this eases + # removal of interfaces from the running configuration. interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: ospf['interface_removed'] = list(interfaces_removed) @@ -177,11 +177,11 @@ def verify(ospf): raise ConfigError('Can not use OSPF interface area and area ' \ 'network configuration at the same time!') - if 'vrf' in ospf: # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. 
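The check that follows therefore asks the kernel rather than the config tree: an interface enslaved to a VRF reports that VRF as its "master" in iproute2's JSON output. A rough stand-in for the get_interface_config() lookup, assuming iproute2 with JSON support:

import json
from subprocess import run, PIPE

def interface_master(ifname):
    # 'master' is only present when the interface is enslaved (VRF, bridge, bond)
    out = run(['ip', '-d', '-j', 'link', 'show', 'dev', ifname],
              stdout=PIPE, text=True).stdout
    data = json.loads(out)
    return data[0].get('master') if data else None

# interface_master('eth1') == 'red'  ->  eth1 is already bound to VRF "red",
# so interface-specific OSPF options for it belong to that VRF instance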
+ if 'vrf' in ospf: vrf = ospf['vrf'] tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: @@ -191,8 +191,6 @@ def verify(ospf): def generate(ospf): if not ospf or 'deleted' in ospf: - ospf['frr_ospfd_config'] = '' - ospf['frr_zebra_config'] = '' return None ospf['protocol'] = 'ospf' # required for frr/vrf.route-map.frr.tmpl @@ -209,8 +207,9 @@ def apply(ospf): # The route-map used for the FIB (zebra) is part of the zebra daemon frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'(\s+)?ip protocol ospf route-map [-a-zA-Z0-9.]+$', '', '(\s|!)') - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', ospf['frr_zebra_config']) + frr_cfg.modify_section('(\s+)?ip protocol ospf route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') + if 'frr_zebra_config' in ospf: + frr_cfg.add_before(frr.default_add_before, ospf['frr_zebra_config']) frr_cfg.commit_configuration(zebra_daemon) # Generate empty helper string which can be ammended to FRR commands, it @@ -220,20 +219,18 @@ def apply(ospf): vrf = ' vrf ' + ospf['vrf'] frr_cfg.load_configuration(ospf_daemon) - frr_cfg.modify_section(f'^router ospf{vrf}$', '') + frr_cfg.modify_section(f'^router ospf{vrf}', stop_pattern='^exit', remove_stop_mark=True) for key in ['interface', 'interface_removed']: if key not in ospf: continue for interface in ospf[key]: - frr_cfg.modify_section(f'^interface {interface}{vrf}$', '') + frr_cfg.modify_section(f'^interface {interface}{vrf}', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', ospf['frr_ospfd_config']) + if 'frr_ospfd_config' in ospf: + frr_cfg.add_before(frr.default_add_before, ospf['frr_ospfd_config']) frr_cfg.commit_configuration(ospf_daemon) - # Save configuration to /run/frr/config/frr.conf - frr.save_configuration() - return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py index 536ffa690..f8e733ba5 100755 --- a/src/conf_mode/protocols_ospfv3.py +++ b/src/conf_mode/protocols_ospfv3.py @@ -17,32 +17,80 @@ import os from sys import exit +from sys import argv from vyos.config import Config from vyos.configdict import dict_merge +from vyos.configdict import node_changed from vyos.configverify import verify_common_route_maps +from vyos.configverify import verify_route_map +from vyos.configverify import verify_interface_exists from vyos.template import render_to_string from vyos.ifconfig import Interface +from vyos.util import dict_search +from vyos.util import get_interface_config from vyos.xml import defaults from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() -frr_daemon = 'ospf6d' - def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'ospfv3'] + + vrf = None + if len(argv) > 1: + vrf = argv[1] + + base_path = ['protocols', 'ospfv3'] + + # eqivalent of the C foo ? 'a' : 'b' statement + base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospfv3'] or base_path ospfv3 = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + # Assign the name of our VRF context. This MUST be done before the return + # statement below, else on deletion we will delete the default instance + # instead of the VRF instance. + if vrf: ospfv3['vrf'] = vrf + + # FRR has VRF support for different routing daemons. 
As interfaces belong + # to VRFs - or the global VRF, we need to check for changed interfaces so + # that they will be properly rendered for the FRR config. Also this eases + # removal of interfaces from the running configuration. + interfaces_removed = node_changed(conf, base + ['interface']) + if interfaces_removed: + ospfv3['interface_removed'] = list(interfaces_removed) + # Bail out early if configuration tree does not exist if not conf.exists(base): + ospfv3.update({'deleted' : ''}) return ospfv3 + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + # XXX: Note that we can not call defaults(base), as defaults does not work + # on an instance of a tag node. As we use the exact same CLI definition for + # both the non-vrf and vrf version this is absolutely safe! + default_values = defaults(base_path) + + # We have to cleanup the default dict, as default values could enable features + # which are not explicitly enabled on the CLI. Example: default-information + # originate comes with a default metric-type of 2, which will enable the + # entire default-information originate tree, even when not set via CLI so we + # need to check this first and probably drop that key. + if dict_search('default_information.originate', ospfv3) is None: + del default_values['default_information'] + + # XXX: T2665: we currently have no nice way for defaults under tag nodes, + # clean them out and add them manually :( + del default_values['interface'] + + # merge in remaining default values + ospfv3 = dict_merge(default_values, ospfv3) + # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # @@ -60,34 +108,68 @@ def verify(ospfv3): verify_common_route_maps(ospfv3) + # As we can have a default-information route-map, we need to validate it! + route_map_name = dict_search('default_information.originate.route_map', ospfv3) + if route_map_name: verify_route_map(route_map_name, ospfv3) + + if 'area' in ospfv3: + for area, area_config in ospfv3['area'].items(): + if 'area_type' in area_config: + if len(area_config['area_type']) > 1: + raise ConfigError(f'Can only configure one area-type for OSPFv3 area "{area}"!') + if 'interface' in ospfv3: - for ifname, if_config in ospfv3['interface'].items(): - if 'ifmtu' in if_config: - mtu = Interface(ifname).get_mtu() - if int(if_config['ifmtu']) > int(mtu): + for interface, interface_config in ospfv3['interface'].items(): + verify_interface_exists(interface) + if 'ifmtu' in interface_config: + mtu = Interface(interface).get_mtu() + if int(interface_config['ifmtu']) > int(mtu): raise ConfigError(f'OSPFv3 ifmtu can not exceed physical MTU of "{mtu}"') + # If interface specific options are set, we must ensure that the + # interface is bound to our requesting VRF. Due to the VyOS + # priorities the interface is bound to the VRF after creation of + # the VRF itself, and before any routing protocol is configured. 
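
Earlier in this handler's get_config(), the default-information subtree is pruned before the XML defaults are merged in. A toy illustration of why, assuming dict_merge() lets the CLI dictionary override the defaults (the names below are illustrative, not the handler's own):

    defaults = {'default_information': {'originate': {'metric_type': '2'}}}
    cli = {}  # the user never configured default-information originate

    # a blind merge would make the feature appear configured
    blindly_merged = {**defaults, **cli}
    assert 'default_information' in blindly_merged

    # so the handler inspects the CLI dict first and prunes the default subtree
    if cli.get('default_information', {}).get('originate') is None:
        defaults.pop('default_information', None)

    merged = {**defaults, **cli}
    assert 'default_information' not in merged
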
+ if 'vrf' in ospfv3: + vrf = ospfv3['vrf'] + tmp = get_interface_config(interface) + if 'master' not in tmp or tmp['master'] != vrf: + raise ConfigError(f'Interface {interface} is not a member of VRF {vrf}!') + return None def generate(ospfv3): - if not ospfv3: - ospfv3['new_frr_config'] = '' + if not ospfv3 or 'deleted' in ospfv3: return None ospfv3['new_frr_config'] = render_to_string('frr/ospf6d.frr.tmpl', ospfv3) return None def apply(ospfv3): + ospf6_daemon = 'ospf6d' + # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr_daemon) - frr_cfg.modify_section(r'^interface \S+', '') - frr_cfg.modify_section('^router ospf6$', '') - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', ospfv3['new_frr_config']) - frr_cfg.commit_configuration(frr_daemon) - - # Save configuration to /run/frr/config/frr.conf - frr.save_configuration() + + # Generate empty helper string which can be ammended to FRR commands, it + # will be either empty (default VRF) or contain the "vrf <name" statement + vrf = '' + if 'vrf' in ospfv3: + vrf = ' vrf ' + ospfv3['vrf'] + + frr_cfg.load_configuration(ospf6_daemon) + frr_cfg.modify_section(f'^router ospf6{vrf}', stop_pattern='^exit', remove_stop_mark=True) + + for key in ['interface', 'interface_removed']: + if key not in ospfv3: + continue + for interface in ospfv3[key]: + frr_cfg.modify_section(f'^interface {interface}{vrf}', stop_pattern='^exit', remove_stop_mark=True) + + if 'new_frr_config' in ospfv3: + frr_cfg.add_before(frr.default_add_before, ospfv3['new_frr_config']) + + frr_cfg.commit_configuration(ospf6_daemon) return None diff --git a/src/conf_mode/protocols_rip.py b/src/conf_mode/protocols_rip.py index e56eb1f56..300f56489 100755 --- a/src/conf_mode/protocols_rip.py +++ b/src/conf_mode/protocols_rip.py @@ -20,6 +20,7 @@ from sys import exit from vyos.config import Config from vyos.configdict import dict_merge +from vyos.configdict import node_changed from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list @@ -39,8 +40,17 @@ def get_config(config=None): base = ['protocols', 'rip'] rip = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + # FRR has VRF support for different routing daemons. As interfaces belong + # to VRFs - or the global VRF, we need to check for changed interfaces so + # that they will be properly rendered for the FRR config. Also this eases + # removal of interfaces from the running configuration. 
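
Several handlers in this commit now record interfaces that were removed from the CLI so their FRR blocks can be erased. A rough sketch of the idea, assuming node_changed() reports the child nodes that differ between the effective (running) and the proposed configuration under the given path:

    def removed_children(effective_children, session_children):
        """Child node names present in the running config but gone from the
        proposed one - e.g. interfaces whose OSPF/RIP options were deleted."""
        return sorted(set(effective_children) - set(session_children))

    # removed_children(['eth0', 'eth1'], ['eth0']) -> ['eth1']
    # every name returned later gets its "interface <name>" section stripped
    # from the daemon configuration before the new rendering is committed
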
+ interfaces_removed = node_changed(conf, base + ['interface']) + if interfaces_removed: + rip['interface_removed'] = list(interfaces_removed) + # Bail out early if configuration tree does not exist if not conf.exists(base): + rip.update({'deleted' : ''}) return rip # We have gathered the dict representation of the CLI, but there are default @@ -89,12 +99,10 @@ def verify(rip): f'with "split-horizon disable" for "{interface}"!') def generate(rip): - if not rip: - rip['new_frr_config'] = '' + if not rip or 'deleted' in rip: return None - rip['new_frr_config'] = render_to_string('frr/rip.frr.tmpl', rip) - + rip['new_frr_config'] = render_to_string('frr/ripd.frr.tmpl', rip) return None def apply(rip): @@ -106,19 +114,22 @@ def apply(rip): # The route-map used for the FIB (zebra) is part of the zebra daemon frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'^ip protocol rip route-map [-a-zA-Z0-9.]+$', '') + frr_cfg.modify_section('^ip protocol rip route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') frr_cfg.commit_configuration(zebra_daemon) frr_cfg.load_configuration(rip_daemon) - frr_cfg.modify_section(r'key chain \S+', '') - frr_cfg.modify_section(r'interface \S+', '') - frr_cfg.modify_section('^router rip$', '') + frr_cfg.modify_section('^key chain \S+', stop_pattern='^exit', remove_stop_mark=True) + frr_cfg.modify_section('^router rip', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', rip['new_frr_config']) - frr_cfg.commit_configuration(rip_daemon) + for key in ['interface', 'interface_removed']: + if key not in rip: + continue + for interface in rip[key]: + frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - # Save configuration to /run/frr/config/frr.conf - frr.save_configuration() + if 'new_frr_config' in rip: + frr_cfg.add_before(frr.default_add_before, rip['new_frr_config']) + frr_cfg.commit_configuration(rip_daemon) return None diff --git a/src/conf_mode/protocols_ripng.py b/src/conf_mode/protocols_ripng.py index aaec5dacb..d9b8c0b30 100755 --- a/src/conf_mode/protocols_ripng.py +++ b/src/conf_mode/protocols_ripng.py @@ -31,8 +31,6 @@ from vyos import frr from vyos import airbag airbag.enable() -frr_daemon = 'ripngd' - def get_config(config=None): if config: conf = config @@ -95,21 +93,28 @@ def generate(ripng): ripng['new_frr_config'] = '' return None - ripng['new_frr_config'] = render_to_string('frr/ripng.frr.tmpl', ripng) + ripng['new_frr_config'] = render_to_string('frr/ripngd.frr.tmpl', ripng) return None def apply(ripng): + ripng_daemon = 'ripngd' + zebra_daemon = 'zebra' + # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr_daemon) - frr_cfg.modify_section(r'key chain \S+', '') - frr_cfg.modify_section(r'interface \S+', '') - frr_cfg.modify_section('router ripng', '') - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', ripng['new_frr_config']) - frr_cfg.commit_configuration(frr_daemon) - - # Save configuration to /run/frr/config/frr.conf - frr.save_configuration() + + # The route-map used for the FIB (zebra) is part of the zebra daemon + frr_cfg.load_configuration(zebra_daemon) + frr_cfg.modify_section('^ipv6 protocol ripng route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') + frr_cfg.commit_configuration(zebra_daemon) + + frr_cfg.load_configuration(ripng_daemon) + frr_cfg.modify_section('key chain \S+', stop_pattern='^exit', remove_stop_mark=True) + 
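
The repeated modify_section(..., stop_pattern='^exit', remove_stop_mark=True) calls replace the old anchored-regex removal. A simplified sketch of what such a call presumably does to the loaded daemon configuration; the actual implementation lives in vyos.frr and may differ:

    import re

    def strip_section(lines, start, stop='^exit', remove_stop_mark=True):
        """Drop every line from one matching 'start' up to the next line
        matching 'stop'; keep or drop the stop line as requested."""
        out, skipping = [], False
        for line in lines:
            if skipping:
                if re.match(stop, line):
                    skipping = False
                    if remove_stop_mark:
                        continue
                else:
                    continue
            elif re.match(start, line):
                skipping = True
                continue
            out.append(line)
        return out

    running = ['router ripng', ' network 2001:db8::/32', 'exit', 'line vty']
    assert strip_section(running, r'^router ripng') == ['line vty']
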
frr_cfg.modify_section('interface \S+', stop_pattern='^exit', remove_stop_mark=True) + frr_cfg.modify_section('^router ripng', stop_pattern='^exit', remove_stop_mark=True) + if 'new_frr_config' in ripng: + frr_cfg.add_before(frr.default_add_before, ripng['new_frr_config']) + frr_cfg.commit_configuration(ripng_daemon) return None diff --git a/src/conf_mode/protocols_rpki.py b/src/conf_mode/protocols_rpki.py index 947c8ab7a..51ad0d315 100755 --- a/src/conf_mode/protocols_rpki.py +++ b/src/conf_mode/protocols_rpki.py @@ -28,8 +28,6 @@ from vyos import frr from vyos import airbag airbag.enable() -frr_daemon = 'bgpd' - def get_config(config=None): if config: conf = config @@ -38,7 +36,9 @@ def get_config(config=None): base = ['protocols', 'rpki'] rpki = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + # Bail out early if configuration tree does not exist if not conf.exists(base): + rpki.update({'deleted' : ''}) return rpki # We have gathered the dict representation of the CLI, but there are default @@ -79,17 +79,22 @@ def verify(rpki): return None def generate(rpki): + if not rpki: + return rpki['new_frr_config'] = render_to_string('frr/rpki.frr.tmpl', rpki) return None def apply(rpki): + bgp_daemon = 'bgpd' + # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr_daemon) - frr_cfg.modify_section('rpki', '') - frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', rpki['new_frr_config']) - frr_cfg.commit_configuration(frr_daemon) + frr_cfg.load_configuration(bgp_daemon) + frr_cfg.modify_section('^rpki', stop_pattern='^exit', remove_stop_mark=True) + if 'new_frr_config' in rpki: + frr_cfg.add_before(frr.default_add_before, rpki['new_frr_config']) + frr_cfg.commit_configuration(bgp_daemon) return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_static.py b/src/conf_mode/protocols_static.py index 338247e30..c1e427b16 100755 --- a/src/conf_mode/protocols_static.py +++ b/src/conf_mode/protocols_static.py @@ -21,6 +21,7 @@ from sys import argv from vyos.config import Config from vyos.configdict import dict_merge +from vyos.configdict import get_dhcp_interfaces from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_vrf from vyos.template import render_to_string @@ -56,6 +57,10 @@ def get_config(config=None): # Merge policy dict into "regular" config dict static = dict_merge(tmp, static) + # T3680 - get a list of all interfaces currently configured to use DHCP + tmp = get_dhcp_interfaces(conf, vrf) + if tmp: static['dhcp'] = tmp + return static def verify(static): @@ -80,7 +85,9 @@ def verify(static): return None def generate(static): - static['new_frr_config'] = render_to_string('frr/static.frr.tmpl', static) + if not static: + return None + static['new_frr_config'] = render_to_string('frr/staticd.frr.tmpl', static) return None def apply(static): @@ -92,24 +99,21 @@ def apply(static): # The route-map used for the FIB (zebra) is part of the zebra daemon frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'^ip protocol static route-map [-a-zA-Z0-9.]+$', '') + frr_cfg.modify_section(r'^ip protocol static route-map [-a-zA-Z0-9.]+', '') frr_cfg.commit_configuration(zebra_daemon) - frr_cfg.load_configuration(static_daemon) if 'vrf' in static: vrf = static['vrf'] - frr_cfg.modify_section(f'^vrf {vrf}$', '') + frr_cfg.modify_section(f'^vrf {vrf}', stop_pattern='^exit', remove_stop_mark=True) else: - frr_cfg.modify_section(r'^ip route 
.*', '') - frr_cfg.modify_section(r'^ipv6 route .*', '') + frr_cfg.modify_section(r'^ip route .*') + frr_cfg.modify_section(r'^ipv6 route .*') - frr_cfg.add_before(r'(interface .*|line vty)', static['new_frr_config']) + if 'new_frr_config' in static: + frr_cfg.add_before(frr.default_add_before, static['new_frr_config']) frr_cfg.commit_configuration(static_daemon) - # Save configuration to /run/frr/config/frr.conf - frr.save_configuration() - return None if __name__ == '__main__': diff --git a/src/conf_mode/service_mdns-repeater.py b/src/conf_mode/service_mdns-repeater.py index c920920ed..d31a0c49e 100755 --- a/src/conf_mode/service_mdns-repeater.py +++ b/src/conf_mode/service_mdns-repeater.py @@ -28,7 +28,7 @@ from vyos import ConfigError from vyos import airbag airbag.enable() -config_file = r'/etc/default/mdns-repeater' +config_file = '/run/avahi-daemon/avahi-daemon.conf' vrrp_running_file = '/run/mdns_vrrp_active' def get_config(config=None): @@ -92,12 +92,12 @@ def generate(mdns): if len(mdns['interface']) < 2: return None - render(config_file, 'mdns-repeater/mdns-repeater.tmpl', mdns) + render(config_file, 'mdns-repeater/avahi-daemon.tmpl', mdns) return None def apply(mdns): if not mdns or 'disable' in mdns: - call('systemctl stop mdns-repeater.service') + call('systemctl stop avahi-daemon.service') if os.path.exists(config_file): os.unlink(config_file) @@ -106,16 +106,16 @@ def apply(mdns): else: if 'vrrp_disable' not in mdns and os.path.exists(vrrp_running_file): os.unlink(vrrp_running_file) - + if mdns['vrrp_exists'] and 'vrrp_disable' in mdns: if not os.path.exists(vrrp_running_file): os.mknod(vrrp_running_file) # vrrp script looks for this file to update mdns repeater if len(mdns['interface']) < 2: - call('systemctl stop mdns-repeater.service') + call('systemctl stop avahi-daemon.service') return None - call('systemctl restart mdns-repeater.service') + call('systemctl restart avahi-daemon.service') return None diff --git a/src/conf_mode/service_pppoe-server.py b/src/conf_mode/service_pppoe-server.py index 9fbd531da..1f31d132d 100755 --- a/src/conf_mode/service_pppoe-server.py +++ b/src/conf_mode/service_pppoe-server.py @@ -24,8 +24,11 @@ from vyos.configverify import verify_accel_ppp_base_service from vyos.template import render from vyos.util import call from vyos.util import dict_search +from vyos.util import get_interface_config from vyos import ConfigError from vyos import airbag +from vyos.range_regex import range_to_regex + airbag.enable() pppoe_conf = r'/run/accel-pppd/pppoe.conf' @@ -56,6 +59,11 @@ def verify(pppoe): if 'interface' not in pppoe: raise ConfigError('At least one listen interface must be defined!') + # Check is interface exists in the system + for iface in pppoe['interface']: + if not get_interface_config(iface): + raise ConfigError(f'Interface {iface} does not exist!') + # local ippool and gateway settings config checks if not (dict_search('client_ip_pool.subnet', pppoe) or (dict_search('client_ip_pool.start', pppoe) and @@ -73,6 +81,13 @@ def generate(pppoe): if not pppoe: return None + # Generate special regex for dynamic interfaces + for iface in pppoe['interface']: + if 'vlan_range' in pppoe['interface'][iface]: + pppoe['interface'][iface]['regex'] = [] + for vlan_range in pppoe['interface'][iface]['vlan_range']: + pppoe['interface'][iface]['regex'].append(range_to_regex(vlan_range)) + render(pppoe_conf, 'accel-ppp/pppoe.config.tmpl', pppoe) if dict_search('authentication.mode', pppoe) == 'local': diff --git a/src/conf_mode/snmp.py 
b/src/conf_mode/snmp.py index 23e45a5b7..8ce48780b 100755 --- a/src/conf_mode/snmp.py +++ b/src/conf_mode/snmp.py @@ -19,70 +19,49 @@ import os from sys import exit from vyos.config import Config +from vyos.configdict import dict_merge from vyos.configverify import verify_vrf -from vyos.snmpv3_hashgen import plaintext_to_md5, plaintext_to_sha1, random +from vyos.snmpv3_hashgen import plaintext_to_md5 +from vyos.snmpv3_hashgen import plaintext_to_sha1 +from vyos.snmpv3_hashgen import random from vyos.template import render -from vyos.template import is_ipv4 -from vyos.util import call, chmod_755 +from vyos.util import call +from vyos.util import chmod_755 +from vyos.util import dict_search from vyos.validate import is_addr_assigned from vyos.version import get_version_data -from vyos import ConfigError, airbag +from vyos.xml import defaults +from vyos import ConfigError +from vyos import airbag airbag.enable() config_file_client = r'/etc/snmp/snmp.conf' config_file_daemon = r'/etc/snmp/snmpd.conf' config_file_access = r'/usr/share/snmp/snmpd.conf' config_file_user = r'/var/lib/snmp/snmpd.conf' -default_script_dir = r'/config/user-data/' systemd_override = r'/etc/systemd/system/snmpd.service.d/override.conf' +systemd_service = 'snmpd.service' -# SNMP OIDs used to mark auth/priv type -OIDs = { - 'md5' : '.1.3.6.1.6.3.10.1.1.2', - 'sha' : '.1.3.6.1.6.3.10.1.1.3', - 'aes' : '.1.3.6.1.6.3.10.1.2.4', - 'des' : '.1.3.6.1.6.3.10.1.2.2', - 'none': '.1.3.6.1.6.3.10.1.2.1' -} - -default_config_data = { - 'listen_on': [], - 'listen_address': [], - 'ipv6_enabled': 'True', - 'communities': [], - 'smux_peers': [], - 'location' : '', - 'description' : '', - 'contact' : '', - 'route_table': 'False', - 'trap_source': '', - 'trap_targets': [], - 'vyos_user': '', - 'vyos_user_pass': '', - 'version': '', - 'v3_enabled': 'False', - 'v3_engineid': '', - 'v3_groups': [], - 'v3_traps': [], - 'v3_users': [], - 'v3_views': [], - 'script_ext': [] -} - -def rmfile(file): - if os.path.isfile(file): - os.unlink(file) - -def get_config(): - snmp = default_config_data - conf = Config() - if not conf.exists('service snmp'): - return None +def get_config(config=None): + if config: + conf = config else: - if conf.exists('system ipv6 disable'): - snmp['ipv6_enabled'] = False + conf = Config() + base = ['service', 'snmp'] + + snmp = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True, no_tag_node_value_mangle=True) + if not conf.exists(base): + snmp.update({'deleted' : ''}) + + if conf.exists(['service', 'lldp', 'snmp', 'enable']): + snmp.update({'lldp_snmp' : ''}) - conf.set_level('service snmp') + if conf.exists(['system', 'ipv6', 'disable']): + snmp.update({'ipv6_disabled' : ''}) + + if 'deleted' in snmp: + return snmp version_data = get_version_data() snmp['version'] = version_data['version'] @@ -91,461 +70,207 @@ def get_config(): snmp['vyos_user'] = 'vyos' + random(8) snmp['vyos_user_pass'] = random(16) - if conf.exists('community'): - for name in conf.list_nodes('community'): - community = { - 'name': name, - 'authorization': 'ro', - 'network_v4': [], - 'network_v6': [], - 'has_source' : False - } - - if conf.exists('community {0} authorization'.format(name)): - community['authorization'] = conf.return_value('community {0} authorization'.format(name)) - - # Subnet of SNMP client(s) allowed to contact system - if conf.exists('community {0} network'.format(name)): - for addr in conf.return_values('community {0} network'.format(name)): - if is_ipv4(addr): - community['network_v4'].append(addr) - else: - 
community['network_v6'].append(addr) - - # IP address of SNMP client allowed to contact system - if conf.exists('community {0} client'.format(name)): - for addr in conf.return_values('community {0} client'.format(name)): - if is_ipv4(addr): - community['network_v4'].append(addr) - else: - community['network_v6'].append(addr) - - if (len(community['network_v4']) > 0) or (len(community['network_v6']) > 0): - community['has_source'] = True - - snmp['communities'].append(community) - - if conf.exists('contact'): - snmp['contact'] = conf.return_value('contact') - - if conf.exists('description'): - snmp['description'] = conf.return_value('description') - - if conf.exists('listen-address'): - for addr in conf.list_nodes('listen-address'): - port = '161' - if conf.exists('listen-address {0} port'.format(addr)): - port = conf.return_value('listen-address {0} port'.format(addr)) - - snmp['listen_address'].append((addr, port)) + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + default_values = defaults(base) + + # We can not merge defaults for tagNodes - those need to be blended in + # per tagNode instance + if 'listen_address' in default_values: + del default_values['listen_address'] + if 'community' in default_values: + del default_values['community'] + if 'trap_target' in default_values: + del default_values['trap_target'] + if 'v3' in default_values: + del default_values['v3'] + snmp = dict_merge(default_values, snmp) + + if 'listen_address' in snmp: + default_values = defaults(base + ['listen-address']) + for address in snmp['listen_address']: + snmp['listen_address'][address] = dict_merge( + default_values, snmp['listen_address'][address]) # Always listen on localhost if an explicit address has been configured # This is a safety measure to not end up with invalid listen addresses # that are not configured on this system. 
See https://phabricator.vyos.net/T850 - if not '127.0.0.1' in conf.list_nodes('listen-address'): - snmp['listen_address'].append(('127.0.0.1', '161')) - - if not '::1' in conf.list_nodes('listen-address'): - snmp['listen_address'].append(('::1', '161')) - - if conf.exists('location'): - snmp['location'] = conf.return_value('location') - - if conf.exists('smux-peer'): - snmp['smux_peers'] = conf.return_values('smux-peer') - - if conf.exists('trap-source'): - snmp['trap_source'] = conf.return_value('trap-source') - - if conf.exists('trap-target'): - for target in conf.list_nodes('trap-target'): - trap_tgt = { - 'target': target, - 'community': '', - 'port': '' - } - - if conf.exists('trap-target {0} community'.format(target)): - trap_tgt['community'] = conf.return_value('trap-target {0} community'.format(target)) - - if conf.exists('trap-target {0} port'.format(target)): - trap_tgt['port'] = conf.return_value('trap-target {0} port'.format(target)) - - snmp['trap_targets'].append(trap_tgt) - - if conf.exists('script-extensions'): - for extname in conf.list_nodes('script-extensions extension-name'): - conf_script = conf.return_value('script-extensions extension-name {} script'.format(extname)) - # if script has not absolute path, use pre configured path - if "/" not in conf_script: - conf_script = default_script_dir + conf_script - - extension = { - 'name': extname, - 'script' : conf_script - } - - snmp['script_ext'].append(extension) - - if conf.exists('oid-enable route-table'): - snmp['route_table'] = True - - if conf.exists('vrf'): - # Append key to dict but don't place it in the default dictionary. - # This is required to make the override.conf.tmpl work until we - # migrate to get_config_dict(). - snmp['vrf'] = conf.return_value('vrf') - - - ######################################################################### - # ____ _ _ __ __ ____ _____ # - # / ___|| \ | | \/ | _ \ __ _|___ / # - # \___ \| \| | |\/| | |_) | \ \ / / |_ \ # - # ___) | |\ | | | | __/ \ V / ___) | # - # |____/|_| \_|_| |_|_| \_/ |____/ # - # # - # now take care about the fancy SNMP v3 stuff, or bail out eraly # - ######################################################################### - if not conf.exists('v3'): - return snmp - else: - snmp['v3_enabled'] = True - - # 'set service snmp v3 engineid' - if conf.exists('v3 engineid'): - snmp['v3_engineid'] = conf.return_value('v3 engineid') - - # 'set service snmp v3 group' - if conf.exists('v3 group'): - for group in conf.list_nodes('v3 group'): - v3_group = { - 'name': group, - 'mode': 'ro', - 'seclevel': 'auth', - 'view': '' - } - - if conf.exists('v3 group {0} mode'.format(group)): - v3_group['mode'] = conf.return_value('v3 group {0} mode'.format(group)) - - if conf.exists('v3 group {0} seclevel'.format(group)): - v3_group['seclevel'] = conf.return_value('v3 group {0} seclevel'.format(group)) - - if conf.exists('v3 group {0} view'.format(group)): - v3_group['view'] = conf.return_value('v3 group {0} view'.format(group)) - - snmp['v3_groups'].append(v3_group) - - # 'set service snmp v3 trap-target' - if conf.exists('v3 trap-target'): - for trap in conf.list_nodes('v3 trap-target'): - trap_cfg = { - 'ipAddr': trap, - 'secName': '', - 'authProtocol': 'md5', - 'authPassword': '', - 'authMasterKey': '', - 'privProtocol': 'des', - 'privPassword': '', - 'privMasterKey': '', - 'ipProto': 'udp', - 'ipPort': '162', - 'type': '', - 'secLevel': 'noAuthNoPriv' - } - - if conf.exists('v3 trap-target {0} user'.format(trap)): - # Set the securityName used for authenticated SNMPv3 
messages. - trap_cfg['secName'] = conf.return_value('v3 trap-target {0} user'.format(trap)) - - if conf.exists('v3 trap-target {0} auth type'.format(trap)): - # Set the authentication protocol (MD5 or SHA) used for authenticated SNMPv3 messages - # cmdline option '-a' - trap_cfg['authProtocol'] = conf.return_value('v3 trap-target {0} auth type'.format(trap)) - - if conf.exists('v3 trap-target {0} auth plaintext-password'.format(trap)): - # Set the authentication pass phrase used for authenticated SNMPv3 messages. - # cmdline option '-A' - trap_cfg['authPassword'] = conf.return_value('v3 trap-target {0} auth plaintext-password'.format(trap)) - - if conf.exists('v3 trap-target {0} auth encrypted-password'.format(trap)): - # Sets the keys to be used for SNMPv3 transactions. These options allow you to set the master authentication keys. - # cmdline option '-3m' - trap_cfg['authMasterKey'] = conf.return_value('v3 trap-target {0} auth encrypted-password'.format(trap)) - - if conf.exists('v3 trap-target {0} privacy type'.format(trap)): - # Set the privacy protocol (DES or AES) used for encrypted SNMPv3 messages. - # cmdline option '-x' - trap_cfg['privProtocol'] = conf.return_value('v3 trap-target {0} privacy type'.format(trap)) - - if conf.exists('v3 trap-target {0} privacy plaintext-password'.format(trap)): - # Set the privacy pass phrase used for encrypted SNMPv3 messages. - # cmdline option '-X' - trap_cfg['privPassword'] = conf.return_value('v3 trap-target {0} privacy plaintext-password'.format(trap)) - - if conf.exists('v3 trap-target {0} privacy encrypted-password'.format(trap)): - # Sets the keys to be used for SNMPv3 transactions. These options allow you to set the master encryption keys. - # cmdline option '-3M' - trap_cfg['privMasterKey'] = conf.return_value('v3 trap-target {0} privacy encrypted-password'.format(trap)) - - if conf.exists('v3 trap-target {0} protocol'.format(trap)): - trap_cfg['ipProto'] = conf.return_value('v3 trap-target {0} protocol'.format(trap)) - - if conf.exists('v3 trap-target {0} port'.format(trap)): - trap_cfg['ipPort'] = conf.return_value('v3 trap-target {0} port'.format(trap)) - - if conf.exists('v3 trap-target {0} type'.format(trap)): - trap_cfg['type'] = conf.return_value('v3 trap-target {0} type'.format(trap)) - - # Determine securityLevel used for SNMPv3 messages (noAuthNoPriv|authNoPriv|authPriv). - # Appropriate pass phrase(s) must provided when using any level higher than noAuthNoPriv. 
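
The legacy block just below derives the net-snmp security level from which credentials were supplied; the mapping it encodes is the standard SNMPv3 USM one, shown here only as a compact reference:

    def snmpv3_seclevel(has_auth: bool, has_priv: bool) -> str:
        """noAuthNoPriv < authNoPriv < authPriv, per the SNMPv3 USM model."""
        if has_auth and has_priv:
            return 'authPriv'
        if has_auth:
            return 'authNoPriv'
        return 'noAuthNoPriv'

    assert snmpv3_seclevel(True, False) == 'authNoPriv'
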
- if trap_cfg['authPassword'] or trap_cfg['authMasterKey']: - if trap_cfg['privProtocol'] or trap_cfg['privPassword']: - trap_cfg['secLevel'] = 'authPriv' - else: - trap_cfg['secLevel'] = 'authNoPriv' - - snmp['v3_traps'].append(trap_cfg) - - # 'set service snmp v3 user' - if conf.exists('v3 user'): - for user in conf.list_nodes('v3 user'): - user_cfg = { - 'name': user, - 'authMasterKey': '', - 'authPassword': '', - 'authProtocol': 'md5', - 'authOID': 'none', - 'group': '', - 'mode': 'ro', - 'privMasterKey': '', - 'privPassword': '', - 'privOID': '', - 'privProtocol': 'des' - } - - # v3 user {0} auth - if conf.exists('v3 user {0} auth encrypted-password'.format(user)): - user_cfg['authMasterKey'] = conf.return_value('v3 user {0} auth encrypted-password'.format(user)) - - if conf.exists('v3 user {0} auth plaintext-password'.format(user)): - user_cfg['authPassword'] = conf.return_value('v3 user {0} auth plaintext-password'.format(user)) - - # load default value - type = user_cfg['authProtocol'] - if conf.exists('v3 user {0} auth type'.format(user)): - type = conf.return_value('v3 user {0} auth type'.format(user)) - - # (re-)update with either default value or value from CLI - user_cfg['authProtocol'] = type - user_cfg['authOID'] = OIDs[type] - - # v3 user {0} group - if conf.exists('v3 user {0} group'.format(user)): - user_cfg['group'] = conf.return_value('v3 user {0} group'.format(user)) - - # v3 user {0} mode - if conf.exists('v3 user {0} mode'.format(user)): - user_cfg['mode'] = conf.return_value('v3 user {0} mode'.format(user)) - - # v3 user {0} privacy - if conf.exists('v3 user {0} privacy encrypted-password'.format(user)): - user_cfg['privMasterKey'] = conf.return_value('v3 user {0} privacy encrypted-password'.format(user)) - - if conf.exists('v3 user {0} privacy plaintext-password'.format(user)): - user_cfg['privPassword'] = conf.return_value('v3 user {0} privacy plaintext-password'.format(user)) - - # load default value - type = user_cfg['privProtocol'] - if conf.exists('v3 user {0} privacy type'.format(user)): - type = conf.return_value('v3 user {0} privacy type'.format(user)) - - # (re-)update with either default value or value from CLI - user_cfg['privProtocol'] = type - user_cfg['privOID'] = OIDs[type] - - snmp['v3_users'].append(user_cfg) - - # 'set service snmp v3 view' - if conf.exists('v3 view'): - for view in conf.list_nodes('v3 view'): - view_cfg = { - 'name': view, - 'oids': [] - } - - if conf.exists('v3 view {0} oid'.format(view)): - for oid in conf.list_nodes('v3 view {0} oid'.format(view)): - oid_cfg = { - 'oid': oid - } - view_cfg['oids'].append(oid_cfg) - snmp['v3_views'].append(view_cfg) + if '127.0.0.1' not in snmp['listen_address']: + tmp = {'127.0.0.1': {'port': '161'}} + snmp['listen_address'] = dict_merge(tmp, snmp['listen_address']) + + if '::1' not in snmp['listen_address']: + if 'ipv6_disabled' not in snmp: + tmp = {'::1': {'port': '161'}} + snmp['listen_address'] = dict_merge(tmp, snmp['listen_address']) + + if 'community' in snmp: + default_values = defaults(base + ['community']) + for community in snmp['community']: + snmp['community'][community] = dict_merge( + default_values, snmp['community'][community]) + + if 'trap_target' in snmp: + default_values = defaults(base + ['trap-target']) + for trap in snmp['trap_target']: + snmp['trap_target'][trap] = dict_merge( + default_values, snmp['trap_target'][trap]) + + if 'v3' in snmp: + default_values = defaults(base + ['v3']) + # tagNodes need to be merged in individually later on + for tmp in ['user', 'group', 
'trap_target']: + del default_values[tmp] + snmp['v3'] = dict_merge(default_values, snmp['v3']) + + for user_group in ['user', 'group']: + if user_group in snmp['v3']: + default_values = defaults(base + ['v3', user_group]) + for tmp in snmp['v3'][user_group]: + snmp['v3'][user_group][tmp] = dict_merge( + default_values, snmp['v3'][user_group][tmp]) + + if 'trap_target' in snmp['v3']: + default_values = defaults(base + ['v3', 'trap-target']) + for trap in snmp['v3']['trap_target']: + snmp['v3']['trap_target'][trap] = dict_merge( + default_values, snmp['v3']['trap_target'][trap]) return snmp def verify(snmp): - if snmp is None: - # we can not delete SNMP when LLDP is configured with SNMP - conf = Config() - if conf.exists('service lldp snmp enable'): - raise ConfigError('Can not delete SNMP service, as LLDP still uses SNMP!') - + if not snmp: return None + if {'deleted', 'lldp_snmp'} <= set(snmp): + raise ConfigError('Can not delete SNMP service, as LLDP still uses SNMP!') + ### check if the configured script actually exist - if snmp['script_ext']: - for ext in snmp['script_ext']: - if not os.path.isfile(ext['script']): - print ("WARNING: script: {} doesn't exist".format(ext['script'])) + if 'script_extensions' in snmp and 'extension_name' in snmp['script_extensions']: + for extension, extension_opt in snmp['script_extensions']['extension_name'].items(): + if 'script' not in extension_opt: + raise ConfigError(f'Script extension "{extension}" requires an actual script to be configured!') + + tmp = extension_opt['script'] + if not os.path.isfile(tmp): + print(f'WARNING: script "{tmp}" does not exist!') else: - chmod_755(ext['script']) - - for listen in snmp['listen_address']: - addr = listen[0] - port = listen[1] - - if is_ipv4(addr): - # example: udp:127.0.0.1:161 - listen = 'udp:' + addr + ':' + port - elif snmp['ipv6_enabled']: - # example: udp6:[::1]:161 - listen = 'udp6:' + '[' + addr + ']' + ':' + port - - # We only wan't to configure addresses that exist on the system. - # Hint the user if they don't exist - if is_addr_assigned(addr): - snmp['listen_on'].append(listen) - else: - print('WARNING: SNMP listen address {0} not configured!'.format(addr)) + chmod_755(extension_opt['script']) + + if 'listen_address' in snmp: + for address in snmp['listen_address']: + # We only wan't to configure addresses that exist on the system. 
+ # Hint the user if they don't exist + if not is_addr_assigned(address): + print(f'WARNING: SNMP listen address "{address}" not configured!') + + if 'trap_target' in snmp: + for trap, trap_config in snmp['trap_target'].items(): + if 'community' not in trap_config: + raise ConfigError(f'Trap target "{trap}" requires a community to be set!') verify_vrf(snmp) # bail out early if SNMP v3 is not configured - if not snmp['v3_enabled']: + if 'v3' not in snmp: return None - if 'v3_groups' in snmp.keys(): - for group in snmp['v3_groups']: - # - # A view must exist prior to mapping it into a group - # - if 'view' in group.keys(): - error = True - if 'v3_views' in snmp.keys(): - for view in snmp['v3_views']: - if view['name'] == group['view']: - error = False - if error: - raise ConfigError('You must create view "{0}" first'.format(group['view'])) - else: - raise ConfigError('"view" must be specified') - - if not 'mode' in group.keys(): - raise ConfigError('"mode" must be specified') - - if not 'seclevel' in group.keys(): - raise ConfigError('"seclevel" must be specified') - - if 'v3_traps' in snmp.keys(): - for trap in snmp['v3_traps']: - if trap['authPassword'] and trap['authMasterKey']: - raise ConfigError('Must specify only one of encrypted-password/plaintext-key for trap auth') - - if trap['authPassword'] == '' and trap['authMasterKey'] == '': - raise ConfigError('Must specify encrypted-password or plaintext-key for trap auth') - - if trap['privPassword'] and trap['privMasterKey']: - raise ConfigError('Must specify only one of encrypted-password/plaintext-key for trap privacy') + if 'user' in snmp['v3']: + for user, user_config in snmp['v3']['user'].items(): + if 'group' not in user_config: + raise ConfigError(f'Group membership required for user "{user}"!') - if trap['privPassword'] == '' and trap['privMasterKey'] == '': - raise ConfigError('Must specify encrypted-password or plaintext-key for trap privacy') + if 'plaintext_password' not in user_config['auth'] and 'encrypted_password' not in user_config['auth']: + raise ConfigError(f'Must specify authentication encrypted-password or plaintext-password for user "{user}"!') - if not 'type' in trap.keys(): - raise ConfigError('v3 trap: "type" must be specified') + if 'plaintext_password' not in user_config['privacy'] and 'encrypted_password' not in user_config['privacy']: + raise ConfigError(f'Must specify privacy encrypted-password or plaintext-password for user "{user}"!') - if not 'authPassword' and 'authMasterKey' in trap.keys(): - raise ConfigError('v3 trap: "auth" must be specified') + if 'group' in snmp['v3']: + for group, group_config in snmp['v3']['group'].items(): + if 'seclevel' not in group_config: + raise ConfigError(f'Must configure "seclevel" for group "{group}"!') + if 'view' not in group_config: + raise ConfigError(f'Must configure "view" for group "{group}"!') - if not 'authProtocol' in trap.keys(): - raise ConfigError('v3 trap: "protocol" must be specified') + # Check if 'view' exists + view = group_config['view'] + if 'view' not in snmp['v3'] or view not in snmp['v3']['view']: + raise ConfigError(f'You must create view "{view}" first!') - if not 'privPassword' and 'privMasterKey' in trap.keys(): - raise ConfigError('v3 trap: "user" must be specified') + if 'view' in snmp['v3']: + for view, view_config in snmp['v3']['view'].items(): + if 'oid' not in view_config: + raise ConfigError(f'Must configure an "oid" for view "{view}"!') - if 'v3_users' in snmp.keys(): - for user in snmp['v3_users']: - # - # Group must exist prior to 
mapping it into a group - # seclevel will be extracted from group - # - if user['group']: - error = True - if 'v3_groups' in snmp.keys(): - for group in snmp['v3_groups']: - if group['name'] == user['group']: - seclevel = group['seclevel'] - error = False + if 'trap_target' in snmp['v3']: + for trap, trap_config in snmp['v3']['trap_target'].items(): + if 'plaintext_password' not in trap_config['auth'] and 'encrypted_password' not in trap_config['auth']: + raise ConfigError(f'Must specify one of authentication encrypted-password or plaintext-password for trap "{trap}"!') - if error: - raise ConfigError('You must create group "{0}" first'.format(user['group'])) + if {'plaintext_password', 'encrypted_password'} <= set(trap_config['auth']): + raise ConfigError(f'Can not specify both authentication encrypted-password and plaintext-password for trap "{trap}"!') - # Depending on the configured security level the user has to provide additional info - if (not user['authPassword'] and not user['authMasterKey']): - raise ConfigError('Must specify encrypted-password or plaintext-key for user auth') + if 'plaintext_password' not in trap_config['privacy'] and 'encrypted_password' not in trap_config['privacy']: + raise ConfigError(f'Must specify one of privacy encrypted-password or plaintext-password for trap "{trap}"!') - if user['privPassword'] == '' and user['privMasterKey'] == '': - raise ConfigError('Must specify encrypted-password or plaintext-key for user privacy') + if {'plaintext_password', 'encrypted_password'} <= set(trap_config['privacy']): + raise ConfigError(f'Can not specify both privacy encrypted-password and plaintext-password for trap "{trap}"!') - if user['mode'] == '': - raise ConfigError('Must specify user mode ro/rw') - - if 'v3_views' in snmp.keys(): - for view in snmp['v3_views']: - if not view['oids']: - raise ConfigError('Must configure an oid') + if 'type' not in trap_config: + raise ConfigError('SNMP v3 trap "type" must be specified!') return None def generate(snmp): + # # As we are manipulating the snmpd user database we have to stop it first! # This is even save if service is going to be removed - call('systemctl stop snmpd.service') - config_files = [config_file_client, config_file_daemon, config_file_access, - config_file_user, systemd_override] + call(f'systemctl stop {systemd_service}') + # Clean config files + config_files = [config_file_client, config_file_daemon, + config_file_access, config_file_user, systemd_override] for file in config_files: - rmfile(file) + if os.path.isfile(file): + os.unlink(file) if not snmp: return None - if 'v3_users' in snmp.keys(): + if 'v3' in snmp: # net-snmp is now regenerating the configuration file in the background # thus we need to re-open and re-read the file as the content changed. # After that we can no read the encrypted password from the config and # replace the CLI plaintext password with its encrypted version. 
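
The user-key handling below hashes plaintext passphrases together with the SNMP engine ID via vyos.snmpv3_hashgen.plaintext_to_md5 / plaintext_to_sha1. A sketch of the standard RFC 3414 key-localization scheme such helpers typically follow; the project's actual implementation and the example engine ID are assumptions, not taken from this commit:

    import hashlib

    def localize_key(passphrase: str, engine_id_hex: str, algo: str = 'md5') -> str:
        """Expand the passphrase to 1 MiB, digest it, then bind the digest
        to the authoritative engine ID (RFC 3414, appendix A.2)."""
        data = passphrase.encode()
        repeated = (data * (1048576 // len(data) + 1))[:1048576]
        ku = hashlib.new(algo, repeated).digest()
        engine = bytes.fromhex(engine_id_hex)
        return hashlib.new(algo, ku + engine + ku).hexdigest()

    # e.g. localize_key('vyos-secret', '000000000000000000000002', 'sha1')
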
- os.environ["vyos_libexec_dir"] = "/usr/libexec/vyos" + os.environ['vyos_libexec_dir'] = '/usr/libexec/vyos' - for user in snmp['v3_users']: - if user['authProtocol'] == 'sha': - hash = plaintext_to_sha1 - else: - hash = plaintext_to_md5 + if 'user' in snmp['v3']: + for user, user_config in snmp['v3']['user'].items(): + if dict_search('auth.type', user_config) == 'sha': + hash = plaintext_to_sha1 + else: + hash = plaintext_to_md5 + + if dict_search('auth.plaintext_password', user_config) is not None: + tmp = hash(dict_search('auth.plaintext_password', user_config), + dict_search('v3.engineid', snmp)) + + snmp['v3']['user'][user]['auth']['encrypted_password'] = tmp + del snmp['v3']['user'][user]['auth']['plaintext_password'] - if user['authPassword']: - user['authMasterKey'] = hash(user['authPassword'], snmp['v3_engineid']) - user['authPassword'] = '' + call(f'/opt/vyatta/sbin/my_set service snmp v3 user "{user}" auth encrypted-password "{tmp}" > /dev/null') + call(f'/opt/vyatta/sbin/my_delete service snmp v3 user "{user}" auth plaintext-password > /dev/null') - call('/opt/vyatta/sbin/my_set service snmp v3 user "{name}" auth encrypted-password "{authMasterKey}" > /dev/null'.format(**user)) - call('/opt/vyatta/sbin/my_delete service snmp v3 user "{name}" auth plaintext-password > /dev/null'.format(**user)) + if dict_search('privacy.plaintext_password', user_config) is not None: + tmp = hash(dict_search('privacy.plaintext_password', user_config), + dict_search('v3.engineid', snmp)) - if user['privPassword']: - user['privMasterKey'] = hash(user['privPassword'], snmp['v3_engineid']) - user['privPassword'] = '' + snmp['v3']['user'][user]['privacy']['encrypted_password'] = tmp + del snmp['v3']['user'][user]['privacy']['plaintext_password'] - call('/opt/vyatta/sbin/my_set service snmp v3 user "{name}" privacy encrypted-password "{privMasterKey}" > /dev/null'.format(**user)) - call('/opt/vyatta/sbin/my_delete service snmp v3 user "{name}" privacy plaintext-password > /dev/null'.format(**user)) + call(f'/opt/vyatta/sbin/my_set service snmp v3 user "{user}" privacy encrypted-password "{tmp}" > /dev/null') + call(f'/opt/vyatta/sbin/my_delete service snmp v3 user "{user}" privacy plaintext-password > /dev/null') # Write client config file render(config_file_client, 'snmp/etc.snmp.conf.tmpl', snmp) @@ -568,7 +293,7 @@ def apply(snmp): return None # start SNMP daemon - call('systemctl restart snmpd.service') + call(f'systemctl restart {systemd_service}') # Enable AgentX in FRR call('vtysh -c "configure terminal" -c "agentx" >/dev/null') diff --git a/src/conf_mode/system-login-banner.py b/src/conf_mode/system-login-banner.py index a960a4da3..a521c9834 100755 --- a/src/conf_mode/system-login-banner.py +++ b/src/conf_mode/system-login-banner.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2020 VyOS maintainers and contributors +# Copyright (C) 2020-2021 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -15,34 +15,33 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from sys import exit +from copy import deepcopy + from vyos.config import Config +from vyos.util import write_file from vyos import ConfigError - from vyos import airbag airbag.enable() -motd=""" -The programs included with the Debian/VyOS GNU/Linux system are free software; -the exact distribution terms for each program are described in the -individual files in /usr/share/doc/*/copyright. - -Debian/VyOS GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent -permitted by applicable law. - -""" +try: + with open('/usr/share/vyos/default_motd') as f: + motd = f.read() +except: + # Use an empty banner if the default banner file cannot be read + motd = "\n" PRELOGIN_FILE = r'/etc/issue' PRELOGIN_NET_FILE = r'/etc/issue.net' POSTLOGIN_FILE = r'/etc/motd' default_config_data = { - 'issue': 'Welcome to VyOS - \\n \\l\n', - 'issue_net': 'Welcome to VyOS\n', + 'issue': 'Welcome to VyOS - \\n \\l\n\n', + 'issue_net': '', 'motd': motd } def get_config(config=None): - banner = default_config_data + banner = deepcopy(default_config_data) if config: conf = config else: @@ -91,14 +90,9 @@ def generate(banner): pass def apply(banner): - with open(PRELOGIN_FILE, 'w') as f: - f.write(banner['issue']) - - with open(PRELOGIN_NET_FILE, 'w') as f: - f.write(banner['issue_net']) - - with open(POSTLOGIN_FILE, 'w') as f: - f.write(banner['motd']) + write_file(PRELOGIN_FILE, banner['issue']) + write_file(PRELOGIN_NET_FILE, banner['issue_net']) + write_file(POSTLOGIN_FILE, banner['motd']) return None diff --git a/src/conf_mode/system-logs.py b/src/conf_mode/system-logs.py new file mode 100755 index 000000000..e6296656d --- /dev/null +++ b/src/conf_mode/system-logs.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +from sys import exit + +from vyos import ConfigError +from vyos import airbag +from vyos.config import Config +from vyos.configdict import dict_merge +from vyos.logger import syslog +from vyos.template import render +from vyos.util import dict_search +from vyos.xml import defaults +airbag.enable() + +# path to logrotate configs +logrotate_atop_file = '/etc/logrotate.d/vyos-atop' +logrotate_rsyslog_file = '/etc/logrotate.d/vyos-rsyslog' + + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + + base = ['system', 'logs'] + default_values = defaults(base) + logs_config = conf.get_config_dict(base, + key_mangling=('-', '_'), + get_first_key=True) + logs_config = dict_merge(default_values, logs_config) + + return logs_config + + +def verify(logs_config): + # Nothing to verify here + pass + + +def generate(logs_config): + # get configuration for logrotate atop + logrotate_atop = dict_search('logrotate.atop', logs_config) + # generate new config file for atop + syslog.debug('Adding logrotate config for atop') + render(logrotate_atop_file, 'logs/logrotate/vyos-atop.tmpl', logrotate_atop) + + # get configuration for logrotate rsyslog + logrotate_rsyslog = dict_search('logrotate.messages', logs_config) + # generate new config file for rsyslog + syslog.debug('Adding logrotate config for rsyslog') + render(logrotate_rsyslog_file, 'logs/logrotate/vyos-rsyslog.tmpl', + logrotate_rsyslog) + + +def apply(logs_config): + # No further actions needed + pass + + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/system-option.py b/src/conf_mode/system-option.py index 55cf6b142..b1c63e316 100755 --- a/src/conf_mode/system-option.py +++ b/src/conf_mode/system-option.py @@ -126,6 +126,12 @@ def apply(options): if 'keyboard_layout' in options: cmd('loadkeys {keyboard_layout}'.format(**options)) + # Enable/diable root-partition-auto-resize SystemD service + if 'root_partition_auto_resize' in options: + cmd('systemctl enable root-partition-auto-resize.service') + else: + cmd('systemctl disable root-partition-auto-resize.service') + if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/system_console.py b/src/conf_mode/system_console.py index 33a546bd3..19b252513 100755 --- a/src/conf_mode/system_console.py +++ b/src/conf_mode/system_console.py @@ -18,9 +18,14 @@ import os import re from vyos.config import Config -from vyos.util import call, read_file, write_file +from vyos.configdict import dict_merge +from vyos.util import call +from vyos.util import read_file +from vyos.util import write_file from vyos.template import render -from vyos import ConfigError, airbag +from vyos.xml import defaults +from vyos import ConfigError +from vyos import airbag airbag.enable() by_bus_dir = '/dev/serial/by-bus' @@ -36,21 +41,27 @@ def get_config(config=None): console = conf.get_config_dict(base, get_first_key=True) # bail out early if no serial console is configured - if 'device' not in console.keys(): + if 'device' not in console: return console # convert CLI values to system values - for device in console['device'].keys(): - # no speed setting has been configured - use default value - if not 'speed' in console['device'][device].keys(): - tmp = { 'speed': '' } - if device.startswith('hvc'): - tmp['speed'] = 38400 - else: - tmp['speed'] = 115200 + default_values = defaults(base + ['device']) + for device, device_config in console['device'].items(): + if 
'speed' not in device_config and device.startswith('hvc'): + # XEN console has a different default console speed + console['device'][device]['speed'] = 38400 + else: + # Merge in XML defaults - the proper way to do it + console['device'][device] = dict_merge(default_values, + console['device'][device]) + + return console - console['device'][device].update(tmp) +def verify(console): + if not console or 'device' not in console: + return None + for device in console['device']: if device.startswith('usb'): # It is much easiert to work with the native ttyUSBn name when using # getty, but that name may change across reboots - depending on the @@ -58,13 +69,13 @@ def get_config(config=None): # to its dynamic device file - and create a new dict entry for it. by_bus_device = f'{by_bus_dir}/{device}' if os.path.isdir(by_bus_dir) and os.path.exists(by_bus_device): - tmp = os.path.basename(os.readlink(by_bus_device)) - # updating the dict must come as last step in the loop! - console['device'][tmp] = console['device'].pop(device) + device = os.path.basename(os.readlink(by_bus_device)) - return console + # If the device name still starts with usbXXX no matching tty was found + # and it can not be used as a serial interface + if device.startswith('usb'): + raise ConfigError(f'Device {device} does not support beeing used as tty') -def verify(console): return None def generate(console): @@ -76,20 +87,29 @@ def generate(console): call(f'systemctl stop {basename}') os.unlink(os.path.join(root, basename)) - if not console: + if not console or 'device' not in console: return None - for device in console['device'].keys(): + for device, device_config in console['device'].items(): + if device.startswith('usb'): + # It is much easiert to work with the native ttyUSBn name when using + # getty, but that name may change across reboots - depending on the + # amount of connected devices. We will resolve the fixed device name + # to its dynamic device file - and create a new dict entry for it. 
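
A small sketch of the by-bus resolution described in the comment above, mirroring the lines that follow; the stable udev name used in the example (usb0b1.1p1.0) is only illustrative, while the kernel ttyUSBn name it resolves to can change across reboots:

    import os

    def resolve_serial_device(device: str, by_bus_dir: str = '/dev/serial/by-bus') -> str:
        """Follow the stable by-bus symlink to the tty it currently points at;
        fall back to the given name when no such symlink exists."""
        link = os.path.join(by_bus_dir, device)
        if os.path.isdir(by_bus_dir) and os.path.exists(link):
            return os.path.basename(os.readlink(link))
        return device

    # resolve_serial_device('usb0b1.1p1.0') might return 'ttyUSB0'
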
+ by_bus_device = f'{by_bus_dir}/{device}' + if os.path.isdir(by_bus_dir) and os.path.exists(by_bus_device): + device = os.path.basename(os.readlink(by_bus_device)) + config_file = base_dir + f'/serial-getty@{device}.service' getty_wants_symlink = base_dir + f'/getty.target.wants/serial-getty@{device}.service' - render(config_file, 'getty/serial-getty.service.tmpl', console['device'][device]) + render(config_file, 'getty/serial-getty.service.tmpl', device_config) os.symlink(config_file, getty_wants_symlink) # GRUB # For existing serial line change speed (if necessary) # Only applys to ttyS0 - if 'ttyS0' not in console['device'].keys(): + if 'ttyS0' not in console['device']: return None speed = console['device']['ttyS0']['speed'] @@ -98,7 +118,6 @@ def generate(console): return None lines = read_file(grub_config).split('\n') - p = re.compile(r'^(.* console=ttyS0),[0-9]+(.*)$') write = False newlines = [] @@ -122,9 +141,8 @@ def generate(console): return None def apply(console): - # reset screen blanking + # Reset screen blanking call('/usr/bin/setterm -blank 0 -powersave off -powerdown 0 -term linux </dev/tty1 >/dev/tty1 2>&1') - # Reload systemd manager configuration call('systemctl daemon-reload') @@ -136,11 +154,11 @@ def apply(console): call('/usr/bin/setterm -blank 15 -powersave powerdown -powerdown 60 -term linux </dev/tty1 >/dev/tty1 2>&1') # Start getty process on configured serial interfaces - for device in console['device'].keys(): + for device in console['device']: # Only start console if it exists on the running system. If a user # detaches a USB serial console and reboots - it should not fail! if os.path.exists(f'/dev/{device}'): - call(f'systemctl start serial-getty@{device}.service') + call(f'systemctl restart serial-getty@{device}.service') return None diff --git a/src/conf_mode/tftp_server.py b/src/conf_mode/tftp_server.py index 2409eec1f..ef726670c 100755 --- a/src/conf_mode/tftp_server.py +++ b/src/conf_mode/tftp_server.py @@ -24,6 +24,7 @@ from sys import exit from vyos.config import Config from vyos.configdict import dict_merge +from vyos.configverify import verify_vrf from vyos.template import render from vyos.template import is_ipv4 from vyos.util import call @@ -65,10 +66,11 @@ def verify(tftpd): if 'listen_address' not in tftpd: raise ConfigError('TFTP server listen address must be configured!') - for address in tftpd['listen_address']: + for address, address_config in tftpd['listen_address'].items(): if not is_addr_assigned(address): print(f'WARNING: TFTP server listen address "{address}" not ' \ 'assigned to any interface!') + verify_vrf(address_config) return None @@ -83,7 +85,7 @@ def generate(tftpd): return None idx = 0 - for address in tftpd['listen_address']: + for address, address_config in tftpd['listen_address'].items(): config = deepcopy(tftpd) port = tftpd['port'] if is_ipv4(address): @@ -91,6 +93,9 @@ def generate(tftpd): else: config['listen_address'] = f'[{address}]:{port} -6' + if 'vrf' in address_config: + config['vrf'] = address_config['vrf'] + file = config_file + str(idx) render(file, 'tftp-server/default.tmpl', config) idx = idx + 1 diff --git a/src/conf_mode/vpn_l2tp.py b/src/conf_mode/vpn_l2tp.py index 9c52f77ca..818e8fa0b 100755 --- a/src/conf_mode/vpn_l2tp.py +++ b/src/conf_mode/vpn_l2tp.py @@ -290,6 +290,8 @@ def get_config(config=None): # LNS secret if conf.exists(['lns', 'shared-secret']): l2tp['lns_shared_secret'] = conf.return_value(['lns', 'shared-secret']) + if conf.exists(['lns', 'host-name']): + l2tp['lns_host_name'] = 
conf.return_value(['lns', 'host-name']) if conf.exists(['ccp-disable']): l2tp['ccp_disable'] = True diff --git a/src/conf_mode/vpn_openconnect.py b/src/conf_mode/vpn_openconnect.py index f6db196dc..51ea1f223 100755 --- a/src/conf_mode/vpn_openconnect.py +++ b/src/conf_mode/vpn_openconnect.py @@ -23,9 +23,11 @@ from vyos.pki import wrap_certificate from vyos.pki import wrap_private_key from vyos.template import render from vyos.util import call +from vyos.util import is_systemd_service_running from vyos.xml import defaults from vyos import ConfigError from crypt import crypt, mksalt, METHOD_SHA512 +from time import sleep from vyos import airbag airbag.enable() @@ -172,6 +174,16 @@ def apply(ocserv): os.unlink(file) else: call('systemctl restart ocserv.service') + counter = 0 + while True: + # exit early when service runs + if is_systemd_service_running("ocserv.service"): + break + sleep(0.250) + if counter > 5: + raise ConfigError('openconnect failed to start, check the logs for details') + break + counter += 1 if __name__ == '__main__': diff --git a/src/conf_mode/vpn_sstp.py b/src/conf_mode/vpn_sstp.py index d1a71a5ad..68980e5ab 100755 --- a/src/conf_mode/vpn_sstp.py +++ b/src/conf_mode/vpn_sstp.py @@ -26,6 +26,7 @@ from vyos.pki import wrap_private_key from vyos.template import render from vyos.util import call from vyos.util import dict_search +from vyos.util import write_file from vyos import ConfigError from vyos import airbag airbag.enable() @@ -34,6 +35,10 @@ cfg_dir = '/run/accel-pppd' sstp_conf = '/run/accel-pppd/sstp.conf' sstp_chap_secrets = '/run/accel-pppd/sstp.chap-secrets' +cert_file_path = os.path.join(cfg_dir, 'sstp-cert.pem') +cert_key_path = os.path.join(cfg_dir, 'sstp-cert.key') +ca_cert_file_path = os.path.join(cfg_dir, 'sstp-ca.pem') + def get_config(config=None): if config: conf = config @@ -58,7 +63,7 @@ def verify(sstp): verify_accel_ppp_base_service(sstp) - if not sstp['client_ip_pool']: + if 'client_ip_pool' not in sstp and 'client_ipv6_pool' not in sstp: raise ConfigError('Client IP subnet required') # @@ -72,22 +77,32 @@ def verify(sstp): ssl = sstp['ssl'] + # CA if 'ca_certificate' not in ssl: raise ConfigError('SSL CA certificate missing on SSTP config') + ca_name = ssl['ca_certificate'] + + if ca_name not in sstp['pki']['ca']: + raise ConfigError('Invalid CA certificate on SSTP config') + + if 'certificate' not in sstp['pki']['ca'][ca_name]: + raise ConfigError('Missing certificate data for CA certificate on SSTP config') + + # Certificate if 'certificate' not in ssl: raise ConfigError('SSL certificate missing on SSTP config') cert_name = ssl['certificate'] - if ssl['ca_certificate'] not in sstp['pki']['ca']: - raise ConfigError('Invalid CA certificate on SSTP config') - if cert_name not in sstp['pki']['certificate']: raise ConfigError('Invalid certificate on SSTP config') pki_cert = sstp['pki']['certificate'][cert_name] + if 'certificate' not in pki_cert: + raise ConfigError('Missing certificate data for certificate on SSTP config') + if 'private' not in pki_cert or 'key' not in pki_cert['private']: raise ConfigError('Missing private key for certificate on SSTP config') @@ -98,27 +113,18 @@ def generate(sstp): if not sstp: return None - cert_file_path = os.path.join(cfg_dir, 'sstp-cert.pem') - cert_key_path = os.path.join(cfg_dir, 'sstp-cert.key') - ca_cert_file_path = os.path.join(cfg_dir, 'sstp-ca.pem') + # accel-cmd reload doesn't work so any change results in a restart of the daemon + render(sstp_conf, 'accel-ppp/sstp.config.tmpl', sstp) cert_name = 
sstp['ssl']['certificate'] pki_cert = sstp['pki']['certificate'][cert_name] - with open(cert_file_path, 'w') as f: - f.write(wrap_certificate(pki_cert['certificate'])) - - with open(cert_key_path, 'w') as f: - f.write(wrap_private_key(pki_cert['private']['key'])) - ca_cert_name = sstp['ssl']['ca_certificate'] pki_ca = sstp['pki']['ca'][ca_cert_name] - with open(ca_cert_file_path, 'w') as f: - f.write(wrap_certificate(pki_ca['certificate'])) - - # accel-cmd reload doesn't work so any change results in a restart of the daemon - render(sstp_conf, 'accel-ppp/sstp.config.tmpl', sstp) + write_file(cert_file_path, wrap_certificate(pki_cert['certificate'])) + write_file(cert_key_path, wrap_private_key(pki_cert['private']['key'])) + write_file(ca_cert_file_path, wrap_certificate(pki_ca['certificate'])) if dict_search('authentication.mode', sstp) == 'local': render(sstp_chap_secrets, 'accel-ppp/chap-secrets.config_dict.tmpl', diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py index 919083ac4..38c0c4463 100755 --- a/src/conf_mode/vrf.py +++ b/src/conf_mode/vrf.py @@ -18,7 +18,6 @@ import os from sys import exit from json import loads -from tempfile import NamedTemporaryFile from vyos.config import Config from vyos.configdict import node_changed @@ -31,10 +30,12 @@ from vyos.util import get_interface_config from vyos.util import popen from vyos.util import run from vyos import ConfigError +from vyos import frr from vyos import airbag airbag.enable() -config_file = r'/etc/iproute2/rt_tables.d/vyos-vrf.conf' +config_file = '/etc/iproute2/rt_tables.d/vyos-vrf.conf' +nft_vrf_config = '/tmp/nftables-vrf-zones' def list_rules(): command = 'ip -j -4 rule show' @@ -128,8 +129,8 @@ def verify(vrf): def generate(vrf): render(config_file, 'vrf/vrf.conf.tmpl', vrf) # Render nftables zones config - vrf['nft_vrf_zones'] = NamedTemporaryFile().name - render(vrf['nft_vrf_zones'], 'firewall/nftables-vrf-zones.tmpl', vrf) + + render(nft_vrf_config, 'firewall/nftables-vrf-zones.tmpl', vrf) return None @@ -165,8 +166,9 @@ def apply(vrf): _, err = popen('nft list table inet vrf_zones') # If not, create a table if err: - cmd(f'nft -f {vrf["nft_vrf_zones"]}') - os.unlink(vrf['nft_vrf_zones']) + if os.path.exists(nft_vrf_config): + cmd(f'nft -f {nft_vrf_config}') + os.unlink(nft_vrf_config) for name, config in vrf['name'].items(): table = config['table'] diff --git a/src/conf_mode/vrf_vni.py b/src/conf_mode/vrf_vni.py index 87ee8f2d1..1a7bd1f09 100755 --- a/src/conf_mode/vrf_vni.py +++ b/src/conf_mode/vrf_vni.py @@ -32,37 +32,26 @@ def get_config(config=None): else: conf = Config() - # This script only works with a passed VRF name - if len(argv) < 1: - raise NotImplementedError - vrf = argv[1] + base = ['vrf'] + vrf = conf.get_config_dict(base, get_first_key=True) + return vrf - # "assemble" dict - easier here then use a full blown get_config_dict() - # on a single leafNode - vni = { 'vrf' : vrf } - tmp = conf.return_value(['vrf', 'name', vrf, 'vni']) - if tmp: vni.update({ 'vni' : tmp }) - - return vni - -def verify(vni): +def verify(vrf): return None -def generate(vni): - vni['new_frr_config'] = render_to_string('frr/vrf-vni.frr.tmpl', vni) +def generate(vrf): + vrf['new_frr_config'] = render_to_string('frr/vrf-vni.frr.tmpl', vrf) return None -def apply(vni): +def apply(vrf): # add configuration to FRR frr_cfg = frr.FRRConfig() frr_cfg.load_configuration(frr_daemon) - frr_cfg.modify_section(f'^vrf [a-zA-Z-]*$', '') - frr_cfg.add_before(r'(interface .*|line vty)', vni['new_frr_config']) + frr_cfg.modify_section(f'^vrf 
.+', stop_pattern='^exit-vrf', remove_stop_mark=True) + if 'new_frr_config' in vrf: + frr_cfg.add_before(frr.default_add_before, vrf['new_frr_config']) frr_cfg.commit_configuration(frr_daemon) - # Save configuration to /run/frr/config/frr.conf - frr.save_configuration() - return None if __name__ == '__main__': diff --git a/src/conf_mode/vrrp.py b/src/conf_mode/vrrp.py index e8f1c1f99..c72efc61f 100755 --- a/src/conf_mode/vrrp.py +++ b/src/conf_mode/vrrp.py @@ -28,6 +28,7 @@ from vyos.template import render from vyos.template import is_ipv4 from vyos.template import is_ipv6 from vyos.util import call +from vyos.util import is_systemd_service_running from vyos.xml import defaults from vyos import ConfigError from vyos import airbag @@ -139,7 +140,12 @@ def apply(vrrp): call(f'systemctl stop {service_name}') return None - call(f'systemctl restart {service_name}') + # XXX: T3944 - reload keepalived configuration if service is already running + # to not cause any service disruption when applying changes. + if is_systemd_service_running(service_name): + call(f'systemctl reload {service_name}') + else: + call(f'systemctl restart {service_name}') return None if __name__ == '__main__': diff --git a/src/conf_mode/zone_policy.py b/src/conf_mode/zone_policy.py new file mode 100755 index 000000000..2535ea33b --- /dev/null +++ b/src/conf_mode/zone_policy.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import os + +from json import loads +from sys import exit + +from vyos.config import Config +from vyos.template import render +from vyos.util import cmd +from vyos.util import dict_search_args +from vyos.util import run +from vyos import ConfigError +from vyos import airbag +airbag.enable() + +nftables_conf = '/run/nftables_zone.conf' + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + base = ['zone-policy'] + zone_policy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + if zone_policy: + zone_policy['firewall'] = conf.get_config_dict(['firewall'], key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + return zone_policy + +def verify(zone_policy): + # bail out early - looks like removal from running config + if not zone_policy: + return None + + local_zone = False + interfaces = [] + + if 'zone' in zone_policy: + for zone, zone_conf in zone_policy['zone'].items(): + if 'local_zone' not in zone_conf and 'interface' not in zone_conf: + raise ConfigError(f'Zone "{zone}" has no interfaces and is not the local zone') + + if 'local_zone' in zone_conf: + if local_zone: + raise ConfigError('There cannot be multiple local zones') + if 'interface' in zone_conf: + raise ConfigError('Local zone cannot have interfaces assigned') + if 'intra_zone_filtering' in zone_conf: + raise ConfigError('Local zone cannot use intra-zone-filtering') + local_zone = True + + if 'interface' in zone_conf: + found_duplicates = [intf for intf in zone_conf['interface'] if intf in interfaces] + + if found_duplicates: + raise ConfigError(f'Interfaces cannot be assigned to multiple zones') + + interfaces += zone_conf['interface'] + + if 'intra_zone_filtering' in zone_conf: + intra_zone = zone_conf['intra_zone_filtering'] + + if len(intra_zone) > 1: + raise ConfigError('Only one intra-zone-filtering action must be specified') + + if 'firewall' in intra_zone: + v4_name = dict_search_args(intra_zone, 'firewall', 'name') + if v4_name and not dict_search_args(zone_policy, 'firewall', 'name', v4_name): + raise ConfigError(f'Firewall name "{v4_name}" does not exist') + + v6_name = dict_search_args(intra_zone, 'firewall', 'ipv6-name') + if v6_name and not dict_search_args(zone_policy, 'firewall', 'ipv6-name', v6_name): + raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist') + + if not v4_name and not v6_name: + raise ConfigError('No firewall names specified for intra-zone-filtering') + + if 'from' in zone_conf: + for from_zone, from_conf in zone_conf['from'].items(): + v4_name = dict_search_args(from_conf, 'firewall', 'name') + if v4_name: + if 'name' not in zone_policy['firewall']: + raise ConfigError(f'Firewall name "{v4_name}" does not exist') + + if not dict_search_args(zone_policy, 'firewall', 'name', v4_name): + raise ConfigError(f'Firewall name "{v4_name}" does not exist') + + v6_name = dict_search_args(from_conf, 'firewall', 'v6_name') + if v6_name: + if 'ipv6_name' not in zone_policy['firewall']: + raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist') + + if not dict_search_args(zone_policy, 'firewall', 'ipv6_name', v6_name): + raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist') + + return None + +def has_ipv4_fw(zone_conf): + if 'from' not in zone_conf: + return False + zone_from = zone_conf['from'] + return any([True for fz in zone_from if dict_search_args(zone_from, fz, 'firewall', 'name')]) + +def has_ipv6_fw(zone_conf): + if 'from' not in zone_conf: + 
return False + zone_from = zone_conf['from'] + return any([True for fz in zone_from if dict_search_args(zone_from, fz, 'firewall', 'ipv6_name')]) + +def get_local_from(zone_policy, local_zone_name): + # Get all zone firewall names from the local zone + out = {} + for zone, zone_conf in zone_policy['zone'].items(): + if zone == local_zone_name: + continue + if 'from' not in zone_conf: + continue + if local_zone_name in zone_conf['from']: + out[zone] = zone_conf['from'][local_zone_name] + return out + +def cleanup_commands(): + commands = [] + for table in ['ip filter', 'ip6 filter']: + json_str = cmd(f'nft -j list table {table}') + obj = loads(json_str) + if 'nftables' not in obj: + continue + for item in obj['nftables']: + if 'rule' in item: + chain = item['rule']['chain'] + handle = item['rule']['handle'] + if 'expr' not in item['rule']: + continue + for expr in item['rule']['expr']: + target = dict_search_args(expr, 'jump', 'target') + if target and target.startswith("VZONE"): + commands.append(f'delete rule {table} {chain} handle {handle}') + for item in obj['nftables']: + if 'chain' in item: + if item['chain']['name'].startswith("VZONE"): + chain = item['chain']['name'] + commands.append(f'delete chain {table} {chain}') + return commands + +def generate(zone_policy): + data = zone_policy or {} + + if os.path.exists(nftables_conf): # Check to see if we've run before + data['cleanup_commands'] = cleanup_commands() + + if 'zone' in data: + for zone, zone_conf in data['zone'].items(): + zone_conf['ipv4'] = has_ipv4_fw(zone_conf) + zone_conf['ipv6'] = has_ipv6_fw(zone_conf) + + if 'local_zone' in zone_conf: + zone_conf['from_local'] = get_local_from(data, zone) + + render(nftables_conf, 'zone_policy/nftables.tmpl', data) + return None + +def apply(zone_policy): + install_result = run(f'nft -f {nftables_conf}') + if install_result == 1: + raise ConfigError('Failed to apply zone-policy') + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/etc/cron.d/check-wwan b/src/etc/cron.d/check-wwan new file mode 100644 index 000000000..28190776f --- /dev/null +++ b/src/etc/cron.d/check-wwan @@ -0,0 +1 @@ +*/5 * * * * root /usr/libexec/vyos/vyos-check-wwan.py diff --git a/src/etc/dhcp/dhclient-enter-hooks.d/02-vyos-stopdhclient b/src/etc/dhcp/dhclient-enter-hooks.d/02-vyos-stopdhclient index f737148dc..ae6bf9f16 100644 --- a/src/etc/dhcp/dhclient-enter-hooks.d/02-vyos-stopdhclient +++ b/src/etc/dhcp/dhclient-enter-hooks.d/02-vyos-stopdhclient @@ -23,10 +23,12 @@ if [ -z ${CONTROLLED_STOP} ] ; then if ([ $dhclient -ne $current_dhclient ] && [ $dhclient -ne $master_dhclient ]); then # get path to PID-file of dhclient process local dhclient_pidfile=`ps --no-headers --format args --pid $dhclient | awk 'match(\$0, ".*-pf (/.*pid) .*", PF) { print PF[1] }'` + # get path to lease-file of dhclient process + local dhclient_leasefile=`ps --no-headers --format args --pid $dhclient | awk 'match(\$0, ".*-lf (/\\\S*leases) .*", LF) { print LF[1] }'` # stop dhclient with native command - this will run dhclient-script with correct reason unlike simple kill - logmsg info "Stopping dhclient with PID: ${dhclient}, PID file: $dhclient_pidfile" + logmsg info "Stopping dhclient with PID: ${dhclient}, PID file: ${dhclient_pidfile}, Leases file: ${dhclient_leasefile}" if [[ -e $dhclient_pidfile ]]; then - dhclient -e CONTROLLED_STOP=yes -x -pf $dhclient_pidfile + dhclient -e CONTROLLED_STOP=yes -x -pf 
$dhclient_pidfile -lf $dhclient_leasefile else logmsg error "PID file $dhclient_pidfile does not exists, killing dhclient with SIGTERM signal" kill -s 15 ${dhclient} diff --git a/src/etc/dhcp/dhclient-enter-hooks.d/04-vyos-resolvconf b/src/etc/dhcp/dhclient-enter-hooks.d/04-vyos-resolvconf index 24090e2a8..b1902b585 100644 --- a/src/etc/dhcp/dhclient-enter-hooks.d/04-vyos-resolvconf +++ b/src/etc/dhcp/dhclient-enter-hooks.d/04-vyos-resolvconf @@ -1,44 +1,48 @@ -# modified make_resolv_conf () for VyOS -make_resolv_conf() { - hostsd_client="/usr/bin/vyos-hostsd-client" - hostsd_changes= +# modified make_resolv_conf() for VyOS +# should be used only if vyos-hostsd is running - if [ -n "$new_domain_name" ]; then - logmsg info "Deleting search domains with tag \"dhcp-$interface\" via vyos-hostsd-client" - $hostsd_client --delete-search-domains --tag "dhcp-$interface" - logmsg info "Adding domain name \"$new_domain_name\" as search domain with tag \"dhcp-$interface\" via vyos-hostsd-client" - $hostsd_client --add-search-domains "$new_domain_name" --tag "dhcp-$interface" - hostsd_changes=y - fi +if /usr/bin/systemctl -q is-active vyos-hostsd; then + make_resolv_conf() { + hostsd_client="/usr/bin/vyos-hostsd-client" + hostsd_changes= - if [ -n "$new_dhcp6_domain_search" ]; then - logmsg info "Deleting search domains with tag \"dhcpv6-$interface\" via vyos-hostsd-client" - $hostsd_client --delete-search-domains --tag "dhcpv6-$interface" - logmsg info "Adding search domain \"$new_dhcp6_domain_search\" with tag \"dhcpv6-$interface\" via vyos-hostsd-client" - $hostsd_client --add-search-domains "$new_dhcp6_domain_search" --tag "dhcpv6-$interface" - hostsd_changes=y - fi + if [ -n "$new_domain_name" ]; then + logmsg info "Deleting search domains with tag \"dhcp-$interface\" via vyos-hostsd-client" + $hostsd_client --delete-search-domains --tag "dhcp-$interface" + logmsg info "Adding domain name \"$new_domain_name\" as search domain with tag \"dhcp-$interface\" via vyos-hostsd-client" + $hostsd_client --add-search-domains "$new_domain_name" --tag "dhcp-$interface" + hostsd_changes=y + fi - if [ -n "$new_domain_name_servers" ]; then - logmsg info "Deleting nameservers with tag \"dhcp-$interface\" via vyos-hostsd-client" - $hostsd_client --delete-name-servers --tag "dhcp-$interface" - logmsg info "Adding nameservers \"$new_domain_name_servers\" with tag \"dhcp-$interface\" via vyos-hostsd-client" - $hostsd_client --add-name-servers $new_domain_name_servers --tag "dhcp-$interface" - hostsd_changes=y - fi + if [ -n "$new_dhcp6_domain_search" ]; then + logmsg info "Deleting search domains with tag \"dhcpv6-$interface\" via vyos-hostsd-client" + $hostsd_client --delete-search-domains --tag "dhcpv6-$interface" + logmsg info "Adding search domain \"$new_dhcp6_domain_search\" with tag \"dhcpv6-$interface\" via vyos-hostsd-client" + $hostsd_client --add-search-domains "$new_dhcp6_domain_search" --tag "dhcpv6-$interface" + hostsd_changes=y + fi - if [ -n "$new_dhcp6_name_servers" ]; then - logmsg info "Deleting nameservers with tag \"dhcpv6-$interface\" via vyos-hostsd-client" - $hostsd_client --delete-name-servers --tag "dhcpv6-$interface" - logmsg info "Adding nameservers \"$new_dhcpv6_name_servers\" with tag \"dhcpv6-$interface\" via vyos-hostsd-client" - $hostsd_client --add-name-servers $new_dhcpv6_name_servers --tag "dhcpv6-$interface" - hostsd_changes=y - fi + if [ -n "$new_domain_name_servers" ]; then + logmsg info "Deleting nameservers with tag \"dhcp-$interface\" via vyos-hostsd-client" + $hostsd_client 
--delete-name-servers --tag "dhcp-$interface" + logmsg info "Adding nameservers \"$new_domain_name_servers\" with tag \"dhcp-$interface\" via vyos-hostsd-client" + $hostsd_client --add-name-servers $new_domain_name_servers --tag "dhcp-$interface" + hostsd_changes=y + fi - if [ $hostsd_changes ]; then - logmsg info "Applying changes via vyos-hostsd-client" - $hostsd_client --apply - else - logmsg info "No changes to apply via vyos-hostsd-client" - fi -} + if [ -n "$new_dhcp6_name_servers" ]; then + logmsg info "Deleting nameservers with tag \"dhcpv6-$interface\" via vyos-hostsd-client" + $hostsd_client --delete-name-servers --tag "dhcpv6-$interface" + logmsg info "Adding nameservers \"$new_dhcpv6_name_servers\" with tag \"dhcpv6-$interface\" via vyos-hostsd-client" + $hostsd_client --add-name-servers $new_dhcpv6_name_servers --tag "dhcpv6-$interface" + hostsd_changes=y + fi + + if [ $hostsd_changes ]; then + logmsg info "Applying changes via vyos-hostsd-client" + $hostsd_client --apply + else + logmsg info "No changes to apply via vyos-hostsd-client" + fi + } +fi diff --git a/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup b/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup index 694d53b6b..ad6a1d5eb 100644 --- a/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup +++ b/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup @@ -4,21 +4,32 @@ # NOTE: here we use 'ip' wrapper, therefore a route will be actually deleted via /usr/sbin/ip or vtysh, according to the system state hostsd_client="/usr/bin/vyos-hostsd-client" hostsd_changes= +# check vyos-hostsd status +/usr/bin/systemctl -q is-active vyos-hostsd +hostsd_status=$? if [[ $reason =~ (EXPIRE|FAIL|RELEASE|STOP) ]]; then - # delete search domains and nameservers via vyos-hostsd - logmsg info "Deleting search domains with tag \"dhcp-$interface\" via vyos-hostsd-client" - $hostsd_client --delete-search-domains --tag "dhcp-$interface" - logmsg info "Deleting nameservers with tag \"dhcp-${interface}\" via vyos-hostsd-client" - $hostsd_client --delete-name-servers --tag "dhcp-${interface}" - hostsd_changes=y + if [[ $hostsd_status -eq 0 ]]; then + # delete search domains and nameservers via vyos-hostsd + logmsg info "Deleting search domains with tag \"dhcp-$interface\" via vyos-hostsd-client" + $hostsd_client --delete-search-domains --tag "dhcp-$interface" + logmsg info "Deleting nameservers with tag \"dhcp-${interface}\" via vyos-hostsd-client" + $hostsd_client --delete-name-servers --tag "dhcp-${interface}" + hostsd_changes=y + fi if_metric="$IF_METRIC" - # try to delete default ip route + # try to delete default ip route for router in $old_routers; do - logmsg info "Deleting default route: via $router dev ${interface} ${if_metric:+metric $if_metric}" - ip -4 route del default via $router dev ${interface} ${if_metric:+metric $if_metric} + # check if we are bound to a VRF + local vrf_name=$(basename /sys/class/net/${interface}/upper_* | sed -e 's/upper_//') + if [ "$vrf_name" != "*" ]; then + vrf="vrf $vrf_name" + fi + + logmsg info "Deleting default route: via $router dev ${interface} ${if_metric:+metric $if_metric} ${vrf}" + ip -4 route del default via $router dev ${interface} ${if_metric:+metric $if_metric} ${vrf} if_metric=$((if_metric+1)) done @@ -86,12 +97,14 @@ if [[ $reason =~ (EXPIRE|FAIL|RELEASE|STOP) ]]; then fi if [[ $reason =~ (EXPIRE6|RELEASE6|STOP6) ]]; then - # delete search domains and nameservers via vyos-hostsd - logmsg info "Deleting search domains with tag \"dhcpv6-$interface\" via vyos-hostsd-client" - $hostsd_client 
--delete-search-domains --tag "dhcpv6-$interface" - logmsg info "Deleting nameservers with tag \"dhcpv6-${interface}\" via vyos-hostsd-client" - $hostsd_client --delete-name-servers --tag "dhcpv6-${interface}" - hostsd_changes=y + if [[ $hostsd_status -eq 0 ]]; then + # delete search domains and nameservers via vyos-hostsd + logmsg info "Deleting search domains with tag \"dhcpv6-$interface\" via vyos-hostsd-client" + $hostsd_client --delete-search-domains --tag "dhcpv6-$interface" + logmsg info "Deleting nameservers with tag \"dhcpv6-${interface}\" via vyos-hostsd-client" + $hostsd_client --delete-name-servers --tag "dhcpv6-${interface}" + hostsd_changes=y + fi fi if [ $hostsd_changes ]; then diff --git a/src/etc/dhcp/dhclient-exit-hooks.d/ipsec-dhclient-hook b/src/etc/dhcp/dhclient-exit-hooks.d/ipsec-dhclient-hook index a7a9a2ce6..61a89e62a 100755 --- a/src/etc/dhcp/dhclient-exit-hooks.d/ipsec-dhclient-hook +++ b/src/etc/dhcp/dhclient-exit-hooks.d/ipsec-dhclient-hook @@ -35,19 +35,14 @@ fi python3 - <<PYEND import os import re + from vyos.util import call from vyos.util import cmd +from vyos.util import read_file +from vyos.util import write_file SWANCTL_CONF="/etc/swanctl/swanctl.conf" -def getlines(file): - with open(file, 'r') as f: - return f.readlines() - -def writelines(file, lines): - with open(file, 'w') as f: - f.writelines(lines) - def ipsec_down(ip_address): # This prevents the need to restart ipsec and kill all active connections, only the stale connection is closed status = cmd('sudo ipsec statusall') @@ -66,23 +61,26 @@ if __name__ == '__main__': new_ip = os.getenv('new_ip_address') old_ip = os.getenv('old_ip_address') - conf_lines = getlines(SWANCTL_CONF) - found = False - to_match = f'# dhcp:{interface}' + if os.path.exists(SWANCTL_CONF): + conf_lines = read_file(SWANCTL_CONF) + found = False + to_match = f'# dhcp:{interface}' + + for i, line in enumerate(conf_lines): + if line.find(to_match) > 0: + conf_lines[i] = line.replace(old_ip, new_ip) + found = True - for i, line in enumerate(conf_lines): - if line.find(to_match) > 0: - conf_lines[i] = line.replace(old_ip, new_ip) - found = True + for i, line in enumerate(secrets_lines): + if line.find(to_match) > 0: + secrets_lines[i] = line.replace(old_ip, new_ip) - for i, line in enumerate(secrets_lines): - if line.find(to_match) > 0: - secrets_lines[i] = line.replace(old_ip, new_ip) + if found: + write_file(SWANCTL_CONF, conf_lines) + ipsec_down(old_ip) + call('sudo ipsec rereadall') + call('sudo ipsec reload') + call('sudo swanctl -q') - if found: - writelines(SWANCTL_CONF, conf_lines) - ipsec_down(old_ip) - call('sudo ipsec rereadall') - call('sudo ipsec reload') - call('sudo swanctl -q') + exit(0) PYEND
\ No newline at end of file diff --git a/src/etc/logrotate.d/vyos-atop b/src/etc/logrotate.d/vyos-atop new file mode 100644 index 000000000..0c8359c7b --- /dev/null +++ b/src/etc/logrotate.d/vyos-atop @@ -0,0 +1,20 @@ +/var/log/atop/atop.log { + daily + dateext + dateformat _%Y-%m-%d_%H-%M-%S + maxsize 10M + missingok + nocompress + nocreate + nomail + rotate 10 + prerotate + # stop the service + systemctl stop atop.service + endscript + postrotate + # start atop service again + systemctl start atop.service + endscript +} + diff --git a/src/etc/systemd/system/atop.service.d/10-override.conf b/src/etc/systemd/system/atop.service.d/10-override.conf new file mode 100644 index 000000000..10df15862 --- /dev/null +++ b/src/etc/systemd/system/atop.service.d/10-override.conf @@ -0,0 +1,6 @@ +[Service] +ExecStartPre= +ExecStart= +ExecStart=/bin/sh -c 'exec /usr/bin/atop ${LOGOPTS} -w "${LOGPATH}/atop.log" ${LOGINTERVAL}' +ExecStartPost= + diff --git a/src/etc/systemd/system/avahi-daemon.service.d/override.conf b/src/etc/systemd/system/avahi-daemon.service.d/override.conf new file mode 100644 index 000000000..a9d2085f7 --- /dev/null +++ b/src/etc/systemd/system/avahi-daemon.service.d/override.conf @@ -0,0 +1,8 @@ +[Unit] +After= +After=vyos-router.service +ConditionPathExists=/run/avahi-daemon/avahi-daemon.conf + +[Service] +ExecStart= +ExecStart=/usr/sbin/avahi-daemon --syslog --file /run/avahi-daemon/avahi-daemon.conf
\ No newline at end of file diff --git a/src/etc/systemd/system/openvpn@.service.d/override.conf b/src/etc/systemd/system/openvpn@.service.d/10-override.conf index 03fe6b587..775a2d7ba 100644 --- a/src/etc/systemd/system/openvpn@.service.d/override.conf +++ b/src/etc/systemd/system/openvpn@.service.d/10-override.conf @@ -7,6 +7,7 @@ WorkingDirectory= WorkingDirectory=/run/openvpn ExecStart= ExecStart=/usr/sbin/openvpn --daemon openvpn-%i --config %i.conf --status %i.status 30 --writepid %i.pid +ExecReload=/bin/kill -HUP $MAINPID User=openvpn Group=openvpn AmbientCapabilities=CAP_IPC_LOCK CAP_NET_ADMIN CAP_NET_BIND_SERVICE CAP_NET_RAW CAP_SETGID CAP_SETUID CAP_SYS_CHROOT CAP_DAC_OVERRIDE CAP_AUDIT_WRITE diff --git a/src/etc/systemd/system/uacctd.service.d/override.conf b/src/etc/systemd/system/uacctd.service.d/override.conf new file mode 100644 index 000000000..38bcce515 --- /dev/null +++ b/src/etc/systemd/system/uacctd.service.d/override.conf @@ -0,0 +1,14 @@ +[Unit] +After= +After=vyos-router.service +ConditionPathExists= +ConditionPathExists=/run/pmacct/uacctd.conf + +[Service] +EnvironmentFile= +ExecStart= +ExecStart=/usr/sbin/uacctd -f /run/pmacct/uacctd.conf +WorkingDirectory= +WorkingDirectory=/run/pmacct +PIDFile= +PIDFile=/run/pmacct/uacctd.pid diff --git a/src/etc/udev/rules.d/62-temporary-interface-rename.rules b/src/etc/udev/rules.d/62-temporary-interface-rename.rules new file mode 100644 index 000000000..4a579dcab --- /dev/null +++ b/src/etc/udev/rules.d/62-temporary-interface-rename.rules @@ -0,0 +1 @@ +SUBSYSTEM=="net", ACTION=="add", KERNEL=="eth*", DRIVERS=="?*", NAME="e$env{IFINDEX}" diff --git a/src/etc/udev/rules.d/65-vyatta-net.rules b/src/etc/udev/rules.d/65-vyatta-net.rules deleted file mode 100644 index 2b48c1213..000000000 --- a/src/etc/udev/rules.d/65-vyatta-net.rules +++ /dev/null @@ -1,26 +0,0 @@ -# These rules use vyatta_net_name to persistently name network interfaces -# per "hwid" association in the Vyatta configuration file. - -ACTION!="add", GOTO="vyatta_net_end" -SUBSYSTEM!="net", GOTO="vyatta_net_end" - -# ignore the interface if a name has already been set -NAME=="?*", GOTO="vyatta_net_end" - -# Do name change for ethernet and wireless devices only -KERNEL!="eth*|wlan*", GOTO="vyatta_net_end" - -# ignore "secondary" monitor interfaces of mac80211 drivers -KERNEL=="wlan*", ATTRS{type}=="803", GOTO="vyatta_net_end" - -# If using VyOS predefined names -ENV{VYOS_IFNAME}!="eth*", GOTO="end_vyos_predef_names" - -DRIVERS=="?*", PROGRAM="vyatta_net_name %k $attr{address} $env{VYOS_IFNAME}", NAME="%c", GOTO="vyatta_net_end" - -LABEL="end_vyos_predef_names" - -# ignore interfaces without a driver link like bridges and VLANs -DRIVERS=="?*", PROGRAM="vyatta_net_name %k $attr{address}", NAME="%c" - -LABEL="vyatta_net_end" diff --git a/src/etc/udev/rules.d/65-vyos-net.rules b/src/etc/udev/rules.d/65-vyos-net.rules new file mode 100644 index 000000000..32ae352de --- /dev/null +++ b/src/etc/udev/rules.d/65-vyos-net.rules @@ -0,0 +1,23 @@ +# These rules use vyos_net_name to persistently name network interfaces +# per "hwid" association in the VyOS configuration file. 
+ +ACTION!="add", GOTO="vyos_net_end" +SUBSYSTEM!="net", GOTO="vyos_net_end" + +# Do name change for ethernet and wireless devices only +KERNEL!="eth*|wlan*|e*", GOTO="vyos_net_end" + +# ignore "secondary" monitor interfaces of mac80211 drivers +KERNEL=="wlan*", ATTRS{type}=="803", GOTO="vyos_net_end" + +# If using VyOS predefined names +ENV{VYOS_IFNAME}!="eth*", GOTO="end_vyos_predef_names" + +DRIVERS=="?*", PROGRAM="vyos_net_name %k $attr{address} $env{VYOS_IFNAME}", NAME="%c", GOTO="vyos_net_end" + +LABEL="end_vyos_predef_names" + +# ignore interfaces without a driver link like bridges and VLANs +DRIVERS=="?*", PROGRAM="vyos_net_name %k $attr{address}", NAME="%c" + +LABEL="vyos_net_end" diff --git a/src/etc/udev/rules.d/90-vyos-serial.rules b/src/etc/udev/rules.d/90-vyos-serial.rules index 872fd4fea..30c1d3170 100644 --- a/src/etc/udev/rules.d/90-vyos-serial.rules +++ b/src/etc/udev/rules.d/90-vyos-serial.rules @@ -22,7 +22,7 @@ IMPORT{builtin}="path_id", IMPORT{builtin}="usb_id" # (tr -d -) does the replacement # - Replace the first group after ":" to represent the bus relation (sed -e 0,/:/s//b/) indicated by "b" # - Replace the next group after ":" to represent the port relation (sed -e 0,/:/s//p/) indicated by "p" -ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="", PROGRAM="/bin/sh -c 'echo $env{ID_PATH:17} | tr -d - | sed -e 0,/:/s//b/ | sed -e 0,/:/s//p/'", SYMLINK+="serial/by-bus/$result" -ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="?*", PROGRAM="/bin/sh -c 'echo $env{ID_PATH:17} | tr -d - | sed -e 0,/:/s//b/ | sed -e 0,/:/s//p/'", SYMLINK+="serial/by-bus/$result" +ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="", PROGRAM="/bin/sh -c 'echo $env{ID_PATH} | cut -d- -f3- | tr -d - | sed -e 0,/:/s//b/ | sed -e 0,/:/s//p/'", SYMLINK+="serial/by-bus/$result" +ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="?*", PROGRAM="/bin/sh -c 'echo $env{ID_PATH} | cut -d- -f3- | tr -d - | sed -e 0,/:/s//b/ | sed -e 0,/:/s//p/'", SYMLINK+="serial/by-bus/$result" LABEL="serial_end" diff --git a/src/helpers/strip-private.py b/src/helpers/strip-private.py index c74a379aa..e4e1fe11d 100755 --- a/src/helpers/strip-private.py +++ b/src/helpers/strip-private.py @@ -106,6 +106,7 @@ if __name__ == "__main__": stripping_rules = [ # Strip passwords (True, re.compile(r'password \S+'), 'password xxxxxx'), + (True, re.compile(r'cisco-authentication \S+'), 'cisco-authentication xxxxxx'), # Strip public key information (True, re.compile(r'public-keys \S+'), 'public-keys xxxx@xxx.xxx'), (True, re.compile(r'type \'ssh-(rsa|dss)\''), 'type ssh-xxx'), diff --git a/src/helpers/vyos-boot-config-loader.py b/src/helpers/vyos-boot-config-loader.py index c5bf22f10..b9cc87bfa 100755 --- a/src/helpers/vyos-boot-config-loader.py +++ b/src/helpers/vyos-boot-config-loader.py @@ -23,12 +23,12 @@ import grp import traceback from datetime import datetime -from vyos.defaults import directories +from vyos.defaults import directories, config_status from vyos.configsession import ConfigSession, ConfigSessionError from vyos.configtree import ConfigTree from vyos.util import cmd -STATUS_FILE = '/tmp/vyos-config-status' +STATUS_FILE = config_status TRACE_FILE = '/tmp/boot-config-trace' CFG_GROUP = 'vyattacfg' diff --git a/src/helpers/vyos-check-wwan.py b/src/helpers/vyos-check-wwan.py new file mode 100755 index 000000000..2ff9a574f --- /dev/null +++ b/src/helpers/vyos-check-wwan.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of 
the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +from vyos.configquery import VbashOpRun +from vyos.configquery import ConfigTreeQuery + +from vyos.util import is_wwan_connected + +conf = ConfigTreeQuery() +dict = conf.get_config_dict(['interfaces', 'wwan'], key_mangling=('-', '_'), + get_first_key=True) + +for interface, interface_config in dict.items(): + if not is_wwan_connected(interface): + if 'disable' in interface_config: + # do not restart this interface as it's disabled by the user + continue + + op = VbashOpRun() + op.run(['connect', 'interface', interface]) + +exit(0) diff --git a/src/helpers/vyos-interface-rescan.py b/src/helpers/vyos-interface-rescan.py new file mode 100755 index 000000000..1ac1810e0 --- /dev/null +++ b/src/helpers/vyos-interface-rescan.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# +# + +import os +import stat +import argparse +import logging +import netaddr + +from vyos.configtree import ConfigTree +from vyos.defaults import directories +from vyos.util import get_cfg_group_id + +debug = False + +vyos_udev_dir = directories['vyos_udev_dir'] +vyos_log_dir = directories['log'] +log_file = os.path.splitext(os.path.basename(__file__))[0] +vyos_log_file = os.path.join(vyos_log_dir, log_file) + +logger = logging.getLogger(__name__) +handler = logging.FileHandler(vyos_log_file, mode='a') +formatter = logging.Formatter('%(levelname)s: %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) + +passlist = { + '02:07:01' : 'Interlan', + '02:60:60' : '3Com', + '02:60:8c' : '3Com', + '02:a0:c9' : 'Intel', + '02:aa:3c' : 'Olivetti', + '02:cf:1f' : 'CMC', + '02:e0:3b' : 'Prominet', + '02:e6:d3' : 'BTI', + '52:54:00' : 'Realtek', + '52:54:4c' : 'Novell 2000', + '52:54:ab' : 'Realtec', + 'e2:0c:0f' : 'Kingston Technologies' +} + +def is_multicast(addr: netaddr.eui.EUI) -> bool: + return bool(addr.words[0] & 0b1) + +def is_locally_administered(addr: netaddr.eui.EUI) -> bool: + return bool(addr.words[0] & 0b10) + +def is_on_passlist(hwid: str) -> bool: + top = hwid.rsplit(':', 3)[0] + if top in list(passlist): + return True + return False + +def is_persistent(hwid: str) -> bool: + addr = netaddr.EUI(hwid) + if is_multicast(addr): + return False + if is_locally_administered(addr) and not is_on_passlist(hwid): + return False + return True + +def get_wireless_physical_device(intf: str) -> str: + if 'wlan' not in intf: + return '' + try: + tmp = os.readlink(f'/sys/class/net/{intf}/phy80211') + except OSError: + logger.critical(f"Failed to read '/sys/class/net/{intf}/phy80211'") + return '' + phy = os.path.basename(tmp) + logger.info(f"wireless phy is {phy}") + return phy + +def get_interface_type(intf: str) -> str: + if 'eth' in intf: + intf_type = 'ethernet' + elif 'wlan' in intf: + intf_type = 'wireless' + else: + logger.critical('Unrecognized interface type!') + intf_type = '' + return intf_type + +def get_new_interfaces() -> dict: + """ Read any new interface data left in /run/udev/vyos by vyos_net_name + """ + interfaces = {} + + for intf in os.listdir(vyos_udev_dir): + path = os.path.join(vyos_udev_dir, intf) + try: + with open(path) as f: + hwid = f.read().rstrip() + except OSError as e: + logger.error(f"OSError {e}") + continue + interfaces[intf] = hwid + + # reverse sort to simplify insertion in config + interfaces = {key: value for key, value in sorted(interfaces.items(), + reverse=True)} + return interfaces + +def filter_interfaces(intfs: dict) -> dict: + """ Ignore no longer existing interfaces or non-persistent mac addresses + """ + filtered = {} + + for intf, hwid in intfs.items(): + if not os.path.isdir(os.path.join('/sys/class/net', intf)): + continue + if not is_persistent(hwid): + continue + filtered[intf] = hwid + + return filtered + +def interface_rescan(config_path: str): + """ Read new data and update config file + """ + interfaces = get_new_interfaces() + + logger.debug(f"interfaces from udev: {interfaces}") + + interfaces = filter_interfaces(interfaces) + + logger.debug(f"filtered interfaces: {interfaces}") + + try: + with open(config_path) as f: + config_file = f.read() + except OSError as e: + logger.critical(f"OSError {e}") + exit(1) + + config = ConfigTree(config_file) + + for intf, hwid in interfaces.items(): + logger.info(f"Writing '{intf}' '{hwid}' to config file") + intf_type = get_interface_type(intf) + if not intf_type: + continue + 
if not config.exists(['interfaces', intf_type]): + config.set(['interfaces', intf_type]) + config.set_tag(['interfaces', intf_type]) + config.set(['interfaces', intf_type, intf, 'hw-id'], value=hwid) + + if intf_type == 'wireless': + phy = get_wireless_physical_device(intf) + if not phy: + continue + config.set(['interfaces', intf_type, intf, 'physical-device'], + value=phy) + + try: + with open(config_path, 'w') as f: + f.write(config.to_string()) + except OSError as e: + logger.critical(f"OSError {e}") + +def main(): + global debug + + argparser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter) + argparser.add_argument('configfile', type=str) + argparser.add_argument('--debug', action='store_true') + args = argparser.parse_args() + + if args.debug: + debug = True + logger.setLevel(logging.DEBUG) + else: + logger.setLevel(logging.INFO) + + configfile = args.configfile + + # preserve vyattacfg group write access to running config + os.setgid(get_cfg_group_id()) + os.umask(0o002) + + # log file perms are not automatic; this could be cleaner by moving to a + # logging config file + os.chown(vyos_log_file, 0, get_cfg_group_id()) + os.chmod(vyos_log_file, + stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH) + + interface_rescan(configfile) + +if __name__ == '__main__': + main() diff --git a/src/helpers/vyos_net_name b/src/helpers/vyos_net_name new file mode 100755 index 000000000..afeef8f2d --- /dev/null +++ b/src/helpers/vyos_net_name @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# +# + +import os +import re +import time +import logging +import threading +from sys import argv + +from vyos.configtree import ConfigTree +from vyos.defaults import directories +from vyos.util import cmd, boot_configuration_complete + +vyos_udev_dir = directories['vyos_udev_dir'] +vyos_log_dir = '/run/udev/log' +vyos_log_file = os.path.join(vyos_log_dir, 'vyos-net-name') + +config_path = '/opt/vyatta/etc/config/config.boot' + +lock = threading.Lock() + +try: + os.mkdir(vyos_log_dir) +except FileExistsError: + pass + +logging.basicConfig(filename=vyos_log_file, level=logging.DEBUG) + +def is_available(intfs: dict, intf_name: str) -> bool: + """ Check if interface name is already assigned + """ + if intf_name in list(intfs.values()): + return False + return True + +def find_available(intfs: dict, prefix: str) -> str: + """ Find lowest indexed interface name that is not assigned + """ + index_list = [int(x.replace(prefix, '')) for x in list(intfs.values()) if prefix in x] + index_list.sort() + # find 'holes' in list, if any + missing = sorted(set(range(index_list[0], index_list[-1])) - set(index_list)) + if missing: + return f'{prefix}{missing[0]}' + + return f'{prefix}{len(index_list)}' + +def mod_ifname(ifname: str) -> str: + """ Map temporary interface names of the form eX to ethN, where N = X - 2 (eX with X < 2 maps to ethX) + """ + if re.match("^e[0-9]+$", ifname): + intf = ifname.split("e") + if intf[1]: + if int(intf[1]) >= 2: + return "eth" + str(int(intf[1]) - 2) + else: + return "eth" + str(intf[1]) + + return ifname + +def get_biosdevname(ifname: str) -> str: + """ Use legacy vyatta-biosdevname to query for name + + This is carried over for compatibility only, and will likely be dropped + going forward. + XXX: This throws an error, and likely has for a long time, unnoticed + since vyatta_net_name redirected stderr to /dev/null. + """ + intf = mod_ifname(ifname) + + if 'eth' not in intf: + return intf + if os.path.isdir('/proc/xen'): + return intf + + time.sleep(1) + + try: + biosname = cmd(f'/sbin/biosdevname --policy all_ethN -i {ifname}') + except Exception as e: + logging.error(f'biosdevname error: {e}') + biosname = '' + + return intf if biosname == '' else biosname + +def leave_rescan_hint(intf_name: str, hwid: str): + """Write interface information reported by udev + + This script is called while the root mount is still read-only. Leave + information in /run/udev: file name, the interface; contents, the + hardware id. 
+ """ + try: + os.mkdir(vyos_udev_dir) + except FileExistsError: + pass + except Exception as e: + logging.critical(f"Error creating rescan hint directory: {e}") + exit(1) + + try: + with open(os.path.join(vyos_udev_dir, intf_name), 'w') as f: + f.write(hwid) + except OSError as e: + logging.critical(f"OSError {e}") + +def get_configfile_interfaces() -> dict: + """Read existing interfaces from config file + """ + interfaces: dict = {} + + if not os.path.isfile(config_path): + # If the case, then we are running off of livecd; return empty + return interfaces + + try: + with open(config_path) as f: + config_file = f.read() + except OSError as e: + logging.critical(f"OSError {e}") + exit(1) + + try: + config = ConfigTree(config_file) + except Exception: + logging.debug(f"updating component version string syntax") + try: + # this will update the component version string in place, for + # updates 1.2 --> 1.3/1.4 + os.system(f'/usr/libexec/vyos/run-config-migration.py {config_path} --virtual --set-vintage=vyos') + with open(config_path) as f: + config_file = f.read() + config = ConfigTree(config_file) + except Exception as e: + logging.critical(f"ConfigTree error: {e}") + + base = ['interfaces', 'ethernet'] + if config.exists(base): + eth_intfs = config.list_nodes(base) + for intf in eth_intfs: + path = base + [intf, 'hw-id'] + if not config.exists(path): + logging.warning(f"no 'hw-id' entry for {intf}") + continue + hwid = config.return_value(path) + if hwid in list(interfaces): + logging.warning(f"multiple entries for {hwid}: {interfaces[hwid]}, {intf}") + continue + interfaces[hwid] = intf + + base = ['interfaces', 'wireless'] + if config.exists(base): + wlan_intfs = config.list_nodes(base) + for intf in wlan_intfs: + path = base + [intf, 'hw-id'] + if not config.exists(path): + logging.warning(f"no 'hw-id' entry for {intf}") + continue + hwid = config.return_value(path) + if hwid in list(interfaces): + logging.warning(f"multiple entries for {hwid}: {interfaces[hwid]}, {intf}") + continue + interfaces[hwid] = intf + + logging.debug(f"config file entries: {interfaces}") + + return interfaces + +def add_assigned_interfaces(intfs: dict): + """Add interfaces found by previous invocation of udev rule + """ + if not os.path.isdir(vyos_udev_dir): + return + + for intf in os.listdir(vyos_udev_dir): + path = os.path.join(vyos_udev_dir, intf) + try: + with open(path) as f: + hwid = f.read().rstrip() + except OSError as e: + logging.error(f"OSError {e}") + continue + intfs[hwid] = intf + +def on_boot_event(intf_name: str, hwid: str, predefined: str = '') -> str: + """Called on boot by vyos-router: 'coldplug' in vyatta_net_name + """ + logging.info(f"lookup {intf_name}, {hwid}") + interfaces = get_configfile_interfaces() + logging.debug(f"config file interfaces are {interfaces}") + + if hwid in list(interfaces): + logging.info(f"use mapping from config file: '{hwid}' -> '{interfaces[hwid]}'") + return interfaces[hwid] + + add_assigned_interfaces(interfaces) + logging.debug(f"adding assigned interfaces: {interfaces}") + + if predefined: + newname = predefined + logging.info(f"predefined interface name for '{intf_name}' is '{newname}'") + else: + newname = get_biosdevname(intf_name) + logging.info(f"biosdevname returned '{newname}' for '{intf_name}'") + + if not is_available(interfaces, newname): + prefix = re.sub(r'\d+$', '', newname) + newname = find_available(interfaces, prefix) + + logging.info(f"new name for '{intf_name}' is '{newname}'") + + leave_rescan_hint(newname, hwid) + + return newname + +def 
hotplug_event(): + # Not yet implemented, since interface-rescan will only be run on boot. + pass + +if len(argv) > 3: + predef_name = argv[3] +else: + predef_name = '' + +lock.acquire() +if not boot_configuration_complete(): + res = on_boot_event(argv[1], argv[2], predefined=predef_name) + logging.debug(f"on boot, returned name is {res}") + print(res) +else: + logging.debug("boot configuration complete") +lock.release() + diff --git a/src/migration-scripts/bgp/1-to-2 b/src/migration-scripts/bgp/1-to-2 new file mode 100755 index 000000000..4c6d5ceb8 --- /dev/null +++ b/src/migration-scripts/bgp/1-to-2 @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +# T3741: no-ipv4-unicast is now enabled by default + +from sys import argv +from sys import exit + +from vyos.configtree import ConfigTree +from vyos.template import is_ipv4 + +if (len(argv) < 1): + print("Must specify file name!") + exit(1) + +file_name = argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['protocols', 'bgp'] +config = ConfigTree(config_file) + +if not config.exists(base): + # Nothing to do + exit(0) + +# This is now a default option - simply delete it. +# As it was configured explicitly - we can also bail out early as we need to +# do nothing! 
+if config.exists(base + ['parameters', 'default', 'no-ipv4-unicast']): + config.delete(base + ['parameters', 'default', 'no-ipv4-unicast']) + + # Check if the "default" node is now empty, if so - remove it + if len(config.list_nodes(base + ['parameters', 'default'])) == 0: + config.delete(base + ['parameters', 'default']) + + # Check if the "parameters" node is now empty, if so - remove it + if len(config.list_nodes(base + ['parameters'])) == 0: + config.delete(base + ['parameters']) + + exit(0) + +# As we now install a new default option into BGP we need to migrate all +# existing BGP neighbors and restore the old behavior +if config.exists(base + ['neighbor']): + for neighbor in config.list_nodes(base + ['neighbor']): + peer_group = base + ['neighbor', neighbor, 'peer-group'] + if config.exists(peer_group): + peer_group_name = config.return_value(peer_group) + # peer group enables old behavior for neighbor - bail out + if config.exists(base + ['peer-group', peer_group_name, 'address-family', 'ipv4-unicast']): + continue + + afi_ipv4 = base + ['neighbor', neighbor, 'address-family', 'ipv4-unicast'] + if not config.exists(afi_ipv4): + config.set(afi_ipv4) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print(f'Failed to save the modified config: {e}') + exit(1) diff --git a/src/migration-scripts/dns-forwarding/1-to-2 b/src/migration-scripts/dns-forwarding/1-to-2 index 8c4f4b5c7..ba10c26f2 100755 --- a/src/migration-scripts/dns-forwarding/1-to-2 +++ b/src/migration-scripts/dns-forwarding/1-to-2 @@ -67,8 +67,14 @@ if config.exists(base + ['listen-on']): # retrieve corresponding interface addresses in CIDR format # those need to be converted in pure IP addresses without network information path = ['interfaces', section, intf, 'address'] - for addr in config.return_values(path): - listen_addr.append( ip_interface(addr).ip ) + try: + for addr in config.return_values(path): + listen_addr.append( ip_interface(addr).ip ) + except: + # Some interface types do not use "address" option (e.g. OpenVPN) + # and may not even have a fixed address + print("Could not retrieve the address of the interface {} from the config".format(intf)) + print("You will need to update your DNS forwarding configuration manually") for addr in listen_addr: config.set(base + ['listen-address'], value=addr, replace=False) diff --git a/src/migration-scripts/firewall/6-to-7 b/src/migration-scripts/firewall/6-to-7 new file mode 100755 index 000000000..4a4097d56 --- /dev/null +++ b/src/migration-scripts/firewall/6-to-7 @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +# T2199: Remove unavailable nodes due to XML/Python implementation using nftables +# monthdays: nftables does not have a monthdays equivalent +# utc: nftables userspace uses localtime and calculates the UTC offset automatically + +from sys import argv +from sys import exit + +from vyos.configtree import ConfigTree +from vyos.ifconfig import Section + +if (len(argv) < 1): + print("Must specify file name!") + exit(1) + +file_name = argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['firewall'] +config = ConfigTree(config_file) + +if not config.exists(base): + # Nothing to do + exit(0) + +if config.exists(base + ['name']): + for name in config.list_nodes(base + ['name']): + if config.exists(base + ['name', name, 'rule']): + for rule in config.list_nodes(base + ['name', name, 'rule']): + rule_time = base + ['name', name, 'rule', rule, 'time'] + + if config.exists(rule_time + ['monthdays']): + config.delete(rule_time + ['monthdays']) + + if config.exists(rule_time + ['utc']): + config.delete(rule_time + ['utc']) + +if config.exists(base + ['ipv6-name']): + for name in config.list_nodes(base + ['ipv6-name']): + if config.exists(base + ['ipv6-name', name, 'rule']): + for rule in config.list_nodes(base + ['ipv6-name', name, 'rule']): + rule_time = base + ['ipv6-name', name, 'rule', rule, 'time'] + + if config.exists(rule_time + ['monthdays']): + config.delete(rule_time + ['monthdays']) + + if config.exists(rule_time + ['utc']): + config.delete(rule_time + ['utc']) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print("Failed to save the modified config: {}".format(e)) + exit(1) diff --git a/src/migration-scripts/flow-accounting/0-to-1 b/src/migration-scripts/flow-accounting/0-to-1 new file mode 100755 index 000000000..72cce77b0 --- /dev/null +++ b/src/migration-scripts/flow-accounting/0-to-1 @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +# T4099: flow-accounting: sync "source-ip" and "source-address" between netflow +# and sflow ion CLI +# T4105: flow-accounting: drop "sflow agent-address auto" + +from sys import argv +from vyos.configtree import ConfigTree + +if (len(argv) < 1): + print("Must specify file name!") + exit(1) + +file_name = argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['system', 'flow-accounting'] +config = ConfigTree(config_file) + +if not config.exists(base): + # Nothing to do + exit(0) + +# T4099 +tmp = base + ['netflow', 'source-ip'] +if config.exists(tmp): + config.rename(tmp, 'source-address') + +# T4105 +tmp = base + ['sflow', 'agent-address'] +if config.exists(tmp): + value = config.return_value(tmp) + if value == 'auto': + # delete the "auto" + config.delete(tmp) + + # 1) check if BGP router-id is set + # 2) check if OSPF router-id is set + # 3) check if OSPFv3 router-id is set + router_id = None + for protocol in ['bgp', 'ospf', 'ospfv3']: + if config.exists(['protocols', protocol, 'parameters', 'router-id']): + router_id = config.return_value(['protocols', protocol, 'parameters', 'router-id']) + break + if router_id: + config.set(tmp, value=router_id) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print("Failed to save the modified config: {}".format(e)) + exit(1) diff --git a/src/migration-scripts/interfaces/21-to-22 b/src/migration-scripts/interfaces/21-to-22 index 06e07572f..098102102 100755 --- a/src/migration-scripts/interfaces/21-to-22 +++ b/src/migration-scripts/interfaces/21-to-22 @@ -15,131 +15,32 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import argv -from sys import exit from vyos.configtree import ConfigTree -def migrate_ospf(config, path, interface): - path = path + ['ospf'] - if config.exists(path): - new_base = ['protocols', 'ospf', 'interface'] - config.set(new_base) - config.set_tag(new_base) - config.copy(path, new_base + [interface]) - config.delete(path) +if (len(argv) < 1): + print("Must specify file name!") + exit(1) - # if "ip ospf" was the only setting, we can clean out the empty - # ip node afterwards - if len(config.list_nodes(path[:-1])) == 0: - config.delete(path[:-1]) +file_name = argv[1] +with open(file_name, 'r') as f: + config_file = f.read() -def migrate_ospfv3(config, path, interface): - path = path + ['ospfv3'] - if config.exists(path): - new_base = ['protocols', 'ospfv3', 'interface'] - config.set(new_base) - config.set_tag(new_base) - config.copy(path, new_base + [interface]) - config.delete(path) +config = ConfigTree(config_file) +base = ['interfaces', 'tunnel'] - # if "ipv6 ospfv3" was the only setting, we can clean out the empty - # ip node afterwards - if len(config.list_nodes(path[:-1])) == 0: - config.delete(path[:-1]) +if not config.exists(base): + exit(0) -def migrate_rip(config, path, interface): - path = path + ['rip'] +for interface in config.list_nodes(base): + path = base + [interface, 'dhcp-interface'] if config.exists(path): - new_base = ['protocols', 'rip', 'interface'] - config.set(new_base) - config.set_tag(new_base) - config.copy(path, new_base + [interface]) + tmp = config.return_value(path) config.delete(path) - - # if "ip rip" was the only setting, we can clean out the empty - # ip node afterwards - if len(config.list_nodes(path[:-1])) == 0: - config.delete(path[:-1]) - -def migrate_ripng(config, path, interface): - path = path + ['ripng'] - if config.exists(path): - new_base = ['protocols', 'ripng', 'interface'] - 
config.set(new_base) - config.set_tag(new_base) - config.copy(path, new_base + [interface]) - config.delete(path) - - # if "ipv6 ripng" was the only setting, we can clean out the empty - # ip node afterwards - if len(config.list_nodes(path[:-1])) == 0: - config.delete(path[:-1]) - -if __name__ == '__main__': - if (len(argv) < 1): - print("Must specify file name!") - exit(1) - - file_name = argv[1] - with open(file_name, 'r') as f: - config_file = f.read() - - config = ConfigTree(config_file) - - # - # Migrate "interface ethernet eth0 ip ospf" to "protocols ospf interface eth0" - # - for type in config.list_nodes(['interfaces']): - for interface in config.list_nodes(['interfaces', type]): - ip_base = ['interfaces', type, interface, 'ip'] - ipv6_base = ['interfaces', type, interface, 'ipv6'] - migrate_rip(config, ip_base, interface) - migrate_ripng(config, ipv6_base, interface) - migrate_ospf(config, ip_base, interface) - migrate_ospfv3(config, ipv6_base, interface) - - vif_path = ['interfaces', type, interface, 'vif'] - if config.exists(vif_path): - for vif in config.list_nodes(vif_path): - vif_ip_base = vif_path + [vif, 'ip'] - vif_ipv6_base = vif_path + [vif, 'ipv6'] - ifname = f'{interface}.{vif}' - - migrate_rip(config, vif_ip_base, ifname) - migrate_ripng(config, vif_ipv6_base, ifname) - migrate_ospf(config, vif_ip_base, ifname) - migrate_ospfv3(config, vif_ipv6_base, ifname) - - - vif_s_path = ['interfaces', type, interface, 'vif-s'] - if config.exists(vif_s_path): - for vif_s in config.list_nodes(vif_s_path): - vif_s_ip_base = vif_s_path + [vif_s, 'ip'] - vif_s_ipv6_base = vif_s_path + [vif_s, 'ipv6'] - - # vif-c interfaces MUST be migrated before their parent vif-s - # interface as the migrate_*() functions delete the path! - vif_c_path = ['interfaces', type, interface, 'vif-s', vif_s, 'vif-c'] - if config.exists(vif_c_path): - for vif_c in config.list_nodes(vif_c_path): - vif_c_ip_base = vif_c_path + [vif_c, 'ip'] - vif_c_ipv6_base = vif_c_path + [vif_c, 'ipv6'] - ifname = f'{interface}.{vif_s}.{vif_c}' - - migrate_rip(config, vif_c_ip_base, ifname) - migrate_ripng(config, vif_c_ipv6_base, ifname) - migrate_ospf(config, vif_c_ip_base, ifname) - migrate_ospfv3(config, vif_c_ipv6_base, ifname) - - - ifname = f'{interface}.{vif_s}' - migrate_rip(config, vif_s_ip_base, ifname) - migrate_ripng(config, vif_s_ipv6_base, ifname) - migrate_ospf(config, vif_s_ip_base, ifname) - migrate_ospfv3(config, vif_s_ipv6_base, ifname) - - try: - with open(file_name, 'w') as f: - f.write(config.to_string()) - except OSError as e: - print("Failed to save the modified config: {}".format(e)) - exit(1) + config.set(base + [interface, 'source-interface'], value=tmp) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print("Failed to save the modified config: {}".format(e)) + exit(1) diff --git a/src/migration-scripts/interfaces/22-to-23 b/src/migration-scripts/interfaces/22-to-23 index d1ec2ad3e..06e07572f 100755 --- a/src/migration-scripts/interfaces/22-to-23 +++ b/src/migration-scripts/interfaces/22-to-23 @@ -14,47 +14,132 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
-# A VTI interface also requires an IPSec configuration - VyOS 1.2 supported -# having a VTI interface in the CLI but no IPSec configuration - drop VTI -# configuration if this is the case for VyOS 1.4 - -import sys +from sys import argv +from sys import exit from vyos.configtree import ConfigTree +def migrate_ospf(config, path, interface): + path = path + ['ospf'] + if config.exists(path): + new_base = ['protocols', 'ospf', 'interface'] + config.set(new_base) + config.set_tag(new_base) + config.copy(path, new_base + [interface]) + config.delete(path) + + # if "ip ospf" was the only setting, we can clean out the empty + # ip node afterwards + if len(config.list_nodes(path[:-1])) == 0: + config.delete(path[:-1]) + +def migrate_ospfv3(config, path, interface): + path = path + ['ospfv3'] + if config.exists(path): + new_base = ['protocols', 'ospfv3', 'interface'] + config.set(new_base) + config.set_tag(new_base) + config.copy(path, new_base + [interface]) + config.delete(path) + + # if "ipv6 ospfv3" was the only setting, we can clean out the empty + # ip node afterwards + if len(config.list_nodes(path[:-1])) == 0: + config.delete(path[:-1]) + +def migrate_rip(config, path, interface): + path = path + ['rip'] + if config.exists(path): + new_base = ['protocols', 'rip', 'interface'] + config.set(new_base) + config.set_tag(new_base) + config.copy(path, new_base + [interface]) + config.delete(path) + + # if "ip rip" was the only setting, we can clean out the empty + # ip node afterwards + if len(config.list_nodes(path[:-1])) == 0: + config.delete(path[:-1]) + +def migrate_ripng(config, path, interface): + path = path + ['ripng'] + if config.exists(path): + new_base = ['protocols', 'ripng', 'interface'] + config.set(new_base) + config.set_tag(new_base) + config.copy(path, new_base + [interface]) + config.delete(path) + + # if "ipv6 ripng" was the only setting, we can clean out the empty + # ip node afterwards + if len(config.list_nodes(path[:-1])) == 0: + config.delete(path[:-1]) + if __name__ == '__main__': - if (len(sys.argv) < 1): + if (len(argv) < 1): print("Must specify file name!") - sys.exit(1) - - file_name = sys.argv[1] + exit(1) + file_name = argv[1] with open(file_name, 'r') as f: config_file = f.read() config = ConfigTree(config_file) - base = ['interfaces', 'vti'] - if not config.exists(base): - # Nothing to do - sys.exit(0) - - ipsec_base = ['vpn', 'ipsec', 'site-to-site', 'peer'] - for interface in config.list_nodes(base): - found = False - if config.exists(ipsec_base): - for peer in config.list_nodes(ipsec_base): - if config.exists(ipsec_base + [peer, 'vti', 'bind']): - tmp = config.return_value(ipsec_base + [peer, 'vti', 'bind']) - if tmp == interface: - # Interface was found and we no longer need to search - # for it in our IPSec peers - found = True - break - if not found: - config.delete(base + [interface]) + + # + # Migrate "interface ethernet eth0 ip ospf" to "protocols ospf interface eth0" + # + for type in config.list_nodes(['interfaces']): + for interface in config.list_nodes(['interfaces', type]): + ip_base = ['interfaces', type, interface, 'ip'] + ipv6_base = ['interfaces', type, interface, 'ipv6'] + migrate_rip(config, ip_base, interface) + migrate_ripng(config, ipv6_base, interface) + migrate_ospf(config, ip_base, interface) + migrate_ospfv3(config, ipv6_base, interface) + + vif_path = ['interfaces', type, interface, 'vif'] + if config.exists(vif_path): + for vif in config.list_nodes(vif_path): + vif_ip_base = vif_path + [vif, 'ip'] + vif_ipv6_base = vif_path + [vif, 
'ipv6'] + ifname = f'{interface}.{vif}' + + migrate_rip(config, vif_ip_base, ifname) + migrate_ripng(config, vif_ipv6_base, ifname) + migrate_ospf(config, vif_ip_base, ifname) + migrate_ospfv3(config, vif_ipv6_base, ifname) + + + vif_s_path = ['interfaces', type, interface, 'vif-s'] + if config.exists(vif_s_path): + for vif_s in config.list_nodes(vif_s_path): + vif_s_ip_base = vif_s_path + [vif_s, 'ip'] + vif_s_ipv6_base = vif_s_path + [vif_s, 'ipv6'] + + # vif-c interfaces MUST be migrated before their parent vif-s + # interface as the migrate_*() functions delete the path! + vif_c_path = ['interfaces', type, interface, 'vif-s', vif_s, 'vif-c'] + if config.exists(vif_c_path): + for vif_c in config.list_nodes(vif_c_path): + vif_c_ip_base = vif_c_path + [vif_c, 'ip'] + vif_c_ipv6_base = vif_c_path + [vif_c, 'ipv6'] + ifname = f'{interface}.{vif_s}.{vif_c}' + + migrate_rip(config, vif_c_ip_base, ifname) + migrate_ripng(config, vif_c_ipv6_base, ifname) + migrate_ospf(config, vif_c_ip_base, ifname) + migrate_ospfv3(config, vif_c_ipv6_base, ifname) + + + ifname = f'{interface}.{vif_s}' + migrate_rip(config, vif_s_ip_base, ifname) + migrate_ripng(config, vif_s_ipv6_base, ifname) + migrate_ospf(config, vif_s_ip_base, ifname) + migrate_ospfv3(config, vif_s_ipv6_base, ifname) try: with open(file_name, 'w') as f: f.write(config.to_string()) except OSError as e: print("Failed to save the modified config: {}".format(e)) - sys.exit(1) + exit(1) diff --git a/src/migration-scripts/interfaces/23-to-24 b/src/migration-scripts/interfaces/23-to-24 index 93ce9215f..d1ec2ad3e 100755 --- a/src/migration-scripts/interfaces/23-to-24 +++ b/src/migration-scripts/interfaces/23-to-24 @@ -14,356 +14,47 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
-# Migrate Wireguard to store keys in CLI -# Migrate EAPoL to PKI configuration +# A VTI interface also requires an IPSec configuration - VyOS 1.2 supported +# having a VTI interface in the CLI but no IPSec configuration - drop VTI +# configuration if this is the case for VyOS 1.4 -import os import sys from vyos.configtree import ConfigTree -from vyos.pki import load_certificate -from vyos.pki import load_crl -from vyos.pki import load_dh_parameters -from vyos.pki import load_private_key -from vyos.pki import encode_certificate -from vyos.pki import encode_dh_parameters -from vyos.pki import encode_private_key -from vyos.util import run -def wrapped_pem_to_config_value(pem): - out = [] - for line in pem.strip().split("\n"): - if not line or line.startswith("-----") or line[0] == '#': - continue - out.append(line) - return "".join(out) +if __name__ == '__main__': + if (len(sys.argv) < 1): + print("Must specify file name!") + sys.exit(1) -def read_file_for_pki(config_auth_path): - full_path = os.path.join(AUTH_DIR, config_auth_path) - output = None + file_name = sys.argv[1] - if os.path.isfile(full_path): - if not os.access(full_path, os.R_OK): - run(f'sudo chmod 644 {full_path}') + with open(file_name, 'r') as f: + config_file = f.read() - with open(full_path, 'r') as f: - output = f.read() + config = ConfigTree(config_file) + base = ['interfaces', 'vti'] + if not config.exists(base): + # Nothing to do + sys.exit(0) - return output - -if (len(sys.argv) < 1): - print("Must specify file name!") - sys.exit(1) - -file_name = sys.argv[1] - -with open(file_name, 'r') as f: - config_file = f.read() - -config = ConfigTree(config_file) - -AUTH_DIR = '/config/auth' -pki_base = ['pki'] - -# OpenVPN -base = ['interfaces', 'openvpn'] - -if config.exists(base): - for interface in config.list_nodes(base): - x509_base = base + [interface, 'tls'] - pki_name = f'openvpn_{interface}' - - if config.exists(base + [interface, 'shared-secret-key-file']): - if not config.exists(pki_base + ['openvpn', 'shared-secret']): - config.set(pki_base + ['openvpn', 'shared-secret']) - config.set_tag(pki_base + ['openvpn', 'shared-secret']) - - key_file = config.return_value(base + [interface, 'shared-secret-key-file']) - key = read_file_for_pki(key_file) - key_pki_name = f'{pki_name}_shared' - - if key: - config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'key'], value=wrapped_pem_to_config_value(key)) - config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'version'], value='1') - config.set(base + [interface, 'shared-secret-key'], value=key_pki_name) - else: - print(f'Failed to migrate shared-secret-key on openvpn interface {interface}') - - config.delete(base + [interface, 'shared-secret-key-file']) - - if not config.exists(base + [interface, 'tls']): - continue - - if config.exists(base + [interface, 'tls', 'auth-file']): - if not config.exists(pki_base + ['openvpn', 'shared-secret']): - config.set(pki_base + ['openvpn', 'shared-secret']) - config.set_tag(pki_base + ['openvpn', 'shared-secret']) - - key_file = config.return_value(base + [interface, 'tls', 'auth-file']) - key = read_file_for_pki(key_file) - key_pki_name = f'{pki_name}_auth' - - if key: - config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'key'], value=wrapped_pem_to_config_value(key)) - config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'version'], value='1') - config.set(base + [interface, 'tls', 'auth-key'], value=key_pki_name) - else: - print(f'Failed to migrate auth-key on openvpn interface 
{interface}') - - config.delete(base + [interface, 'tls', 'auth-file']) - - if config.exists(base + [interface, 'tls', 'crypt-file']): - if not config.exists(pki_base + ['openvpn', 'shared-secret']): - config.set(pki_base + ['openvpn', 'shared-secret']) - config.set_tag(pki_base + ['openvpn', 'shared-secret']) - - key_file = config.return_value(base + [interface, 'tls', 'crypt-file']) - key = read_file_for_pki(key_file) - key_pki_name = f'{pki_name}_crypt' - - if key: - config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'key'], value=wrapped_pem_to_config_value(key)) - config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'version'], value='1') - config.set(base + [interface, 'tls', 'crypt-key'], value=key_pki_name) - else: - print(f'Failed to migrate crypt-key on openvpn interface {interface}') - - config.delete(base + [interface, 'tls', 'crypt-file']) - - if config.exists(x509_base + ['ca-cert-file']): - if not config.exists(pki_base + ['ca']): - config.set(pki_base + ['ca']) - config.set_tag(pki_base + ['ca']) - - cert_file = config.return_value(x509_base + ['ca-cert-file']) - cert_path = os.path.join(AUTH_DIR, cert_file) - cert = None - - if os.path.isfile(cert_path): - if not os.access(cert_path, os.R_OK): - run(f'sudo chmod 644 {cert_path}') - - with open(cert_path, 'r') as f: - cert_data = f.read() - cert = load_certificate(cert_data, wrap_tags=False) - - if cert: - cert_pem = encode_certificate(cert) - config.set(pki_base + ['ca', pki_name, 'certificate'], value=wrapped_pem_to_config_value(cert_pem)) - config.set(x509_base + ['ca-certificate'], value=pki_name) - else: - print(f'Failed to migrate CA certificate on openvpn interface {interface}') - - config.delete(x509_base + ['ca-cert-file']) - - if config.exists(x509_base + ['crl-file']): - if not config.exists(pki_base + ['ca']): - config.set(pki_base + ['ca']) - config.set_tag(pki_base + ['ca']) - - crl_file = config.return_value(x509_base + ['crl-file']) - crl_path = os.path.join(AUTH_DIR, crl_file) - crl = None - - if os.path.isfile(crl_path): - if not os.access(crl_path, os.R_OK): - run(f'sudo chmod 644 {crl_path}') - - with open(crl_path, 'r') as f: - crl_data = f.read() - crl = load_crl(crl_data, wrap_tags=False) - - if crl: - crl_pem = encode_certificate(crl) - config.set(pki_base + ['ca', pki_name, 'crl'], value=wrapped_pem_to_config_value(crl_pem)) - else: - print(f'Failed to migrate CRL on openvpn interface {interface}') - - config.delete(x509_base + ['crl-file']) - - if config.exists(x509_base + ['cert-file']): - if not config.exists(pki_base + ['certificate']): - config.set(pki_base + ['certificate']) - config.set_tag(pki_base + ['certificate']) - - cert_file = config.return_value(x509_base + ['cert-file']) - cert_path = os.path.join(AUTH_DIR, cert_file) - cert = None - - if os.path.isfile(cert_path): - if not os.access(cert_path, os.R_OK): - run(f'sudo chmod 644 {cert_path}') - - with open(cert_path, 'r') as f: - cert_data = f.read() - cert = load_certificate(cert_data, wrap_tags=False) - - if cert: - cert_pem = encode_certificate(cert) - config.set(pki_base + ['certificate', pki_name, 'certificate'], value=wrapped_pem_to_config_value(cert_pem)) - config.set(x509_base + ['certificate'], value=pki_name) - else: - print(f'Failed to migrate certificate on openvpn interface {interface}') - - config.delete(x509_base + ['cert-file']) - - if config.exists(x509_base + ['key-file']): - key_file = config.return_value(x509_base + ['key-file']) - key_path = os.path.join(AUTH_DIR, key_file) - key = None - 
- if os.path.isfile(key_path): - if not os.access(key_path, os.R_OK): - run(f'sudo chmod 644 {key_path}') - - with open(key_path, 'r') as f: - key_data = f.read() - key = load_private_key(key_data, passphrase=None, wrap_tags=False) - - if key: - key_pem = encode_private_key(key, passphrase=None) - config.set(pki_base + ['certificate', pki_name, 'private', 'key'], value=wrapped_pem_to_config_value(key_pem)) - else: - print(f'Failed to migrate private key on openvpn interface {interface}') - - config.delete(x509_base + ['key-file']) - - if config.exists(x509_base + ['dh-file']): - if not config.exists(pki_base + ['dh']): - config.set(pki_base + ['dh']) - config.set_tag(pki_base + ['dh']) - - dh_file = config.return_value(x509_base + ['dh-file']) - dh_path = os.path.join(AUTH_DIR, dh_file) - dh = None - - if os.path.isfile(dh_path): - if not os.access(dh_path, os.R_OK): - run(f'sudo chmod 644 {dh_path}') - - with open(dh_path, 'r') as f: - dh_data = f.read() - dh = load_dh_parameters(dh_data, wrap_tags=False) - - if dh: - dh_pem = encode_dh_parameters(dh) - config.set(pki_base + ['dh', pki_name, 'parameters'], value=wrapped_pem_to_config_value(dh_pem)) - config.set(x509_base + ['dh-params'], value=pki_name) - else: - print(f'Failed to migrate DH parameters on openvpn interface {interface}') - - config.delete(x509_base + ['dh-file']) - -# Wireguard -base = ['interfaces', 'wireguard'] - -if config.exists(base): - for interface in config.list_nodes(base): - private_key_path = base + [interface, 'private-key'] - - key_file = 'default' - if config.exists(private_key_path): - key_file = config.return_value(private_key_path) - - full_key_path = f'/config/auth/wireguard/{key_file}/private.key' - - if not os.path.exists(full_key_path): - print(f'Could not find wireguard private key for migration on interface "{interface}"') - continue - - with open(full_key_path, 'r') as f: - key_data = f.read().strip() - config.set(private_key_path, value=key_data) - - for peer in config.list_nodes(base + [interface, 'peer']): - config.rename(base + [interface, 'peer', peer, 'pubkey'], 'public-key') - -# Ethernet EAPoL -base = ['interfaces', 'ethernet'] - -if config.exists(base): + ipsec_base = ['vpn', 'ipsec', 'site-to-site', 'peer'] for interface in config.list_nodes(base): - if not config.exists(base + [interface, 'eapol']): - continue - - x509_base = base + [interface, 'eapol'] - pki_name = f'eapol_{interface}' - - if config.exists(x509_base + ['ca-cert-file']): - if not config.exists(pki_base + ['ca']): - config.set(pki_base + ['ca']) - config.set_tag(pki_base + ['ca']) - - cert_file = config.return_value(x509_base + ['ca-cert-file']) - cert_path = os.path.join(AUTH_DIR, cert_file) - cert = None - - if os.path.isfile(cert_path): - if not os.access(cert_path, os.R_OK): - run(f'sudo chmod 644 {cert_path}') - - with open(cert_path, 'r') as f: - cert_data = f.read() - cert = load_certificate(cert_data, wrap_tags=False) - - if cert: - cert_pem = encode_certificate(cert) - config.set(pki_base + ['ca', pki_name, 'certificate'], value=wrapped_pem_to_config_value(cert_pem)) - config.set(x509_base + ['ca-certificate'], value=pki_name) - else: - print(f'Failed to migrate CA certificate on eapol config for interface {interface}') - - config.delete(x509_base + ['ca-cert-file']) - - if config.exists(x509_base + ['cert-file']): - if not config.exists(pki_base + ['certificate']): - config.set(pki_base + ['certificate']) - config.set_tag(pki_base + ['certificate']) - - cert_file = config.return_value(x509_base + ['cert-file']) - 
cert_path = os.path.join(AUTH_DIR, cert_file) - cert = None - - if os.path.isfile(cert_path): - if not os.access(cert_path, os.R_OK): - run(f'sudo chmod 644 {cert_path}') - - with open(cert_path, 'r') as f: - cert_data = f.read() - cert = load_certificate(cert_data, wrap_tags=False) - - if cert: - cert_pem = encode_certificate(cert) - config.set(pki_base + ['certificate', pki_name, 'certificate'], value=wrapped_pem_to_config_value(cert_pem)) - config.set(x509_base + ['certificate'], value=pki_name) - else: - print(f'Failed to migrate certificate on eapol config for interface {interface}') - - config.delete(x509_base + ['cert-file']) - - if config.exists(x509_base + ['key-file']): - key_file = config.return_value(x509_base + ['key-file']) - key_path = os.path.join(AUTH_DIR, key_file) - key = None - - if os.path.isfile(key_path): - if not os.access(key_path, os.R_OK): - run(f'sudo chmod 644 {key_path}') - - with open(key_path, 'r') as f: - key_data = f.read() - key = load_private_key(key_data, passphrase=None, wrap_tags=False) - - if key: - key_pem = encode_private_key(key, passphrase=None) - config.set(pki_base + ['certificate', pki_name, 'private', 'key'], value=wrapped_pem_to_config_value(key_pem)) - else: - print(f'Failed to migrate private key on eapol config for interface {interface}') - - config.delete(x509_base + ['key-file']) - -try: - with open(file_name, 'w') as f: - f.write(config.to_string()) -except OSError as e: - print("Failed to save the modified config: {}".format(e)) - sys.exit(1) + found = False + if config.exists(ipsec_base): + for peer in config.list_nodes(ipsec_base): + if config.exists(ipsec_base + [peer, 'vti', 'bind']): + tmp = config.return_value(ipsec_base + [peer, 'vti', 'bind']) + if tmp == interface: + # Interface was found and we no longer need to search + # for it in our IPSec peers + found = True + break + if not found: + config.delete(base + [interface]) + + try: + with open(file_name, 'w') as f: + f.write(config.to_string()) + except OSError as e: + print("Failed to save the modified config: {}".format(e)) + sys.exit(1) diff --git a/src/migration-scripts/interfaces/24-to-25 b/src/migration-scripts/interfaces/24-to-25 new file mode 100755 index 000000000..93ce9215f --- /dev/null +++ b/src/migration-scripts/interfaces/24-to-25 @@ -0,0 +1,369 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
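The reworked 23-to-24 script above keeps a VTI interface only if some IPsec site-to-site peer binds to it. A condensed sketch of that check, reusing the same loading skeleton (the paths are the ones used in the patch; the helper name is invented for illustration):

from sys import argv
from vyos.configtree import ConfigTree

with open(argv[1], 'r') as f:
    config = ConfigTree(f.read())

base = ['interfaces', 'vti']
ipsec_base = ['vpn', 'ipsec', 'site-to-site', 'peer']

def bound_vti_interfaces(config):
    # Collect every VTI interface referenced by "vti bind" under some peer
    bound = set()
    if config.exists(ipsec_base):
        for peer in config.list_nodes(ipsec_base):
            if config.exists(ipsec_base + [peer, 'vti', 'bind']):
                bound.add(config.return_value(ipsec_base + [peer, 'vti', 'bind']))
    return bound

if config.exists(base):
    keep = bound_vti_interfaces(config)
    for interface in config.list_nodes(base):
        if interface not in keep:
            # VTI interface with no IPsec peer bound to it - drop it
            config.delete(base + [interface])

Collecting the bound interfaces once up front avoids re-walking the peer list for every VTI interface, but the outcome is the same as the per-interface loop in the patch.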
+ +# Migrate Wireguard to store keys in CLI +# Migrate EAPoL to PKI configuration + +import os +import sys +from vyos.configtree import ConfigTree +from vyos.pki import load_certificate +from vyos.pki import load_crl +from vyos.pki import load_dh_parameters +from vyos.pki import load_private_key +from vyos.pki import encode_certificate +from vyos.pki import encode_dh_parameters +from vyos.pki import encode_private_key +from vyos.util import run + +def wrapped_pem_to_config_value(pem): + out = [] + for line in pem.strip().split("\n"): + if not line or line.startswith("-----") or line[0] == '#': + continue + out.append(line) + return "".join(out) + +def read_file_for_pki(config_auth_path): + full_path = os.path.join(AUTH_DIR, config_auth_path) + output = None + + if os.path.isfile(full_path): + if not os.access(full_path, os.R_OK): + run(f'sudo chmod 644 {full_path}') + + with open(full_path, 'r') as f: + output = f.read() + + return output + +if (len(sys.argv) < 1): + print("Must specify file name!") + sys.exit(1) + +file_name = sys.argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +config = ConfigTree(config_file) + +AUTH_DIR = '/config/auth' +pki_base = ['pki'] + +# OpenVPN +base = ['interfaces', 'openvpn'] + +if config.exists(base): + for interface in config.list_nodes(base): + x509_base = base + [interface, 'tls'] + pki_name = f'openvpn_{interface}' + + if config.exists(base + [interface, 'shared-secret-key-file']): + if not config.exists(pki_base + ['openvpn', 'shared-secret']): + config.set(pki_base + ['openvpn', 'shared-secret']) + config.set_tag(pki_base + ['openvpn', 'shared-secret']) + + key_file = config.return_value(base + [interface, 'shared-secret-key-file']) + key = read_file_for_pki(key_file) + key_pki_name = f'{pki_name}_shared' + + if key: + config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'key'], value=wrapped_pem_to_config_value(key)) + config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'version'], value='1') + config.set(base + [interface, 'shared-secret-key'], value=key_pki_name) + else: + print(f'Failed to migrate shared-secret-key on openvpn interface {interface}') + + config.delete(base + [interface, 'shared-secret-key-file']) + + if not config.exists(base + [interface, 'tls']): + continue + + if config.exists(base + [interface, 'tls', 'auth-file']): + if not config.exists(pki_base + ['openvpn', 'shared-secret']): + config.set(pki_base + ['openvpn', 'shared-secret']) + config.set_tag(pki_base + ['openvpn', 'shared-secret']) + + key_file = config.return_value(base + [interface, 'tls', 'auth-file']) + key = read_file_for_pki(key_file) + key_pki_name = f'{pki_name}_auth' + + if key: + config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'key'], value=wrapped_pem_to_config_value(key)) + config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'version'], value='1') + config.set(base + [interface, 'tls', 'auth-key'], value=key_pki_name) + else: + print(f'Failed to migrate auth-key on openvpn interface {interface}') + + config.delete(base + [interface, 'tls', 'auth-file']) + + if config.exists(base + [interface, 'tls', 'crypt-file']): + if not config.exists(pki_base + ['openvpn', 'shared-secret']): + config.set(pki_base + ['openvpn', 'shared-secret']) + config.set_tag(pki_base + ['openvpn', 'shared-secret']) + + key_file = config.return_value(base + [interface, 'tls', 'crypt-file']) + key = read_file_for_pki(key_file) + key_pki_name = f'{pki_name}_crypt' + + if key: + config.set(pki_base + ['openvpn', 
'shared-secret', key_pki_name, 'key'], value=wrapped_pem_to_config_value(key)) + config.set(pki_base + ['openvpn', 'shared-secret', key_pki_name, 'version'], value='1') + config.set(base + [interface, 'tls', 'crypt-key'], value=key_pki_name) + else: + print(f'Failed to migrate crypt-key on openvpn interface {interface}') + + config.delete(base + [interface, 'tls', 'crypt-file']) + + if config.exists(x509_base + ['ca-cert-file']): + if not config.exists(pki_base + ['ca']): + config.set(pki_base + ['ca']) + config.set_tag(pki_base + ['ca']) + + cert_file = config.return_value(x509_base + ['ca-cert-file']) + cert_path = os.path.join(AUTH_DIR, cert_file) + cert = None + + if os.path.isfile(cert_path): + if not os.access(cert_path, os.R_OK): + run(f'sudo chmod 644 {cert_path}') + + with open(cert_path, 'r') as f: + cert_data = f.read() + cert = load_certificate(cert_data, wrap_tags=False) + + if cert: + cert_pem = encode_certificate(cert) + config.set(pki_base + ['ca', pki_name, 'certificate'], value=wrapped_pem_to_config_value(cert_pem)) + config.set(x509_base + ['ca-certificate'], value=pki_name) + else: + print(f'Failed to migrate CA certificate on openvpn interface {interface}') + + config.delete(x509_base + ['ca-cert-file']) + + if config.exists(x509_base + ['crl-file']): + if not config.exists(pki_base + ['ca']): + config.set(pki_base + ['ca']) + config.set_tag(pki_base + ['ca']) + + crl_file = config.return_value(x509_base + ['crl-file']) + crl_path = os.path.join(AUTH_DIR, crl_file) + crl = None + + if os.path.isfile(crl_path): + if not os.access(crl_path, os.R_OK): + run(f'sudo chmod 644 {crl_path}') + + with open(crl_path, 'r') as f: + crl_data = f.read() + crl = load_crl(crl_data, wrap_tags=False) + + if crl: + crl_pem = encode_certificate(crl) + config.set(pki_base + ['ca', pki_name, 'crl'], value=wrapped_pem_to_config_value(crl_pem)) + else: + print(f'Failed to migrate CRL on openvpn interface {interface}') + + config.delete(x509_base + ['crl-file']) + + if config.exists(x509_base + ['cert-file']): + if not config.exists(pki_base + ['certificate']): + config.set(pki_base + ['certificate']) + config.set_tag(pki_base + ['certificate']) + + cert_file = config.return_value(x509_base + ['cert-file']) + cert_path = os.path.join(AUTH_DIR, cert_file) + cert = None + + if os.path.isfile(cert_path): + if not os.access(cert_path, os.R_OK): + run(f'sudo chmod 644 {cert_path}') + + with open(cert_path, 'r') as f: + cert_data = f.read() + cert = load_certificate(cert_data, wrap_tags=False) + + if cert: + cert_pem = encode_certificate(cert) + config.set(pki_base + ['certificate', pki_name, 'certificate'], value=wrapped_pem_to_config_value(cert_pem)) + config.set(x509_base + ['certificate'], value=pki_name) + else: + print(f'Failed to migrate certificate on openvpn interface {interface}') + + config.delete(x509_base + ['cert-file']) + + if config.exists(x509_base + ['key-file']): + key_file = config.return_value(x509_base + ['key-file']) + key_path = os.path.join(AUTH_DIR, key_file) + key = None + + if os.path.isfile(key_path): + if not os.access(key_path, os.R_OK): + run(f'sudo chmod 644 {key_path}') + + with open(key_path, 'r') as f: + key_data = f.read() + key = load_private_key(key_data, passphrase=None, wrap_tags=False) + + if key: + key_pem = encode_private_key(key, passphrase=None) + config.set(pki_base + ['certificate', pki_name, 'private', 'key'], value=wrapped_pem_to_config_value(key_pem)) + else: + print(f'Failed to migrate private key on openvpn interface {interface}') + + 
config.delete(x509_base + ['key-file']) + + if config.exists(x509_base + ['dh-file']): + if not config.exists(pki_base + ['dh']): + config.set(pki_base + ['dh']) + config.set_tag(pki_base + ['dh']) + + dh_file = config.return_value(x509_base + ['dh-file']) + dh_path = os.path.join(AUTH_DIR, dh_file) + dh = None + + if os.path.isfile(dh_path): + if not os.access(dh_path, os.R_OK): + run(f'sudo chmod 644 {dh_path}') + + with open(dh_path, 'r') as f: + dh_data = f.read() + dh = load_dh_parameters(dh_data, wrap_tags=False) + + if dh: + dh_pem = encode_dh_parameters(dh) + config.set(pki_base + ['dh', pki_name, 'parameters'], value=wrapped_pem_to_config_value(dh_pem)) + config.set(x509_base + ['dh-params'], value=pki_name) + else: + print(f'Failed to migrate DH parameters on openvpn interface {interface}') + + config.delete(x509_base + ['dh-file']) + +# Wireguard +base = ['interfaces', 'wireguard'] + +if config.exists(base): + for interface in config.list_nodes(base): + private_key_path = base + [interface, 'private-key'] + + key_file = 'default' + if config.exists(private_key_path): + key_file = config.return_value(private_key_path) + + full_key_path = f'/config/auth/wireguard/{key_file}/private.key' + + if not os.path.exists(full_key_path): + print(f'Could not find wireguard private key for migration on interface "{interface}"') + continue + + with open(full_key_path, 'r') as f: + key_data = f.read().strip() + config.set(private_key_path, value=key_data) + + for peer in config.list_nodes(base + [interface, 'peer']): + config.rename(base + [interface, 'peer', peer, 'pubkey'], 'public-key') + +# Ethernet EAPoL +base = ['interfaces', 'ethernet'] + +if config.exists(base): + for interface in config.list_nodes(base): + if not config.exists(base + [interface, 'eapol']): + continue + + x509_base = base + [interface, 'eapol'] + pki_name = f'eapol_{interface}' + + if config.exists(x509_base + ['ca-cert-file']): + if not config.exists(pki_base + ['ca']): + config.set(pki_base + ['ca']) + config.set_tag(pki_base + ['ca']) + + cert_file = config.return_value(x509_base + ['ca-cert-file']) + cert_path = os.path.join(AUTH_DIR, cert_file) + cert = None + + if os.path.isfile(cert_path): + if not os.access(cert_path, os.R_OK): + run(f'sudo chmod 644 {cert_path}') + + with open(cert_path, 'r') as f: + cert_data = f.read() + cert = load_certificate(cert_data, wrap_tags=False) + + if cert: + cert_pem = encode_certificate(cert) + config.set(pki_base + ['ca', pki_name, 'certificate'], value=wrapped_pem_to_config_value(cert_pem)) + config.set(x509_base + ['ca-certificate'], value=pki_name) + else: + print(f'Failed to migrate CA certificate on eapol config for interface {interface}') + + config.delete(x509_base + ['ca-cert-file']) + + if config.exists(x509_base + ['cert-file']): + if not config.exists(pki_base + ['certificate']): + config.set(pki_base + ['certificate']) + config.set_tag(pki_base + ['certificate']) + + cert_file = config.return_value(x509_base + ['cert-file']) + cert_path = os.path.join(AUTH_DIR, cert_file) + cert = None + + if os.path.isfile(cert_path): + if not os.access(cert_path, os.R_OK): + run(f'sudo chmod 644 {cert_path}') + + with open(cert_path, 'r') as f: + cert_data = f.read() + cert = load_certificate(cert_data, wrap_tags=False) + + if cert: + cert_pem = encode_certificate(cert) + config.set(pki_base + ['certificate', pki_name, 'certificate'], value=wrapped_pem_to_config_value(cert_pem)) + config.set(x509_base + ['certificate'], value=pki_name) + else: + print(f'Failed to migrate 
certificate on eapol config for interface {interface}') + + config.delete(x509_base + ['cert-file']) + + if config.exists(x509_base + ['key-file']): + key_file = config.return_value(x509_base + ['key-file']) + key_path = os.path.join(AUTH_DIR, key_file) + key = None + + if os.path.isfile(key_path): + if not os.access(key_path, os.R_OK): + run(f'sudo chmod 644 {key_path}') + + with open(key_path, 'r') as f: + key_data = f.read() + key = load_private_key(key_data, passphrase=None, wrap_tags=False) + + if key: + key_pem = encode_private_key(key, passphrase=None) + config.set(pki_base + ['certificate', pki_name, 'private', 'key'], value=wrapped_pem_to_config_value(key_pem)) + else: + print(f'Failed to migrate private key on eapol config for interface {interface}') + + config.delete(x509_base + ['key-file']) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print("Failed to save the modified config: {}".format(e)) + sys.exit(1) diff --git a/src/migration-scripts/ospf/0-to-1 b/src/migration-scripts/ospf/0-to-1 new file mode 100755 index 000000000..678569d9e --- /dev/null +++ b/src/migration-scripts/ospf/0-to-1 @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
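Throughout the 24-to-25 migration above, key and certificate material read from /config/auth is flattened with wrapped_pem_to_config_value() before being stored as a single CLI value. A standalone sketch of that flattening; the helper body mirrors the one added in the patch, and the PEM input is a dummy placeholder rather than real key material:

def wrapped_pem_to_config_value(pem):
    # Strip PEM armour lines and comments, keep the base64 body as one string
    out = []
    for line in pem.strip().split("\n"):
        if not line or line.startswith("-----") or line[0] == '#':
            continue
        out.append(line)
    return "".join(out)

# Dummy PEM-style input purely for illustration
dummy_pem = """-----BEGIN CERTIFICATE-----
MIIBszCCARyg
AwIBAgIUYcI=
-----END CERTIFICATE-----"""

print(wrapped_pem_to_config_value(dummy_pem))
# prints: MIIBszCCARygAwIBAgIUYcI=

The flattened string is what ends up under paths such as "pki certificate <name> certificate" in the migrated config.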
+ +# T3753: upgrade to FRR8 and move CLI options to better fit with the new FRR CLI + +from sys import argv +from vyos.configtree import ConfigTree + +def ospf_passive_migration(config, ospf_base): + if config.exists(ospf_base): + if config.exists(ospf_base + ['passive-interface']): + default = False + for interface in config.return_values(ospf_base + ['passive-interface']): + if interface == 'default': + default = True + continue + config.set(ospf_base + ['interface', interface, 'passive']) + + config.delete(ospf_base + ['passive-interface']) + config.set(ospf_base + ['passive-interface'], value='default') + + if config.exists(ospf_base + ['passive-interface-exclude']): + for interface in config.return_values(ospf_base + ['passive-interface-exclude']): + config.set(ospf_base + ['interface', interface, 'passive', 'disable']) + config.delete(ospf_base + ['passive-interface-exclude']) + +if (len(argv) < 1): + print("Must specify file name!") + exit(1) + +file_name = argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +config = ConfigTree(config_file) + +ospfv3_base = ['protocols', 'ospfv3'] +if config.exists(ospfv3_base): + area_base = ospfv3_base + ['area'] + if config.exists(area_base): + for area in config.list_nodes(area_base): + if not config.exists(area_base + [area, 'interface']): + continue + + for interface in config.return_values(area_base + [area, 'interface']): + config.set(ospfv3_base + ['interface', interface, 'area'], value=area) + config.set_tag(ospfv3_base + ['interface']) + + config.delete(area_base + [area, 'interface']) + +# Migrate OSPF syntax in default VRF +ospf_base = ['protocols', 'ospf'] +ospf_passive_migration(config, ospf_base) + +vrf_base = ['vrf', 'name'] +if config.exists(vrf_base): + for vrf in config.list_nodes(vrf_base): + vrf_ospf_base = vrf_base + [vrf, 'protocols', 'ospf'] + if config.exists(vrf_ospf_base): + ospf_passive_migration(config, vrf_ospf_base) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print(f'Failed to save the modified config: {e}') + exit(1) diff --git a/src/op_mode/connect_disconnect.py b/src/op_mode/connect_disconnect.py index a773aa28e..ffc574362 100755 --- a/src/op_mode/connect_disconnect.py +++ b/src/op_mode/connect_disconnect.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2020 VyOS maintainers and contributors +# Copyright (C) 2020-2021 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -17,21 +17,19 @@ import os import argparse -from sys import exit from psutil import process_iter -from time import strftime, localtime, time from vyos.util import call +from vyos.util import DEVNULL +from vyos.util import is_wwan_connected -def check_interface(interface): +def check_ppp_interface(interface): if not os.path.isfile(f'/etc/ppp/peers/{interface}'): - print(f'Interface {interface}: invalid!') + print(f'Interface {interface} does not exist!') exit(1) def check_ppp_running(interface): - """ - Check if ppp process is running in the interface in question - """ + """ Check if PPP process is running in the interface in question """ for p in process_iter(): if "pppd" in p.name(): if interface in p.cmdline(): @@ -40,32 +38,46 @@ def check_ppp_running(interface): return False def connect(interface): - """ - Connect PPP interface - """ - check_interface(interface) + """ Connect dialer interface """ - # Check if interface is already 
dialed - if os.path.isdir(f'/sys/class/net/{interface}'): - print(f'Interface {interface}: already connected!') - elif check_ppp_running(interface): - print(f'Interface {interface}: connection is beeing established!') + if interface.startswith('ppp'): + check_ppp_interface(interface) + # Check if interface is already dialed + if os.path.isdir(f'/sys/class/net/{interface}'): + print(f'Interface {interface}: already connected!') + elif check_ppp_running(interface): + print(f'Interface {interface}: connection is beeing established!') + else: + print(f'Interface {interface}: connecting...') + call(f'systemctl restart ppp@{interface}.service') + elif interface.startswith('wwan'): + if is_wwan_connected(interface): + print(f'Interface {interface}: already connected!') + else: + call(f'VYOS_TAGNODE_VALUE={interface} /usr/libexec/vyos/conf_mode/interfaces-wwan.py') else: - print(f'Interface {interface}: connecting...') - call(f'systemctl restart ppp@{interface}.service') + print(f'Unknown interface {interface}, can not connect. Aborting!') def disconnect(interface): - """ - Disconnect PPP interface - """ - check_interface(interface) + """ Disconnect dialer interface """ - # Check if interface is already down - if not check_ppp_running(interface): - print(f'Interface {interface}: connection is already down') + if interface.startswith('ppp'): + check_ppp_interface(interface) + + # Check if interface is already down + if not check_ppp_running(interface): + print(f'Interface {interface}: connection is already down') + else: + print(f'Interface {interface}: disconnecting...') + call(f'systemctl stop ppp@{interface}.service') + elif interface.startswith('wwan'): + if not is_wwan_connected(interface): + print(f'Interface {interface}: connection is already down') + else: + modem = interface.lstrip('wwan') + call(f'mmcli --modem {modem} --simple-disconnect', stdout=DEVNULL) else: - print(f'Interface {interface}: disconnecting...') - call(f'systemctl stop ppp@{interface}.service') + print(f'Unknown interface {interface}, can not disconnect. 
Aborting!') def main(): parser = argparse.ArgumentParser() diff --git a/src/op_mode/conntrack_sync.py b/src/op_mode/conntrack_sync.py index 66ecf8439..89f6df4b9 100755 --- a/src/op_mode/conntrack_sync.py +++ b/src/op_mode/conntrack_sync.py @@ -20,12 +20,15 @@ import xmltodict from argparse import ArgumentParser from vyos.configquery import CliShellApiConfigQuery +from vyos.configquery import ConfigTreeQuery +from vyos.util import call from vyos.util import cmd from vyos.util import run from vyos.template import render_to_string conntrackd_bin = '/usr/sbin/conntrackd' conntrackd_config = '/run/conntrackd/conntrackd.conf' +failover_state_file = '/var/run/vyatta-conntrackd-failover-state' parser = ArgumentParser(description='Conntrack Sync') group = parser.add_mutually_exclusive_group() @@ -36,6 +39,8 @@ group.add_argument('--show-internal', help='Show internal (main) tracking cache' group.add_argument('--show-external', help='Show external (main) tracking cache', action='store_true') group.add_argument('--show-internal-expect', help='Show internal (expect) tracking cache', action='store_true') group.add_argument('--show-external-expect', help='Show external (expect) tracking cache', action='store_true') +group.add_argument('--show-statistics', help='Show connection syncing statistics', action='store_true') +group.add_argument('--show-status', help='Show conntrack-sync status', action='store_true') def is_configured(): """ Check if conntrack-sync service is configured """ @@ -131,6 +136,46 @@ if __name__ == '__main__': out = cmd(f'sudo {conntrackd_bin} -C {conntrackd_config} {opt} -x') xml_to_stdout(out) + elif args.show_statistics: + is_configured() + config = ConfigTreeQuery() + print('\nMain Table Statistics:\n') + call(f'sudo {conntrackd_bin} -C {conntrackd_config} -s') + print() + if config.exists(['service', 'conntrack-sync', 'expect-sync']): + print('\nExpect Table Statistics:\n') + call(f'sudo {conntrackd_bin} -C {conntrackd_config} -s exp') + print() + + elif args.show_status: + is_configured() + config = ConfigTreeQuery() + ct_sync_intf = config.list_nodes(['service', 'conntrack-sync', 'interface']) + ct_sync_intf = ', '.join(ct_sync_intf) + failover_state = "no transition yet!" 
+ expect_sync_protocols = "disabled" + + if config.exists(['service', 'conntrack-sync', 'failover-mechanism', 'vrrp']): + failover_mechanism = "vrrp" + vrrp_sync_grp = config.value(['service', 'conntrack-sync', 'failover-mechanism', 'vrrp', 'sync-group']) + + if os.path.isfile(failover_state_file): + with open(failover_state_file, "r") as f: + failover_state = f.readline() + + if config.exists(['service', 'conntrack-sync', 'expect-sync']): + expect_sync_protocols = config.values(['service', 'conntrack-sync', 'expect-sync']) + if 'all' in expect_sync_protocols: + expect_sync_protocols = ["ftp", "sip", "h323", "nfs", "sqlnet"] + expect_sync_protocols = ', '.join(expect_sync_protocols) + + show_status = (f'\nsync-interface : {ct_sync_intf}\n' + f'failover-mechanism : {failover_mechanism} [sync-group {vrrp_sync_grp}]\n' + f'last state transition : {failover_state}' + f'ExpectationSync : {expect_sync_protocols}') + + print(show_status) + else: parser.print_help() exit(1) diff --git a/src/op_mode/firewall.py b/src/op_mode/firewall.py new file mode 100755 index 000000000..cf70890a6 --- /dev/null +++ b/src/op_mode/firewall.py @@ -0,0 +1,355 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
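The new firewall op-mode script that follows keeps no counters of its own; it scrapes packet and byte counts from "nft list chain" output with regular expressions. A minimal sketch of that scraping, run against a made-up sample line (the ruleset name WAN_IN and the counter values are invented for illustration):

import re

# Made-up sample of one rule line from "nft list chain" output
sample = 'iifname "eth0" tcp dport 22 counter packets 42 bytes 2520 accept comment "WAN_IN-10"'

rule = {}
counter_search = re.search(r'counter packets (\d+) bytes (\d+)', sample)
if counter_search:
    rule['packets'] = counter_search[1]
    rule['bytes'] = counter_search[2]

comment_search = re.search(r'WAN_IN[\- ](\d+|default-action)', sample)
if comment_search:
    rule_id = comment_search[1]
    print(rule_id, rule)  # prints: 10 {'packets': '42', 'bytes': '2520'}

The real get_nftables_details() builds the same per-rule dictionary for every line of the chain listing and additionally strips the counter and action tokens so that only the human-readable match conditions remain.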
+ +import argparse +import json +import re +import tabulate + +from vyos.config import Config +from vyos.util import cmd +from vyos.util import dict_search_args + +def get_firewall_interfaces(conf, firewall, name=None, ipv6=False): + interfaces = conf.get_config_dict(['interfaces'], key_mangling=('-', '_'), + get_first_key=True, no_tag_node_value_mangle=True) + + directions = ['in', 'out', 'local'] + + def parse_if(ifname, if_conf): + if 'firewall' in if_conf: + for direction in directions: + if direction in if_conf['firewall']: + fw_conf = if_conf['firewall'][direction] + name_str = f'({ifname},{direction})' + + if 'name' in fw_conf: + fw_name = fw_conf['name'] + + if not name: + firewall['name'][fw_name]['interface'].append(name_str) + elif not ipv6 and name == fw_name: + firewall['interface'].append(name_str) + + if 'ipv6_name' in fw_conf: + fw_name = fw_conf['ipv6_name'] + + if not name: + firewall['ipv6_name'][fw_name]['interface'].append(name_str) + elif ipv6 and name == fw_name: + firewall['interface'].append(name_str) + + for iftype in ['vif', 'vif_s', 'vif_c']: + if iftype in if_conf: + for vifname, vif_conf in if_conf[iftype].items(): + parse_if(f'{ifname}.{vifname}', vif_conf) + + for iftype, iftype_conf in interfaces.items(): + for ifname, if_conf in iftype_conf.items(): + parse_if(ifname, if_conf) + + return firewall + +def get_config_firewall(conf, name=None, ipv6=False, interfaces=True): + config_path = ['firewall'] + if name: + config_path += ['ipv6-name' if ipv6 else 'name', name] + + firewall = conf.get_config_dict(config_path, key_mangling=('-', '_'), + get_first_key=True, no_tag_node_value_mangle=True) + if firewall and interfaces: + if name: + firewall['interface'] = [] + else: + if 'name' in firewall: + for fw_name, name_conf in firewall['name'].items(): + name_conf['interface'] = [] + + if 'ipv6_name' in firewall: + for fw_name, name_conf in firewall['ipv6_name'].items(): + name_conf['interface'] = [] + + get_firewall_interfaces(conf, firewall, name, ipv6) + return firewall + +def get_nftables_details(name, ipv6=False): + suffix = '6' if ipv6 else '' + command = f'sudo nft list chain ip{suffix} filter {name}' + try: + results = cmd(command) + except: + return {} + + out = {} + for line in results.split('\n'): + comment_search = re.search(rf'{name}[\- ](\d+|default-action)', line) + if not comment_search: + continue + + rule = {} + rule_id = comment_search[1] + counter_search = re.search(r'counter packets (\d+) bytes (\d+)', line) + if counter_search: + rule['packets'] = counter_search[1] + rule['bytes'] = counter_search[2] + + rule['conditions'] = re.sub(r'(\b(counter packets \d+ bytes \d+|drop|reject|return|log)\b|comment "[\w\-]+")', '', line).strip() + out[rule_id] = rule + return out + +def output_firewall_name(name, name_conf, ipv6=False, single_rule_id=None): + ip_str = 'IPv6' if ipv6 else 'IPv4' + print(f'\n---------------------------------\n{ip_str} Firewall "{name}"\n') + + if name_conf['interface']: + print('Active on: {0}\n'.format(" ".join(name_conf['interface']))) + + details = get_nftables_details(name, ipv6) + rows = [] + + if 'rule' in name_conf: + for rule_id, rule_conf in name_conf['rule'].items(): + if single_rule_id and rule_id != single_rule_id: + continue + + if 'disable' in rule_conf: + continue + + row = [rule_id, rule_conf['action'], rule_conf['protocol'] if 'protocol' in rule_conf else 'all'] + if rule_id in details: + rule_details = details[rule_id] + row.append(rule_details.get('packets', 0)) + row.append(rule_details.get('bytes', 0)) + 
row.append(rule_details['conditions']) + rows.append(row) + + if 'default_action' in name_conf and not single_rule_id: + row = ['default', name_conf['default_action'], 'all'] + if 'default-action' in details: + rule_details = details['default-action'] + row.append(rule_details.get('packets', 0)) + row.append(rule_details.get('bytes', 0)) + rows.append(row) + + if rows: + header = ['Rule', 'Action', 'Protocol', 'Packets', 'Bytes', 'Conditions'] + print(tabulate.tabulate(rows, header) + '\n') + +def output_firewall_name_statistics(name, name_conf, ipv6=False, single_rule_id=None): + ip_str = 'IPv6' if ipv6 else 'IPv4' + print(f'\n---------------------------------\n{ip_str} Firewall "{name}"\n') + + if name_conf['interface']: + print('Active on: {0}\n'.format(" ".join(name_conf['interface']))) + + details = get_nftables_details(name, ipv6) + rows = [] + + if 'rule' in name_conf: + for rule_id, rule_conf in name_conf['rule'].items(): + if single_rule_id and rule_id != single_rule_id: + continue + + if 'disable' in rule_conf: + continue + + source_addr = dict_search_args(rule_conf, 'source', 'address') or '0.0.0.0/0' + dest_addr = dict_search_args(rule_conf, 'destination', 'address') or '0.0.0.0/0' + + row = [rule_id] + if rule_id in details: + rule_details = details[rule_id] + row.append(rule_details.get('packets', 0)) + row.append(rule_details.get('bytes', 0)) + else: + row.append('0') + row.append('0') + row.append(rule_conf['action']) + row.append(source_addr) + row.append(dest_addr) + rows.append(row) + + if 'default_action' in name_conf and not single_rule_id: + row = ['default'] + if 'default-action' in details: + rule_details = details['default-action'] + row.append(rule_details.get('packets', 0)) + row.append(rule_details.get('bytes', 0)) + else: + row.append('0') + row.append('0') + row.append(name_conf['default_action']) + row.append('0.0.0.0/0') # Source + row.append('0.0.0.0/0') # Dest + rows.append(row) + + if rows: + header = ['Rule', 'Packets', 'Bytes', 'Action', 'Source', 'Destination'] + print(tabulate.tabulate(rows, header) + '\n') + +def show_firewall(): + print('Rulesets Information') + + conf = Config() + firewall = get_config_firewall(conf) + + if not firewall: + return + + if 'name' in firewall: + for name, name_conf in firewall['name'].items(): + output_firewall_name(name, name_conf, ipv6=False) + + if 'ipv6_name' in firewall: + for name, name_conf in firewall['ipv6_name'].items(): + output_firewall_name(name, name_conf, ipv6=True) + +def show_firewall_name(name, ipv6=False): + print('Ruleset Information') + + conf = Config() + firewall = get_config_firewall(conf, name, ipv6) + if firewall: + output_firewall_name(name, firewall, ipv6) + +def show_firewall_rule(name, rule_id, ipv6=False): + print('Rule Information') + + conf = Config() + firewall = get_config_firewall(conf, name, ipv6) + if firewall: + output_firewall_name(name, firewall, ipv6, rule_id) + +def show_firewall_group(name=None): + conf = Config() + firewall = get_config_firewall(conf, interfaces=False) + + if 'group' not in firewall: + return + + def find_references(group_type, group_name): + out = [] + for name_type in ['name', 'ipv6_name']: + if name_type not in firewall: + continue + for name, name_conf in firewall[name_type].items(): + if 'rule' not in name_conf: + continue + for rule_id, rule_conf in name_conf['rule'].items(): + source_group = dict_search_args(rule_conf, 'source', 'group', group_type) + dest_group = dict_search_args(rule_conf, 'destination', 'group', group_type) + if source_group and 
group_name == source_group: + out.append(f'{name}-{rule_id}') + elif dest_group and group_name == dest_group: + out.append(f'{name}-{rule_id}') + return out + + header = ['Name', 'Type', 'References', 'Members'] + rows = [] + + for group_type, group_type_conf in firewall['group'].items(): + for group_name, group_conf in group_type_conf.items(): + if name and name != group_name: + continue + + references = find_references(group_type, group_name) + row = [group_name, group_type, ', '.join(references)] + if 'address' in group_conf: + row.append(", ".join(group_conf['address'])) + elif 'network' in group_conf: + row.append(", ".join(group_conf['network'])) + elif 'port' in group_conf: + row.append(", ".join(group_conf['port'])) + rows.append(row) + + if rows: + print('Firewall Groups\n') + print(tabulate.tabulate(rows, header)) + +def show_summary(): + print('Ruleset Summary') + + conf = Config() + firewall = get_config_firewall(conf) + + if not firewall: + return + + header = ['Ruleset Name', 'Description', 'References'] + v4_out = [] + v6_out = [] + + if 'name' in firewall: + for name, name_conf in firewall['name'].items(): + description = name_conf.get('description', '') + interfaces = ", ".join(name_conf['interface']) + v4_out.append([name, description, interfaces]) + + if 'ipv6_name' in firewall: + for name, name_conf in firewall['ipv6_name'].items(): + description = name_conf.get('description', '') + interfaces = ", ".join(name_conf['interface']) + v6_out.append([name, description, interfaces]) + + if v6_out: + print('\nIPv6 name:\n') + print(tabulate.tabulate(v6_out, header) + '\n') + + if v4_out: + print('\nIPv4 name:\n') + print(tabulate.tabulate(v4_out, header) + '\n') + + show_firewall_group() + +def show_statistics(): + print('Rulesets Statistics') + + conf = Config() + firewall = get_config_firewall(conf) + + if not firewall: + return + + if 'name' in firewall: + for name, name_conf in firewall['name'].items(): + output_firewall_name_statistics(name, name_conf, ipv6=False) + + if 'ipv6_name' in firewall: + for name, name_conf in firewall['ipv6_name'].items(): + output_firewall_name_statistics(name, name_conf, ipv6=True) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--action', help='Action', required=False) + parser.add_argument('--name', help='Firewall name', required=False, action='store', nargs='?', default='') + parser.add_argument('--rule', help='Firewall Rule ID', required=False) + parser.add_argument('--ipv6', help='IPv6 toggle', action='store_true') + + args = parser.parse_args() + + if args.action == 'show': + if not args.rule: + show_firewall_name(args.name, args.ipv6) + else: + show_firewall_rule(args.name, args.rule, args.ipv6) + elif args.action == 'show_all': + show_firewall() + elif args.action == 'show_group': + show_firewall_group(args.name) + elif args.action == 'show_statistics': + show_statistics() + elif args.action == 'show_summary': + show_summary() diff --git a/src/op_mode/force_root-partition-auto-resize.sh b/src/op_mode/force_root-partition-auto-resize.sh new file mode 100755 index 000000000..b39e87560 --- /dev/null +++ b/src/op_mode/force_root-partition-auto-resize.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +# ROOT_PART_DEV – root partition device path +# ROOT_PART_NAME – root partition device name +# ROOT_DEV_NAME – disk device name +# ROOT_DEV – disk device path +# ROOT_PART_NUM – number of root partition on disk +# ROOT_DEV_SIZE – disk total size in 512 bytes sectors +# ROOT_PART_SIZE – root partition total size in 512 bytes sectors +# ROOT_PART_START – number of 512 bytes sector where root partition starts +# AVAILABLE_EXTENSION_SIZE – calculation available disk space after root partition in 512 bytes sectors +ROOT_PART_DEV=$(findmnt /usr/lib/live/mount/persistence -o source -n) +ROOT_PART_NAME=$(echo "$ROOT_PART_DEV" | cut -d "/" -f 3) +ROOT_DEV_NAME=$(echo /sys/block/*/"${ROOT_PART_NAME}" | cut -d "/" -f 4) +ROOT_DEV="/dev/${ROOT_DEV_NAME}" +ROOT_PART_NUM=$(cat "/sys/block/${ROOT_DEV_NAME}/${ROOT_PART_NAME}/partition") +ROOT_DEV_SIZE=$(cat "/sys/block/${ROOT_DEV_NAME}/size") +ROOT_PART_SIZE=$(cat "/sys/block/${ROOT_DEV_NAME}/${ROOT_PART_NAME}/size") +ROOT_PART_START=$(cat "/sys/block/${ROOT_DEV_NAME}/${ROOT_PART_NAME}/start") +AVAILABLE_EXTENSION_SIZE=$((ROOT_DEV_SIZE - ROOT_PART_START - ROOT_PART_SIZE - 8)) + +# +# Check if device have space for root partition growing up. +# +if [ $AVAILABLE_EXTENSION_SIZE -lt 1 ]; then + echo "There is no available space for root partition extension" + exit 0; +fi + +# +# Resize the partition and grow the filesystem. +# +# "print" and "Fix" directives were added to fix GPT table if it corrupted after virtual drive extension. +# If GPT table is corrupted we'll get Fix/Ignore dialogue after "print" command. +# "Fix" will be the answer for this dialogue. +# If GPT table is fine and no auto-fix dialogue appeared the directive "Fix" simply will print parted utility help info. 
+parted -m ${ROOT_DEV} ---pretend-input-tty > /dev/null 2>&1 <<EOF +print +Fix +resizepart +${ROOT_PART_NUM} +Yes +100% +EOF +partprobe > /dev/null 2>&1 +resize2fs ${ROOT_PART_DEV} > /dev/null 2>&1 diff --git a/src/op_mode/format_disk.py b/src/op_mode/format_disk.py index df4486bce..b3ba44e87 100755 --- a/src/op_mode/format_disk.py +++ b/src/op_mode/format_disk.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019 VyOS maintainers and contributors +# Copyright (C) 2019-2021 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -17,11 +17,10 @@ import argparse import os import re -import sys + from datetime import datetime -from time import sleep -from vyos.util import is_admin, ask_yes_no +from vyos.util import ask_yes_no from vyos.util import call from vyos.util import cmd from vyos.util import DEVNULL @@ -38,16 +37,17 @@ def list_disks(): def is_busy(disk: str): """Check if given disk device is busy by re-reading it's partition table""" - return call(f'sudo blockdev --rereadpt /dev/{disk}', stderr=DEVNULL) != 0 + return call(f'blockdev --rereadpt /dev/{disk}', stderr=DEVNULL) != 0 def backup_partitions(disk: str): """Save sfdisk partitions output to a backup file""" - device_path = '/dev/' + disk - backup_ts = datetime.now().strftime('%Y-%m-%d-%H:%M') - backup_file = '/var/tmp/backup_{}.{}'.format(disk, backup_ts) - cmd(f'sudo /sbin/sfdisk -d {device_path} > {backup_file}') + device_path = f'/dev/{disk}' + backup_ts = datetime.now().strftime('%Y%m%d-%H%M') + backup_file = f'/var/tmp/backup_{disk}.{backup_ts}' + call(f'sfdisk -d {device_path} > {backup_file}') + print(f'Partition table backup saved to {backup_file}') def list_partitions(disk: str): @@ -65,11 +65,11 @@ def list_partitions(disk: str): def delete_partition(disk: str, partition_idx: int): - cmd(f'sudo /sbin/parted /dev/{disk} rm {partition_idx}') + cmd(f'parted /dev/{disk} rm {partition_idx}') def format_disk_like(target: str, proto: str): - cmd(f'sudo /sbin/sfdisk -d /dev/{proto} | sudo /sbin/sfdisk --force /dev/{target}') + cmd(f'sfdisk -d /dev/{proto} | sfdisk --force /dev/{target}') if __name__ == '__main__': @@ -79,10 +79,6 @@ if __name__ == '__main__': group.add_argument('-p', '--proto', type=str, required=True, help='Prototype device to use as reference') args = parser.parse_args() - if not is_admin(): - print('Must be admin or root to format disk') - sys.exit(1) - target_disk = args.target eligible_target_disks = list_disks() @@ -90,54 +86,48 @@ if __name__ == '__main__': eligible_proto_disks = eligible_target_disks.copy() eligible_proto_disks.remove(target_disk) - fmt = { - 'target_disk': target_disk, - 'proto_disk': proto_disk, - } - if proto_disk == target_disk: print('The two disk drives must be different.') - sys.exit(1) + exit(1) - if not os.path.exists('/dev/' + proto_disk): - print('Device /dev/{proto_disk} does not exist'.format_map(fmt)) - sys.exit(1) + if not os.path.exists(f'/dev/{proto_disk}'): + print(f'Device /dev/{proto_disk} does not exist') + exit(1) if not os.path.exists('/dev/' + target_disk): - print('Device /dev/{target_disk} does not exist'.format_map(fmt)) - sys.exit(1) + print(f'Device /dev/{target_disk} does not exist') + exit(1) if target_disk not in eligible_target_disks: - print('Device {target_disk} can not be formatted'.format_map(fmt)) - sys.exit(1) + print(f'Device {target_disk} can not be formatted') + exit(1) if proto_disk not in 
eligible_proto_disks: - print('Device {proto_disk} can not be used as a prototype for {target_disk}'.format_map(fmt)) - sys.exit(1) + print(f'Device {proto_disk} can not be used as a prototype for {target_disk}') + exit(1) if is_busy(target_disk): - print("Disk device {target_disk} is busy. Can't format it now".format_map(fmt)) - sys.exit(1) + print(f'Disk device {target_disk} is busy, unable to format') + exit(1) - print('This will re-format disk {target_disk} so that it has the same disk\n' - 'partion sizes and offsets as {proto_disk}. This will not copy\n' - 'data from {proto_disk} to {target_disk}. But this will erase all\n' - 'data on {target_disk}.\n'.format_map(fmt)) + print(f'\nThis will re-format disk {target_disk} so that it has the same disk' + f'\npartion sizes and offsets as {proto_disk}. This will not copy' + f'\ndata from {proto_disk} to {target_disk}. But this will erase all' + f'\ndata on {target_disk}.\n') - if not ask_yes_no("Do you wish to proceed?"): - print('OK. Disk drive {target_disk} will not be re-formated'.format_map(fmt)) - sys.exit(0) + if not ask_yes_no('Do you wish to proceed?'): + print(f'Disk drive {target_disk} will not be re-formated') + exit(0) - print('OK. Re-formating disk drive {target_disk}...'.format_map(fmt)) + print(f'Re-formating disk drive {target_disk}...') print('Making backup copy of partitions...') backup_partitions(target_disk) - sleep(1) print('Deleting old partitions...') for p in list_partitions(target_disk): delete_partition(disk=target_disk, partition_idx=p) - print('Creating new partitions on {target_disk} based on {proto_disk}...'.format_map(fmt)) + print(f'Creating new partitions on {target_disk} based on {proto_disk}...') format_disk_like(target=target_disk, proto=proto_disk) - print('Done.') + print('Done!') diff --git a/src/op_mode/generate_ipsec_debug_archive.sh b/src/op_mode/generate_ipsec_debug_archive.sh new file mode 100755 index 000000000..53d0a6eaa --- /dev/null +++ b/src/op_mode/generate_ipsec_debug_archive.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +# Collecting IPSec Debug Information + +DATE=`date +%d-%m-%Y` + +a_CMD=( + "sudo ipsec status" + "sudo swanctl -L" + "sudo swanctl -l" + "sudo swanctl -P" + "sudo ip x sa show" + "sudo ip x policy show" + "sudo ip tunnel show" + "sudo ip address" + "sudo ip rule show" + "sudo ip route" + "sudo ip route show table 220" + ) + + +echo "DEBUG: ${DATE} on host \"$(hostname)\"" > /tmp/ipsec-status-${DATE}.txt +date >> /tmp/ipsec-status-${DATE}.txt + +# Execute all DEBUG commands and save it to file +for cmd in "${a_CMD[@]}"; do + echo -e "\n### ${cmd} ###" >> /tmp/ipsec-status-${DATE}.txt + ${cmd} >> /tmp/ipsec-status-${DATE}.txt 2>/dev/null +done + +# Collect charon logs, build .tgz archive +sudo journalctl /usr/lib/ipsec/charon > /tmp/journalctl-charon-${DATE}.txt && \ +sudo tar -zcvf /tmp/ipsec-debug-${DATE}.tgz /tmp/journalctl-charon-${DATE}.txt /tmp/ipsec-status-${DATE}.txt >& /dev/null +sudo rm -f /tmp/journalctl-charon-${DATE}.txt /tmp/ipsec-status-${DATE}.txt + +echo "Debug file is generated and located in /tmp/ipsec-debug-${DATE}.tgz" diff --git a/src/op_mode/lldp_op.py b/src/op_mode/lldp_op.py index 731e71891..b9ebc991a 100755 --- a/src/op_mode/lldp_op.py +++ b/src/op_mode/lldp_op.py @@ -55,6 +55,9 @@ def parse_data(data, interface): if interface is not None and local_if != interface: continue for chassis, c_value in values.get('chassis', {}).items(): + # bail out early if no capabilities found + if 'capability' not in c_value: + continue capabilities = 
c_value['capability'] if isinstance(capabilities, dict): capabilities = [capabilities] diff --git a/src/op_mode/pki.py b/src/op_mode/pki.py index e1428c581..bc7813052 100755 --- a/src/op_mode/pki.py +++ b/src/op_mode/pki.py @@ -35,6 +35,7 @@ from vyos.pki import verify_certificate from vyos.xml import defaults from vyos.util import ask_input, ask_yes_no from vyos.util import cmd +from vyos.util import install_into_config CERT_REQ_END = '-----END CERTIFICATE REQUEST-----' auth_dir = '/config/auth' @@ -142,48 +143,50 @@ def get_revoked_by_serial_numbers(serial_numbers=[]): return certs_out def install_certificate(name, cert='', private_key=None, key_type=None, key_passphrase=None, is_ca=False): - # Show conf commands for installing certificate + # Show/install conf commands for certificate prefix = 'ca' if is_ca else 'certificate' - print('Configure mode commands to install:') - base = f"set pki {prefix} {name}" + base = f"pki {prefix} {name}" + config_paths = [] if cert: cert_pem = "".join(encode_certificate(cert).strip().split("\n")[1:-1]) - print(f"{base} certificate '{cert_pem}'") + config_paths.append(f"{base} certificate '{cert_pem}'") if private_key: key_pem = "".join(encode_private_key(private_key, passphrase=key_passphrase).strip().split("\n")[1:-1]) - print(f"{base} private key '{key_pem}'") + config_paths.append(f"{base} private key '{key_pem}'") if key_passphrase: - print(f"{base} private password-protected") + config_paths.append(f"{base} private password-protected") + + install_into_config(conf, config_paths) def install_crl(ca_name, crl): - # Show conf commands for installing crl - print("Configure mode commands to install CRL:") + # Show/install conf commands for crl crl_pem = "".join(encode_certificate(crl).strip().split("\n")[1:-1]) - print(f"set pki ca {ca_name} crl '{crl_pem}'") + install_into_config(conf, [f"pki ca {ca_name} crl '{crl_pem}'"]) def install_dh_parameters(name, params): - # Show conf commands for installing dh params - print("Configure mode commands to install DH parameters:") + # Show/install conf commands for dh params dh_pem = "".join(encode_dh_parameters(params).strip().split("\n")[1:-1]) - print(f"set pki dh {name} parameters '{dh_pem}'") + install_into_config(conf, [f"pki dh {name} parameters '{dh_pem}'"]) def install_ssh_key(name, public_key, private_key, passphrase=None): - # Show conf commands for installing ssh key + # Show/install conf commands for ssh key key_openssh = encode_public_key(public_key, encoding='OpenSSH', key_format='OpenSSH') username = os.getlogin() type_key_split = key_openssh.split(" ") - base = f"set system login user {username} authentication public-keys {name}" - print("Configure mode commands to install SSH key:") - print(f"{base} key '{type_key_split[1]}'") - print(f"{base} type '{type_key_split[0]}'", end="\n\n") + base = f"system login user {username} authentication public-keys {name}" + install_into_config(conf, [ + f"{base} key '{type_key_split[1]}'", + f"{base} type '{type_key_split[0]}'" + ]) print(encode_private_key(private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase)) def install_keypair(name, key_type, private_key=None, public_key=None, passphrase=None): - # Show conf commands for installing key-pair - print("Configure mode commands to install key pair:") + # Show/install conf commands for key-pair + + config_paths = [] if public_key: install_public_key = ask_yes_no('Do you want to install the public key?', default=True) @@ -191,7 +194,7 @@ def install_keypair(name, key_type, private_key=None, 
public_key=None, passphras if install_public_key: install_public_pem = "".join(public_key_pem.strip().split("\n")[1:-1]) - print(f"set pki key-pair {name} public key '{install_public_pem}'") + config_paths.append(f"pki key-pair {name} public key '{install_public_pem}'") else: print("Public key:") print(public_key_pem) @@ -202,13 +205,15 @@ def install_keypair(name, key_type, private_key=None, public_key=None, passphras if install_private_key: install_private_pem = "".join(private_key_pem.strip().split("\n")[1:-1]) - print(f"set pki key-pair {name} private key '{install_private_pem}'") + config_paths.append(f"pki key-pair {name} private key '{install_private_pem}'") if passphrase: - print(f"set pki key-pair {name} private password-protected") + config_paths.append(f"pki key-pair {name} private password-protected") else: print("Private key:") print(private_key_pem) + install_into_config(conf, config_paths) + def install_wireguard_key(interface, private_key, public_key): # Show conf commands for installing wireguard key pairs from vyos.ifconfig import Section @@ -217,20 +222,10 @@ def install_wireguard_key(interface, private_key, public_key): exit(1) # Check if we are running in a config session - if yes, we can directly write to the CLI - cli_string = f"interfaces wireguard {interface} private-key '{private_key}'" - if Config().in_session(): - cmd(f"/opt/vyatta/sbin/my_set {cli_string}") - - print('"generate" CLI command executed from config session.\nGenerated private-key was imported to CLI!',end='\n\n') - print(f'Use the following command to verify: show interfaces wireguard {interface}') - else: - print('"generate" CLI command executed from operational level.\n' - 'Generated private-key is not stored to CLI, use configure mode commands to install key:', end='\n\n') - print(f"set {cli_string}", end="\n\n") + install_into_config(conf, [f"interfaces wireguard {interface} private-key '{private_key}'"]) print(f"Corresponding public-key to use on peer system is: '{public_key}'") - def install_wireguard_psk(interface, peer, psk): from vyos.ifconfig import Section if Section.section(interface) != 'wireguard': @@ -238,17 +233,7 @@ def install_wireguard_psk(interface, peer, psk): exit(1) # Check if we are running in a config session - if yes, we can directly write to the CLI - cli_string = f"interfaces wireguard {interface} peer {peer} preshared-key '{psk}'" - if Config().in_session(): - cmd(f"/opt/vyatta/sbin/my_set {cli_string}") - - print('"generate" CLI command executed from config session.\nGenerated preshared-key was imported to CLI!',end='\n\n') - print(f'Use the following command to verify: show interfaces wireguard {interface}') - else: - print('"generate" CLI command executed from operational level.\n' - 'Generated preshared-key is not stored to CLI, use configure mode commands to install key:', end='\n\n') - print(f"set {cli_string}", end="\n\n") - + install_into_config(conf, [f"interfaces wireguard {interface} peer {peer} preshared-key '{psk}'"]) def ask_passphrase(): passphrase = None @@ -858,8 +843,18 @@ if __name__ == '__main__': elif args.action == 'show': if args.ca: - show_certificate_authority(None if args.ca == 'all' else args.ca) + ca_name = None if args.ca == 'all' else args.ca + if ca_name: + if not conf.exists(['pki', 'ca', ca_name]): + print(f'CA "{ca_name}" does not exist!') + exit(1) + show_certificate_authority(ca_name) elif args.certificate: + cert_name = None if args.certificate == 'all' else args.certificate + if cert_name: + if not conf.exists(['pki', 'certificate', 
cert_name]): + print(f'Certificate "{cert_name}" does not exist!') + exit(1) show_certificate(None if args.certificate == 'all' else args.certificate) elif args.crl: show_crl(None if args.crl == 'all' else args.crl) diff --git a/src/op_mode/policy_route.py b/src/op_mode/policy_route.py new file mode 100755 index 000000000..e0b4ac514 --- /dev/null +++ b/src/op_mode/policy_route.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import argparse +import re +import tabulate + +from vyos.config import Config +from vyos.util import cmd +from vyos.util import dict_search_args + +def get_policy_interfaces(conf, policy, name=None, ipv6=False): + interfaces = conf.get_config_dict(['interfaces'], key_mangling=('-', '_'), + get_first_key=True, no_tag_node_value_mangle=True) + + routes = ['route', 'ipv6_route'] + + def parse_if(ifname, if_conf): + if 'policy' in if_conf: + for route in routes: + if route in if_conf['policy']: + route_name = if_conf['policy'][route] + name_str = f'({ifname},{route})' + + if not name: + policy[route][route_name]['interface'].append(name_str) + elif not ipv6 and name == route_name: + policy['interface'].append(name_str) + + for iftype in ['vif', 'vif_s', 'vif_c']: + if iftype in if_conf: + for vifname, vif_conf in if_conf[iftype].items(): + parse_if(f'{ifname}.{vifname}', vif_conf) + + for iftype, iftype_conf in interfaces.items(): + for ifname, if_conf in iftype_conf.items(): + parse_if(ifname, if_conf) + +def get_config_policy(conf, name=None, ipv6=False, interfaces=True): + config_path = ['policy'] + if name: + config_path += ['ipv6-route' if ipv6 else 'route', name] + + policy = conf.get_config_dict(config_path, key_mangling=('-', '_'), + get_first_key=True, no_tag_node_value_mangle=True) + if policy and interfaces: + if name: + policy['interface'] = [] + else: + if 'route' in policy: + for route_name, route_conf in policy['route'].items(): + route_conf['interface'] = [] + + if 'ipv6_route' in policy: + for route_name, route_conf in policy['ipv6_route'].items(): + route_conf['interface'] = [] + + get_policy_interfaces(conf, policy, name, ipv6) + + return policy + +def get_nftables_details(name, ipv6=False): + suffix = '6' if ipv6 else '' + command = f'sudo nft list chain ip{suffix} mangle VYOS_PBR{suffix}_{name}' + try: + results = cmd(command) + except: + return {} + + out = {} + for line in results.split('\n'): + comment_search = re.search(rf'{name}[\- ](\d+|default-action)', line) + if not comment_search: + continue + + rule = {} + rule_id = comment_search[1] + counter_search = re.search(r'counter packets (\d+) bytes (\d+)', line) + if counter_search: + rule['packets'] = counter_search[1] + rule['bytes'] = counter_search[2] + + rule['conditions'] = re.sub(r'(\b(counter packets \d+ bytes \d+|drop|reject|return|log)\b|comment "[\w\-]+")', '', line).strip() + out[rule_id] = rule + return out + +def output_policy_route(name, 
route_conf, ipv6=False, single_rule_id=None): + ip_str = 'IPv6' if ipv6 else 'IPv4' + print(f'\n---------------------------------\n{ip_str} Policy Route "{name}"\n') + + if route_conf['interface']: + print('Active on: {0}\n'.format(" ".join(route_conf['interface']))) + + details = get_nftables_details(name, ipv6) + rows = [] + + if 'rule' in route_conf: + for rule_id, rule_conf in route_conf['rule'].items(): + if single_rule_id and rule_id != single_rule_id: + continue + + if 'disable' in rule_conf: + continue + + action = rule_conf['action'] if 'action' in rule_conf else 'set' + protocol = rule_conf['protocol'] if 'protocol' in rule_conf else 'all' + + row = [rule_id, action, protocol] + if rule_id in details: + rule_details = details[rule_id] + row.append(rule_details.get('packets', 0)) + row.append(rule_details.get('bytes', 0)) + row.append(rule_details['conditions']) + rows.append(row) + + if 'default_action' in route_conf and not single_rule_id: + row = ['default', route_conf['default_action'], 'all'] + if 'default-action' in details: + rule_details = details['default-action'] + row.append(rule_details.get('packets', 0)) + row.append(rule_details.get('bytes', 0)) + rows.append(row) + + if rows: + header = ['Rule', 'Action', 'Protocol', 'Packets', 'Bytes', 'Conditions'] + print(tabulate.tabulate(rows, header) + '\n') + +def show_policy(ipv6=False): + print('Ruleset Information') + + conf = Config() + policy = get_config_policy(conf) + + if not policy: + return + + if not ipv6 and 'route' in policy: + for route, route_conf in policy['route'].items(): + output_policy_route(route, route_conf, ipv6=False) + + if ipv6 and 'ipv6_route' in policy: + for route, route_conf in policy['ipv6_route'].items(): + output_policy_route(route, route_conf, ipv6=True) + +def show_policy_name(name, ipv6=False): + print('Ruleset Information') + + conf = Config() + policy = get_config_policy(conf, name, ipv6) + if policy: + output_policy_route(name, policy, ipv6) + +def show_policy_rule(name, rule_id, ipv6=False): + print('Rule Information') + + conf = Config() + policy = get_config_policy(conf, name, ipv6) + if policy: + output_policy_route(name, policy, ipv6, rule_id) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--action', help='Action', required=False) + parser.add_argument('--name', help='Policy name', required=False, action='store', nargs='?', default='') + parser.add_argument('--rule', help='Policy Rule ID', required=False) + parser.add_argument('--ipv6', help='IPv6 toggle', action='store_true') + + args = parser.parse_args() + + if args.action == 'show': + if not args.rule: + show_policy_name(args.name, args.ipv6) + else: + show_policy_rule(args.name, args.rule, args.ipv6) + elif args.action == 'show_all': + show_policy(args.ipv6) diff --git a/src/op_mode/powerctrl.py b/src/op_mode/powerctrl.py index f8b5a3dda..679b03c0b 100755 --- a/src/op_mode/powerctrl.py +++ b/src/op_mode/powerctrl.py @@ -92,37 +92,40 @@ def cancel_shutdown(): try: run('/sbin/shutdown -c --no-wall') except OSError as e: - exit("Could not cancel a reboot or poweroff: %s" % e) + exit(f'Could not cancel a reboot or poweroff: {e}') - message = 'Scheduled {} has been cancelled {}'.format(output['MODE'], timenow) + mode = output['MODE'] + message = f'Scheduled {mode} has been cancelled {timenow}' run(f'wall {message} > /dev/null 2>&1') else: print("Reboot or poweroff is not scheduled") def execute_shutdown(time, reboot=True, ask=True): + action = "reboot" if reboot else "poweroff" if not ask: 
- action = "reboot" if reboot else "poweroff" - if not ask_yes_no("Are you sure you want to %s this system?" % action): + if not ask_yes_no(f"Are you sure you want to {action} this system?"): exit(0) - - action = "-r" if reboot else "-P" + action_cmd = "-r" if reboot else "-P" if len(time) == 0: # T870 legacy reboot job support chk_vyatta_based_reboots() ### - out = cmd(f'/sbin/shutdown {action} now', stderr=STDOUT) + out = cmd(f'/sbin/shutdown {action_cmd} now', stderr=STDOUT) print(out.split(",", 1)[0]) return elif len(time) == 1: # Assume the argument is just time ts = parse_time(time[0]) if ts: - cmd(f'/sbin/shutdown {action} {time[0]}', stderr=STDOUT) + cmd(f'/sbin/shutdown {action_cmd} {time[0]}', stderr=STDOUT) + # Inform all other logged in users about the reboot/shutdown + wall_msg = f'System {action} is scheduled {time[0]}' + cmd(f'/usr/bin/wall "{wall_msg}"') else: - exit("Invalid time \"{0}\". The valid format is HH:MM".format(time[0])) + exit(f'Invalid time "{time[0]}". The valid format is HH:MM') elif len(time) == 2: # Assume it's date and time ts = parse_time(time[0]) @@ -131,14 +134,18 @@ def execute_shutdown(time, reboot=True, ask=True): t = datetime.combine(ds, ts) td = t - datetime.now() t2 = 1 + int(td.total_seconds())//60 # Get total minutes - cmd('/sbin/shutdown {action} {t2}', stderr=STDOUT) + + cmd(f'/sbin/shutdown {action_cmd} {t2}', stderr=STDOUT) + # Inform all other logged in users about the reboot/shutdown + wall_msg = f'System {action} is scheduled {time[1]} {time[0]}' + cmd(f'/usr/bin/wall "{wall_msg}"') else: if not ts: - exit("Invalid time \"{0}\". The valid format is HH:MM".format(time[0])) + exit(f'Invalid time "{time[0]}". The valid format is HH:MM') else: - exit("Invalid time \"{0}\". A valid format is YYYY-MM-DD [HH:MM]".format(time[1])) + exit(f'Invalid date "{time[1]}". A valid format is YYYY-MM-DD [HH:MM]') else: - exit("Could not decode date and time. Valids formats are HH:MM or YYYY-MM-DD HH:MM") + exit('Could not decode date and time. 
Valids formats are HH:MM or YYYY-MM-DD HH:MM') check_shutdown() diff --git a/src/op_mode/ppp-server-ctrl.py b/src/op_mode/ppp-server-ctrl.py index 670cdf879..e93963fdd 100755 --- a/src/op_mode/ppp-server-ctrl.py +++ b/src/op_mode/ppp-server-ctrl.py @@ -60,7 +60,7 @@ def main(): output, err = popen(cmd_dict['cmd_base'].format(cmd_dict['vpn_types'][args.proto]) + args.action + ses_pattern, stderr=DEVNULL, decode='utf-8') if not err: try: - print(output) + print(f' {output}') except: sys.exit(0) else: diff --git a/src/op_mode/restart_frr.py b/src/op_mode/restart_frr.py index 109c8dd7b..e5014452f 100755 --- a/src/op_mode/restart_frr.py +++ b/src/op_mode/restart_frr.py @@ -138,7 +138,7 @@ def _reload_config(daemon): # define program arguments cmd_args_parser = argparse.ArgumentParser(description='restart frr daemons') cmd_args_parser.add_argument('--action', choices=['restart'], required=True, help='action to frr daemons') -cmd_args_parser.add_argument('--daemon', choices=['bfdd', 'bgpd', 'ospfd', 'ospf6d', 'isisd', 'ripd', 'ripngd', 'staticd', 'zebra'], required=False, nargs='*', help='select single or multiple daemons') +cmd_args_parser.add_argument('--daemon', choices=['bfdd', 'bgpd', 'ldpd', 'ospfd', 'ospf6d', 'isisd', 'ripd', 'ripngd', 'staticd', 'zebra'], required=False, nargs='*', help='select single or multiple daemons') # parse arguments cmd_args = cmd_args_parser.parse_args() diff --git a/src/op_mode/show_configuration_json.py b/src/op_mode/show_configuration_json.py new file mode 100755 index 000000000..fdece533b --- /dev/null +++ b/src/op_mode/show_configuration_json.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import argparse +import json + +from vyos.configquery import ConfigTreeQuery + + +config = ConfigTreeQuery() +c = config.get_config_dict() + +parser = argparse.ArgumentParser() +parser.add_argument("-p", "--pretty", action="store_true", help="Show pretty configuration in JSON format") + + +if __name__ == '__main__': + args = parser.parse_args() + + if args.pretty: + print(json.dumps(c, indent=4)) + else: + print(json.dumps(c)) diff --git a/src/op_mode/show_interfaces.py b/src/op_mode/show_interfaces.py index 3d50eb938..eac068274 100755 --- a/src/op_mode/show_interfaces.py +++ b/src/op_mode/show_interfaces.py @@ -94,10 +94,8 @@ def split_text(text, used=0): used: number of characted already used in the screen """ no_tty = call('tty -s') - if no_tty: - return text.split() - returned = cmd('stty size') + returned = cmd('stty size') if not no_tty else '' if len(returned) == 2: rows, columns = [int(_) for _ in returned] else: diff --git a/src/op_mode/show_ipsec_sa.py b/src/op_mode/show_ipsec_sa.py index c964caaeb..e72f0f965 100755 --- a/src/op_mode/show_ipsec_sa.py +++ b/src/op_mode/show_ipsec_sa.py @@ -46,7 +46,6 @@ def format_output(conns, sas): if parent_sa["state"] == b"ESTABLISHED" and installed_sas: state = "up" - uptime = vyos.util.seconds_to_human(parent_sa["established"].decode()) remote_host = parent_sa["remote-host"].decode() remote_id = parent_sa["remote-id"].decode() @@ -75,6 +74,8 @@ def format_output(conns, sas): # Remove B from <1K values pkts_str = re.sub(r'B', r'', pkts_str) + uptime = vyos.util.seconds_to_human(isa['install-time'].decode()) + enc = isa["encr-alg"].decode() if "encr-keysize" in isa: key_size = isa["encr-keysize"].decode() diff --git a/src/op_mode/show_nat_rules.py b/src/op_mode/show_nat_rules.py index d68def26a..98adb31dd 100755 --- a/src/op_mode/show_nat_rules.py +++ b/src/op_mode/show_nat_rules.py @@ -32,7 +32,7 @@ args = parser.parse_args() if args.source or args.destination: tmp = cmd('sudo nft -j list table ip nat') tmp = json.loads(tmp) - + format_nat_rule = '{0: <10} {1: <50} {2: <50} {3: <10}' print(format_nat_rule.format("Rule", "Source" if args.source else "Destination", "Translation", "Outbound Interface" if args.source else "Inbound Interface")) print(format_nat_rule.format("----", "------" if args.source else "-----------", "-----------", "------------------" if args.source else "-----------------")) @@ -40,7 +40,7 @@ if args.source or args.destination: data_json = jmespath.search('nftables[?rule].rule[?chain]', tmp) for idx in range(0, len(data_json)): data = data_json[idx] - + # The following key values must exist # When the rule JSON does not have some keys, this is not a rule we can work with continue_rule = False @@ -50,9 +50,9 @@ if args.source or args.destination: continue if continue_rule: continue - + comment = data['comment'] - + # Check the annotation to see if the annotation format is created by VYOS continue_rule = True for comment_prefix in ['SRC-NAT-', 'DST-NAT-']: @@ -60,7 +60,7 @@ if args.source or args.destination: continue_rule = False if continue_rule: continue - + rule = int(''.join(list(filter(str.isdigit, comment)))) chain = data['chain'] if not ((args.source and chain == 'POSTROUTING') or (not args.source and chain == 'PREROUTING')): @@ -88,7 +88,7 @@ if args.source or args.destination: else: port_range = srcdest_json['set'][0]['range'] srcdest += 'port ' + str(port_range[0]) + '-' + str(port_range[1]) + ' ' - + tran_addr_json = dict_search('snat' if args.source else 'dnat', data['expr'][i]) if tran_addr_json: if 
isinstance(tran_addr_json['addr'],str): @@ -98,10 +98,10 @@ if args.source or args.destination: len_tmp = dict_search('snat.addr.prefix.len' if args.source else 'dnat.addr.prefix.len', data['expr'][3]) if addr_tmp and len_tmp: tran_addr += addr_tmp + '/' + str(len_tmp) + ' ' - + if isinstance(tran_addr_json['port'],int): - tran_addr += 'port ' + tran_addr_json['port'] - + tran_addr += 'port ' + str(tran_addr_json['port']) + else: if 'masquerade' in data['expr'][i]: tran_addr = 'masquerade' @@ -112,10 +112,10 @@ if args.source or args.destination: srcdests.append(srcdest) srcdest = '' print(format_nat_rule.format(rule, srcdests[0], tran_addr, interface)) - + for i in range(1, len(srcdests)): print(format_nat_rule.format(' ', srcdests[i], ' ', ' ')) - + exit(0) else: parser.print_help() diff --git a/src/op_mode/show_openvpn_mfa.py b/src/op_mode/show_openvpn_mfa.py new file mode 100755 index 000000000..1ab54600c --- /dev/null +++ b/src/op_mode/show_openvpn_mfa.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 + +# Copyright 2017, 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. If not, see <http://www.gnu.org/licenses/>. + +import re +import socket +import urllib.parse +import argparse + +from vyos.util import popen + +otp_file = '/config/auth/openvpn/{interface}-otp-secrets' + +def get_mfa_secret(interface, client): + try: + with open(otp_file.format(interface=interface), "r") as f: + users = f.readlines() + for user in users: + if re.search('^' + client + ' ', user): + return user.split(':')[3] + except: + pass + +def get_mfa_uri(client, secret): + hostname = socket.gethostname() + fqdn = socket.getfqdn() + uri = 'otpauth://totp/{hostname}:{client}@{fqdn}?secret={secret}' + + return urllib.parse.quote(uri.format(hostname=hostname, client=client, fqdn=fqdn, secret=secret), safe='/:@?=') + +if __name__ == '__main__': + parser = argparse.ArgumentParser(add_help=False, description='Show two-factor authentication information') + parser.add_argument('--intf', action="store", type=str, default='', help='only show the specified interface') + parser.add_argument('--user', action="store", type=str, default='', help='only show the specified users') + parser.add_argument('--action', action="store", type=str, default='show', help='action to perform') + + args = parser.parse_args() + secret = get_mfa_secret(args.intf, args.user) + + if args.action == "secret" and secret: + print(secret) + + if args.action == "uri" and secret: + uri = get_mfa_uri(args.user, secret) + print(uri) + + if args.action == "qrcode" and secret: + uri = get_mfa_uri(args.user, secret) + qrcode,err = popen('qrencode -t ansiutf8', input=uri) + print(qrcode) + diff --git a/src/op_mode/show_ram.py b/src/op_mode/show_ram.py new file mode 100755 index 000000000..5818ec132 --- /dev/null +++ b/src/op_mode/show_ram.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# 
+# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +def get_system_memory(): + from re import search as re_search + + def find_value(keyword, mem_data): + regex = keyword + ':\s+(\d+)' + res = re_search(regex, mem_data).group(1) + return int(res) + + with open("/proc/meminfo", "r") as f: + mem_data = f.read() + + total = find_value('MemTotal', mem_data) + available = find_value('MemAvailable', mem_data) + buffers = find_value('Buffers', mem_data) + cached = find_value('Cached', mem_data) + + used = total - available + + res = { + "total": total, + "free": available, + "used": used, + "buffers": buffers, + "cached": cached + } + + return res + +def get_system_memory_human(): + from vyos.util import bytes_to_human + + mem = get_system_memory() + + for key in mem: + # The Linux kernel exposes memory values in kilobytes, + # so we need to normalize them + mem[key] = bytes_to_human(mem[key], initial_exponent=10) + + return mem + +if __name__ == '__main__': + mem = get_system_memory_human() + + print("Total: {}".format(mem["total"])) + print("Free: {}".format(mem["free"])) + print("Used: {}".format(mem["used"])) + diff --git a/src/op_mode/show_ram.sh b/src/op_mode/show_ram.sh deleted file mode 100755 index b013e16f8..000000000 --- a/src/op_mode/show_ram.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# -# Module: vyos-show-ram.sh -# Displays memory usage information in minimalistic format -# -# Copyright (C) 2019 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
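
A quick worked example of the kilobyte normalization used in show_ram.py above (illustration only; the MemTotal figure is hypothetical and the exact formatting produced by vyos.util.bytes_to_human may differ):

# /proc/meminfo reports values in kB, i.e. units of 2**10 bytes, which is
# why show_ram.py passes initial_exponent=10 to bytes_to_human().
mem_total_kb = 4039276                  # hypothetical MemTotal value in kB
mem_total_bytes = mem_total_kb * 2**10  # what initial_exponent=10 accounts for
print(round(mem_total_bytes / 2**30, 2), 'GiB')  # -> 3.85 GiB
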
- -MB_DIVISOR=1024 - -TOTAL=$(cat /proc/meminfo | grep -E "^MemTotal:" | awk -F ' ' '{print $2}') -FREE=$(cat /proc/meminfo | grep -E "^MemFree:" | awk -F ' ' '{print $2}') -BUFFERS=$(cat /proc/meminfo | grep -E "^Buffers:" | awk -F ' ' '{print $2}') -CACHED=$(cat /proc/meminfo | grep -E "^Cached:" | awk -F ' ' '{print $2}') - -DISPLAY_FREE=$(( ($FREE + $BUFFERS + $CACHED) / $MB_DIVISOR )) -DISPLAY_TOTAL=$(( $TOTAL / $MB_DIVISOR )) -DISPLAY_USED=$(( $DISPLAY_TOTAL - $DISPLAY_FREE )) - -echo "Total: $DISPLAY_TOTAL" -echo "Free: $DISPLAY_FREE" -echo "Used: $DISPLAY_USED" diff --git a/src/op_mode/show_uptime.py b/src/op_mode/show_uptime.py new file mode 100755 index 000000000..c3dea52e6 --- /dev/null +++ b/src/op_mode/show_uptime.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +def get_uptime_seconds(): + from re import search + from vyos.util import read_file + + data = read_file("/proc/uptime") + seconds = search("([0-9\.]+)\s", data).group(1) + + return int(float(seconds)) + +def get_load_averages(): + from re import search + from vyos.util import cmd + + data = cmd("uptime") + matches = search(r"load average:\s*(?P<one>[0-9\.]+)\s*,\s*(?P<five>[0-9\.]+)\s*,\s*(?P<fifteen>[0-9\.]+)\s*", data) + + res = {} + res[1] = float(matches["one"]) + res[5] = float(matches["five"]) + res[15] = float(matches["fifteen"]) + + return res + +if __name__ == '__main__': + from vyos.util import seconds_to_human + + print("Uptime: {}\n".format(seconds_to_human(get_uptime_seconds()))) + + avgs = get_load_averages() + + print("Load averages:") + print("1 minute: {:.02f}%".format(avgs[1]*100)) + print("5 minutes: {:.02f}%".format(avgs[5]*100)) + print("15 minutes: {:.02f}%".format(avgs[15]*100)) diff --git a/src/op_mode/vpn_ipsec.py b/src/op_mode/vpn_ipsec.py index 06e227ccf..40854fa8f 100755 --- a/src/op_mode/vpn_ipsec.py +++ b/src/op_mode/vpn_ipsec.py @@ -48,7 +48,7 @@ def reset_peer(peer, tunnel): result = True for conn in conns: try: - call(f'sudo /usr/sbin/ipsec down {conn}', timeout = 10) + call(f'sudo /usr/sbin/ipsec down {conn}{{*}}', timeout = 10) call(f'sudo /usr/sbin/ipsec up {conn}', timeout = 10) except TimeoutExpired as e: print(f'Timed out while resetting {conn}') diff --git a/src/op_mode/zone_policy.py b/src/op_mode/zone_policy.py new file mode 100755 index 000000000..7b43018c2 --- /dev/null +++ b/src/op_mode/zone_policy.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import argparse +import tabulate + +from vyos.config import Config +from vyos.util import dict_search_args + +def get_config_zone(conf, name=None): + config_path = ['zone-policy'] + if name: + config_path += ['zone', name] + + zone_policy = conf.get_config_dict(config_path, key_mangling=('-', '_'), + get_first_key=True, no_tag_node_value_mangle=True) + return zone_policy + +def output_zone_name(zone, zone_conf): + print(f'\n---------------------------------\nZone: "{zone}"\n') + + interfaces = ', '.join(zone_conf['interface']) if 'interface' in zone_conf else '' + if 'local_zone' in zone_conf: + interfaces = 'LOCAL' + + print(f'Interfaces: {interfaces}\n') + + header = ['From Zone', 'Firewall'] + rows = [] + + if 'from' in zone_conf: + for from_name, from_conf in zone_conf['from'].items(): + row = [from_name] + v4_name = dict_search_args(from_conf, 'firewall', 'name') + v6_name = dict_search_args(from_conf, 'firewall', 'ipv6_name') + + if v4_name: + rows.append(row + [v4_name]) + + if v6_name: + rows.append(row + [f'{v6_name} [IPv6]']) + + if rows: + print('From Zones:\n') + print(tabulate.tabulate(rows, header)) + +def show_zone_policy(zone): + conf = Config() + zone_policy = get_config_zone(conf, zone) + + if not zone_policy: + return + + if 'zone' in zone_policy: + for zone, zone_conf in zone_policy['zone'].items(): + output_zone_name(zone, zone_conf) + elif zone: + output_zone_name(zone, zone_policy) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--action', help='Action', required=False) + parser.add_argument('--name', help='Zone name', required=False, action='store', nargs='?', default='') + + args = parser.parse_args() + + if args.action == 'show': + show_zone_policy(args.name) diff --git a/src/services/api/graphql/README.graphql b/src/services/api/graphql/README.graphql index a04138010..1133d79ed 100644 --- a/src/services/api/graphql/README.graphql +++ b/src/services/api/graphql/README.graphql @@ -1,7 +1,12 @@ +The following examples are in the form as entered in the GraphQL +'playground', which is found at: + +https://{{ host_address }}/graphql + Example using GraphQL mutations to configure a DHCP server: -This assumes that the http-api is running: +All examples assume that the http-api is running: 'set service https api' @@ -10,7 +15,7 @@ to run with that address as default router by requesting these 'mutations' in the GraphQL playground: mutation { - createInterfaceEthernet (data: {interface: "eth1", + CreateInterfaceEthernet (data: {interface: "eth1", address: "192.168.0.1/24", description: "BOB"}) { success @@ -22,10 +27,10 @@ mutation { } mutation { - createDhcpServer(data: {sharedNetworkName: "BOB", + CreateDhcpServer(data: {sharedNetworkName: "BOB", subnet: "192.168.0.0/24", defaultRouter: "192.168.0.1", - dnsServer: "192.168.0.1", + nameServer: "192.168.0.1", domainName: "vyos.net", lease: 86400, range: 0, @@ -42,37 +47,133 @@ mutation { } } -The GraphQL playground will be found at: +To save the configuration, use the following mutation: -https://{{ host_address }}/graphql +mutation { + SaveConfigFile(data: {fileName: "/config/config.boot"}) { + success + errors + data { + fileName + } + } +} + +N.B. fileName can be empty (fileName: "") or data can be empty (data: {}) to +save to /config/config.boot; to save to an alternative path, specify +fileName. 
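
Outside the playground, the same mutations can also be posted from a script. Below is a minimal Python sketch of the SaveConfigFile request above, mirroring the curl example shown later in this README; the host address is a placeholder taken from that example, and certificate verification is disabled to match curl's -k flag.

#!/usr/bin/env python3
# Minimal sketch: send the SaveConfigFile mutation to the GraphQL endpoint.
# The host address is a placeholder; verify=False corresponds to curl -k.
import json
import requests

query = '''
mutation {
  SaveConfigFile(data: {fileName: "/config/config.boot"}) {
    success
    errors
    data { fileName }
  }
}
'''

resp = requests.post('https://192.168.100.168/graphql',
                     json={'query': query}, verify=False)
print(json.dumps(resp.json(), indent=2))
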
+ +Similarly, using an analogous 'endpoint' (meaning the form of the request +and resolver; the actual enpoint for all GraphQL requests is +https://hostname/graphql), one can load an arbitrary config file from a +path. + +mutation { + LoadConfigFile(data: {fileName: "/home/vyos/config.boot"}) { + success + errors + data { + fileName + } + } +} + +Op-mode 'show' commands may be requested by path, e.g.: + +query { + Show (data: {path: ["interfaces", "ethernet", "detail"]}) { + success + errors + data { + result + } + } +} + +N.B. to see the output the 'data' field 'result' must be present in the +request. + +Mutations to manipulate firewall address groups: + +mutation { + CreateFirewallAddressGroup (data: {name: "ADDR-GRP", address: "10.0.0.1"}) { + success + errors + } +} + +mutation { + UpdateFirewallAddressGroupMembers (data: {name: "ADDR-GRP", + address: ["10.0.0.1-10.0.0.8", "192.168.0.1"]}) { + success + errors + } +} + +mutation { + RemoveFirewallAddressGroupMembers (data: {name: "ADDR-GRP", + address: "192.168.0.1"}) { + success + errors + } +} -An equivalent curl command to the first example above would be: +N.B. The schema for the above specify that 'address' be of the form 'list of +strings' (SDL type [String!]! for UpdateFirewallAddressGroupMembers, where +the ! indicates that the input is required; SDL type [String] in +CreateFirewallAddressGroup, since a group may be created without any +addresses). However, notice that a single string may be passed without being +a member of a list, in which case the specification allows for 'input +coercion': + +http://spec.graphql.org/October2021/#sec-Scalars.Input-Coercion + +Similarly, IPv6 versions of the above: + +CreateFirewallAddressIpv6Group +UpdateFirewallAddressIpv6GroupMembers +RemoveFirewallAddressIpv6GroupMembers + + +Instead of using the GraphQL playground, an equivalent curl command to the +first example above would be: curl -k 'https://192.168.100.168/graphql' -H 'Content-Type: application/json' --data-binary '{"query": "mutation {createInterfaceEthernet (data: {interface: \"eth1\", address: \"192.168.0.1/24\", description: \"BOB\"}) {success errors data {address}}}"}' Note that the 'mutation' term is prefaced by 'query' in the curl command. +Curl equivalents may be read from within the GraphQL playground at the 'copy +curl' button. + What's here: services ├── api │  └── graphql +│  ├── bindings.py │  ├── graphql │  │  ├── directives.py │  │  ├── __init__.py │  │  ├── mutations.py │  │  └── schema +│  │  ├── config_file.graphql │  │  ├── dhcp_server.graphql +│  │  ├── firewall_group.graphql │  │  ├── interface_ethernet.graphql -│  │  └── schema.graphql +│  │  ├── schema.graphql +│  │  ├── show_config.graphql +│  │  └── show.graphql +│  ├── README.graphql │  ├── recipes -│  │  ├── dhcp_server.py │  │  ├── __init__.py -│  │  ├── interface_ethernet.py -│  │  ├── recipe.py +│  │  ├── remove_firewall_address_group_members.py +│  │  ├── session.py │  │  └── templates -│  │  ├── dhcp_server.tmpl -│  │  └── interface_ethernet.tmpl +│  │  ├── create_dhcp_server.tmpl +│  │  ├── create_firewall_address_group.tmpl +│  │  ├── create_interface_ethernet.tmpl +│  │  ├── remove_firewall_address_group_members.tmpl +│  │  └── update_firewall_address_group_members.tmpl │  └── state.py ├── vyos-configd ├── vyos-hostsd @@ -90,13 +191,14 @@ the Ur-data; the GraphQL schema is produced from those files, located in Resolvers for the schema Mutation fields are dynamically generated using a 'directive' added to the respective schema field. 
The directive, -'@generate', is handled by the class 'DataDirective' in -'api/graphql/graphql/directives.py', which calls the 'make_resolver' function in -'api/graphql/graphql/mutations.py'; the produced resolver calls the appropriate -wrapper in 'api/graphql/recipes', with base class doing the (overridable) -configuration steps of calling all defined 'set'/'delete' commands. - -Integrating the above with vyos-http-api-server is ~10 lines of code. +'@configure', is handled by the class 'ConfigureDirective' in +'api/graphql/graphql/directives.py', which calls the +'make_configure_resolver' function in 'api/graphql/graphql/mutations.py'; +the produced resolver calls the appropriate wrapper in +'api/graphql/recipes', with base class doing the (overridable) configuration +steps of calling all defined 'set'/'delete' commands. + +Integrating the above with vyos-http-api-server is 4 lines of code. What needs to be done: diff --git a/src/services/api/graphql/bindings.py b/src/services/api/graphql/bindings.py new file mode 100644 index 000000000..84d719fda --- /dev/null +++ b/src/services/api/graphql/bindings.py @@ -0,0 +1,29 @@ +# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +import vyos.defaults +from . graphql.queries import query +from . graphql.mutations import mutation +from . graphql.directives import directives_dict +from ariadne import make_executable_schema, load_schema_from_path, snake_case_fallback_resolvers + +def generate_schema(): + api_schema_dir = vyos.defaults.directories['api_schema'] + + type_defs = load_schema_from_path(api_schema_dir) + + schema = make_executable_schema(type_defs, query, mutation, snake_case_fallback_resolvers, directives=directives_dict) + + return schema diff --git a/src/services/api/graphql/graphql/directives.py b/src/services/api/graphql/graphql/directives.py index 651421c35..0a9298f55 100644 --- a/src/services/api/graphql/graphql/directives.py +++ b/src/services/api/graphql/graphql/directives.py @@ -1,12 +1,27 @@ +# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + from ariadne import SchemaDirectiveVisitor, ObjectType -from . mutations import make_resolver +from . queries import * +from . 
mutations import * -class DataDirective(SchemaDirectiveVisitor): - """ - Class providing implementation of 'generate' directive in schema. +def non(arg): + pass - """ - def visit_field_definition(self, field, object_type): +class VyosDirective(SchemaDirectiveVisitor): + def visit_field_definition(self, field, object_type, make_resolver=non): name = f'{field.type}' # field.type contains the return value of the mutation; trim value # to produce canonical name @@ -15,3 +30,50 @@ class DataDirective(SchemaDirectiveVisitor): func = make_resolver(name) field.resolve = func return field + + +class ConfigureDirective(VyosDirective): + """ + Class providing implementation of 'configure' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_configure_resolver) + +class ShowConfigDirective(VyosDirective): + """ + Class providing implementation of 'show' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_show_config_resolver) + +class ConfigFileDirective(VyosDirective): + """ + Class providing implementation of 'configfile' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_config_file_resolver) + +class ShowDirective(VyosDirective): + """ + Class providing implementation of 'show' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_show_resolver) + +class ImageDirective(VyosDirective): + """ + Class providing implementation of 'image' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_image_resolver) + +directives_dict = {"configure": ConfigureDirective, + "showconfig": ShowConfigDirective, + "configfile": ConfigFileDirective, + "show": ShowDirective, + "image": ImageDirective} diff --git a/src/services/api/graphql/graphql/mutations.py b/src/services/api/graphql/graphql/mutations.py index 98c665c9a..0c3eb702a 100644 --- a/src/services/api/graphql/graphql/mutations.py +++ b/src/services/api/graphql/graphql/mutations.py @@ -1,3 +1,17 @@ +# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. from importlib import import_module from typing import Any, Dict @@ -6,10 +20,11 @@ from graphql import GraphQLResolveInfo from makefun import with_signature from .. 
import state +from api.graphql.recipes.session import Session mutation = ObjectType("Mutation") -def make_resolver(mutation_name): +def make_mutation_resolver(mutation_name, class_name, session_func): """Dynamically generate a resolver for the mutation named in the schema by 'mutation_name'. @@ -19,11 +34,11 @@ def make_resolver(mutation_name): functools.wraps. :raise Exception: - encapsulating ConfigErrors, or internal errors + raising ConfigErrors, or internal errors """ - class_name = mutation_name.replace('create', '', 1).replace('delete', '', 1) + func_base_name = convert_camel_case_to_snake(class_name) - resolver_name = f'resolve_create_{func_base_name}' + resolver_name = f'resolve_{func_base_name}' func_sig = '(obj: Any, info: GraphQLResolveInfo, data: Dict)' @mutation.field(mutation_name) @@ -40,10 +55,18 @@ def make_resolver(mutation_name): data = kwargs['data'] session = state.settings['app'].state.vyos_session - mod = import_module(f'api.graphql.recipes.{func_base_name}') - klass = getattr(mod, class_name) + # one may override the session functions with a local subclass + try: + mod = import_module(f'api.graphql.recipes.{func_base_name}') + klass = getattr(mod, class_name) + except ImportError: + # otherwise, dynamically generate subclass to invoke subclass + # name based templates + klass = type(class_name, (Session,), {}) k = klass(session, data) - k.configure() + method = getattr(k, session_func) + result = method() + data['result'] = result return { "success": True, @@ -57,4 +80,20 @@ def make_resolver(mutation_name): return func_impl +def make_prefix_resolver(mutation_name, prefix=[]): + for pre in prefix: + Pre = pre.capitalize() + if Pre in mutation_name: + class_name = mutation_name.replace(Pre, '', 1) + return make_mutation_resolver(mutation_name, class_name, pre) + raise Exception + +def make_configure_resolver(mutation_name): + class_name = mutation_name + return make_mutation_resolver(mutation_name, class_name, 'configure') + +def make_config_file_resolver(mutation_name): + return make_prefix_resolver(mutation_name, prefix=['save', 'load']) +def make_image_resolver(mutation_name): + return make_prefix_resolver(mutation_name, prefix=['add', 'delete']) diff --git a/src/services/api/graphql/graphql/queries.py b/src/services/api/graphql/graphql/queries.py new file mode 100644 index 000000000..e1868091e --- /dev/null +++ b/src/services/api/graphql/graphql/queries.py @@ -0,0 +1,89 @@ +# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from importlib import import_module +from typing import Any, Dict +from ariadne import ObjectType, convert_kwargs_to_snake_case, convert_camel_case_to_snake +from graphql import GraphQLResolveInfo +from makefun import with_signature + +from .. 
import state +from api.graphql.recipes.session import Session + +query = ObjectType("Query") + +def make_query_resolver(query_name, class_name, session_func): + """Dynamically generate a resolver for the query named in the + schema by 'query_name'. + + Dynamic generation is provided using the package 'makefun' (via the + decorator 'with_signature'), which provides signature-preserving + function wrappers; it provides several improvements over, say, + functools.wraps. + + :raise Exception: + raising ConfigErrors, or internal errors + """ + + func_base_name = convert_camel_case_to_snake(class_name) + resolver_name = f'resolve_{func_base_name}' + func_sig = '(obj: Any, info: GraphQLResolveInfo, data: Dict)' + + @query.field(query_name) + @convert_kwargs_to_snake_case + @with_signature(func_sig, func_name=resolver_name) + async def func_impl(*args, **kwargs): + try: + if 'data' not in kwargs: + return { + "success": False, + "errors": ['missing data'] + } + + data = kwargs['data'] + session = state.settings['app'].state.vyos_session + + # one may override the session functions with a local subclass + try: + mod = import_module(f'api.graphql.recipes.{func_base_name}') + klass = getattr(mod, class_name) + except ImportError: + # otherwise, dynamically generate subclass to invoke subclass + # name based templates + klass = type(class_name, (Session,), {}) + k = klass(session, data) + method = getattr(k, session_func) + result = method() + data['result'] = result + + return { + "success": True, + "data": data + } + except Exception as error: + return { + "success": False, + "errors": [str(error)] + } + + return func_impl + +def make_show_config_resolver(query_name): + class_name = query_name + return make_query_resolver(query_name, class_name, 'show_config') + +def make_show_resolver(query_name): + class_name = query_name + return make_query_resolver(query_name, class_name, 'show') diff --git a/src/services/api/graphql/graphql/schema/config_file.graphql b/src/services/api/graphql/graphql/schema/config_file.graphql new file mode 100644 index 000000000..31ab26b9e --- /dev/null +++ b/src/services/api/graphql/graphql/schema/config_file.graphql @@ -0,0 +1,27 @@ +input SaveConfigFileInput { + fileName: String +} + +type SaveConfigFile { + fileName: String +} + +type SaveConfigFileResult { + data: SaveConfigFile + success: Boolean! + errors: [String] +} + +input LoadConfigFileInput { + fileName: String! +} + +type LoadConfigFile { + fileName: String! +} + +type LoadConfigFileResult { + data: LoadConfigFile + success: Boolean! 
+ errors: [String] +} diff --git a/src/services/api/graphql/graphql/schema/dhcp_server.graphql b/src/services/api/graphql/graphql/schema/dhcp_server.graphql index a7ee75d40..25f091bfa 100644 --- a/src/services/api/graphql/graphql/schema/dhcp_server.graphql +++ b/src/services/api/graphql/graphql/schema/dhcp_server.graphql @@ -1,8 +1,8 @@ -input dhcpServerConfigInput { +input DhcpServerConfigInput { sharedNetworkName: String subnet: String defaultRouter: String - dnsServer: String + nameServer: String domainName: String lease: Int range: Int @@ -13,11 +13,11 @@ input dhcpServerConfigInput { dnsForwardingListenAddress: String } -type dhcpServerConfig { +type DhcpServerConfig { sharedNetworkName: String subnet: String defaultRouter: String - dnsServer: String + nameServer: String domainName: String lease: Int range: Int @@ -28,8 +28,8 @@ type dhcpServerConfig { dnsForwardingListenAddress: String } -type createDhcpServerResult { - data: dhcpServerConfig +type CreateDhcpServerResult { + data: DhcpServerConfig success: Boolean! errors: [String] } diff --git a/src/services/api/graphql/graphql/schema/firewall_group.graphql b/src/services/api/graphql/graphql/schema/firewall_group.graphql new file mode 100644 index 000000000..d89904b9e --- /dev/null +++ b/src/services/api/graphql/graphql/schema/firewall_group.graphql @@ -0,0 +1,95 @@ +input CreateFirewallAddressGroupInput { + name: String! + address: [String] +} + +type CreateFirewallAddressGroup { + name: String! + address: [String] +} + +type CreateFirewallAddressGroupResult { + data: CreateFirewallAddressGroup + success: Boolean! + errors: [String] +} + +input UpdateFirewallAddressGroupMembersInput { + name: String! + address: [String!]! +} + +type UpdateFirewallAddressGroupMembers { + name: String! + address: [String!]! +} + +type UpdateFirewallAddressGroupMembersResult { + data: UpdateFirewallAddressGroupMembers + success: Boolean! + errors: [String] +} + +input RemoveFirewallAddressGroupMembersInput { + name: String! + address: [String!]! +} + +type RemoveFirewallAddressGroupMembers { + name: String! + address: [String!]! +} + +type RemoveFirewallAddressGroupMembersResult { + data: RemoveFirewallAddressGroupMembers + success: Boolean! + errors: [String] +} + +input CreateFirewallAddressIpv6GroupInput { + name: String! + address: [String] +} + +type CreateFirewallAddressIpv6Group { + name: String! + address: [String] +} + +type CreateFirewallAddressIpv6GroupResult { + data: CreateFirewallAddressIpv6Group + success: Boolean! + errors: [String] +} + +input UpdateFirewallAddressIpv6GroupMembersInput { + name: String! + address: [String!]! +} + +type UpdateFirewallAddressIpv6GroupMembers { + name: String! + address: [String!]! +} + +type UpdateFirewallAddressIpv6GroupMembersResult { + data: UpdateFirewallAddressIpv6GroupMembers + success: Boolean! + errors: [String] +} + +input RemoveFirewallAddressIpv6GroupMembersInput { + name: String! + address: [String!]! +} + +type RemoveFirewallAddressIpv6GroupMembers { + name: String! + address: [String!]! +} + +type RemoveFirewallAddressIpv6GroupMembersResult { + data: RemoveFirewallAddressIpv6GroupMembers + success: Boolean! + errors: [String] +} diff --git a/src/services/api/graphql/graphql/schema/image.graphql b/src/services/api/graphql/graphql/schema/image.graphql new file mode 100644 index 000000000..7d1b4f9d0 --- /dev/null +++ b/src/services/api/graphql/graphql/schema/image.graphql @@ -0,0 +1,29 @@ +input AddSystemImageInput { + location: String! 
+} + +type AddSystemImage { + location: String + result: String +} + +type AddSystemImageResult { + data: AddSystemImage + success: Boolean! + errors: [String] +} + +input DeleteSystemImageInput { + name: String! +} + +type DeleteSystemImage { + name: String + result: String +} + +type DeleteSystemImageResult { + data: DeleteSystemImage + success: Boolean! + errors: [String] +} diff --git a/src/services/api/graphql/graphql/schema/interface_ethernet.graphql b/src/services/api/graphql/graphql/schema/interface_ethernet.graphql index fdcf97bad..32438b315 100644 --- a/src/services/api/graphql/graphql/schema/interface_ethernet.graphql +++ b/src/services/api/graphql/graphql/schema/interface_ethernet.graphql @@ -1,18 +1,18 @@ -input interfaceEthernetConfigInput { +input InterfaceEthernetConfigInput { interface: String address: String replace: Boolean = true description: String } -type interfaceEthernetConfig { +type InterfaceEthernetConfig { interface: String address: String description: String } -type createInterfaceEthernetResult { - data: interfaceEthernetConfig +type CreateInterfaceEthernetResult { + data: InterfaceEthernetConfig success: Boolean! errors: [String] } diff --git a/src/services/api/graphql/graphql/schema/schema.graphql b/src/services/api/graphql/graphql/schema/schema.graphql index 8a5e17962..952e46f34 100644 --- a/src/services/api/graphql/graphql/schema/schema.graphql +++ b/src/services/api/graphql/graphql/schema/schema.graphql @@ -3,13 +3,28 @@ schema { mutation: Mutation } +directive @configure on FIELD_DEFINITION +directive @configfile on FIELD_DEFINITION +directive @show on FIELD_DEFINITION +directive @showconfig on FIELD_DEFINITION +directive @image on FIELD_DEFINITION + type Query { - _dummy: String + Show(data: ShowInput) : ShowResult @show + ShowConfig(data: ShowConfigInput) : ShowConfigResult @showconfig } -directive @generate on FIELD_DEFINITION - type Mutation { - createDhcpServer(data: dhcpServerConfigInput) : createDhcpServerResult @generate - createInterfaceEthernet(data: interfaceEthernetConfigInput) : createInterfaceEthernetResult @generate + CreateDhcpServer(data: DhcpServerConfigInput) : CreateDhcpServerResult @configure + CreateInterfaceEthernet(data: InterfaceEthernetConfigInput) : CreateInterfaceEthernetResult @configure + CreateFirewallAddressGroup(data: CreateFirewallAddressGroupInput) : CreateFirewallAddressGroupResult @configure + UpdateFirewallAddressGroupMembers(data: UpdateFirewallAddressGroupMembersInput) : UpdateFirewallAddressGroupMembersResult @configure + RemoveFirewallAddressGroupMembers(data: RemoveFirewallAddressGroupMembersInput) : RemoveFirewallAddressGroupMembersResult @configure + CreateFirewallAddressIpv6Group(data: CreateFirewallAddressIpv6GroupInput) : CreateFirewallAddressIpv6GroupResult @configure + UpdateFirewallAddressIpv6GroupMembers(data: UpdateFirewallAddressIpv6GroupMembersInput) : UpdateFirewallAddressIpv6GroupMembersResult @configure + RemoveFirewallAddressIpv6GroupMembers(data: RemoveFirewallAddressIpv6GroupMembersInput) : RemoveFirewallAddressIpv6GroupMembersResult @configure + SaveConfigFile(data: SaveConfigFileInput) : SaveConfigFileResult @configfile + LoadConfigFile(data: LoadConfigFileInput) : LoadConfigFileResult @configfile + AddSystemImage(data: AddSystemImageInput) : AddSystemImageResult @image + DeleteSystemImage(data: DeleteSystemImageInput) : DeleteSystemImageResult @image } diff --git a/src/services/api/graphql/graphql/schema/show.graphql b/src/services/api/graphql/graphql/schema/show.graphql new file mode 100644 
index 000000000..c7709e48b --- /dev/null +++ b/src/services/api/graphql/graphql/schema/show.graphql @@ -0,0 +1,14 @@ +input ShowInput { + path: [String!]! +} + +type Show { + path: [String] + result: String +} + +type ShowResult { + data: Show + success: Boolean! + errors: [String] +} diff --git a/src/services/api/graphql/graphql/schema/show_config.graphql b/src/services/api/graphql/graphql/schema/show_config.graphql new file mode 100644 index 000000000..34afd2aa9 --- /dev/null +++ b/src/services/api/graphql/graphql/schema/show_config.graphql @@ -0,0 +1,21 @@ +""" +Use 'scalar Generic' for show config output, to avoid attempts to +JSON-serialize in case of JSON output. +""" +scalar Generic + +input ShowConfigInput { + path: [String!]! + configFormat: String +} + +type ShowConfig { + path: [String] + result: Generic +} + +type ShowConfigResult { + data: ShowConfig + success: Boolean! + errors: [String] +} diff --git a/src/services/api/graphql/recipes/dhcp_server.py b/src/services/api/graphql/recipes/dhcp_server.py deleted file mode 100644 index 3edb3028e..000000000 --- a/src/services/api/graphql/recipes/dhcp_server.py +++ /dev/null @@ -1,13 +0,0 @@ - -from . recipe import Recipe - -class DhcpServer(Recipe): - def __init__(self, session, command_file): - super().__init__(session, command_file) - - # Define any custom processing of parameters here by overriding - # configure: - # - # def configure(self): - # self.data = transform_data(self.data) - # super().configure() diff --git a/src/services/api/graphql/recipes/interface_ethernet.py b/src/services/api/graphql/recipes/interface_ethernet.py deleted file mode 100644 index f88f5924f..000000000 --- a/src/services/api/graphql/recipes/interface_ethernet.py +++ /dev/null @@ -1,13 +0,0 @@ - -from . recipe import Recipe - -class InterfaceEthernet(Recipe): - def __init__(self, session, command_file): - super().__init__(session, command_file) - - # Define any custom processing of parameters here by overriding - # configure: - # - # def configure(self): - # self.data = transform_data(self.data) - # super().configure() diff --git a/src/services/api/graphql/recipes/recipe.py b/src/services/api/graphql/recipes/recipe.py deleted file mode 100644 index 8fbb9e0bf..000000000 --- a/src/services/api/graphql/recipes/recipe.py +++ /dev/null @@ -1,49 +0,0 @@ -from ariadne import convert_camel_case_to_snake -import vyos.defaults -from vyos.template import render - -class Recipe(object): - def __init__(self, session, data): - self._session = session - self.data = data - self._name = convert_camel_case_to_snake(type(self).__name__) - - @property - def data(self): - return self.__data - - @data.setter - def data(self, data): - if isinstance(data, dict): - self.__data = data - else: - raise ValueError("data must be of type dict") - - def configure(self): - session = self._session - data = self.data - func_base_name = self._name - - tmpl_file = f'{func_base_name}.tmpl' - cmd_file = f'/tmp/{func_base_name}.cmds' - tmpl_dir = vyos.defaults.directories['api_templates'] - - try: - render(cmd_file, tmpl_file, data, location=tmpl_dir) - commands = [] - with open(cmd_file) as f: - lines = f.readlines() - for line in lines: - commands.append(line.split()) - for cmd in commands: - if cmd[0] == 'set': - session.set(cmd[1:]) - elif cmd[0] == 'delete': - session.delete(cmd[1:]) - else: - raise ValueError('Operation must be "set" or "delete"') - session.commit() - except Exception as error: - raise error - - diff --git 
a/src/services/api/graphql/recipes/remove_firewall_address_group_members.py b/src/services/api/graphql/recipes/remove_firewall_address_group_members.py new file mode 100644 index 000000000..b91932e14 --- /dev/null +++ b/src/services/api/graphql/recipes/remove_firewall_address_group_members.py @@ -0,0 +1,35 @@ +# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from . session import Session + +class RemoveFirewallAddressGroupMembers(Session): + def __init__(self, session, data): + super().__init__(session, data) + + # Define any custom processing of parameters here by overriding + # configure: + # + # def configure(self): + # self._data = transform_data(self._data) + # super().configure() + # self.clean_up() + + def configure(self): + super().configure() + + group_name = self._data['name'] + path = ['firewall', 'group', 'address-group', group_name] + self.delete_path_if_childless(path) diff --git a/src/services/api/graphql/recipes/session.py b/src/services/api/graphql/recipes/session.py new file mode 100644 index 000000000..1f844ff70 --- /dev/null +++ b/src/services/api/graphql/recipes/session.py @@ -0,0 +1,138 @@ +# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +import json + +from ariadne import convert_camel_case_to_snake + +import vyos.defaults +from vyos.config import Config +from vyos.configtree import ConfigTree +from vyos.template import render + +class Session: + """ + Wrapper for calling configsession functions based on GraphQL requests. + Non-nullable fields in the respective schema allow avoiding a key check + in 'data'. 
+ """ + def __init__(self, session, data): + self._session = session + self._data = data + self._name = convert_camel_case_to_snake(type(self).__name__) + + def configure(self): + session = self._session + data = self._data + func_base_name = self._name + + tmpl_file = f'{func_base_name}.tmpl' + cmd_file = f'/tmp/{func_base_name}.cmds' + tmpl_dir = vyos.defaults.directories['api_templates'] + + try: + render(cmd_file, tmpl_file, data, location=tmpl_dir) + commands = [] + with open(cmd_file) as f: + lines = f.readlines() + for line in lines: + commands.append(line.split()) + for cmd in commands: + if cmd[0] == 'set': + session.set(cmd[1:]) + elif cmd[0] == 'delete': + session.delete(cmd[1:]) + else: + raise ValueError('Operation must be "set" or "delete"') + session.commit() + except Exception as error: + raise error + + def delete_path_if_childless(self, path): + session = self._session + config = Config(session.get_session_env()) + if not config.list_nodes(path): + session.delete(path) + session.commit() + + def show_config(self): + session = self._session + data = self._data + out = '' + + try: + out = session.show_config(data['path']) + if data.get('config_format', '') == 'json': + config_tree = vyos.configtree.ConfigTree(out) + out = json.loads(config_tree.to_json()) + except Exception as error: + raise error + + return out + + def save(self): + session = self._session + data = self._data + if 'file_name' not in data or not data['file_name']: + data['file_name'] = '/config/config.boot' + + try: + session.save_config(data['file_name']) + except Exception as error: + raise error + + def load(self): + session = self._session + data = self._data + + try: + session.load_config(data['file_name']) + session.commit() + except Exception as error: + raise error + + def show(self): + session = self._session + data = self._data + out = '' + + try: + out = session.show(data['path']) + except Exception as error: + raise error + + return out + + def add(self): + session = self._session + data = self._data + + try: + res = session.install_image(data['location']) + except Exception as error: + raise error + + return res + + def delete(self): + session = self._session + data = self._data + + try: + res = session.remove_image(data['name']) + except Exception as error: + raise error + + return res diff --git a/src/services/api/graphql/recipes/templates/dhcp_server.tmpl b/src/services/api/graphql/recipes/templates/create_dhcp_server.tmpl index 629ce83c1..70de43183 100644 --- a/src/services/api/graphql/recipes/templates/dhcp_server.tmpl +++ b/src/services/api/graphql/recipes/templates/create_dhcp_server.tmpl @@ -1,5 +1,5 @@ set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} default-router {{ default_router }} -set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} dns-server {{ dns_server }} +set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} name-server {{ name_server }} set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} domain-name {{ domain_name }} set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} lease {{ lease }} set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} range {{ range }} start {{ start }} diff --git a/src/services/api/graphql/recipes/templates/create_firewall_address_group.tmpl b/src/services/api/graphql/recipes/templates/create_firewall_address_group.tmpl new file 
mode 100644 index 000000000..a890d0086 --- /dev/null +++ b/src/services/api/graphql/recipes/templates/create_firewall_address_group.tmpl @@ -0,0 +1,4 @@ +set firewall group address-group {{ name }} +{% for add in address %} +set firewall group address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl b/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl new file mode 100644 index 000000000..e9b660722 --- /dev/null +++ b/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl @@ -0,0 +1,4 @@ +set firewall group ipv6-address-group {{ name }} +{% for add in address %} +set firewall group ipv6-address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/recipes/templates/interface_ethernet.tmpl b/src/services/api/graphql/recipes/templates/create_interface_ethernet.tmpl index d9d7ed691..d9d7ed691 100644 --- a/src/services/api/graphql/recipes/templates/interface_ethernet.tmpl +++ b/src/services/api/graphql/recipes/templates/create_interface_ethernet.tmpl diff --git a/src/services/api/graphql/recipes/templates/remove_firewall_address_group_members.tmpl b/src/services/api/graphql/recipes/templates/remove_firewall_address_group_members.tmpl new file mode 100644 index 000000000..458f3e5fc --- /dev/null +++ b/src/services/api/graphql/recipes/templates/remove_firewall_address_group_members.tmpl @@ -0,0 +1,3 @@ +{% for add in address %} +delete firewall group address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl new file mode 100644 index 000000000..0efa0b226 --- /dev/null +++ b/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl @@ -0,0 +1,3 @@ +{% for add in address %} +delete firewall group ipv6-address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/recipes/templates/update_firewall_address_group_members.tmpl b/src/services/api/graphql/recipes/templates/update_firewall_address_group_members.tmpl new file mode 100644 index 000000000..f56c61231 --- /dev/null +++ b/src/services/api/graphql/recipes/templates/update_firewall_address_group_members.tmpl @@ -0,0 +1,3 @@ +{% for add in address %} +set firewall group address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl new file mode 100644 index 000000000..f98a5517c --- /dev/null +++ b/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl @@ -0,0 +1,3 @@ +{% for add in address %} +set firewall group ipv6-address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/vyos-configd b/src/services/vyos-configd index 670b6e66a..48c9135e2 100755 --- a/src/services/vyos-configd +++ b/src/services/vyos-configd @@ -28,6 +28,7 @@ import zmq from contextlib import contextmanager from vyos.defaults import directories +from vyos.util import boot_configuration_complete from vyos.configsource import ConfigSourceString, ConfigSourceError from vyos.config import Config from vyos import ConfigError @@ -186,7 +187,7 @@ def initialization(socket): session_out 
= None # if not a 'live' session, for example on boot, write to file - if not session_out or not os.path.isfile('/tmp/vyos-config-status'): + if not session_out or not boot_configuration_complete(): session_out = script_stdout_log session_mode = 'a' diff --git a/src/services/vyos-hostsd b/src/services/vyos-hostsd index 4c4bb036e..df9f18d2d 100755 --- a/src/services/vyos-hostsd +++ b/src/services/vyos-hostsd @@ -139,6 +139,27 @@ # } # # +### authoritative_zones +## Additional zones hosted authoritatively by pdns-recursor. +## We add NTAs for these zones but do not do much else here. +# +# { 'type': 'authoritative_zones', +# 'op': 'add', +# 'data': ['<str zone>', ...] +# } +# +# { 'type': 'authoritative_zones', +# 'op': 'delete', +# 'data': ['<str zone>', ...] +# } +# +# { 'type': 'authoritative_zones', +# 'op': 'get', +# } +# response: +# { 'data': ['<str zone>', ...] } +# +# ### search_domains # # { 'type': 'search_domains', @@ -255,6 +276,7 @@ STATE = { "name_server_tags_recursor": [], "name_server_tags_system": [], "forward_zones": {}, + "authoritative_zones": [], "hosts": {}, "host_name": "vyos", "domain_name": "", @@ -267,7 +289,8 @@ base_schema = Schema({ Required('op'): Any('add', 'delete', 'set', 'get', 'apply'), 'type': Any('name_servers', 'name_server_tags_recursor', 'name_server_tags_system', - 'forward_zones', 'search_domains', 'hosts', 'host_name'), + 'forward_zones', 'authoritative_zones', 'search_domains', + 'hosts', 'host_name'), 'data': Any(list, dict), 'tag': str, 'tag_regex': str @@ -317,7 +340,7 @@ hosts_add_schema = op_type_schema.extend({ 'data': { str: { str: { - 'address': str, + 'address': [str], 'aliases': [str] } } @@ -347,6 +370,11 @@ msg_schema_map = { 'delete': data_list_schema, 'get': op_type_schema }, + 'authoritative_zones': { + 'add': data_list_schema, + 'delete': data_list_schema, + 'get': op_type_schema + }, 'search_domains': { 'add': data_dict_list_schema, 'delete': data_list_schema, @@ -522,7 +550,7 @@ def handle_message(msg): data = get_option(msg, 'data') if _type in ['name_servers', 'forward_zones', 'search_domains', 'hosts']: delete_items_from_dict(STATE[_type], data) - elif _type in ['name_server_tags_recursor', 'name_server_tags_system']: + elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'authoritative_zones']: delete_items_from_list(STATE[_type], data) else: raise ValueError(f'Operation "{op}" unknown data type "{_type}"') @@ -534,7 +562,7 @@ def handle_message(msg): elif _type in ['forward_zones', 'hosts']: add_items_to_dict(STATE[_type], data) # maybe we need to rec_control clear-nta each domain that was removed here? 
- elif _type in ['name_server_tags_recursor', 'name_server_tags_system']: + elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'authoritative_zones']: add_items_to_list(STATE[_type], data) else: raise ValueError(f'Operation "{op}" unknown data type "{_type}"') @@ -550,7 +578,7 @@ def handle_message(msg): if _type in ['name_servers', 'search_domains', 'hosts']: tag_regex = get_option(msg, 'tag_regex') result = get_items_from_dict_regex(STATE[_type], tag_regex) - elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'forward_zones']: + elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'forward_zones', 'authoritative_zones']: result = STATE[_type] else: raise ValueError(f'Operation "{op}" unknown data type "{_type}"') diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server index cb4ce4072..06871f1d6 100755 --- a/src/services/vyos-http-api-server +++ b/src/services/vyos-http-api-server @@ -32,16 +32,14 @@ from fastapi.responses import HTMLResponse from fastapi.exceptions import RequestValidationError from fastapi.routing import APIRoute from pydantic import BaseModel, StrictStr, validator -from starlette.datastructures import FormData, MutableHeaders +from starlette.middleware.cors import CORSMiddleware +from starlette.datastructures import FormData from starlette.formparsers import FormParser, MultiPartParser from multipart.multipart import parse_options_header -from ariadne import make_executable_schema, load_schema_from_path, snake_case_fallback_resolvers from ariadne.asgi import GraphQL import vyos.config -import vyos.defaults - from vyos.configsession import ConfigSession, ConfigSessionError import api.graphql.state @@ -69,11 +67,11 @@ def load_server_config(): return config def check_auth(key_list, key): - id = None + key_id = None for k in key_list: if k['key'] == key: - id = k['id'] - return id + key_id = k['id'] + return key_id def error(code, msg): resp = {"success": False, "error": msg, "data": None} @@ -223,10 +221,10 @@ responses = { def auth_required(data: ApiModel): key = data.key api_keys = app.state.vyos_keys - id = check_auth(api_keys, key) - if not id: + key_id = check_auth(api_keys, key) + if not key_id: raise HTTPException(status_code=401, detail="Valid API key is required") - app.state.vyos_id = id + app.state.vyos_id = key_id # override Request and APIRoute classes in order to convert form request to json; # do all explicit validation here, for backwards compatability of error messages; @@ -613,18 +611,19 @@ def show_op(data: ShowModel): # GraphQL integration ### -api.graphql.state.init() - -from api.graphql.graphql.mutations import mutation -from api.graphql.graphql.directives import DataDirective +def graphql_init(fast_api_app): + from api.graphql.bindings import generate_schema -api_schema_dir = vyos.defaults.directories['api_schema'] - -type_defs = load_schema_from_path(api_schema_dir) + api.graphql.state.init() + api.graphql.state.settings['app'] = app -schema = make_executable_schema(type_defs, mutation, snake_case_fallback_resolvers, directives={"generate": DataDirective}) + schema = generate_schema() -app.add_route('/graphql', GraphQL(schema, debug=True)) + if app.state.vyos_origins: + origins = app.state.vyos_origins + app.add_route('/graphql', CORSMiddleware(GraphQL(schema, debug=True), allow_origins=origins, allow_methods=("GET", "POST", "OPTIONS"))) + else: + app.add_route('/graphql', GraphQL(schema, debug=True)) ### @@ -640,23 +639,28 @@ if __name__ == '__main__': try: 
server_config = load_server_config() - except Exception as e: - logger.critical("Failed to load the HTTP API server config: {0}".format(e)) + except Exception as err: + logger.critical(f"Failed to load the HTTP API server config: {err}") - session = ConfigSession(os.getpid()) + config_session = ConfigSession(os.getpid()) - app.state.vyos_session = session + app.state.vyos_session = config_session app.state.vyos_keys = server_config['api_keys'] - app.state.vyos_debug = True if server_config['debug'] == 'true' else False - app.state.vyos_strict = True if server_config['strict'] == 'true' else False + app.state.vyos_debug = server_config['debug'] + app.state.vyos_strict = server_config['strict'] + app.state.vyos_origins = server_config.get('cors', {}).get('origins', []) - api.graphql.state.settings['app'] = app + graphql_init(app) try: - uvicorn.run(app, host=server_config["listen_address"], - port=int(server_config["port"]), - proxy_headers=True) - except OSError as e: - logger.critical(f"OSError {e}") + if not server_config['socket']: + uvicorn.run(app, host=server_config["listen_address"], + port=int(server_config["port"]), + proxy_headers=True) + else: + uvicorn.run(app, uds="/run/api.sock", + proxy_headers=True) + except OSError as err: + logger.critical(f"OSError {err}") sys.exit(1) diff --git a/src/system/keepalived-fifo.py b/src/system/keepalived-fifo.py index 1fba0d75b..b1fe7e43f 100755 --- a/src/system/keepalived-fifo.py +++ b/src/system/keepalived-fifo.py @@ -29,6 +29,7 @@ from logging.handlers import SysLogHandler from vyos.ifconfig.vrrp import VRRP from vyos.configquery import ConfigTreeQuery from vyos.util import cmd +from vyos.util import dict_search # configure logging logger = logging.getLogger(__name__) @@ -69,22 +70,10 @@ class KeepalivedFifo: raise ValueError() # Read VRRP configuration directly from CLI - vrrp_config_dict = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True) - self.vrrp_config = {'vrrp_groups': {}, 'sync_groups': {}} - for key in ['group', 'sync_group']: - if key not in vrrp_config_dict: - continue - for group, group_config in vrrp_config_dict[key].items(): - if 'transition_script' not in group_config: - continue - self.vrrp_config['vrrp_groups'][group] = { - 'STOP': group_config['transition_script'].get('stop'), - 'FAULT': group_config['transition_script'].get('fault'), - 'BACKUP': group_config['transition_script'].get('backup'), - 'MASTER': group_config['transition_script'].get('master'), - } - logger.info(f'Loaded configuration: {self.vrrp_config}') + self.vrrp_config_dict = conf.get_config_dict(base, + key_mangling=('-', '_'), get_first_key=True) + + logger.debug(f'Loaded configuration: {self.vrrp_config_dict}') except Exception as err: logger.error(f'Unable to load configuration: {err}') @@ -129,20 +118,17 @@ class KeepalivedFifo: if os.path.exists(mdns_running_file): cmd(mdns_update_command) - if n_name in self.vrrp_config['vrrp_groups'] and n_state in self.vrrp_config['vrrp_groups'][n_name]: - n_script = self.vrrp_config['vrrp_groups'][n_name].get(n_state) - if n_script: - self._run_command(n_script) + tmp = dict_search(f'group.{n_name}.transition_script.{n_state.lower()}', self.vrrp_config_dict) + if tmp != None: + self._run_command(tmp) # check and run commands for VRRP sync groups - # currently, this is not available in VyOS CLI - if n_type == 'GROUP': + elif n_type == 'GROUP': if os.path.exists(mdns_running_file): cmd(mdns_update_command) - if n_name in self.vrrp_config['sync_groups'] and n_state in 
self.vrrp_config['sync_groups'][n_name]: - n_script = self.vrrp_config['sync_groups'][n_name].get(n_state) - if n_script: - self._run_command(n_script) + tmp = dict_search(f'sync_group.{n_name}.transition_script.{n_state.lower()}', self.vrrp_config_dict) + if tmp != None: + self._run_command(tmp) # mark task in queue as done self.message_queue.task_done() except Exception as err: diff --git a/src/systemd/dhcp6c@.service b/src/systemd/dhcp6c@.service index fdd6d7d88..9a97ee261 100644 --- a/src/systemd/dhcp6c@.service +++ b/src/systemd/dhcp6c@.service @@ -9,7 +9,7 @@ StartLimitIntervalSec=0 WorkingDirectory=/run/dhcp6c Type=forking PIDFile=/run/dhcp6c/dhcp6c.%i.pid -ExecStart=/usr/sbin/dhcp6c -k /run/dhcp6c/dhcp6c.%i.sock -c /run/dhcp6c/dhcp6c.%i.conf -p /run/dhcp6c/dhcp6c.%i.pid %i +ExecStart=/usr/sbin/dhcp6c -D -k /run/dhcp6c/dhcp6c.%i.sock -c /run/dhcp6c/dhcp6c.%i.conf -p /run/dhcp6c/dhcp6c.%i.pid %i Restart=on-failure RestartSec=20 diff --git a/src/systemd/root-partition-auto-resize.service b/src/systemd/root-partition-auto-resize.service new file mode 100644 index 000000000..a57fbc3d8 --- /dev/null +++ b/src/systemd/root-partition-auto-resize.service @@ -0,0 +1,12 @@ +[Unit] +Description=VyOS root partition auto resizing +After=multi-user.target + +[Service] +Type=oneshot +User=root +Group=root +ExecStart=/usr/libexec/vyos/op_mode/force_root-partition-auto-resize.sh + +[Install] +WantedBy=vyos.target
\ No newline at end of file diff --git a/src/systemd/tftpd@.service b/src/systemd/tftpd@.service index 266bc0962..a674bf598 100644 --- a/src/systemd/tftpd@.service +++ b/src/systemd/tftpd@.service @@ -7,7 +7,7 @@ RequiresMountsFor=/run Type=forking #NotifyAccess=main EnvironmentFile=-/etc/default/tftpd%I -ExecStart=/usr/sbin/in.tftpd "$DAEMON_ARGS" +ExecStart=/bin/sh -c "${VRF_ARGS} /usr/sbin/in.tftpd ${DAEMON_ARGS}" Restart=on-failure [Install] diff --git a/src/systemd/vyos-hostsd.service b/src/systemd/vyos-hostsd.service index b77335778..4da55f518 100644 --- a/src/systemd/vyos-hostsd.service +++ b/src/systemd/vyos-hostsd.service @@ -7,7 +7,7 @@ DefaultDependencies=no # Seemingly sensible way to say "as early as the system is ready" # All vyos-hostsd needs is read/write mounted root -After=systemd-remount-fs.service +After=systemd-remount-fs.service cloud-init.service [Service] WorkingDirectory=/run/vyos-hostsd diff --git a/src/systemd/vyos-http-api.service b/src/systemd/vyos-http-api.service deleted file mode 100644 index ba5df5984..000000000 --- a/src/systemd/vyos-http-api.service +++ /dev/null @@ -1,23 +0,0 @@ -[Unit] -Description=VyOS HTTP API service -After=auditd.service systemd-user-sessions.service time-sync.target vyos-router.service -Requires=vyos-router.service - -[Service] -ExecStartPre=/usr/libexec/vyos/init/vyos-config -ExecStart=/usr/libexec/vyos/services/vyos-http-api-server -Type=idle - -SyslogIdentifier=vyos-http-api -SyslogFacility=daemon - -Restart=on-failure - -# Does't work but leave it here -User=root -Group=vyattacfg - -[Install] -# Installing in a earlier target leaves ExecStartPre waiting -WantedBy=getty.target - diff --git a/src/utils/vyos-hostsd-client b/src/utils/vyos-hostsd-client index d4d38315a..a0515951a 100755 --- a/src/utils/vyos-hostsd-client +++ b/src/utils/vyos-hostsd-client @@ -129,7 +129,8 @@ try: params = h.split(",") if len(params) < 2: raise ValueError("Malformed host entry") - entry['address'] = params[1] + # Address needs to be a list because of changes made in T2683 + entry['address'] = [params[1]] entry['aliases'] = params[2:] data[params[0]] = entry client.add_hosts({args.tag: data}) diff --git a/src/validators/bgp-large-community-list b/src/validators/bgp-large-community-list index c07268e81..80112dfdc 100755 --- a/src/validators/bgp-large-community-list +++ b/src/validators/bgp-large-community-list @@ -30,7 +30,7 @@ if __name__ == '__main__': sys.exit(1) if not (re.match(pattern, sys.argv[1]) and - (is_ipv4(value[0]) or value[0].isdigit()) and value[1].isdigit()): + (is_ipv4(value[0]) or value[0].isdigit()) and (value[1].isdigit() or value[1] == '*')): sys.exit(1) sys.exit(0) diff --git a/src/validators/bgp-route-target b/src/validators/bgp-rd-rt index e7e4d403f..b2b69c9be 100755 --- a/src/validators/bgp-route-target +++ b/src/validators/bgp-rd-rt @@ -19,29 +19,37 @@ from vyos.template import is_ipv4 parser = ArgumentParser() group = parser.add_mutually_exclusive_group() -group.add_argument('--single', action='store', help='Validate and allow only one route-target') -group.add_argument('--multi', action='store', help='Validate multiple, whitespace separated route-targets') +group.add_argument('--route-distinguisher', action='store', help='Validate BGP route distinguisher') +group.add_argument('--route-target', action='store', help='Validate one BGP route-target') +group.add_argument('--route-target-multi', action='store', help='Validate multiple, whitespace separated BGP route-targets') args = parser.parse_args() -def is_valid_rt(rt): - # 
every route target needs to have a colon and must consists of two parts
+def is_valid(rt):
+    """ Verify BGP RD/RT - both can be verified using the same logic """
+    # every RD/RT (route distinguisher/route target) needs to have a colon and
+    # must consist of two parts
     value = rt.split(':')
     if len(value) != 2:
         return False
-    # A route target must either be only numbers, or the first part must be an
-    # IPv4 address
+
+    # An RD/RT must either be only numbers, or the first part must be an IPv4
+    # address
     if (is_ipv4(value[0]) or value[0].isdigit()) and value[1].isdigit():
         return True
     return False

 if __name__ == '__main__':
-    if args.single:
-        if not is_valid_rt(args.single):
+    if args.route_distinguisher:
+        if not is_valid(args.route_distinguisher):
+            exit(1)
+
+    elif args.route_target:
+        if not is_valid(args.route_target):
             exit(1)
-    elif args.multi:
-        for rt in args.multi.split(' '):
-            if not is_valid_rt(rt):
+    elif args.route_target_multi:
+        for rt in args.route_target_multi.split(' '):
+            if not is_valid(rt):
                 exit(1)
     else:
diff --git a/src/validators/ip-protocol b/src/validators/ip-protocol
index 078f8e319..7898fa6d0 100755
--- a/src/validators/ip-protocol
+++ b/src/validators/ip-protocol
@@ -31,7 +31,7 @@ if __name__ == '__main__':
     pattern = "!?\\b(all|ip|hopopt|icmp|igmp|ggp|ipencap|st|tcp|egp|igp|pup|udp|" \
               "tcp_udp|hmp|xns-idp|rdp|iso-tp4|dccp|xtp|ddp|idpr-cmtp|ipv6|" \
-              "ipv6-route|ipv6-frag|idrp|rsvp|gre|esp|ah|skip|ipv6-icmp|" \
+              "ipv6-route|ipv6-frag|idrp|rsvp|gre|esp|ah|skip|ipv6-icmp|icmpv6|" \
               "ipv6-nonxt|ipv6-opts|rspf|vmtp|eigrp|ospf|ax.25|ipip|etherip|" \
               "encap|99|pim|ipcomp|vrrp|l2tp|isis|sctp|fc|mobility-header|" \
               "udplite|mpls-in-ip|manet|hip|shim6|wesp|rohc)\\b"
diff --git a/src/validators/ipv4-multicast b/src/validators/ipv4-multicast
index e5cbc9532..5465c728d 100755
--- a/src/validators/ipv4-multicast
+++ b/src/validators/ipv4-multicast
@@ -1,3 +1,3 @@
 #!/bin/sh

-ipaddrcheck --is-ipv4-multicast $1
+ipaddrcheck --is-ipv4-multicast $1 && ipaddrcheck --is-ipv4-single $1
diff --git a/src/validators/ipv6-link-local b/src/validators/ipv6-link-local
new file mode 100755
index 000000000..05e693b77
--- /dev/null
+++ b/src/validators/ipv6-link-local
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+
+import sys
+from vyos.validate import is_ipv6_link_local
+
+if __name__ == '__main__':
+    if len(sys.argv)>1:
+        addr = sys.argv[1]
+        if not is_ipv6_link_local(addr):
+            sys.exit(1)
+
+    sys.exit(0)
diff --git a/src/validators/ipv6-multicast b/src/validators/ipv6-multicast
index 66cd90c9c..5afc437e5 100755
--- a/src/validators/ipv6-multicast
+++ b/src/validators/ipv6-multicast
@@ -1,3 +1,3 @@
 #!/bin/sh

-ipaddrcheck --is-ipv6-multicast $1
+ipaddrcheck --is-ipv6-multicast $1 && ipaddrcheck --is-ipv6-single $1
diff --git a/src/validators/range b/src/validators/range
new file mode 100755
index 000000000..d4c25f3c4
--- /dev/null
+++ b/src/validators/range
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import sys
+import argparse
+
+class MalformedRange(Exception):
+    pass
+
+def validate_range(value, min=None, max=None):
+    try:
+        lower, upper = re.match(r'^(\d+)-(\d+)$', value).groups()
+
+        lower, upper = int(lower), int(upper)
+
+        if lower > upper:
+            raise MalformedRange("the lower bound exceeds the upper bound")
+
+        if min is not None:
+            if lower < min:
+                raise MalformedRange("the lower bound must not be less than {}".format(min))
+
+        if max is not None:
+            if upper > max:
+                raise MalformedRange("the upper bound must not be greater than {}".format(max))
+
+    except (AttributeError, ValueError):
+        raise MalformedRange("range syntax error")
+
+parser = argparse.ArgumentParser(description='Range validator.')
+parser.add_argument('--min', type=int, action='store')
+parser.add_argument('--max', type=int, action='store')
+parser.add_argument('value', action='store')
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+
+    try:
+        validate_range(args.value, min=args.min, max=args.max)
+    except MalformedRange as e:
+        print("Incorrect range '{}': {}".format(args.value, e))
+        sys.exit(1)
diff --git a/src/validators/script b/src/validators/script
index 1d8a27e5c..4ffdeb2a0 100755
--- a/src/validators/script
+++ b/src/validators/script
@@ -36,7 +36,8 @@ if __name__ == '__main__':

     # File outside the config dir is just a warning
     if not vyos.util.file_is_persistent(script):
-        sys.exit(
-            f'Warning: file {path} is outside the / config directory\n'
+        print(
+            f'Warning: file {script} is outside the "/config" directory\n'
             'It will not be automatically migrated to a new image on system update'
         )
+        sys.exit(0)
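The GraphQL additions above (the schema files, the generated resolvers, and the '/graphql' route registered in vyos-http-api-server) are easiest to follow with a concrete request. The sketch below is illustrative only and is not part of the changeset: the endpoint address is hypothetical, the third-party 'requests' package is an assumption, and no API-key field is sent because none of the input types defined in these schema files declare one. Type and field names are taken verbatim from schema.graphql, show_config.graphql and firewall_group.graphql.

#!/usr/bin/env python3
# Illustrative client for the GraphQL endpoint added in this changeset.
# Assumptions (not part of the diff): the endpoint URL below and the
# availability of the 'requests' package on the client side.

import requests

API_URL = 'http://192.0.2.1/graphql'  # hypothetical router address

# Query handled by make_show_config_resolver() -> Session.show_config();
# configFormat "json" returns parsed JSON through the 'Generic' scalar
# declared in show_config.graphql.
SHOW_CONFIG = """
{
  ShowConfig (data: {path: ["interfaces", "ethernet"], configFormat: "json"}) {
    success
    errors
    data { result }
  }
}
"""

# Mutation routed through the @configure directive; Session.configure()
# renders create_firewall_address_group.tmpl into 'set' commands and commits.
CREATE_GROUP = """
mutation {
  CreateFirewallAddressGroup (data: {name: "ExampleGroup",
                                     address: ["192.0.2.10", "192.0.2.11"]}) {
    success
    errors
    data { name address }
  }
}
"""

def run(query: str) -> dict:
    # Ariadne's ASGI app accepts a standard GraphQL-over-HTTP POST body
    resp = requests.post(API_URL, json={'query': query})
    resp.raise_for_status()
    return resp.json()

if __name__ == '__main__':
    print(run(SHOW_CONFIG))
    print(run(CREATE_GROUP))

A successful reply arrives in the usual GraphQL envelope, for example {"data": {"ShowConfig": {"success": true, "errors": null, "data": {...}}}}; the "success"/"errors" pair inside each field mirrors the dictionaries returned by the generated resolvers shown earlier in this diff.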