Diffstat (limited to 'src')
194 files changed, 10343 insertions, 4609 deletions
diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py index 14387cbbf..94882fc14 100755 --- a/src/conf_mode/container.py +++ b/src/conf_mode/container.py @@ -22,6 +22,7 @@ from ipaddress import ip_address from ipaddress import ip_network from json import dumps as json_write +import psutil from vyos.base import Warning from vyos.config import Config from vyos.configdict import dict_merge @@ -148,6 +149,9 @@ def verify(container): if network_name not in container.get('network', {}): raise ConfigError(f'Container network "{network_name}" does not exist!') + if 'name_server' in container_config and 'no_name_server' not in container['network'][network_name]: + raise ConfigError(f'Setting name server has no effect when attached container network has DNS enabled!') + if 'address' in container_config['network'][network_name]: cnt_ipv4 = 0 cnt_ipv6 = 0 @@ -220,6 +224,21 @@ def verify(container): if not os.path.exists(source): raise ConfigError(f'Volume "{volume}" source path "{source}" does not exist!') + if 'tmpfs' in container_config: + for tmpfs, tmpfs_config in container_config['tmpfs'].items(): + if 'destination' not in tmpfs_config: + raise ConfigError(f'tmpfs "{tmpfs}" has no destination path configured!') + if 'size' in tmpfs_config: + free_mem_mb: int = psutil.virtual_memory().available / 1024 / 1024 + if int(tmpfs_config['size']) > free_mem_mb: + Warning(f'tmpfs "{tmpfs}" size is greater than the current free memory!') + + total_mem_mb: int = (psutil.virtual_memory().total / 1024 / 1024) / 2 + if int(tmpfs_config['size']) > total_mem_mb: + raise ConfigError(f'tmpfs "{tmpfs}" size should not be more than 50% of total system memory!') + else: + raise ConfigError(f'tmpfs "{tmpfs}" has no size configured!') + if 'port' in container_config: for tmp in container_config['port']: if not {'source', 'destination'} <= set(container_config['port'][tmp]): @@ -270,6 +289,13 @@ def verify(container): if 'registry' in container: for registry, registry_config in container['registry'].items(): + if 'mirror' in registry_config: + if 'host_name' in registry_config['mirror'] and 'address' in registry_config['mirror']: + raise ConfigError(f'Container registry mirror address/host-name are mutually exclusive!') + + if 'path' in registry_config['mirror'] and not registry_config['mirror']['path'].startswith('/'): + raise ConfigError('Container registry mirror path must start with "/"!') + if 'authentication' not in registry_config: continue if not {'username', 'password'} <= set(registry_config['authentication']): @@ -298,6 +324,11 @@ def generate_run_arguments(name, container_config): cap = cap.upper().replace('-', '_') capabilities += f' --cap-add={cap}' + # Grant root capabilities to the container + privileged = '' + if 'privileged' in container_config: + privileged = '--privileged' + # Add a host device to the container /dev/x:/dev/x device = '' if 'device' in container_config: @@ -359,13 +390,26 @@ def generate_run_arguments(name, container_config): prop = vol_config['propagation'] volume += f' --volume {svol}:{dvol}:{mode},{prop}' + # Mount tmpfs + tmpfs = '' + if 'tmpfs' in container_config: + for tmpfs_config in container_config['tmpfs'].values(): + dest = tmpfs_config['destination'] + size = tmpfs_config['size'] + tmpfs += f' --mount=type=tmpfs,tmpfs-size={size}M,destination={dest}' + host_pid = '' if 'allow_host_pid' in container_config: host_pid = '--pid host' - container_base_cmd = f'--detach --interactive --tty --replace {capabilities} --cpus {cpu_quota} {sysctl_opt} ' \ + name_server = 
'' + if 'name_server' in container_config: + for ns in container_config['name_server']: + name_server += f'--dns {ns}' + + container_base_cmd = f'--detach --interactive --tty --replace {capabilities} {privileged} --cpus {cpu_quota} {sysctl_opt} ' \ f'--memory {memory}m --shm-size {shared_memory}m --memory-swap 0 --restart {restart} ' \ - f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label} {uid} {host_pid}' + f'--name {name} {hostname} {device} {port} {name_server} {volume} {tmpfs} {env_opt} {label} {uid} {host_pid}' entrypoint = '' if 'entrypoint' in container_config: @@ -419,12 +463,18 @@ def generate(container): 'dns_enabled': True, 'ipam_options': { 'driver': 'host-local' + }, + 'options': { + 'mtu': '1500' } } if 'no_name_server' in network_config: tmp['dns_enabled'] = False + if 'mtu' in network_config: + tmp['options']['mtu'] = network_config['mtu'] + for prefix in network_config['prefix']: net = {'subnet': prefix, 'gateway': inc_ip(prefix, 1)} tmp['subnets'].append(net) diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py index 5638a9668..274ca2ce6 100755 --- a/src/conf_mode/firewall.py +++ b/src/conf_mode/firewall.py @@ -18,7 +18,6 @@ import os import re from sys import exit - from vyos.base import Warning from vyos.config import Config from vyos.configdict import is_node_changed @@ -34,13 +33,19 @@ from vyos.utils.dict import dict_search_recursive from vyos.utils.process import call from vyos.utils.process import cmd from vyos.utils.process import rc_cmd +from vyos.utils.network import get_vrf_members +from vyos.utils.network import get_interface_vrf from vyos import ConfigError from vyos import airbag +from pathlib import Path from subprocess import run as subp_run airbag.enable() nftables_conf = '/run/nftables.conf' +domain_resolver_usage = '/run/use-vyos-domain-resolver-firewall' +firewall_config_dir = "/config/firewall" + sysctl_file = r'/run/sysctl/10-vyos-firewall.conf' valid_groups = [ @@ -49,7 +54,8 @@ valid_groups = [ 'network_group', 'port_group', 'interface_group', - ## Added for group ussage in bridge firewall + 'remote_group', + ## Added for group usage in bridge firewall 'ipv4_address_group', 'ipv6_address_group', 'ipv4_network_group', @@ -128,7 +134,28 @@ def get_config(config=None): firewall['geoip_updated'] = geoip_updated(conf, firewall) - fqdn_config_parse(firewall) + fqdn_config_parse(firewall, 'firewall') + + if not os.path.exists(nftables_conf): + firewall['first_install'] = True + + if 'zone' in firewall: + for local_zone, local_zone_conf in firewall['zone'].items(): + if 'local_zone' not in local_zone_conf: + # Get physical interfaces assigned to the zone if vrf is used: + if 'vrf' in local_zone_conf['member']: + local_zone_conf['vrf_interfaces'] = {} + for vrf_name in local_zone_conf['member']['vrf']: + local_zone_conf['vrf_interfaces'][vrf_name] = ','.join(get_vrf_members(vrf_name)) + continue + + local_zone_conf['from_local'] = {} + + for zone, zone_conf in firewall['zone'].items(): + if zone == local_zone or 'from' not in zone_conf: + continue + if local_zone in zone_conf['from']: + local_zone_conf['from_local'][zone] = zone_conf['from'][local_zone] set_dependents('conntrack', conf) @@ -178,7 +205,7 @@ def verify_rule(firewall, family, hook, priority, rule_id, rule_conf): if 'jump' not in rule_conf['action']: raise ConfigError('jump-target defined, but action jump needed and it is not defined') target = rule_conf['jump_target'] - if hook != 'name': # This is a bit clumsy, but consolidates a chunk of code. 
+ if hook != 'name': # This is a bit clumsy, but consolidates a chunk of code. verify_jump_target(firewall, hook, target, family, recursive=True) else: verify_jump_target(firewall, hook, target, family, recursive=False) @@ -241,12 +268,12 @@ def verify_rule(firewall, family, hook, priority, rule_id, rule_conf): if dict_search_args(rule_conf, 'gre', 'flags', 'checksum') is None: # There is no builtin match in nftables for the GRE key, so we need to do a raw lookup. - # The offset of the key within the packet shifts depending on the C-flag. - # 99% of the time, nobody will have checksums enabled - it's usually a manual config option. - # We can either assume it is unset unless otherwise directed + # The offset of the key within the packet shifts depending on the C-flag. + # 99% of the time, nobody will have checksums enabled - it's usually a manual config option. + # We can either assume it is unset unless otherwise directed # (confusing, requires doco to explain why it doesn't work sometimes) - # or, demand an explicit selection to be made for this specific match rule. - # This check enforces the latter. The user is free to create rules for both cases. + # or, demand an explicit selection to be made for this specific match rule. + # This check enforces the latter. The user is free to create rules for both cases. raise ConfigError('Matching GRE tunnel key requires an explicit checksum flag match. For most cases, use "gre flags checksum unset"') if dict_search_args(rule_conf, 'gre', 'flags', 'key', 'unset') is not None: @@ -259,7 +286,7 @@ def verify_rule(firewall, family, hook, priority, rule_id, rule_conf): if gre_inner_value < 0 or gre_inner_value > 65535: raise ConfigError('inner-proto outside valid ethertype range 0-65535') except ValueError: - pass # Symbolic constant, pre-validated before reaching here. + pass # Symbolic constant, pre-validated before reaching here. 
tcp_flags = dict_search_args(rule_conf, 'tcp', 'flags') if tcp_flags: @@ -286,8 +313,8 @@ def verify_rule(firewall, family, hook, priority, rule_id, rule_conf): raise ConfigError('Only one of address, fqdn or geoip can be specified') if 'group' in side_conf: - if len({'address_group', 'network_group', 'domain_group'} & set(side_conf['group'])) > 1: - raise ConfigError('Only one address-group, network-group or domain-group can be specified') + if len({'address_group', 'network_group', 'domain_group', 'remote_group'} & set(side_conf['group'])) > 1: + raise ConfigError('Only one address-group, network-group, remote-group or domain-group can be specified') for group in valid_groups: if group in side_conf['group']: @@ -307,7 +334,7 @@ def verify_rule(firewall, family, hook, priority, rule_id, rule_conf): error_group = fw_group.replace("_", "-") - if group in ['address_group', 'network_group', 'domain_group']: + if group in ['address_group', 'network_group', 'domain_group', 'remote_group']: types = [t for t in ['address', 'fqdn', 'geoip'] if t in side_conf] if types: raise ConfigError(f'{error_group} and {types[0]} cannot both be defined') @@ -410,6 +437,16 @@ def verify(firewall): for ifname in interfaces: verify_hardware_offload(ifname) + if 'offload' in firewall.get('global_options', {}).get('state_policy', {}): + offload_path = firewall['global_options']['state_policy']['offload'] + if 'offload_target' not in offload_path: + raise ConfigError('offload-target must be specified') + + offload_target = offload_path['offload_target'] + + if not dict_search_args(firewall, 'flowtable', offload_target): + raise ConfigError(f'Invalid offload-target. Flowtable "{offload_target}" does not exist on the system') + if 'group' in firewall: for group_type in nested_group_types: if group_type in firewall['group']: @@ -417,6 +454,11 @@ def verify(firewall): for group_name, group in groups.items(): verify_nested_group(group_name, group, groups, []) + if 'remote_group' in firewall['group']: + for group_name, group in firewall['group']['remote_group'].items(): + if 'url' not in group: + raise ConfigError(f'remote-group {group_name} must have a url configured') + for family in ['ipv4', 'ipv6', 'bridge']: if family in firewall: for chain in ['name','forward','input','output', 'prerouting']: @@ -438,28 +480,45 @@ def verify(firewall): local_zone = False zone_interfaces = [] + zone_vrf = [] if 'zone' in firewall: for zone, zone_conf in firewall['zone'].items(): - if 'local_zone' not in zone_conf and 'interface' not in zone_conf: + if 'local_zone' not in zone_conf and 'member' not in zone_conf: raise ConfigError(f'Zone "{zone}" has no interfaces and is not the local zone') if 'local_zone' in zone_conf: if local_zone: raise ConfigError('There cannot be multiple local zones') - if 'interface' in zone_conf: + if 'member' in zone_conf: raise ConfigError('Local zone cannot have interfaces assigned') if 'intra_zone_filtering' in zone_conf: raise ConfigError('Local zone cannot use intra-zone-filtering') local_zone = True - if 'interface' in zone_conf: - found_duplicates = [intf for intf in zone_conf['interface'] if intf in zone_interfaces] + if 'member' in zone_conf: + if 'interface' in zone_conf['member']: + for iface in zone_conf['member']['interface']: + + if iface in zone_interfaces: + raise ConfigError(f'Interfaces cannot be assigned to multiple zones') + + iface_vrf = get_interface_vrf(iface) + if iface_vrf != 'default': + Warning(f"Interface {iface} assigned to zone {zone} is in VRF {iface_vrf}. 
This might not work as expected.") + zone_interfaces.append(iface) - if found_duplicates: - raise ConfigError(f'Interfaces cannot be assigned to multiple zones') + if 'vrf' in zone_conf['member']: + for vrf in zone_conf['member']['vrf']: + if vrf in zone_vrf: + raise ConfigError(f'VRF cannot be assigned to multiple zones') + zone_vrf.append(vrf) - zone_interfaces += zone_conf['interface'] + if 'vrf_interfaces' in zone_conf: + for vrf_name, vrf_interfaces in zone_conf['vrf_interfaces'].items(): + if not vrf_interfaces: + raise ConfigError( + f'VRF "{vrf_name}" cannot be a member of any zone. It does not contain any interfaces.') if 'intra_zone_filtering' in zone_conf: intra_zone = zone_conf['intra_zone_filtering'] @@ -495,24 +554,17 @@ def verify(firewall): return None def generate(firewall): - if not os.path.exists(nftables_conf): - firewall['first_install'] = True - - if 'zone' in firewall: - for local_zone, local_zone_conf in firewall['zone'].items(): - if 'local_zone' not in local_zone_conf: - continue - - local_zone_conf['from_local'] = {} - - for zone, zone_conf in firewall['zone'].items(): - if zone == local_zone or 'from' not in zone_conf: - continue - if local_zone in zone_conf['from']: - local_zone_conf['from_local'][zone] = zone_conf['from'][local_zone] - render(nftables_conf, 'firewall/nftables.j2', firewall) render(sysctl_file, 'firewall/sysctl-firewall.conf.j2', firewall) + + # Cleanup remote-group cache files + if os.path.exists(firewall_config_dir): + for fw_file in os.listdir(firewall_config_dir): + # Delete matching files in 'config/firewall' that no longer exist as a remote-group in config + if fw_file.startswith("R_") and fw_file.endswith(".txt"): + if 'group' not in firewall or 'remote_group' not in firewall['group'] or fw_file[2:-4] not in firewall['group']['remote_group'].keys(): + os.unlink(os.path.join(firewall_config_dir, fw_file)) + return None def parse_firewall_error(output): @@ -570,19 +622,22 @@ def apply(firewall): call_dependents() - # T970 Enable a resolver (systemd daemon) that checks - # domain-group/fqdn addresses and update entries for domains by timeout - # If router loaded without internet connection or for synchronization - domain_action = 'stop' - if dict_search_args(firewall, 'group', 'domain_group') or firewall['ip_fqdn'] or firewall['ip6_fqdn']: - domain_action = 'restart' + ## DOMAIN RESOLVER + domain_action = 'restart' + if dict_search_args(firewall, 'group', 'remote_group') or dict_search_args(firewall, 'group', 'domain_group') or firewall['ip_fqdn'].items() or firewall['ip6_fqdn'].items(): + text = f'# Automatically generated by firewall.py\nThis file indicates that vyos-domain-resolver service is used by the firewall.\n' + Path(domain_resolver_usage).write_text(text) + else: + Path(domain_resolver_usage).unlink(missing_ok=True) + if not Path('/run').glob('use-vyos-domain-resolver*'): + domain_action = 'stop' call(f'systemctl {domain_action} vyos-domain-resolver.service') if firewall['geoip_updated']: # Call helper script to Update set contents if 'name' in firewall['geoip_updated'] or 'ipv6_name' in firewall['geoip_updated']: print('Updating GeoIP. 
Please wait...') - geoip_update(firewall) + geoip_update(firewall=firewall) return None diff --git a/src/conf_mode/interfaces_bonding.py b/src/conf_mode/interfaces_bonding.py index bbbfb0385..84316c16e 100755 --- a/src/conf_mode/interfaces_bonding.py +++ b/src/conf_mode/interfaces_bonding.py @@ -30,19 +30,21 @@ from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_mtu_ipv6 from vyos.configverify import verify_vlan_config from vyos.configverify import verify_vrf +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.ifconfig import BondIf from vyos.ifconfig.ethernet import EthernetIf from vyos.ifconfig import Section -from vyos.template import render_to_string from vyos.utils.assertion import assert_mac from vyos.utils.dict import dict_search from vyos.utils.dict import dict_to_paths_values from vyos.utils.network import interface_exists +from vyos.utils.process import is_systemd_service_running from vyos.configdict import has_address_configured from vyos.configdict import has_vrf_configured -from vyos.configdep import set_dependents, call_dependents +from vyos.configdep import set_dependents +from vyos.configdep import call_dependents from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -87,10 +89,13 @@ def get_config(config=None): bond['mode'] = get_bond_mode(bond['mode']) tmp = is_node_changed(conf, base + [ifname, 'mode']) - if tmp: bond['shutdown_required'] = {} + if tmp: bond.update({'shutdown_required' : {}}) tmp = is_node_changed(conf, base + [ifname, 'lacp-rate']) - if tmp: bond['shutdown_required'] = {} + if tmp: bond.update({'shutdown_required' : {}}) + + tmp = is_node_changed(conf, base + [ifname, 'evpn']) + if tmp: bond.update({'frr_dict' : get_frrender_dict(conf)}) # determine which members have been removed interfaces_removed = leaf_node_changed(conf, base + [ifname, 'member', 'interface']) @@ -121,9 +126,8 @@ def get_config(config=None): # Restore existing config level conf.set_level(old_level) - if dict_search('member.interface', bond): - for interface, interface_config in bond['member']['interface'].items(): - + if dict_search('member.interface', bond) is not None: + for interface in bond['member']['interface']: interface_ethernet_config = conf.get_config_dict( ['interfaces', 'ethernet', interface], key_mangling=('-', '_'), @@ -132,44 +136,45 @@ def get_config(config=None): with_defaults=False, with_recursive_defaults=False) - interface_config['config_paths'] = dict_to_paths_values(interface_ethernet_config) + bond['member']['interface'][interface].update({'config_paths' : + dict_to_paths_values(interface_ethernet_config)}) # Check if member interface is a new member if not conf.exists_effective(base + [ifname, 'member', 'interface', interface]): bond['shutdown_required'] = {} - interface_config['new_added'] = {} + bond['member']['interface'][interface].update({'new_added' : {}}) # Check if member interface is disabled conf.set_level(['interfaces']) section = Section.section(interface) # this will be 'ethernet' for 'eth0' if conf.exists([section, interface, 'disable']): - interface_config['disable'] = '' + if tmp: bond['member']['interface'][interface].update({'disable': ''}) conf.set_level(old_level) # Check if member interface is already member of another bridge tmp = is_member(conf, interface, 'bridge') - if tmp: interface_config['is_bridge_member'] = tmp + if tmp: bond['member']['interface'][interface].update({'is_bridge_member' : tmp}) # Check if member 
interface is already member of a bond tmp = is_member(conf, interface, 'bonding') - for tmp in is_member(conf, interface, 'bonding'): - if bond['ifname'] == tmp: - continue - interface_config['is_bond_member'] = tmp + if ifname in tmp: + del tmp[ifname] + if tmp: bond['member']['interface'][interface].update({'is_bond_member' : tmp}) # Check if member interface is used as source-interface on another interface tmp = is_source_interface(conf, interface) - if tmp: interface_config['is_source_interface'] = tmp + if tmp: bond['member']['interface'][interface].update({'is_source_interface' : tmp}) # bond members must not have an assigned address tmp = has_address_configured(conf, interface) - if tmp: interface_config['has_address'] = {} + if tmp: bond['member']['interface'][interface].update({'has_address' : ''}) # bond members must not have a VRF attached tmp = has_vrf_configured(conf, interface) - if tmp: interface_config['has_vrf'] = {} + if tmp: bond['member']['interface'][interface].update({'has_vrf' : ''}) + return bond @@ -260,16 +265,16 @@ def verify(bond): return None def generate(bond): - bond['frr_zebra_config'] = '' - if 'deleted' not in bond: - bond['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', bond) + if 'frr_dict' in bond and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(bond['frr_dict']) return None def apply(bond): - ifname = bond['ifname'] - b = BondIf(ifname) + if 'frr_dict' in bond and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() + + b = BondIf(bond['ifname']) if 'deleted' in bond: - # delete interface b.remove() else: b.update(bond) @@ -281,17 +286,6 @@ def apply(bond): raise ConfigError('Error in updating ethernet interface ' 'after deleting it from bond') - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_zebra_config' in bond: - frr_cfg.add_before(frr.default_add_before, bond['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) - return None if __name__ == '__main__': diff --git a/src/conf_mode/interfaces_bridge.py b/src/conf_mode/interfaces_bridge.py index 7b2c1ee0b..95dcc543e 100755 --- a/src/conf_mode/interfaces_bridge.py +++ b/src/conf_mode/interfaces_bridge.py @@ -25,6 +25,7 @@ from vyos.configdict import has_vlan_subinterface_configured from vyos.configverify import verify_dhcpv6 from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_vrf +from vyos.configverify import verify_mtu_ipv6 from vyos.ifconfig import BridgeIf from vyos.configdict import has_address_configured from vyos.configdict import has_vrf_configured @@ -53,27 +54,30 @@ def get_config(config=None): tmp = node_changed(conf, base + [ifname, 'member', 'interface']) if tmp: if 'member' in bridge: - bridge['member'].update({'interface_remove' : tmp }) + bridge['member'].update({'interface_remove': {t: {} for t in tmp}}) else: - bridge.update({'member' : {'interface_remove' : tmp }}) - for interface in tmp: - # When using VXLAN member interfaces that are configured for Single - # VXLAN Device (SVD) we need to call the VXLAN conf-mode script to - # re-create VLAN to VNI mappings if required, but only if the interface - # is already live on the system - this must not be done on first commit - if 
interface.startswith('vxlan') and interface_exists(interface): - set_dependents('vxlan', conf, interface) - # When using Wireless member interfaces we need to inform hostapd - # to properly set-up the bridge - elif interface.startswith('wlan') and interface_exists(interface): - set_dependents('wlan', conf, interface) + bridge.update({'member': {'interface_remove': {t: {} for t in tmp}}}) + for interface in tmp: + # When using VXLAN member interfaces that are configured for Single + # VXLAN Device (SVD) we need to call the VXLAN conf-mode script to + # re-create VLAN to VNI mappings if required, but only if the interface + # is already live on the system - this must not be done on first commit + if interface.startswith('vxlan') and interface_exists(interface): + set_dependents('vxlan', conf, interface) + _, vxlan = get_interface_dict(conf, ['interfaces', 'vxlan'], ifname=interface) + bridge['member']['interface_remove'].update({interface: vxlan}) + # When using Wireless member interfaces we need to inform hostapd + # to properly set-up the bridge + elif interface.startswith('wlan') and interface_exists(interface): + set_dependents('wlan', conf, interface) if dict_search('member.interface', bridge) is not None: for interface in list(bridge['member']['interface']): # Check if member interface is already member of another bridge tmp = is_member(conf, interface, 'bridge') - if tmp and bridge['ifname'] not in tmp: - bridge['member']['interface'][interface].update({'is_bridge_member' : tmp}) + if ifname in tmp: + del tmp[ifname] + if tmp: bridge['member']['interface'][interface].update({'is_bridge_member' : tmp}) # Check if member interface is already member of a bond tmp = is_member(conf, interface, 'bonding') @@ -118,11 +122,22 @@ def get_config(config=None): return bridge def verify(bridge): + # to delete interface or remove a member interface VXLAN first need to check if + # VXLAN does not require to be a member of a bridge interface + if dict_search('member.interface_remove', bridge): + for iface, iface_config in bridge['member']['interface_remove'].items(): + if iface.startswith('vxlan') and dict_search('parameters.neighbor_suppress', iface_config) != None: + raise ConfigError( + f'To detach interface {iface} from bridge you must first ' + f'disable "neighbor-suppress" parameter in the VXLAN interface {iface}' + ) + if 'deleted' in bridge: return None verify_dhcpv6(bridge) verify_vrf(bridge) + verify_mtu_ipv6(bridge) verify_mirror_redirect(bridge) ifname = bridge['ifname'] @@ -192,7 +207,7 @@ def apply(bridge): try: call_dependents() except ConfigError: - raise ConfigError('Error updating member interface configuration after changing bridge!') + raise ConfigError(f'Error updating member interface {interface} configuration after changing bridge!') return None diff --git a/src/conf_mode/interfaces_ethernet.py b/src/conf_mode/interfaces_ethernet.py index 34ce7bc47..41c89fdf8 100755 --- a/src/conf_mode/interfaces_ethernet.py +++ b/src/conf_mode/interfaces_ethernet.py @@ -33,15 +33,16 @@ from vyos.configverify import verify_vrf from vyos.configverify import verify_bond_bridge_member from vyos.configverify import verify_eapol from vyos.ethtool import Ethtool +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.ifconfig import EthernetIf from vyos.ifconfig import BondIf -from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.dict import dict_to_paths_values from vyos.utils.dict import dict_set from vyos.utils.dict 
import dict_delete +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -164,6 +165,9 @@ def get_config(config=None): tmp = is_node_changed(conf, base + [ifname, 'duplex']) if tmp: ethernet.update({'speed_duplex_changed': {}}) + tmp = is_node_changed(conf, base + [ifname, 'evpn']) + if tmp: ethernet.update({'frr_dict' : get_frrender_dict(conf)}) + return ethernet def verify_speed_duplex(ethernet: dict, ethtool: Ethtool): @@ -318,42 +322,25 @@ def verify_ethernet(ethernet): return None def generate(ethernet): - if 'deleted' in ethernet: - return None - - ethernet['frr_zebra_config'] = '' - if 'deleted' not in ethernet: - ethernet['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', ethernet) - + if 'frr_dict' in ethernet and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(ethernet['frr_dict']) return None def apply(ethernet): - ifname = ethernet['ifname'] - - e = EthernetIf(ifname) + if 'frr_dict' in ethernet and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() + e = EthernetIf(ethernet['ifname']) if 'deleted' in ethernet: - # delete interface e.remove() else: e.update(ethernet) - - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_zebra_config' in ethernet: - frr_cfg.add_before(frr.default_add_before, ethernet['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) - apply(c) except ConfigError as e: print(e) diff --git a/src/conf_mode/interfaces_geneve.py b/src/conf_mode/interfaces_geneve.py index 007708d4a..1c5b4d0e7 100755 --- a/src/conf_mode/interfaces_geneve.py +++ b/src/conf_mode/interfaces_geneve.py @@ -47,7 +47,7 @@ def get_config(config=None): # GENEVE interfaces are picky and require recreation if certain parameters # change. But a GENEVE interface should - of course - not be re-created if # it's description or IP address is adjusted. Feels somehow logic doesn't it? 
- for cli_option in ['remote', 'vni', 'parameters']: + for cli_option in ['remote', 'vni', 'parameters', 'port']: if is_node_changed(conf, base + [ifname, cli_option]): geneve.update({'rebuild_required': {}}) diff --git a/src/conf_mode/interfaces_openvpn.py b/src/conf_mode/interfaces_openvpn.py index 8c1213e2b..a9b4e570d 100755 --- a/src/conf_mode/interfaces_openvpn.py +++ b/src/conf_mode/interfaces_openvpn.py @@ -32,6 +32,7 @@ from vyos.base import DeprecationWarning from vyos.config import Config from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed +from vyos.configdiff import get_config_diff from vyos.configverify import verify_vrf from vyos.configverify import verify_bridge_delete from vyos.configverify import verify_mirror_redirect @@ -94,6 +95,23 @@ def get_config(config=None): if 'deleted' in openvpn: return openvpn + if not is_node_changed(conf, base) and dict_search_args(openvpn, 'tls'): + diff = get_config_diff(conf) + if diff.get_child_nodes_diff(['pki'], recursive=True).get('add') == ['ca', 'certificate']: + crl_path = os.path.join(cfg_dir, f'{ifname}_crl.pem') + if os.path.exists(crl_path): + # do not restart service when changed only CRL and crl file already exist + openvpn.update({'no_restart_crl': True}) + for rec in diff.get_child_nodes_diff(['pki', 'ca'], recursive=True).get('add'): + if diff.get_child_nodes_diff(['pki', 'ca', rec], recursive=True).get('add') != ['crl']: + openvpn.update({'no_restart_crl': False}) + break + if openvpn.get('no_restart_crl'): + for rec in diff.get_child_nodes_diff(['pki', 'certificate'], recursive=True).get('add'): + if diff.get_child_nodes_diff(['pki', 'certificate', rec], recursive=True).get('add') != ['revoke']: + openvpn.update({'no_restart_crl': False}) + break + if is_node_changed(conf, base + [ifname, 'openvpn-option']): openvpn.update({'restart_required': {}}) if is_node_changed(conf, base + [ifname, 'enable-dco']): @@ -786,10 +804,12 @@ def apply(openvpn): # No matching OpenVPN process running - maybe it got killed or none # existed - nevertheless, spawn new OpenVPN process - action = 'reload-or-restart' - if 'restart_required' in openvpn: - action = 'restart' - call(f'systemctl {action} openvpn@{interface}.service') + + if not openvpn.get('no_restart_crl'): + action = 'reload-or-restart' + if 'restart_required' in openvpn: + action = 'restart' + call(f'systemctl {action} openvpn@{interface}.service') o = VTunIf(**openvpn) o.update(openvpn) diff --git a/src/conf_mode/interfaces_pseudo-ethernet.py b/src/conf_mode/interfaces_pseudo-ethernet.py index 446beffd3..b066fd542 100755 --- a/src/conf_mode/interfaces_pseudo-ethernet.py +++ b/src/conf_mode/interfaces_pseudo-ethernet.py @@ -27,6 +27,7 @@ from vyos.configverify import verify_bridge_delete from vyos.configverify import verify_source_interface from vyos.configverify import verify_vlan_config from vyos.configverify import verify_mtu_parent +from vyos.configverify import verify_mtu_ipv6 from vyos.configverify import verify_mirror_redirect from vyos.ifconfig import MACVLANIf from vyos.utils.network import interface_exists @@ -71,6 +72,7 @@ def verify(peth): verify_vrf(peth) verify_address(peth) verify_mtu_parent(peth, peth['parent']) + verify_mtu_ipv6(peth) verify_mirror_redirect(peth) # use common function to verify VLAN configuration verify_vlan_config(peth) diff --git a/src/conf_mode/interfaces_tunnel.py b/src/conf_mode/interfaces_tunnel.py index 98ef98d12..ee1436e49 100755 --- a/src/conf_mode/interfaces_tunnel.py +++ 
b/src/conf_mode/interfaces_tunnel.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2018-2024 yOS maintainers and contributors +# Copyright (C) 2018-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -13,9 +13,8 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. - from sys import exit - +import ipaddress from vyos.config import Config from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed @@ -89,6 +88,13 @@ def verify(tunnel): raise ConfigError('Tunnel used for NHRP, it can not be deleted!') return None + if 'nhrp' in tunnel: + if 'address' in tunnel: + address_list = dict_search('address', tunnel) + for tunip in address_list: + if ipaddress.ip_network(tunip, strict=False).prefixlen != 32: + raise ConfigError( + 'Tunnel is used for NHRP, Netmask should be /32!') verify_tunnel(tunnel) diff --git a/src/conf_mode/interfaces_virtual-ethernet.py b/src/conf_mode/interfaces_virtual-ethernet.py index cb6104f59..59ce474fc 100755 --- a/src/conf_mode/interfaces_virtual-ethernet.py +++ b/src/conf_mode/interfaces_virtual-ethernet.py @@ -23,6 +23,7 @@ from vyos.configdict import get_interface_dict from vyos.configverify import verify_address from vyos.configverify import verify_bridge_delete from vyos.configverify import verify_vrf +from vyos.configverify import verify_mtu_ipv6 from vyos.ifconfig import VethIf from vyos.utils.network import interface_exists airbag.enable() @@ -62,6 +63,7 @@ def verify(veth): return None verify_vrf(veth) + verify_mtu_ipv6(veth) verify_address(veth) if 'peer_name' not in veth: diff --git a/src/conf_mode/interfaces_vti.py b/src/conf_mode/interfaces_vti.py index 20629c6c1..915bde066 100755 --- a/src/conf_mode/interfaces_vti.py +++ b/src/conf_mode/interfaces_vti.py @@ -20,6 +20,7 @@ from vyos.config import Config from vyos.configdict import get_interface_dict from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_vrf +from vyos.configverify import verify_mtu_ipv6 from vyos.ifconfig import VTIIf from vyos import ConfigError from vyos import airbag @@ -40,6 +41,7 @@ def get_config(config=None): def verify(vti): verify_vrf(vti) + verify_mtu_ipv6(vti) verify_mirror_redirect(vti) return None diff --git a/src/conf_mode/interfaces_vxlan.py b/src/conf_mode/interfaces_vxlan.py index 68646e8ff..256b65708 100755 --- a/src/conf_mode/interfaces_vxlan.py +++ b/src/conf_mode/interfaces_vxlan.py @@ -95,6 +95,8 @@ def verify(vxlan): if 'group' in vxlan: if 'source_interface' not in vxlan: raise ConfigError('Multicast VXLAN requires an underlaying interface') + if 'remote' in vxlan: + raise ConfigError('Both group and remote cannot be specified') verify_source_interface(vxlan) if not any(tmp in ['group', 'remote', 'source_address', 'source_interface'] for tmp in vxlan): diff --git a/src/conf_mode/interfaces_wireguard.py b/src/conf_mode/interfaces_wireguard.py index 7abdfdbfa..3ca6ecdca 100755 --- a/src/conf_mode/interfaces_wireguard.py +++ b/src/conf_mode/interfaces_wireguard.py @@ -19,6 +19,9 @@ from sys import exit from vyos.config import Config from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed +from vyos.configdict import is_source_interface +from vyos.configdep import set_dependents +from vyos.configdep import call_dependents from 
vyos.configverify import verify_vrf from vyos.configverify import verify_address from vyos.configverify import verify_bridge_delete @@ -29,8 +32,10 @@ from vyos.ifconfig import WireGuardIf from vyos.utils.kernel import check_kmod from vyos.utils.network import check_port_availability from vyos.utils.network import is_wireguard_key_pair +from vyos.utils.process import call from vyos import ConfigError from vyos import airbag +from pathlib import Path airbag.enable() @@ -54,11 +59,31 @@ def get_config(config=None): if is_node_changed(conf, base + [ifname, 'peer']): wireguard.update({'rebuild_required': {}}) + wireguard['peers_need_resolve'] = [] + if 'peer' in wireguard: + for peer, peer_config in wireguard['peer'].items(): + if 'disable' not in peer_config and 'host_name' in peer_config: + wireguard['peers_need_resolve'].append(peer) + + # Check if interface is used as source-interface on VXLAN interface + tmp = is_source_interface(conf, ifname, 'vxlan') + if tmp: + if 'deleted' not in wireguard: + set_dependents('vxlan', conf, tmp) + else: + wireguard['is_source_interface'] = tmp + return wireguard + def verify(wireguard): if 'deleted' in wireguard: verify_bridge_delete(wireguard) + if 'is_source_interface' in wireguard: + raise ConfigError( + f'Interface "{wireguard["ifname"]}" cannot be deleted as it is used ' + f'as source interface for "{wireguard["is_source_interface"]}"!' + ) return None verify_mtu_ipv6(wireguard) @@ -70,42 +95,53 @@ def verify(wireguard): if 'private_key' not in wireguard: raise ConfigError('Wireguard private-key not defined') - if 'peer' not in wireguard: - raise ConfigError('At least one Wireguard peer is required!') - if 'port' in wireguard and 'port_changed' in wireguard: listen_port = int(wireguard['port']) - if check_port_availability('0.0.0.0', listen_port, 'udp') is not True: + if check_port_availability(None, listen_port, protocol='udp') is not True: raise ConfigError(f'UDP port {listen_port} is busy or unavailable and ' 'cannot be used for the interface!') # run checks on individual configured WireGuard peer - public_keys = [] - for tmp in wireguard['peer']: - peer = wireguard['peer'][tmp] + if 'peer' in wireguard: + public_keys = [] + for tmp in wireguard['peer']: + peer = wireguard['peer'][tmp] + + base_error = f'WireGuard peer "{tmp}":' + + if 'host_name' in peer and 'address' in peer: + raise ConfigError(f'{base_error} address/host-name are mutually exclusive!') + + if 'allowed_ips' not in peer: + raise ConfigError(f'{base_error} missing mandatory allowed-ips!') - if 'allowed_ips' not in peer: - raise ConfigError(f'Wireguard allowed-ips required for peer "{tmp}"!') + if 'public_key' not in peer: + raise ConfigError(f'{base_error} missing mandatory public-key!') - if 'public_key' not in peer: - raise ConfigError(f'Wireguard public-key required for peer "{tmp}"!') + if peer['public_key'] in public_keys: + raise ConfigError(f'{base_error} duplicate public-key!') - if ('address' in peer and 'port' not in peer) or ('port' in peer and 'address' not in peer): - raise ConfigError('Both Wireguard port and address must be defined ' - f'for peer "{tmp}" if either one of them is set!') + if 'disable' not in peer: + if is_wireguard_key_pair(wireguard['private_key'], peer['public_key']): + tmp = wireguard["ifname"] + raise ConfigError(f'{base_error} identical public key as interface "{tmp}"!') - if peer['public_key'] in public_keys: - raise ConfigError(f'Duplicate public-key defined on peer "{tmp}"') + port_addr_error = f'{base_error} both port and address/host-name 
must '\ + 'be defined if either one of them is set!' + if 'port' not in peer: + if 'host_name' in peer or 'address' in peer: + raise ConfigError(port_addr_error) + else: + if 'host_name' not in peer and 'address' not in peer: + raise ConfigError(port_addr_error) - if 'disable' not in peer: - if is_wireguard_key_pair(wireguard['private_key'], peer['public_key']): - raise ConfigError(f'Peer "{tmp}" has the same public key as the interface "{wireguard["ifname"]}"') + public_keys.append(peer['public_key']) - public_keys.append(peer['public_key']) def generate(wireguard): return None + def apply(wireguard): check_kmod('wireguard') @@ -124,8 +160,28 @@ def apply(wireguard): wg = WireGuardIf(**wireguard) wg.update(wireguard) + domain_resolver_usage = '/run/use-vyos-domain-resolver-interfaces-wireguard-' + wireguard['ifname'] + + ## DOMAIN RESOLVER + domain_action = 'restart' + if 'peers_need_resolve' in wireguard and len(wireguard['peers_need_resolve']) > 0 and 'disable' not in wireguard: + from vyos.utils.file import write_file + + text = f'# Automatically generated by interfaces_wireguard.py\nThis file indicates that vyos-domain-resolver service is used by the interfaces_wireguard.\n' + text += "intefaces:\n" + "".join([f" - {peer}\n" for peer in wireguard['peers_need_resolve']]) + Path(domain_resolver_usage).write_text(text) + write_file(domain_resolver_usage, text) + else: + Path(domain_resolver_usage).unlink(missing_ok=True) + if not Path('/run').glob('use-vyos-domain-resolver*'): + domain_action = 'stop' + call(f'systemctl {domain_action} vyos-domain-resolver.service') + + call_dependents() + return None + if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/interfaces_wwan.py b/src/conf_mode/interfaces_wwan.py index 230eb14d6..ddbebfb4a 100755 --- a/src/conf_mode/interfaces_wwan.py +++ b/src/conf_mode/interfaces_wwan.py @@ -26,6 +26,7 @@ from vyos.configverify import verify_authentication from vyos.configverify import verify_interface_exists from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_vrf +from vyos.configverify import verify_mtu_ipv6 from vyos.ifconfig import WWANIf from vyos.utils.dict import dict_search from vyos.utils.process import cmd @@ -98,6 +99,7 @@ def verify(wwan): verify_interface_exists(wwan, ifname) verify_authentication(wwan) verify_vrf(wwan) + verify_mtu_ipv6(wwan) verify_mirror_redirect(wwan) return None diff --git a/src/conf_mode/load-balancing_reverse-proxy.py b/src/conf_mode/load-balancing_haproxy.py index 17226efe9..504a90596 100755..100644 --- a/src/conf_mode/load-balancing_reverse-proxy.py +++ b/src/conf_mode/load-balancing_haproxy.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2023-2024 VyOS maintainers and contributors +# Copyright (C) 2023-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -19,6 +19,7 @@ import os from sys import exit from shutil import rmtree +from vyos.defaults import systemd_services from vyos.config import Config from vyos.configverify import verify_pki_certificate from vyos.configverify import verify_pki_ca_certificate @@ -39,7 +40,6 @@ airbag.enable() load_balancing_dir = '/run/haproxy' load_balancing_conf_file = f'{load_balancing_dir}/haproxy.cfg' -systemd_service = 'haproxy.service' systemd_override = '/run/systemd/system/haproxy.service.d/10-override.conf' def get_config(config=None): @@ -48,7 +48,7 @@ def 
get_config(config=None): else: conf = Config() - base = ['load-balancing', 'reverse-proxy'] + base = ['load-balancing', 'haproxy'] if not conf.exists(base): return None lb = conf.get_config_dict(base, @@ -65,29 +65,39 @@ def verify(lb): return None if 'backend' not in lb or 'service' not in lb: - raise ConfigError(f'"service" and "backend" must be configured!') + raise ConfigError('Both "service" and "backend" must be configured!') for front, front_config in lb['service'].items(): if 'port' not in front_config: raise ConfigError(f'"{front} service port" must be configured!') # Check if bind address:port are used by another service - tmp_address = front_config.get('address', '0.0.0.0') + tmp_address = front_config.get('address', None) tmp_port = front_config['port'] if check_port_availability(tmp_address, int(tmp_port), 'tcp') is not True and \ not is_listen_port_bind_service(int(tmp_port), 'haproxy'): - raise ConfigError(f'"TCP" port "{tmp_port}" is used by another service') + raise ConfigError(f'TCP port "{tmp_port}" is used by another service') + + if 'http_compression' in front_config: + if front_config['mode'] != 'http': + raise ConfigError(f'service {front} must be set to http mode to use http-compression!') + if len(front_config['http_compression']['mime_type']) == 0: + raise ConfigError(f'service {front} must have at least one mime-type configured to use' + f'http_compression!') + + for cert in dict_search('ssl.certificate', front_config) or []: + verify_pki_certificate(lb, cert) for back, back_config in lb['backend'].items(): if 'http_check' in back_config: http_check = back_config['http_check'] if 'expect' in http_check and 'status' in http_check['expect'] and 'string' in http_check['expect']: - raise ConfigError(f'"expect status" and "expect string" can not be configured together!') + raise ConfigError('"expect status" and "expect string" can not be configured together!') if 'health_check' in back_config: if back_config['mode'] != 'tcp': raise ConfigError(f'backend "{back}" can only be configured with {back_config["health_check"]} ' + - f'health-check whilst in TCP mode!') + 'health-check whilst in TCP mode!') if 'http_check' in back_config: raise ConfigError(f'backend "{back}" cannot be configured with both http-check and health-check!') @@ -105,20 +115,15 @@ def verify(lb): if {'no_verify', 'ca_certificate'} <= set(back_config['ssl']): raise ConfigError(f'backend {back} cannot have both ssl options no-verify and ca-certificate set!') + tmp = dict_search('ssl.ca_certificate', back_config) + if tmp: verify_pki_ca_certificate(lb, tmp) + # Check if http-response-headers are configured in any frontend/backend where mode != http for group in ['service', 'backend']: for config_name, config in lb[group].items(): if 'http_response_headers' in config and config['mode'] != 'http': raise ConfigError(f'{group} {config_name} must be set to http mode to use http_response_headers!') - for front, front_config in lb['service'].items(): - for cert in dict_search('ssl.certificate', front_config) or []: - verify_pki_certificate(lb, cert) - - for back, back_config in lb['backend'].items(): - tmp = dict_search('ssl.ca_certificate', back_config) - if tmp: verify_pki_ca_certificate(lb, tmp) - def generate(lb): if not lb: @@ -186,12 +191,11 @@ def generate(lb): return None def apply(lb): + action = 'stop' + if lb: + action = 'reload-or-restart' call('systemctl daemon-reload') - if not lb: - call(f'systemctl stop {systemd_service}') - else: - call(f'systemctl reload-or-restart {systemd_service}') - + 
call(f'systemctl {action} {systemd_services["haproxy"]}') return None diff --git a/src/conf_mode/load-balancing_wan.py b/src/conf_mode/load-balancing_wan.py index 5da0b906b..92d9acfba 100755 --- a/src/conf_mode/load-balancing_wan.py +++ b/src/conf_mode/load-balancing_wan.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2023 VyOS maintainers and contributors +# Copyright (C) 2023-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -14,24 +14,16 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -import os - from sys import exit -from shutil import rmtree -from vyos.base import Warning from vyos.config import Config from vyos.configdep import set_dependents, call_dependents from vyos.utils.process import cmd -from vyos.template import render from vyos import ConfigError from vyos import airbag airbag.enable() -load_balancing_dir = '/run/load-balance' -load_balancing_conf_file = f'{load_balancing_dir}/wlb.conf' -systemd_service = 'vyos-wan-load-balance.service' - +service = 'vyos-wan-load-balance.service' def get_config(config=None): if config: @@ -40,6 +32,7 @@ def get_config(config=None): conf = Config() base = ['load-balancing', 'wan'] + lb = conf.get_config_dict(base, key_mangling=('-', '_'), no_tag_node_value_mangle=True, get_first_key=True, @@ -59,87 +52,61 @@ def verify(lb): if not lb: return None - if 'interface_health' not in lb: - raise ConfigError( - 'A valid WAN load-balance configuration requires an interface with a nexthop!' - ) - - for interface, interface_config in lb['interface_health'].items(): - if 'nexthop' not in interface_config: - raise ConfigError( - f'interface-health {interface} nexthop must be specified!') - - if 'test' in interface_config: - for test_rule, test_config in interface_config['test'].items(): - if 'type' in test_config: - if test_config['type'] == 'user-defined' and 'test_script' not in test_config: - raise ConfigError( - f'test {test_rule} script must be defined for test-script!' - ) - - if 'rule' not in lb: - Warning( - 'At least one rule with an (outbound) interface must be defined for WAN load balancing to be active!' 
- ) + if 'interface_health' in lb: + for ifname, health_conf in lb['interface_health'].items(): + if 'nexthop' not in health_conf: + raise ConfigError(f'Nexthop must be configured for interface {ifname}') + + if 'test' not in health_conf: + continue + + for test_id, test_conf in health_conf['test'].items(): + if 'type' not in test_conf: + raise ConfigError(f'No type configured for health test on interface {ifname}') + + if test_conf['type'] == 'user-defined' and 'test_script' not in test_conf: + raise ConfigError(f'Missing user-defined script for health test on interface {ifname}') else: - for rule, rule_config in lb['rule'].items(): - if 'inbound_interface' not in rule_config: - raise ConfigError(f'rule {rule} inbound-interface must be specified!') - if {'failover', 'exclude'} <= set(rule_config): - raise ConfigError(f'rule {rule} failover cannot be configured with exclude!') - if {'limit', 'exclude'} <= set(rule_config): - raise ConfigError(f'rule {rule} limit cannot be used with exclude!') - if 'interface' not in rule_config: - if 'exclude' not in rule_config: - Warning( - f'rule {rule} will be inactive because no (outbound) interfaces have been defined for this rule' - ) - for direction in {'source', 'destination'}: - if direction in rule_config: - if 'protocol' in rule_config and 'port' in rule_config[ - direction]: - if rule_config['protocol'] not in {'tcp', 'udp'}: - raise ConfigError('ports can only be specified when protocol is "tcp" or "udp"') + raise ConfigError('Interface health tests must be configured') + if 'rule' in lb: + for rule_id, rule_conf in lb['rule'].items(): + if 'interface' not in rule_conf and 'exclude' not in rule_conf: + raise ConfigError(f'Interface or exclude not specified on load-balancing wan rule {rule_id}') -def generate(lb): - if not lb: - # Delete /run/load-balance/wlb.conf - if os.path.isfile(load_balancing_conf_file): - os.unlink(load_balancing_conf_file) - # Delete old directories - if os.path.isdir(load_balancing_dir): - rmtree(load_balancing_dir, ignore_errors=True) - if os.path.exists('/var/run/load-balance/wlb.out'): - os.unlink('/var/run/load-balance/wlb.out') + if 'failover' in rule_conf and 'exclude' in rule_conf: + raise ConfigError(f'Failover cannot be configured with exclude on load-balancing wan rule {rule_id}') - return None + if 'limit' in rule_conf: + if 'exclude' in rule_conf: + raise ConfigError(f'Limit cannot be configured with exclude on load-balancing wan rule {rule_id}') - # Create load-balance dir - if not os.path.isdir(load_balancing_dir): - os.mkdir(load_balancing_dir) + if 'rate' in rule_conf['limit'] and 'period' not in rule_conf['limit']: + raise ConfigError(f'Missing "limit period" on load-balancing wan rule {rule_id}') - render(load_balancing_conf_file, 'load-balancing/wlb.conf.j2', lb) + if 'period' in rule_conf['limit'] and 'rate' not in rule_conf['limit']: + raise ConfigError(f'Missing "limit rate" on load-balancing wan rule {rule_id}') - return None + for direction in ['source', 'destination']: + if direction in rule_conf: + if 'port' in rule_conf[direction]: + if 'protocol' not in rule_conf: + raise ConfigError(f'Protocol required to specify port on load-balancing wan rule {rule_id}') + + if rule_conf['protocol'] not in ['tcp', 'udp', 'tcp_udp']: + raise ConfigError(f'Protocol must be tcp, udp or tcp_udp to specify port on load-balancing wan rule {rule_id}') +def generate(lb): + return None def apply(lb): if not lb: - try: - cmd(f'systemctl stop {systemd_service}') - except Exception as e: - print(f"Error message: 
{e}") - + cmd(f'sudo systemctl stop {service}') else: - cmd('sudo sysctl -w net.netfilter.nf_conntrack_acct=1') - cmd(f'systemctl restart {systemd_service}') + cmd(f'sudo systemctl restart {service}') call_dependents() - return None - - if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py index 39803fa02..504b3e82a 100755 --- a/src/conf_mode/nat.py +++ b/src/conf_mode/nat.py @@ -17,6 +17,7 @@ import os from sys import exit +from pathlib import Path from vyos.base import Warning from vyos.config import Config @@ -26,10 +27,13 @@ from vyos.template import is_ip_network from vyos.utils.kernel import check_kmod from vyos.utils.dict import dict_search from vyos.utils.dict import dict_search_args +from vyos.utils.file import write_file from vyos.utils.process import cmd from vyos.utils.process import run +from vyos.utils.process import call from vyos.utils.network import is_addr_assigned from vyos.utils.network import interface_exists +from vyos.firewall import fqdn_config_parse from vyos import ConfigError from vyos import airbag @@ -39,6 +43,7 @@ k_mod = ['nft_nat', 'nft_chain_nat'] nftables_nat_config = '/run/nftables_nat.conf' nftables_static_nat_conf = '/run/nftables_static-nat-rules.nft' +domain_resolver_usage = '/run/use-vyos-domain-resolver-nat' valid_groups = [ 'address_group', @@ -71,6 +76,8 @@ def get_config(config=None): if 'dynamic_group' in nat['firewall_group']: del nat['firewall_group']['dynamic_group'] + fqdn_config_parse(nat, 'nat') + return nat def verify_rule(config, err_msg, groups_dict): @@ -251,6 +258,19 @@ def apply(nat): call_dependents() + # DOMAIN RESOLVER + if nat and 'deleted' not in nat: + domain_action = 'restart' + if nat['ip_fqdn'].items(): + text = f'# Automatically generated by nat.py\nThis file indicates that vyos-domain-resolver service is used by nat.\n' + write_file(domain_resolver_usage, text) + elif os.path.exists(domain_resolver_usage): + Path(domain_resolver_usage).unlink(missing_ok=True) + + if not Path('/run').glob('use-vyos-domain-resolver*'): + domain_action = 'stop' + call(f'systemctl {domain_action} vyos-domain-resolver.service') + return None if __name__ == '__main__': diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py index 95dfae3a5..c65950c9e 100755 --- a/src/conf_mode/nat66.py +++ b/src/conf_mode/nat66.py @@ -92,6 +92,10 @@ def verify(nat): if prefix != None: if not is_ipv6(prefix): raise ConfigError(f'{err_msg} source-prefix not specified') + + if 'destination' in config and 'group' in config['destination']: + if len({'address_group', 'network_group', 'domain_group'} & set(config['destination']['group'])) > 1: + raise ConfigError('Only one address-group, network-group or domain-group can be specified') if dict_search('destination.rule', nat): for rule, config in dict_search('destination.rule', nat).items(): diff --git a/src/conf_mode/pki.py b/src/conf_mode/pki.py index 215b22b37..869518dd9 100755 --- a/src/conf_mode/pki.py +++ b/src/conf_mode/pki.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2024 VyOS maintainers and contributors +# Copyright (C) 2021-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -19,6 +19,7 @@ import os from sys import argv from sys import exit +from vyos.base import Message from vyos.config import Config from vyos.config import config_dict_merge from vyos.configdep import set_dependents @@ -27,6 
+28,9 @@ from vyos.configdict import node_changed from vyos.configdiff import Diff from vyos.configdiff import get_config_diff from vyos.defaults import directories +from vyos.defaults import internal_ports +from vyos.defaults import systemd_services +from vyos.pki import encode_certificate from vyos.pki import is_ca_certificate from vyos.pki import load_certificate from vyos.pki import load_public_key @@ -36,17 +40,22 @@ from vyos.pki import load_private_key from vyos.pki import load_crl from vyos.pki import load_dh_parameters from vyos.utils.boot import boot_configuration_complete +from vyos.utils.configfs import add_cli_node from vyos.utils.dict import dict_search from vyos.utils.dict import dict_search_args from vyos.utils.dict import dict_search_recursive +from vyos.utils.file import read_file +from vyos.utils.network import check_port_availability from vyos.utils.process import call from vyos.utils.process import cmd from vyos.utils.process import is_systemd_service_active +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError from vyos import airbag airbag.enable() vyos_certbot_dir = directories['certbot'] +vyos_ca_certificates_dir = directories['ca_certificates'] # keys to recursively search for under specified path sync_search = [ @@ -68,7 +77,7 @@ sync_search = [ }, { 'keys': ['certificate', 'ca_certificate'], - 'path': ['load_balancing', 'reverse_proxy'], + 'path': ['load_balancing', 'haproxy'], }, { 'keys': ['key'], @@ -124,8 +133,20 @@ def certbot_request(name: str, config: dict, dry_run: bool=True): f'--standalone --agree-tos --no-eff-email --expand --server {config["url"]} '\ f'--email {config["email"]} --key-type rsa --rsa-key-size {config["rsa_key_size"]} '\ f'{domains}' + + listen_address = None if 'listen_address' in config: - tmp += f' --http-01-address {config["listen_address"]}' + listen_address = config['listen_address'] + + # When ACME is used behind a reverse proxy, we always bind to localhost + # whatever the CLI listen-address is configured for. 
+ if ('haproxy' in dict_search('used_by', config) and + is_systemd_service_running(systemd_services['haproxy']) and + not check_port_availability(listen_address, 80)): + tmp += f' --http-01-address 127.0.0.1 --http-01-port {internal_ports["certbot_haproxy"]}' + elif listen_address: + tmp += f' --http-01-address {listen_address}' + # verify() does not need to actually request a cert but only test for plausability if dry_run: tmp += ' --dry-run' @@ -146,35 +167,19 @@ def get_config(config=None): if len(argv) > 1 and argv[1] == 'certbot_renew': pki['certbot_renew'] = {} - tmp = node_changed(conf, base + ['ca'], recursive=True, expand_nodes=Diff.DELETE | Diff.ADD) - if tmp: - if 'changed' not in pki: pki.update({'changed':{}}) - pki['changed'].update({'ca' : tmp}) - - tmp = node_changed(conf, base + ['certificate'], recursive=True, expand_nodes=Diff.DELETE | Diff.ADD) - if tmp: - if 'changed' not in pki: pki.update({'changed':{}}) - pki['changed'].update({'certificate' : tmp}) - - tmp = node_changed(conf, base + ['dh'], recursive=True, expand_nodes=Diff.DELETE | Diff.ADD) - if tmp: - if 'changed' not in pki: pki.update({'changed':{}}) - pki['changed'].update({'dh' : tmp}) - - tmp = node_changed(conf, base + ['key-pair'], recursive=True, expand_nodes=Diff.DELETE | Diff.ADD) - if tmp: - if 'changed' not in pki: pki.update({'changed':{}}) - pki['changed'].update({'key_pair' : tmp}) - - tmp = node_changed(conf, base + ['openssh'], recursive=True, expand_nodes=Diff.DELETE | Diff.ADD) - if tmp: - if 'changed' not in pki: pki.update({'changed':{}}) - pki['changed'].update({'openssh' : tmp}) - tmp = node_changed(conf, base + ['openvpn', 'shared-secret'], recursive=True, expand_nodes=Diff.DELETE | Diff.ADD) - if tmp: - if 'changed' not in pki: pki.update({'changed':{}}) - pki['changed'].update({'openvpn' : tmp}) + # Walk through the list of sync_translate mapping and build a list + # which is later used to check if the node was changed in the CLI config + changed_keys = [] + for value in sync_translate.values(): + if value not in changed_keys: + changed_keys.append(value) + # Check for changes to said given keys in the CLI config + for key in changed_keys: + tmp = node_changed(conf, base + [key], recursive=True, expand_nodes=Diff.DELETE | Diff.ADD) + if 'changed' not in pki: + pki.update({'changed':{}}) + pki['changed'].update({key.replace('-', '_') : tmp}) # We only merge on the defaults of there is a configuration at all if conf.exists(base): @@ -235,8 +240,8 @@ def get_config(config=None): continue path = search['path'] - path_str = ' '.join(path + found_path) - #print(f'PKI: Updating config: {path_str} {item_name}') + path_str = ' '.join(path + found_path).replace('_','-') + Message(f'Updating configuration: "{path_str} {item_name}"') if path[0] == 'interfaces': ifname = found_path[0] @@ -246,6 +251,24 @@ def get_config(config=None): if not D.node_changed_presence(path): set_dependents(path[1], conf) + # Check PKI certificates if they are auto-generated by ACME. If they are, + # traverse the current configuration and determine the service where the + # certificate is used by. + # Required to check if we might need to run certbot behing a reverse proxy. 
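
The traversal that follows builds the 'used_by' hint referenced in the comment above. A minimal sketch of the same idea, reusing dict_search_recursive() as imported earlier in this file; the flat haproxy_config argument is an assumption for illustration:

from vyos.utils.dict import dict_search_recursive

def acme_cert_consumers(haproxy_config: dict, cert_name: str) -> list:
    # Visit every 'certificate' node below the haproxy subtree; if the ACME
    # certificate name appears in one of those (possibly list-valued) nodes,
    # record haproxy as a consumer so certbot may have to run behind it.
    used_by = []
    for cert_value, _path in dict_search_recursive(haproxy_config, 'certificate'):
        if cert_name in cert_value:
            used_by.append('haproxy')
    return used_by
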
+ if 'certificate' in pki: + for name, cert_config in pki['certificate'].items(): + if 'acme' not in cert_config: + continue + if not dict_search('system.load_balancing.haproxy', pki): + continue + used_by = [] + for cert_list, _ in dict_search_recursive( + pki['system']['load_balancing']['haproxy'], 'certificate'): + if name in cert_list: + used_by.append('haproxy') + if used_by: + pki['certificate'][name]['acme'].update({'used_by': used_by}) + return pki def is_valid_certificate(raw_data): @@ -337,6 +360,15 @@ def verify(pki): raise ConfigError(f'An email address is required to request '\ f'certificate for "{name}" via ACME!') + listen_address = None + if 'listen_address' in cert_conf['acme']: + listen_address = cert_conf['acme']['listen_address'] + + if 'used_by' not in cert_conf['acme']: + if not check_port_availability(listen_address, 80): + raise ConfigError('Port 80 is already in use and not available '\ + f'to provide ACME challenge for "{name}"!') + if 'certbot_renew' not in pki: # Only run the ACME command if something on this entity changed, # as this is time intensive @@ -390,34 +422,65 @@ def verify(pki): for search in sync_search: for key in search['keys']: changed_key = sync_translate[key] - if changed_key not in pki['changed']: continue - for item_name in pki['changed'][changed_key]: node_present = False if changed_key == 'openvpn': node_present = dict_search_args(pki, 'openvpn', 'shared_secret', item_name) else: node_present = dict_search_args(pki, changed_key, item_name) + # If the node is still present, we can skip the check + # as we are not deleting it + if node_present: + continue - if not node_present: - search_dict = dict_search_args(pki['system'], *search['path']) - - if not search_dict: - continue + search_dict = dict_search_args(pki['system'], *search['path']) + if not search_dict: + continue - for found_name, found_path in dict_search_recursive(search_dict, key): - if found_name == item_name: - path_str = " ".join(search['path'] + found_path) - raise ConfigError(f'PKI object "{item_name}" still in use by "{path_str}"') + for found_name, found_path in dict_search_recursive(search_dict, key): + # Check if the name matches either by string compare, or beeing + # part of a list + if ((isinstance(found_name, str) and found_name == item_name) or + (isinstance(found_name, list) and item_name in found_name)): + # We do not support _ in CLI paths - this is only a convenience + # as we mangle all - to _, now it's time to reverse this! 
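
The path_str assignment that follows reverses the CLI key mangling for the error message. A minimal sketch of that helper, assuming the path list originates from a get_config_dict() call with key_mangling=('-', '_'):

def cli_path_str(path: list) -> str:
    # get_config_dict() mangles '-' to '_' in node names; undo that before
    # showing a CLI path to the user, e.g. ['load_balancing', 'haproxy']
    # becomes 'load-balancing haproxy'.
    return ' '.join(path).replace('_', '-')
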
+ path_str = ' '.join(search['path'] + found_path).replace('_','-') + object = changed_key.replace('_','-') + tmp = f'Embedded PKI {object} with name "{item_name}" is still '\ + f'in use by CLI path "{path_str}"' + raise ConfigError(tmp) return None +def cleanup_system_ca(): + if not os.path.exists(vyos_ca_certificates_dir): + os.mkdir(vyos_ca_certificates_dir) + else: + for filename in os.listdir(vyos_ca_certificates_dir): + full_path = os.path.join(vyos_ca_certificates_dir, filename) + if os.path.isfile(full_path): + os.unlink(full_path) + def generate(pki): if not pki: + cleanup_system_ca() return None + # Create or cleanup CA install directory + if 'changed' in pki and 'ca' in pki['changed']: + cleanup_system_ca() + + if 'ca' in pki: + for ca, ca_conf in pki['ca'].items(): + if 'system_install' in ca_conf: + ca_obj = load_certificate(ca_conf['certificate']) + ca_path = os.path.join(vyos_ca_certificates_dir, f'{ca}.crt') + + with open(ca_path, 'w') as f: + f.write(encode_certificate(ca_obj)) + # Certbot renewal only needs to re-trigger the services to load up the # new PEM file if 'certbot_renew' in pki: @@ -433,22 +496,58 @@ def generate(pki): for name, cert_conf in pki['certificate'].items(): if 'acme' in cert_conf: certbot_list.append(name) - # generate certificate if not found on disk + # There is no ACME/certbot managed certificate presend on the + # system, generate it if name not in certbot_list_on_disk: certbot_request(name, cert_conf['acme'], dry_run=False) + # Now that the certificate was properly generated we have + # the PEM files on disk. We need to add the certificate to + # certbot_list_on_disk to automatically import the CA chain + certbot_list_on_disk.append(name) + # We alredy had an ACME managed certificate on the system, but + # something changed in the configuration elif changed_certificates != None and name in changed_certificates: - # when something for the certificate changed, we should delete it + # Delete old ACME certificate first if name in certbot_list_on_disk: certbot_delete(name) + # Request new certificate via certbot certbot_request(name, cert_conf['acme'], dry_run=False) # Cleanup certbot configuration and certificates if no longer in use by CLI # Get foldernames under vyos_certbot_dir which each represent a certbot cert if os.path.exists(f'{vyos_certbot_dir}/live'): for cert in certbot_list_on_disk: + # ACME certificate is no longer in use by CLI remove it if cert not in certbot_list: - # certificate is no longer active on the CLI - remove it certbot_delete(cert) + continue + # ACME not enabled for individual certificate - bail out early + if 'acme' not in pki['certificate'][cert]: + continue + + # Read in ACME certificate chain information + tmp = read_file(f'{vyos_certbot_dir}/live/{cert}/chain.pem') + tmp = load_certificate(tmp, wrap_tags=False) + cert_chain_base64 = "".join(encode_certificate(tmp).strip().split("\n")[1:-1]) + + # Check if CA chain certificate is already present on CLI to avoid adding + # a duplicate. 
This only checks for manual added CA certificates and not + # auto added ones with the AUTOCHAIN_ prefix + autochain_prefix = 'AUTOCHAIN_' + ca_cert_present = False + if 'ca' in pki: + for ca_base64, cli_path in dict_search_recursive(pki['ca'], 'certificate'): + # Ignore automatic added CA certificates + if any(item.startswith(autochain_prefix) for item in cli_path): + continue + if cert_chain_base64 == ca_base64: + ca_cert_present = True + + if not ca_cert_present: + tmp = dict_search_args(pki, 'ca', f'{autochain_prefix}{cert}', 'certificate') + if not bool(tmp) or tmp != cert_chain_base64: + Message(f'Add/replace automatically imported CA certificate for "{cert}"...') + add_cli_node(['pki', 'ca', f'{autochain_prefix}{cert}', 'certificate'], value=cert_chain_base64) return None @@ -456,6 +555,7 @@ def apply(pki): systemd_certbot_name = 'certbot.timer' if not pki: call(f'systemctl stop {systemd_certbot_name}') + call('update-ca-certificates') return None has_certbot = False @@ -473,6 +573,10 @@ def apply(pki): if 'changed' in pki: call_dependents() + # Rebuild ca-certificates bundle + if 'ca' in pki['changed']: + call('update-ca-certificates') + return None if __name__ == '__main__': diff --git a/src/conf_mode/policy.py b/src/conf_mode/policy.py index a5963e72c..a90e33e81 100755 --- a/src/conf_mode/policy.py +++ b/src/conf_mode/policy.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2022 VyOS maintainers and contributors +# Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -17,16 +17,16 @@ from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import frr_protocols +from vyos.frrender import get_frrender_dict from vyos.utils.dict import dict_search +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag - airbag.enable() - def community_action_compatibility(actions: dict) -> bool: """ Check compatibility of values in community and large community sections @@ -87,31 +87,27 @@ def get_config(config=None): else: conf = Config() - base = ['policy'] - policy = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
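
Back in the pki.py generate() hunk above, chain.pem is reduced to its bare base64 body before being compared with CA certificates already stored on the CLI. A minimal sketch of that reduction, assuming a single PEM-armoured certificate as input:

def pem_body(pem: str) -> str:
    # Strip the '-----BEGIN/END CERTIFICATE-----' armour and join the base64
    # payload - the same representation kept under 'pki ca <name> certificate'.
    lines = pem.strip().split('\n')
    return ''.join(lines[1:-1])
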
- tmp = conf.get_config_dict(['protocols'], key_mangling=('-', '_'), - no_tag_node_value_mangle=True) - # Merge policy dict into "regular" config dict - policy = dict_merge(tmp, policy) - return policy - - -def verify(policy): - if not policy: + return get_frrender_dict(conf) + + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'policy'): return None - for policy_type in ['access_list', 'access_list6', 'as_path_list', - 'community_list', 'extcommunity_list', - 'large_community_list', - 'prefix_list', 'prefix_list6', 'route_map']: + policy_types = ['access_list', 'access_list6', 'as_path_list', + 'community_list', 'extcommunity_list', + 'large_community_list', 'prefix_list', + 'prefix_list6', 'route_map'] + + policy = config_dict['policy'] + for protocol in frr_protocols: + if protocol not in config_dict: + continue + if 'protocol' not in policy: + policy.update({'protocol': {}}) + policy['protocol'].update({protocol : config_dict[protocol]}) + + for policy_type in policy_types: # Bail out early and continue with next policy type if policy_type not in policy: continue @@ -246,72 +242,36 @@ def verify(policy): # When the "routing policy" changes and policies, route-maps etc. are deleted, # it is our responsibility to verify that the policy can not be deleted if it # is used by any routing protocol - if 'protocols' in policy: - for policy_type in ['access_list', 'access_list6', 'as_path_list', - 'community_list', - 'extcommunity_list', 'large_community_list', - 'prefix_list', 'route_map']: - if policy_type in policy: - for policy_name in list(set(routing_policy_find(policy_type, - policy[ - 'protocols']))): - found = False - if policy_name in policy[policy_type]: - found = True - # BGP uses prefix-list for selecting both an IPv4 or IPv6 AFI related - # list - we need to go the extra mile here and check both prefix-lists - if policy_type == 'prefix_list' and 'prefix_list6' in policy and policy_name in \ - policy['prefix_list6']: - found = True - if not found: - tmp = policy_type.replace('_', '-') - raise ConfigError( - f'Can not delete {tmp} "{policy_name}", still in use!') + # Check if any routing protocol is activated + if 'protocol' in policy: + for policy_type in policy_types: + for policy_name in list(set(routing_policy_find(policy_type, policy['protocol']))): + found = False + if policy_type in policy and policy_name in policy[policy_type]: + found = True + # BGP uses prefix-list for selecting both an IPv4 or IPv6 AFI related + # list - we need to go the extra mile here and check both prefix-lists + if policy_type == 'prefix_list' and 'prefix_list6' in policy and policy_name in \ + policy['prefix_list6']: + found = True + if not found: + tmp = policy_type.replace('_', '-') + raise ConfigError( + f'Can not delete {tmp} "{policy_name}", still in use!') return None -def generate(policy): - if not policy: - return None - policy['new_frr_config'] = render_to_string('frr/policy.frr.j2', policy) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None - -def apply(policy): - bgp_daemon = 'bgpd' - zebra_daemon = 'zebra' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(bgp_daemon) - frr_cfg.modify_section(r'^bgp as-path access-list .*') - frr_cfg.modify_section(r'^bgp community-list .*') - frr_cfg.modify_section(r'^bgp 
extcommunity-list .*') - frr_cfg.modify_section(r'^bgp large-community-list .*') - frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', - remove_stop_mark=True) - if 'new_frr_config' in policy: - frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) - frr_cfg.commit_configuration(bgp_daemon) - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'^access-list .*') - frr_cfg.modify_section(r'^ipv6 access-list .*') - frr_cfg.modify_section(r'^ip prefix-list .*') - frr_cfg.modify_section(r'^ipv6 prefix-list .*') - frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', - remove_stop_mark=True) - if 'new_frr_config' in policy: - frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) - frr_cfg.commit_configuration(zebra_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None - if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/policy_local-route.py b/src/conf_mode/policy_local-route.py index 331fd972d..9be2bc227 100755 --- a/src/conf_mode/policy_local-route.py +++ b/src/conf_mode/policy_local-route.py @@ -54,6 +54,7 @@ def get_config(config=None): dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address']) dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port']) table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table']) + vrf = leaf_node_changed(conf, base_rule + [rule, 'set', 'vrf']) proto = leaf_node_changed(conf, base_rule + [rule, 'protocol']) rule_def = {} if src: @@ -70,6 +71,8 @@ def get_config(config=None): rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def) if table: rule_def = dict_merge({'table' : table}, rule_def) + if vrf: + rule_def = dict_merge({'vrf' : vrf}, rule_def) if proto: rule_def = dict_merge({'protocol' : proto}, rule_def) dict = dict_merge({dict_id : {rule : rule_def}}, dict) @@ -90,6 +93,7 @@ def get_config(config=None): dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address']) dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port']) table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table']) + vrf = leaf_node_changed(conf, base_rule + [rule, 'set', 'vrf']) proto = leaf_node_changed(conf, base_rule + [rule, 'protocol']) # keep track of changes in configuration # otherwise we might remove an existing node although nothing else has changed @@ -179,6 +183,15 @@ def get_config(config=None): if len(table) > 0: rule_def = dict_merge({'table' : table}, rule_def) + # vrf + if vrf is None: + if 'set' in rule_config and 'vrf' in rule_config['set']: + rule_def = dict_merge({'vrf': [rule_config['set']['vrf']]}, rule_def) + else: + changed = True + if len(vrf) > 0: + rule_def = dict_merge({'vrf' : vrf}, rule_def) + # protocol if proto is None: if 'protocol' in rule_config: @@ -218,8 +231,15 @@ def verify(pbr): ): raise ConfigError('Source or destination address or fwmark or inbound-interface or protocol is required!') - if 'set' not in pbr_route['rule'][rule] or 'table' not in pbr_route['rule'][rule]['set']: - raise ConfigError('Table set is required!') + if 'set' not in pbr_route['rule'][rule]: + raise ConfigError('Either set table or set vrf is required!') + + set_tgts = pbr_route['rule'][rule]['set'] + if 'table' not in set_tgts and 'vrf' not in set_tgts: + raise ConfigError('Either set table or set vrf is required!') + + if 
'table' in set_tgts and 'vrf' in set_tgts: + raise ConfigError('set table and set vrf cannot both be set!') if 'inbound_interface' in pbr_route['rule'][rule]: interface = pbr_route['rule'][rule]['inbound_interface'] @@ -250,11 +270,14 @@ def apply(pbr): fwmark = rule_config.get('fwmark', ['']) inbound_interface = rule_config.get('inbound_interface', ['']) protocol = rule_config.get('protocol', ['']) - table = rule_config.get('table', ['']) + # VRF 'default' is actually table 'main' for RIB rules + vrf = [ 'main' if x == 'default' else x for x in rule_config.get('vrf', ['']) ] + # See generate section below for table/vrf overlap explanation + table_or_vrf = rule_config.get('table', vrf) - for src, dst, src_port, dst_port, fwmk, iif, proto, table in product( + for src, dst, src_port, dst_port, fwmk, iif, proto, table_or_vrf in product( source, destination, source_port, destination_port, - fwmark, inbound_interface, protocol, table): + fwmark, inbound_interface, protocol, table_or_vrf): f_src = '' if src == '' else f' from {src} ' f_src_port = '' if src_port == '' else f' sport {src_port} ' f_dst = '' if dst == '' else f' to {dst} ' @@ -262,7 +285,7 @@ def apply(pbr): f_fwmk = '' if fwmk == '' else f' fwmark {fwmk} ' f_iif = '' if iif == '' else f' iif {iif} ' f_proto = '' if proto == '' else f' ipproto {proto} ' - f_table = '' if table == '' else f' lookup {table} ' + f_table = '' if table_or_vrf == '' else f' lookup {table_or_vrf} ' call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif}{f_table}') @@ -276,7 +299,13 @@ def apply(pbr): if 'rule' in pbr_route: for rule, rule_config in pbr_route['rule'].items(): - table = rule_config['set'].get('table', '') + # VRFs get configred as route table alias names for iproute2 and only + # one 'set' can get past validation. Either can be fed to lookup. 
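
The assignment that follows picks the value handed to 'ip rule ... lookup'. A minimal sketch of that selection, assuming the rule's 'set' dict as produced by the CLI:

def lookup_target(set_config: dict) -> str:
    # iproute2 registers VRFs as routing-table aliases, so a VRF name can be
    # used wherever a table id is expected. The CLI VRF 'default' corresponds
    # to the kernel table 'main'.
    vrf = set_config.get('vrf', '')
    if vrf == 'default':
        return 'main'
    return set_config.get('table', vrf)
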
+ vrf = rule_config['set'].get('vrf', '') + if vrf == 'default': + table_or_vrf = 'main' + else: + table_or_vrf = rule_config['set'].get('table', vrf) source = rule_config.get('source', {}).get('address', ['all']) source_port = rule_config.get('source', {}).get('port', '') destination = rule_config.get('destination', {}).get('address', ['all']) @@ -295,7 +324,7 @@ def apply(pbr): f_iif = f' iif {inbound_interface} ' if inbound_interface else '' f_proto = f' ipproto {protocol} ' if protocol else '' - call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif} lookup {table}') + call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif} lookup {table_or_vrf}') return None diff --git a/src/conf_mode/policy_route.py b/src/conf_mode/policy_route.py index 223175b8a..521764896 100755 --- a/src/conf_mode/policy_route.py +++ b/src/conf_mode/policy_route.py @@ -21,13 +21,16 @@ from sys import exit from vyos.base import Warning from vyos.config import Config +from vyos.configdiff import get_config_diff, Diff from vyos.template import render from vyos.utils.dict import dict_search_args +from vyos.utils.dict import dict_search_recursive from vyos.utils.process import cmd from vyos.utils.process import run from vyos.utils.network import get_vrf_tableid from vyos.defaults import rt_global_table from vyos.defaults import rt_global_vrf +from vyos.firewall import geoip_update from vyos import ConfigError from vyos import airbag airbag.enable() @@ -43,6 +46,43 @@ valid_groups = [ 'interface_group' ] +def geoip_updated(conf, policy): + diff = get_config_diff(conf) + node_diff = diff.get_child_nodes_diff(['policy'], expand_nodes=Diff.DELETE, recursive=True) + + out = { + 'name': [], + 'ipv6_name': [], + 'deleted_name': [], + 'deleted_ipv6_name': [] + } + updated = False + + for key, path in dict_search_recursive(policy, 'geoip'): + set_name = f'GEOIP_CC_{path[0]}_{path[1]}_{path[3]}' + if (path[0] == 'route'): + out['name'].append(set_name) + elif (path[0] == 'route6'): + set_name = f'GEOIP_CC6_{path[0]}_{path[1]}_{path[3]}' + out['ipv6_name'].append(set_name) + + updated = True + + if 'delete' in node_diff: + for key, path in dict_search_recursive(node_diff['delete'], 'geoip'): + set_name = f'GEOIP_CC_{path[0]}_{path[1]}_{path[3]}' + if (path[0] == 'route'): + out['deleted_name'].append(set_name) + elif (path[0] == 'route6'): + set_name = f'GEOIP_CC6_{path[0]}_{path[1]}_{path[3]}' + out['deleted_ipv6_name'].append(set_name) + updated = True + + if updated: + return out + + return False + def get_config(config=None): if config: conf = config @@ -60,6 +100,7 @@ def get_config(config=None): if 'dynamic_group' in policy['firewall_group']: del policy['firewall_group']['dynamic_group'] + policy['geoip_updated'] = geoip_updated(conf, policy) return policy def verify_rule(policy, name, rule_conf, ipv6, rule_id): @@ -203,6 +244,12 @@ def apply(policy): apply_table_marks(policy) + if policy['geoip_updated']: + # Call helper script to Update set contents + if 'name' in policy['geoip_updated'] or 'ipv6_name' in policy['geoip_updated']: + print('Updating GeoIP. 
Please wait...') + geoip_update(policy=policy) + return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_babel.py b/src/conf_mode/protocols_babel.py index 90b6e4a31..80a847af8 100755 --- a/src/conf_mode/protocols_babel.py +++ b/src/conf_mode/protocols_babel.py @@ -17,15 +17,14 @@ from sys import exit from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.utils.dict import dict_search -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -34,46 +33,16 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['protocols', 'babel'] - babel = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True) - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - babel['interface_removed'] = list(interfaces_removed) + return get_frrender_dict(conf) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - babel.update({'deleted' : ''}) - return babel - - # We have gathered the dict representation of the CLI, but there are default - # values which we need to update into the dictionary retrieved. - default_values = conf.get_config_defaults(base, key_mangling=('-', '_'), - get_first_key=True, - recursive=True) - - # merge in default values - babel = config_dict_merge(default_values, babel) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
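
The babel rewrite in this hunk, like the bfd/bgp/eigrp/isis/mpls/nhrp/openfabric/ospf rewrites further down, converges on one skeleton: fetch a combined dict via get_frrender_dict() and only render/apply FRR configuration directly when vyos-configd is not active (presumably because the config daemon triggers the render itself). A minimal sketch of that shared shape, using the helpers exactly as imported in these files:

from vyos.config import Config
from vyos.frrender import FRRender, get_frrender_dict
from vyos.utils.process import is_systemd_service_running

def get_config(config=None):
    # One combined dict covering all FRR-related protocols plus policy.
    conf = config if config else Config()
    return get_frrender_dict(conf)

def generate(config_dict):
    # Standalone script runs render the FRR config here; under vyos-configd
    # the daemon is assumed to take care of it.
    if config_dict and not is_systemd_service_running('vyos-configd.service'):
        FRRender().generate(config_dict)
    return None

def apply(config_dict):
    if config_dict and not is_systemd_service_running('vyos-configd.service'):
        FRRender().apply()
    return None
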
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - babel = dict_merge(tmp, babel) - return babel - -def verify(babel): - if not babel: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'babel'): return None + babel = config_dict['babel'] + babel['policy'] = config_dict['policy'] + # verify distribute_list if "distribute_list" in babel: acl_keys = { @@ -120,32 +89,14 @@ def verify(babel): verify_prefix_list(prefix_list, babel, version='6' if address_family == 'ipv6' else '') -def generate(babel): - if not babel or 'deleted' in babel: - return None - - babel['new_frr_config'] = render_to_string('frr/babeld.frr.j2', babel) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(babel): - babel_daemon = 'babeld' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(babel_daemon) - frr_cfg.modify_section('^router babel', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in babel: - continue - for interface in babel[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in babel: - frr_cfg.add_before(frr.default_add_before, babel['new_frr_config']) - frr_cfg.commit_configuration(babel_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py index 1361bb1a9..d3bc3e961 100755 --- a/src/conf_mode/protocols_bfd.py +++ b/src/conf_mode/protocols_bfd.py @@ -16,11 +16,13 @@ from vyos.config import Config from vyos.configverify import verify_vrf +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.template import is_ipv6 -from vyos.template import render_to_string from vyos.utils.network import is_ipv6_link_local +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -29,22 +31,14 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['protocols', 'bfd'] - bfd = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - return bfd - bfd = conf.merge_defaults(bfd, recursive=True) + return get_frrender_dict(conf) - return bfd - -def verify(bfd): - if not bfd: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'bfd'): return None + bfd = config_dict['bfd'] if 'peer' in bfd: for peer, peer_config in bfd['peer'].items(): # IPv6 link local peers require an explicit local address/interface @@ -83,22 +77,13 @@ def verify(bfd): return None -def generate(bfd): - if not bfd: - return None - bfd['new_frr_config'] = render_to_string('frr/bfdd.frr.j2', bfd) - -def apply(bfd): - bfd_daemon = 'bfdd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(bfd_daemon) - frr_cfg.modify_section('^bfd', stop_pattern='^exit', remove_stop_mark=True) - if 'new_frr_config' in bfd: - frr_cfg.add_before(frr.default_add_before, 
bfd['new_frr_config']) - frr_cfg.commit_configuration(bfd_daemon) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py index 22f020099..99d8eb9d1 100755 --- a/src/conf_mode/protocols_bgp.py +++ b/src/conf_mode/protocols_bgp.py @@ -19,21 +19,20 @@ from sys import argv from vyos.base import Warning from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_prefix_list from vyos.configverify import verify_route_map from vyos.configverify import verify_vrf +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.template import is_ip from vyos.template import is_interface -from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_vrf from vyos.utils.network import is_addr_assigned +from vyos.utils.process import is_systemd_service_running from vyos.utils.process import process_named_running -from vyos.utils.process import call from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -43,68 +42,7 @@ def get_config(config=None): else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] - - base_path = ['protocols', 'bgp'] - - # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'bgp'] or base_path - bgp = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) - - bgp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'], - key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # Remove per interface MPLS configuration - get a list if changed - # nodes under the interface tagNode - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - bgp['interface_removed'] = list(interfaces_removed) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: - bgp.update({'vrf' : vrf}) - # We can not delete the BGP VRF instance if there is a L3VNI configured - # FRR L3VNI must be deleted first otherwise we will see error: - # "FRR error: Please unconfigure l3vni 3000" - tmp = ['vrf', 'name', vrf, 'vni'] - if conf.exists_effective(tmp): - bgp.update({'vni' : conf.return_effective_value(tmp)}) - # We can safely delete ourself from the dependent vrf list - if vrf in bgp['dependent_vrfs']: - del bgp['dependent_vrfs'][vrf] - - bgp['dependent_vrfs'].update({'default': {'protocols': { - 'bgp': conf.get_config_dict(base_path, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True)}}}) - - if not conf.exists(base): - # If bgp instance is deleted then mark it - bgp.update({'deleted' : ''}) - return bgp - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. 
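
The new verify() visible in the hunk that follows no longer receives a pre-selected BGP dict; it picks the subtree itself from the combined dict. A minimal sketch of that selection, assuming the layout returned by get_frrender_dict():

def select_bgp_subtree(config_dict: dict):
    # With a VRF context the BGP tree lives under vrf -> name -> <vrf> ->
    # protocols -> bgp, otherwise under the top-level 'bgp' key; the policy
    # tree is attached so route-map/prefix-list references can be verified.
    vrf = config_dict.get('vrf_context')
    if vrf:
        bgp = config_dict['vrf']['name'][vrf]['protocols']['bgp']
    else:
        bgp = config_dict['bgp']
    bgp['policy'] = config_dict['policy']
    return vrf, bgp
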
- bgp = conf.merge_defaults(bgp, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - bgp = dict_merge(tmp, bgp) - - return bgp - + return get_frrender_dict(conf, argv) def verify_vrf_as_import(search_vrf_name: str, afi_name: str, vrfs_config: dict) -> bool: """ @@ -237,13 +175,24 @@ def verify_afi(peer_config, bgp_config): if tmp: return True return False -def verify(bgp): +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'bgp'): + return None + + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] + + # eqivalent of the C foo ? 'a' : 'b' statement + bgp = vrf and config_dict['vrf']['name'][vrf]['protocols']['bgp'] or config_dict['bgp'] + bgp['policy'] = config_dict['policy'] + if 'deleted' in bgp: - if 'vrf' in bgp: + if vrf: # Cannot delete vrf if it exists in import vrf list in other vrfs for tmp_afi in ['ipv4_unicast', 'ipv6_unicast']: - if verify_vrf_as_import(bgp['vrf'], tmp_afi, bgp['dependent_vrfs']): - raise ConfigError(f'Cannot delete VRF instance "{bgp["vrf"]}", ' \ + if verify_vrf_as_import(vrf, tmp_afi, bgp['dependent_vrfs']): + raise ConfigError(f'Cannot delete VRF instance "{vrf}", ' \ 'unconfigure "import vrf" commands!') else: # We are running in the default VRF context, thus we can not delete @@ -252,8 +201,9 @@ def verify(bgp): for vrf, vrf_options in bgp['dependent_vrfs'].items(): if vrf != 'default': if dict_search('protocols.bgp', vrf_options): - raise ConfigError('Cannot delete default BGP instance, ' \ - 'dependent VRF instance(s) exist(s)!') + dependent_vrfs = ', '.join(bgp['dependent_vrfs'].keys()) + raise ConfigError(f'Cannot delete default BGP instance, ' \ + f'dependent VRF instance(s): {dependent_vrfs}') if 'vni' in vrf_options: raise ConfigError('Cannot delete default BGP instance, ' \ 'dependent L3VNI exists!') @@ -281,9 +231,8 @@ def verify(bgp): for interface in bgp['interface']: error_msg = f'Interface "{interface}" belongs to different VRF instance' tmp = get_interface_vrf(interface) - if 'vrf' in bgp: - if bgp['vrf'] != tmp: - vrf = bgp['vrf'] + if vrf: + if vrf != tmp: raise ConfigError(f'{error_msg} "{vrf}"!') elif tmp != 'default': raise ConfigError(f'{error_msg} "{tmp}"!') @@ -384,10 +333,8 @@ def verify(bgp): # Only checks for ipv4 and ipv6 neighbors # Check if neighbor address is assigned as system interface address - vrf = None vrf_error_msg = f' in default VRF!' - if 'vrf' in bgp: - vrf = bgp['vrf'] + if vrf: vrf_error_msg = f' in VRF "{vrf}"!' 
if is_ip(peer) and is_addr_assigned(peer, vrf): @@ -466,15 +413,19 @@ def verify(bgp): verify_route_map(afi_config['route_map'][tmp], bgp) if 'route_reflector_client' in afi_config: - peer_group_as = peer_config.get('remote_as') + peer_as = peer_config.get('remote_as') - if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): + if peer_as is not None and (peer_as != 'internal' and peer_as != bgp['system_as']): raise ConfigError('route-reflector-client only supported for iBGP peers') else: + # Check into the peer group for the remote as, if we are in a peer group, check in peer itself if 'peer_group' in peer_config: peer_group_as = dict_search(f'peer_group.{peer_group}.remote_as', bgp) - if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): - raise ConfigError('route-reflector-client only supported for iBGP peers') + elif neighbor == 'peer_group': + peer_group_as = peer_config.get('remote_as') + + if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): + raise ConfigError('route-reflector-client only supported for iBGP peers') # T5833 not all AFIs are supported for VRF if 'vrf' in bgp and 'address_family' in peer_config: @@ -529,7 +480,7 @@ def verify(bgp): f'{afi} administrative distance {key}!') if afi in ['ipv4_unicast', 'ipv6_unicast']: - vrf_name = bgp['vrf'] if dict_search('vrf', bgp) else 'default' + vrf_name = vrf if vrf else 'default' # Verify if currant VRF contains rd and route-target options # and does not exist in import list in other VRFs if dict_search(f'rd.vpn.export', afi_config): @@ -576,12 +527,21 @@ def verify(bgp): raise ConfigError( 'Please unconfigure import vrf commands before using vpn commands in dependent VRFs!') + if (dict_search('route_map.vrf.import', afi_config) is not None + or dict_search('import.vrf', afi_config) is not None): # FRR error: please unconfigure vpn to vrf commands before # using import vrf commands - if 'vpn' in afi_config['import'] or dict_search('export.vpn', afi_config) != None: + if (dict_search('import.vpn', afi_config) is not None + or dict_search('export.vpn', afi_config) is not None): raise ConfigError('Please unconfigure VPN to VRF commands before '\ 'using "import vrf" commands!') + if (dict_search('route_map.vpn.import', afi_config) is not None + or dict_search('route_map.vpn.export', afi_config) is not None) : + raise ConfigError('Please unconfigure route-map VPN to VRF commands before '\ + 'using "import vrf" commands!') + + # Verify that the export/import route-maps do exist for export_import in ['export', 'import']: tmp = dict_search(f'route_map.vpn.{export_import}', afi_config) @@ -602,46 +562,14 @@ def verify(bgp): return None -def generate(bgp): - if not bgp or 'deleted' in bgp: - return None - - bgp['frr_bgpd_config'] = render_to_string('frr/bgpd.frr.j2', bgp) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(bgp): - if 'deleted' in bgp: - # We need to ensure that the L3VNI is deleted first. 
- # This is not possible with old config backend - # priority bug - if {'vrf', 'vni'} <= set(bgp): - call('vtysh -c "conf t" -c "vrf {vrf}" -c "no vni {vni}"'.format(**bgp)) - - bgp_daemon = 'bgpd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in bgp: - vrf = ' vrf ' + bgp['vrf'] - - frr_cfg.load_configuration(bgp_daemon) - - # Remove interface specific config - for key in ['interface', 'interface_removed']: - if key not in bgp: - continue - for interface in bgp[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - frr_cfg.modify_section(f'^router bgp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_bgpd_config' in bgp: - frr_cfg.add_before(frr.default_add_before, bgp['frr_bgpd_config']) - frr_cfg.commit_configuration(bgp_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_eigrp.py b/src/conf_mode/protocols_eigrp.py index c13e52a3d..324ff883f 100755 --- a/src/conf_mode/protocols_eigrp.py +++ b/src/conf_mode/protocols_eigrp.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2022 VyOS maintainers and contributors +# Copyright (C) 2022-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -18,94 +18,49 @@ from sys import exit from sys import argv from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_vrf -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() - def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] - - base_path = ['protocols', 'eigrp'] - - # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'eigrp'] or base_path - eigrp = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) + return get_frrender_dict(conf, argv) - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: eigrp.update({'vrf' : vrf}) - - if not conf.exists(base): - eigrp.update({'deleted' : ''}) - if not vrf: - # We are running in the default VRF context, thus we can not delete - # our main EIGRP instance if there are dependent EIGRP VRF instances. - eigrp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'], - key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - return eigrp - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - eigrp = dict_merge(tmp, eigrp) +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'eigrp'): + return None - return eigrp + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] -def verify(eigrp): - if not eigrp or 'deleted' in eigrp: - return + # eqivalent of the C foo ? 'a' : 'b' statement + eigrp = vrf and config_dict['vrf']['name'][vrf]['protocols']['eigrp'] or config_dict['eigrp'] + eigrp['policy'] = config_dict['policy'] if 'system_as' not in eigrp: raise ConfigError('EIGRP system-as must be defined!') - if 'vrf' in eigrp: - verify_vrf(eigrp) - -def generate(eigrp): - if not eigrp or 'deleted' in eigrp: - return None - - eigrp['frr_eigrpd_config'] = render_to_string('frr/eigrpd.frr.j2', eigrp) + if vrf: + verify_vrf({'vrf': vrf}) -def apply(eigrp): - eigrp_daemon = 'eigrpd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in eigrp: - vrf = ' vrf ' + eigrp['vrf'] - - frr_cfg.load_configuration(eigrp_daemon) - frr_cfg.modify_section(f'^router eigrp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_eigrpd_config' in eigrp: - frr_cfg.add_before(frr.default_add_before, eigrp['frr_eigrpd_config']) - frr_cfg.commit_configuration(eigrp_daemon) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) + return None +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py index ba2f3cf0d..1c994492e 100755 --- a/src/conf_mode/protocols_isis.py +++ b/src/conf_mode/protocols_isis.py @@ -18,16 +18,16 @@ from sys import exit from sys import argv from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_interface_exists +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.ifconfig import Interface from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -37,54 +37,21 @@ def get_config(config=None): else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] + return get_frrender_dict(conf, argv) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'isis'): + return None - base_path = ['protocols', 'isis'] + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path - isis = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # Assign the name of our VRF context. 
This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: isis['vrf'] = vrf - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - isis['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - isis.update({'deleted' : ''}) - return isis - - # merge in default values - isis = conf.merge_defaults(isis, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - isis = dict_merge(tmp, isis) - - return isis - -def verify(isis): - # bail out early - looks like removal from running config - if not isis or 'deleted' in isis: + isis = vrf and config_dict['vrf']['name'][vrf]['protocols']['isis'] or config_dict['isis'] + isis['policy'] = config_dict['policy'] + + if 'deleted' in isis: return None if 'net' not in isis: @@ -114,12 +81,11 @@ def verify(isis): f'Recommended area lsp-mtu {recom_area_mtu} or less ' \ '(calculated on MTU size).') - if 'vrf' in isis: + if vrf: # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. 
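
The check that follows enforces the rule described in the comment above. A minimal sketch of it, reusing helpers already imported by this script and assuming get_interface_config() reports the enslaving VRF under 'master':

from vyos.utils.network import get_interface_config
from vyos import ConfigError

def verify_interface_in_vrf(interface: str, vrf: str) -> None:
    # An interface enslaved to a VRF lists that VRF as its 'master' device;
    # anything else means the IS-IS interface belongs to a different VRF.
    tmp = get_interface_config(interface)
    if 'master' not in tmp or tmp['master'] != vrf:
        raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!')
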
- vrf = isis['vrf'] tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') @@ -266,39 +232,14 @@ def verify(isis): return None -def generate(isis): - if not isis or 'deleted' in isis: - return None - - isis['frr_isisd_config'] = render_to_string('frr/isisd.frr.j2', isis) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(isis): - isis_daemon = 'isisd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in isis: - vrf = ' vrf ' + isis['vrf'] - - frr_cfg.load_configuration(isis_daemon) - frr_cfg.modify_section(f'^router isis VyOS{vrf}', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in isis: - continue - for interface in isis[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_isisd_config' in isis: - frr_cfg.add_before(frr.default_add_before, isis['frr_isisd_config']) - - frr_cfg.commit_configuration(isis_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_mpls.py b/src/conf_mode/protocols_mpls.py index ad164db9f..33d9a6dae 100755 --- a/src/conf_mode/protocols_mpls.py +++ b/src/conf_mode/protocols_mpls.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2020-2022 VyOS maintainers and contributors +# Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -20,33 +20,32 @@ from sys import exit from glob import glob from vyos.config import Config -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.utils.dict import dict_search from vyos.utils.file import read_file +from vyos.utils.process import is_systemd_service_running from vyos.utils.system import sysctl_write from vyos.configverify import verify_interface_exists from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() -config_file = r'/tmp/ldpd.frr' - def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'mpls'] - mpls = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - return mpls + return get_frrender_dict(conf) -def verify(mpls): - # If no config, then just bail out early. - if not mpls: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'mpls'): return None + mpls = config_dict['mpls'] + if 'interface' in mpls: for interface in mpls['interface']: verify_interface_exists(mpls, interface) @@ -68,26 +67,19 @@ def verify(mpls): return None -def generate(mpls): - # If there's no MPLS config generated, create dictionary key with no value. 
- if not mpls or 'deleted' in mpls: - return None - - mpls['frr_ldpd_config'] = render_to_string('frr/ldpd.frr.j2', mpls) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(mpls): - ldpd_damon = 'ldpd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() - frr_cfg.load_configuration(ldpd_damon) - frr_cfg.modify_section(f'^mpls ldp', stop_pattern='^exit', remove_stop_mark=True) + if not has_frr_protocol_in_dict(config_dict, 'mpls'): + return None - if 'frr_ldpd_config' in mpls: - frr_cfg.add_before(frr.default_add_before, mpls['frr_ldpd_config']) - frr_cfg.commit_configuration(ldpd_damon) + mpls = config_dict['mpls'] # Set number of entries in the platform label tables labels = '0' diff --git a/src/conf_mode/protocols_nhrp.py b/src/conf_mode/protocols_nhrp.py index 0bd68b7d8..ac92c9d99 100755 --- a/src/conf_mode/protocols_nhrp.py +++ b/src/conf_mode/protocols_nhrp.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2024 VyOS maintainers and contributors +# Copyright (C) 2021-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -14,95 +14,112 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -import os +from sys import exit +from sys import argv +import ipaddress from vyos.config import Config -from vyos.configdict import node_changed from vyos.template import render +from vyos.configverify import has_frr_protocol_in_dict from vyos.utils.process import run +from vyos.utils.dict import dict_search from vyos import ConfigError from vyos import airbag +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict +from vyos.utils.process import is_systemd_service_running + airbag.enable() -opennhrp_conf = '/run/opennhrp/opennhrp.conf' +nflog_redirect = 1 +nflog_multicast = 2 nhrp_nftables_conf = '/run/nftables_nhrp.conf' + def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'nhrp'] - - nhrp = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) - nhrp['del_tunnels'] = node_changed(conf, base + ['tunnel']) - - if not conf.exists(base): - return nhrp - nhrp['if_tunnel'] = conf.get_config_dict(['interfaces', 'tunnel'], key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) + return get_frrender_dict(conf, argv) - nhrp['profile_map'] = {} - profile = conf.get_config_dict(['vpn', 'ipsec', 'profile'], key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) - for name, profile_conf in profile.items(): - if 'bind' in profile_conf and 'tunnel' in profile_conf['bind']: - interfaces = profile_conf['bind']['tunnel'] - if isinstance(interfaces, str): - interfaces = [interfaces] - for interface in interfaces: - nhrp['profile_map'][interface] = name - - return nhrp - -def verify(nhrp): - if 'tunnel' in nhrp: - for name, nhrp_conf in nhrp['tunnel'].items(): - if not nhrp['if_tunnel'] or name not in nhrp['if_tunnel']: +def verify(config_dict): + if not config_dict or 'deleted' in config_dict: + return None + if 'tunnel' in config_dict: 
+ for name, nhrp_conf in config_dict['tunnel'].items(): + if not config_dict['if_tunnel'] or name not in config_dict['if_tunnel']: raise ConfigError(f'Tunnel interface "{name}" does not exist') - tunnel_conf = nhrp['if_tunnel'][name] + tunnel_conf = config_dict['if_tunnel'][name] + if 'address' in tunnel_conf: + address_list = dict_search('address', tunnel_conf) + for tunip in address_list: + if ipaddress.ip_network(tunip, + strict=False).prefixlen != 32: + raise ConfigError( + f'Tunnel {name} is used for NHRP, Netmask should be /32!') if 'encapsulation' not in tunnel_conf or tunnel_conf['encapsulation'] != 'gre': raise ConfigError(f'Tunnel "{name}" is not an mGRE tunnel') + if 'network_id' not in nhrp_conf: + raise ConfigError(f'network-id is not specified in tunnel "{name}"') + if 'remote' in tunnel_conf: raise ConfigError(f'Tunnel "{name}" cannot have a remote address defined') - if 'map' in nhrp_conf: - for map_name, map_conf in nhrp_conf['map'].items(): - if 'nbma_address' not in map_conf: + map_tunnelip = dict_search('map.tunnel_ip', nhrp_conf) + if map_tunnelip: + for map_name, map_conf in map_tunnelip.items(): + if 'nbma' not in map_conf: raise ConfigError(f'nbma-address missing on map {map_name} on tunnel {name}') - if 'dynamic_map' in nhrp_conf: - for map_name, map_conf in nhrp_conf['dynamic_map'].items(): - if 'nbma_domain_name' not in map_conf: - raise ConfigError(f'nbma-domain-name missing on dynamic-map {map_name} on tunnel {name}') + nhs_tunnelip = dict_search('nhs.tunnel_ip', nhrp_conf) + nbma_list = [] + if nhs_tunnelip: + for nhs_name, nhs_conf in nhs_tunnelip.items(): + if 'nbma' not in nhs_conf: + raise ConfigError(f'nbma-address missing on map nhs {nhs_name} on tunnel {name}') + if nhs_name != 'dynamic': + if len(list(dict_search('nbma', nhs_conf))) > 1: + raise ConfigError( + f'Static nhs tunnel-ip {nhs_name} cannot contain multiple nbma-addresses') + for nbma_ip in dict_search('nbma', nhs_conf): + if nbma_ip not in nbma_list: + nbma_list.append(nbma_ip) + else: + raise ConfigError( + f'Nbma address {nbma_ip} cannot be maped to several tunnel-ip') return None -def generate(nhrp): - if not os.path.exists(nhrp_nftables_conf): - nhrp['first_install'] = True - render(opennhrp_conf, 'nhrp/opennhrp.conf.j2', nhrp) - render(nhrp_nftables_conf, 'nhrp/nftables.conf.j2', nhrp) +def generate(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'nhrp'): + return None + + if 'deleted' in config_dict['nhrp']: + return None + render(nhrp_nftables_conf, 'frr/nhrpd_nftables.conf.j2', config_dict['nhrp']) + + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(nhrp): + +def apply(config_dict): + nft_rc = run(f'nft --file {nhrp_nftables_conf}') if nft_rc != 0: raise ConfigError('Failed to apply NHRP tunnel firewall rules') - action = 'restart' if nhrp and 'tunnel' in nhrp else 'stop' - service_rc = run(f'systemctl {action} opennhrp.service') - if service_rc != 0: - raise ConfigError(f'Failed to {action} the NHRP service') - + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None + if __name__ == '__main__': try: c = get_config() @@ -112,3 +129,4 @@ if __name__ == '__main__': except ConfigError as e: print(e) exit(1) + diff --git a/src/conf_mode/protocols_openfabric.py b/src/conf_mode/protocols_openfabric.py index 8e8c50c06..7df11fb20 100644 --- a/src/conf_mode/protocols_openfabric.py +++ b/src/conf_mode/protocols_openfabric.py @@ -18,13 +18,13 @@ 
from sys import exit from vyos.base import Warning from vyos.config import Config -from vyos.configdict import node_changed from vyos.configverify import verify_interface_exists -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.utils.process import is_systemd_service_running +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos import ConfigError -from vyos import frr from vyos import airbag - airbag.enable() def get_config(config=None): @@ -33,32 +33,14 @@ def get_config(config=None): else: conf = Config() - base_path = ['protocols', 'openfabric'] - - openfabric = conf.get_config_dict(base_path, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # Remove per domain MPLS configuration - get a list of all changed Openfabric domains - # (removed and added) so that they will be properly rendered for the FRR config. - openfabric['domains_all'] = list(conf.list_nodes(' '.join(base_path) + f' domain') + - node_changed(conf, base_path + ['domain'])) - - # Get a list of all interfaces - openfabric['interfaces_all'] = [] - for domain in openfabric['domains_all']: - interfaces_modified = list(node_changed(conf, base_path + ['domain', domain, 'interface']) + - conf.list_nodes(' '.join(base_path) + f' domain {domain} interface')) - openfabric['interfaces_all'].extend(interfaces_modified) + return get_frrender_dict(conf) - if not conf.exists(base_path): - openfabric.update({'deleted': ''}) - - return openfabric +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'openfabric'): + return None -def verify(openfabric): - # bail out early - looks like removal from running config - if not openfabric or 'deleted' in openfabric: + openfabric = config_dict['openfabric'] + if 'deleted' in openfabric: return None if 'net' not in openfabric: @@ -107,31 +89,14 @@ def verify(openfabric): return None -def generate(openfabric): - if not openfabric or 'deleted' in openfabric: - return None - - openfabric['frr_fabricd_config'] = render_to_string('frr/fabricd.frr.j2', openfabric) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(openfabric): - openfabric_daemon = 'fabricd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(openfabric_daemon) - for domain in openfabric['domains_all']: - frr_cfg.modify_section(f'^router openfabric {domain}', stop_pattern='^exit', remove_stop_mark=True) - - for interface in openfabric['interfaces_all']: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_fabricd_config' in openfabric: - frr_cfg.add_before(frr.default_add_before, openfabric['frr_fabricd_config']) - - frr_cfg.commit_configuration(openfabric_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py index 7347c4faa..c06c0aafc 100755 --- a/src/conf_mode/protocols_ospf.py +++ b/src/conf_mode/protocols_ospf.py @@ -18,18 +18,17 @@ from sys import exit from sys import argv from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import dict_merge -from vyos.configdict import node_changed from vyos.configverify 
import verify_common_route_maps from vyos.configverify import verify_route_map from vyos.configverify import verify_interface_exists from vyos.configverify import verify_access_list -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -39,85 +38,19 @@ def get_config(config=None): else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] - - base_path = ['protocols', 'ospf'] - - # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospf'] or base_path - ospf = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: ospf['vrf'] = vrf - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - ospf['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - ospf.update({'deleted' : ''}) - return ospf + return get_frrender_dict(conf, argv) - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - default_values = conf.get_config_defaults(**ospf.kwargs, recursive=True) - - # We have to cleanup the default dict, as default values could enable features - # which are not explicitly enabled on the CLI. Example: default-information - # originate comes with a default metric-type of 2, which will enable the - # entire default-information originate tree, even when not set via CLI so we - # need to check this first and probably drop that key. - if dict_search('default_information.originate', ospf) is None: - del default_values['default_information'] - if 'mpls_te' not in ospf: - del default_values['mpls_te'] - if 'graceful_restart' not in ospf: - del default_values['graceful_restart'] - for area_num in default_values.get('area', []): - if dict_search(f'area.{area_num}.area_type.nssa', ospf) is None: - del default_values['area'][area_num]['area_type']['nssa'] - - for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'rip', 'static']: - if dict_search(f'redistribute.{protocol}', ospf) is None: - del default_values['redistribute'][protocol] - if not bool(default_values['redistribute']): - del default_values['redistribute'] - - for interface in ospf.get('interface', []): - # We need to reload the defaults on every pass b/c of - # hello-multiplier dependency on dead-interval - # If hello-multiplier is set, we need to remove the default from - # dead-interval. 
- if 'hello_multiplier' in ospf['interface'][interface]: - del default_values['interface'][interface]['dead_interval'] - - ospf = config_dict_merge(default_values, ospf) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - ospf = dict_merge(tmp, ospf) +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ospf'): + return None - return ospf + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] -def verify(ospf): - if not ospf: - return None + # eqivalent of the C foo ? 'a' : 'b' statement + ospf = vrf and config_dict['vrf']['name'][vrf]['protocols']['ospf'] or config_dict['ospf'] + ospf['policy'] = config_dict['policy'] verify_common_route_maps(ospf) @@ -164,8 +97,7 @@ def verify(ospf): # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. - if 'vrf' in ospf: - vrf = ospf['vrf'] + if vrf: tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') @@ -244,39 +176,14 @@ def verify(ospf): return None -def generate(ospf): - if not ospf or 'deleted' in ospf: - return None - - ospf['frr_ospfd_config'] = render_to_string('frr/ospfd.frr.j2', ospf) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(ospf): - ospf_daemon = 'ospfd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in ospf: - vrf = ' vrf ' + ospf['vrf'] - - frr_cfg.load_configuration(ospf_daemon) - frr_cfg.modify_section(f'^router ospf{vrf}', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in ospf: - continue - for interface in ospf[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_ospfd_config' in ospf: - frr_cfg.add_before(frr.default_add_before, ospf['frr_ospfd_config']) - - frr_cfg.commit_configuration(ospf_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py index 60c2a9b16..2563eb7d5 100755 --- a/src/conf_mode/protocols_ospfv3.py +++ b/src/conf_mode/protocols_ospfv3.py @@ -18,18 +18,17 @@ from sys import exit from sys import argv from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import dict_merge -from vyos.configdict import node_changed from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_route_map from vyos.configverify import verify_interface_exists -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender 
+from vyos.frrender import get_frrender_dict from vyos.ifconfig import Interface from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -39,75 +38,19 @@ def get_config(config=None): else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] + return get_frrender_dict(conf, argv) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ospfv3'): + return None - base_path = ['protocols', 'ospfv3'] + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospfv3'] or base_path - ospfv3 = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: ospfv3['vrf'] = vrf - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - ospfv3['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - ospfv3.update({'deleted' : ''}) - return ospfv3 - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - default_values = conf.get_config_defaults(**ospfv3.kwargs, - recursive=True) - - # We have to cleanup the default dict, as default values could enable features - # which are not explicitly enabled on the CLI. Example: default-information - # originate comes with a default metric-type of 2, which will enable the - # entire default-information originate tree, even when not set via CLI so we - # need to check this first and probably drop that key. - if dict_search('default_information.originate', ospfv3) is None: - del default_values['default_information'] - if 'graceful_restart' not in ospfv3: - del default_values['graceful_restart'] - - for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'ripng', 'static']: - if dict_search(f'redistribute.{protocol}', ospfv3) is None: - del default_values['redistribute'][protocol] - if not bool(default_values['redistribute']): - del default_values['redistribute'] - - default_values.pop('interface', {}) - - # merge in remaining default values - ospfv3 = config_dict_merge(default_values, ospfv3) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - ospfv3 = dict_merge(tmp, ospfv3) - - return ospfv3 - -def verify(ospfv3): - if not ospfv3: - return None + ospfv3 = vrf and config_dict['vrf']['name'][vrf]['protocols']['ospfv3'] or config_dict['ospfv3'] + ospfv3['policy'] = config_dict['policy'] verify_common_route_maps(ospfv3) @@ -137,47 +80,21 @@ def verify(ospfv3): # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. - if 'vrf' in ospfv3: - vrf = ospfv3['vrf'] + if vrf: tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') return None -def generate(ospfv3): - if not ospfv3 or 'deleted' in ospfv3: - return None - - ospfv3['new_frr_config'] = render_to_string('frr/ospf6d.frr.j2', ospfv3) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(ospfv3): - ospf6_daemon = 'ospf6d' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in ospfv3: - vrf = ' vrf ' + ospfv3['vrf'] - - frr_cfg.load_configuration(ospf6_daemon) - frr_cfg.modify_section(f'^router ospf6{vrf}', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in ospfv3: - continue - for interface in ospfv3[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in ospfv3: - frr_cfg.add_before(frr.default_add_before, ospfv3['new_frr_config']) - - frr_cfg.commit_configuration(ospf6_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_pim.py b/src/conf_mode/protocols_pim.py index 79294a1f0..632099964 100755 --- a/src/conf_mode/protocols_pim.py +++ b/src/conf_mode/protocols_pim.py @@ -22,72 +22,33 @@ from signal import SIGTERM from sys import exit from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import node_changed from vyos.configverify import verify_interface_exists +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict +from vyos.frrender import pim_daemon +from vyos.utils.process import is_systemd_service_running from vyos.utils.process import process_named_running from vyos.utils.process import call -from vyos.template import render_to_string from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() -RESERVED_MC_NET = '224.0.0.0/24' - - def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'pim'] - - pim = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) - - # We can not run both IGMP proxy and PIM at the same time - get IGMP - # proxy status - if conf.exists(['protocols', 'igmp-proxy']): - pim.update({'igmp_proxy_enabled' : {}}) - - # FRR has VRF support for different routing daemons. 
As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - pim['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - pim.update({'deleted' : ''}) - return pim - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - default_values = conf.get_config_defaults(**pim.kwargs, recursive=True) - - # We have to cleanup the default dict, as default values could enable features - # which are not explicitly enabled on the CLI. Example: default-information - # originate comes with a default metric-type of 2, which will enable the - # entire default-information originate tree, even when not set via CLI so we - # need to check this first and probably drop that key. - for interface in pim.get('interface', []): - # We need to reload the defaults on every pass b/c of - # hello-multiplier dependency on dead-interval - # If hello-multiplier is set, we need to remove the default from - # dead-interval. - if 'igmp' not in pim['interface'][interface]: - del default_values['interface'][interface]['igmp'] - - pim = config_dict_merge(default_values, pim) - return pim - -def verify(pim): - if not pim or 'deleted' in pim: + return get_frrender_dict(conf) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'pim'): + return None + + pim = config_dict['pim'] + + if 'deleted' in pim: return None if 'igmp_proxy_enabled' in pim: @@ -96,6 +57,7 @@ def verify(pim): if 'interface' not in pim: raise ConfigError('PIM require defined interfaces!') + RESERVED_MC_NET = '224.0.0.0/24' for interface, interface_config in pim['interface'].items(): verify_interface_exists(pim, interface) @@ -124,41 +86,26 @@ def verify(pim): raise ConfigError(f'{pim_base_error} must be unique!') unique.append(gr_addr) -def generate(pim): - if not pim or 'deleted' in pim: - return None - pim['frr_pimd_config'] = render_to_string('frr/pimd.frr.j2', pim) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(pim): - pim_daemon = 'pimd' - pim_pid = process_named_running(pim_daemon) - - if not pim or 'deleted' in pim: - if 'deleted' in pim: - os.kill(int(pim_pid), SIGTERM) +def apply(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'pim'): + return None + pim_pid = process_named_running(pim_daemon) + pim = config_dict['pim'] + if 'deleted' in pim: + os.kill(int(pim_pid), SIGTERM) return None if not pim_pid: call('/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1') - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(pim_daemon) - frr_cfg.modify_section(f'^ip pim') - frr_cfg.modify_section(f'^ip igmp') - - for key in ['interface', 'interface_removed']: - if key not in pim: - continue - for interface in pim[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_pimd_config' in pim: - frr_cfg.add_before(frr.default_add_before, pim['frr_pimd_config']) - 
frr_cfg.commit_configuration(pim_daemon) + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_pim6.py b/src/conf_mode/protocols_pim6.py index 581ffe238..03a79139a 100755 --- a/src/conf_mode/protocols_pim6.py +++ b/src/conf_mode/protocols_pim6.py @@ -19,12 +19,12 @@ from ipaddress import IPv6Network from sys import exit from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import node_changed +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_interface_exists -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -33,34 +33,15 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['protocols', 'pim6'] - pim6 = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, with_recursive_defaults=True) + return get_frrender_dict(conf) - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - pim6['interface_removed'] = list(interfaces_removed) +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'pim6'): + return None - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - pim6.update({'deleted' : ''}) - return pim6 - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. 
- default_values = conf.get_config_defaults(**pim6.kwargs, recursive=True) - - pim6 = config_dict_merge(default_values, pim6) - return pim6 - -def verify(pim6): - if not pim6 or 'deleted' in pim6: - return + pim6 = config_dict['pim6'] + if 'deleted' in pim6: + return None for interface, interface_config in pim6.get('interface', {}).items(): verify_interface_exists(pim6, interface) @@ -94,32 +75,14 @@ def verify(pim6): raise ConfigError(f'{pim_base_error} must be unique!') unique.append(gr_addr) -def generate(pim6): - if not pim6 or 'deleted' in pim6: - return - pim6['new_frr_config'] = render_to_string('frr/pim6d.frr.j2', pim6) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(pim6): - if pim6 is None: - return - - pim6_daemon = 'pim6d' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(pim6_daemon) - - for key in ['interface', 'interface_removed']: - if key not in pim6: - continue - for interface in pim6[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in pim6: - frr_cfg.add_before(frr.default_add_before, pim6['new_frr_config']) - frr_cfg.commit_configuration(pim6_daemon) +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_rip.py b/src/conf_mode/protocols_rip.py index 9afac544d..ec9dfbb8b 100755 --- a/src/conf_mode/protocols_rip.py +++ b/src/conf_mode/protocols_rip.py @@ -17,15 +17,15 @@ from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.utils.dict import dict_search -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -34,41 +34,16 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['protocols', 'rip'] - rip = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - rip['interface_removed'] = list(interfaces_removed) + return get_frrender_dict(conf) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - rip.update({'deleted' : ''}) - return rip - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - rip = conf.merge_defaults(rip, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). 
- # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - rip = dict_merge(tmp, rip) - - return rip - -def verify(rip): - if not rip: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'rip'): return None + rip = config_dict['rip'] + rip['policy'] = config_dict['policy'] + verify_common_route_maps(rip) acl_in = dict_search('distribute_list.access_list.in', rip) @@ -93,39 +68,14 @@ def verify(rip): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') -def generate(rip): - if not rip or 'deleted' in rip: - return None - - rip['new_frr_config'] = render_to_string('frr/ripd.frr.j2', rip) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(rip): - rip_daemon = 'ripd' - zebra_daemon = 'zebra' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section('^ip protocol rip route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - frr_cfg.commit_configuration(zebra_daemon) - - frr_cfg.load_configuration(rip_daemon) - frr_cfg.modify_section('^key chain \S+', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.modify_section('^router rip', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in rip: - continue - for interface in rip[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in rip: - frr_cfg.add_before(frr.default_add_before, rip['new_frr_config']) - frr_cfg.commit_configuration(rip_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_ripng.py b/src/conf_mode/protocols_ripng.py index 23416ff96..9a9ac8ec8 100755 --- a/src/conf_mode/protocols_ripng.py +++ b/src/conf_mode/protocols_ripng.py @@ -17,14 +17,15 @@ from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.utils.dict import dict_search -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -33,32 +34,16 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['protocols', 'ripng'] - ripng = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - return ripng + return get_frrender_dict(conf) - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. 
- ripng = conf.merge_defaults(ripng, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - ripng = dict_merge(tmp, ripng) - - return ripng - -def verify(ripng): - if not ripng: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ripng'): return None + ripng = config_dict['ripng'] + ripng['policy'] = config_dict['policy'] + verify_common_route_maps(ripng) acl_in = dict_search('distribute_list.access_list.in', ripng) @@ -83,34 +68,14 @@ def verify(ripng): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') -def generate(ripng): - if not ripng: - ripng['new_frr_config'] = '' - return None - - ripng['new_frr_config'] = render_to_string('frr/ripngd.frr.j2', ripng) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(ripng): - ripng_daemon = 'ripngd' - zebra_daemon = 'zebra' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section('^ipv6 protocol ripng route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - frr_cfg.commit_configuration(zebra_daemon) - - frr_cfg.load_configuration(ripng_daemon) - frr_cfg.modify_section('key chain \S+', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.modify_section('interface \S+', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.modify_section('^router ripng', stop_pattern='^exit', remove_stop_mark=True) - if 'new_frr_config' in ripng: - frr_cfg.add_before(frr.default_add_before, ripng['new_frr_config']) - frr_cfg.commit_configuration(ripng_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_rpki.py b/src/conf_mode/protocols_rpki.py index a59ecf3e4..ef0250e3d 100755 --- a/src/conf_mode/protocols_rpki.py +++ b/src/conf_mode/protocols_rpki.py @@ -20,13 +20,15 @@ from glob import glob from sys import exit from vyos.config import Config +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.pki import wrap_openssh_public_key from vyos.pki import wrap_openssh_private_key -from vyos.template import render_to_string from vyos.utils.dict import dict_search_args from vyos.utils.file import write_file +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -37,25 +39,14 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['protocols', 'rpki'] + return get_frrender_dict(conf) - rpki = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, with_pki=True) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - rpki.update({'deleted' : ''}) - return rpki - - # We have gathered the dict representation of the CLI, but there are 
default - # options which we need to update into the dictionary retrived. - rpki = conf.merge_defaults(rpki, recursive=True) - - return rpki - -def verify(rpki): - if not rpki: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'rpki'): return None + rpki = config_dict['rpki'] + if 'cache' in rpki: preferences = [] for peer, peer_config in rpki['cache'].items(): @@ -81,12 +72,14 @@ def verify(rpki): return None -def generate(rpki): +def generate(config_dict): for key in glob(f'{rpki_ssh_key_base}*'): os.unlink(key) - if not rpki: - return + if not has_frr_protocol_in_dict(config_dict, 'rpki'): + return None + + rpki = config_dict['rpki'] if 'cache' in rpki: for cache, cache_config in rpki['cache'].items(): @@ -102,21 +95,13 @@ def generate(rpki): write_file(cache_config['ssh']['public_key_file'], wrap_openssh_public_key(public_key_data, public_key_type)) write_file(cache_config['ssh']['private_key_file'], wrap_openssh_private_key(private_key_data)) - rpki['new_frr_config'] = render_to_string('frr/rpki.frr.j2', rpki) - + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(rpki): - bgp_daemon = 'bgpd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(bgp_daemon) - frr_cfg.modify_section('^rpki', stop_pattern='^exit', remove_stop_mark=True) - if 'new_frr_config' in rpki: - frr_cfg.add_before(frr.default_add_before, rpki['new_frr_config']) - - frr_cfg.commit_configuration(bgp_daemon) +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_segment-routing.py b/src/conf_mode/protocols_segment-routing.py index b36c2ca11..f2bd42a79 100755 --- a/src/conf_mode/protocols_segment-routing.py +++ b/src/conf_mode/protocols_segment-routing.py @@ -17,12 +17,15 @@ from sys import exit from vyos.config import Config -from vyos.configdict import node_changed -from vyos.template import render_to_string +from vyos.configdict import list_diff +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict +from vyos.ifconfig import Section from vyos.utils.dict import dict_search +from vyos.utils.process import is_systemd_service_running from vyos.utils.system import sysctl_write from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -32,25 +35,14 @@ def get_config(config=None): else: conf = Config() - base = ['protocols', 'segment-routing'] - sr = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True, - with_recursive_defaults=True) + return get_frrender_dict(conf) - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. 
- interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - sr['interface_removed'] = list(interfaces_removed) +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'segment_routing'): + return None - import pprint - pprint.pprint(sr) - return sr + sr = config_dict['segment_routing'] -def verify(sr): if 'srv6' in sr: srv6_enable = False if 'interface' in sr: @@ -62,47 +54,43 @@ def verify(sr): raise ConfigError('SRv6 should be enabled on at least one interface!') return None -def generate(sr): - if not sr: - return None - - sr['new_frr_config'] = render_to_string('frr/zebra.segment_routing.frr.j2', sr) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(sr): - zebra_daemon = 'zebra' +def apply(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'segment_routing'): + return None - if 'interface_removed' in sr: - for interface in sr['interface_removed']: - # Disable processing of IPv6-SR packets - sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') + sr = config_dict['segment_routing'] + + current_interfaces = Section.interfaces() + sr_interfaces = list(sr.get('interface', {}).keys()) - if 'interface' in sr: - for interface, interface_config in sr['interface'].items(): - # Accept or drop SR-enabled IPv6 packets on this interface - if 'srv6' in interface_config: - sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '1') - # Define HMAC policy for ingress SR-enabled packets on this interface - # It's a redundant check as HMAC has a default value - but better safe - # then sorry - tmp = dict_search('srv6.hmac', interface_config) - if tmp == 'accept': - sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '0') - elif tmp == 'drop': - sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '1') - elif tmp == 'ignore': - sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '-1') - else: - sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') + for interface in list_diff(current_interfaces, sr_interfaces): + # Disable processing of IPv6-SR packets + sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'^segment-routing') - if 'new_frr_config' in sr: - frr_cfg.add_before(frr.default_add_before, sr['new_frr_config']) - frr_cfg.commit_configuration(zebra_daemon) + for interface, interface_config in sr.get('interface', {}).items(): + # Accept or drop SR-enabled IPv6 packets on this interface + if 'srv6' in interface_config: + sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '1') + # Define HMAC policy for ingress SR-enabled packets on this interface + # It's a redundant check as HMAC has a default value - but better safe + # then sorry + tmp = dict_search('srv6.hmac', interface_config) + if tmp == 'accept': + sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '0') + elif tmp == 'drop': + sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '1') + elif tmp == 'ignore': + sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '-1') + else: + sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_static.py 
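The segment-routing apply() above now derives the interfaces to clean up from the difference between the system's interfaces and those still present in the config, instead of tracking removed nodes. A hedged sketch of that sysctl handling follows; the function and argument names are assumptions for illustration, and the real script uses vyos.utils.system.sysctl_write and Section.interfaces().

```python
# Minimal sketch of the new seg6 sysctl handling: interfaces absent from the
# segment-routing config get SRv6 processing disabled, configured ones get it
# enabled when 'srv6' is set.
def sync_seg6_sysctls(current_interfaces, sr_config, sysctl_write):
    sr_interfaces = set(sr_config.get('interface', {}))
    # Interfaces present on the system but no longer configured are disabled
    for ifname in set(current_interfaces) - sr_interfaces:
        sysctl_write(f'net.ipv6.conf.{ifname}.seg6_enabled', '0')
    for ifname, if_conf in sr_config.get('interface', {}).items():
        enabled = '1' if 'srv6' in if_conf else '0'
        sysctl_write(f'net.ipv6.conf.{ifname}.seg6_enabled', enabled)

# Example: eth1 keeps SRv6 enabled, eth2 is cleaned up after leaving the config.
sync_seg6_sysctls(['eth1', 'eth2'],
                  {'interface': {'eth1': {'srv6': {}}}},
                  lambda key, value: print(key, '=', value))
```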
b/src/conf_mode/protocols_static.py index a2373218a..1b9e51167 100755 --- a/src/conf_mode/protocols_static.py +++ b/src/conf_mode/protocols_static.py @@ -14,19 +14,19 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +from ipaddress import IPv4Network from sys import exit from sys import argv from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import get_dhcp_interfaces -from vyos.configdict import get_pppoe_interfaces +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_vrf +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict +from vyos.utils.process import is_systemd_service_running from vyos.template import render -from vyos.template import render_to_string from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -38,36 +38,20 @@ def get_config(config=None): else: conf = Config() + return get_frrender_dict(conf, argv) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'static'): + return None + vrf = None - if len(argv) > 1: - vrf = argv[1] + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] - base_path = ['protocols', 'static'] # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'static'] or base_path - static = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) - - # Assign the name of our VRF context - if vrf: static['vrf'] = vrf - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - static = dict_merge(tmp, static) - - # T3680 - get a list of all interfaces currently configured to use DHCP - tmp = get_dhcp_interfaces(conf, vrf) - if tmp: static.update({'dhcp' : tmp}) - tmp = get_pppoe_interfaces(conf, vrf) - if tmp: static.update({'pppoe' : tmp}) - - return static - -def verify(static): + static = vrf and config_dict['vrf']['name'][vrf]['protocols']['static'] or config_dict['static'] + static['policy'] = config_dict['policy'] + verify_common_route_maps(static) for route in ['route', 'route6']: @@ -88,37 +72,36 @@ def verify(static): if {'blackhole', 'reject'} <= set(prefix_options): raise ConfigError(f'Can not use both blackhole and reject for '\ - 'prefix "{prefix}"!') + f'prefix "{prefix}"!') + + if 'multicast' in static and 'route' in static['multicast']: + for prefix, prefix_options in static['multicast']['route'].items(): + if not IPv4Network(prefix).is_multicast: + raise ConfigError(f'{prefix} is not a multicast network!') return None -def generate(static): - if not static: +def generate(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'static'): return None - # Put routing table names in /etc/iproute2/rt_tables - render(config_file, 'iproute2/static.conf.j2', static) - static['new_frr_config'] = render_to_string('frr/staticd.frr.j2', static) - return None - -def apply(static): - static_daemon = 'staticd' + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(static_daemon) + # eqivalent of the C foo ? 'a' : 'b' statement + static = vrf and config_dict['vrf']['name'][vrf]['protocols']['static'] or config_dict['static'] - if 'vrf' in static: - vrf = static['vrf'] - frr_cfg.modify_section(f'^vrf {vrf}', stop_pattern='^exit-vrf', remove_stop_mark=True) - else: - frr_cfg.modify_section(r'^ip route .*') - frr_cfg.modify_section(r'^ipv6 route .*') + # Put routing table names in /etc/iproute2/rt_tables + render(config_file, 'iproute2/static.conf.j2', static) - if 'new_frr_config' in static: - frr_cfg.add_before(frr.default_add_before, static['new_frr_config']) - frr_cfg.commit_configuration(static_daemon) + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) + return None +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': diff --git a/src/conf_mode/protocols_static_multicast.py b/src/conf_mode/protocols_static_multicast.py deleted file mode 100755 index d323ceb4f..000000000 --- a/src/conf_mode/protocols_static_multicast.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2020-2024 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
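With protocols_static_multicast.py removed, the multicast route check now lives in protocols_static.py's verify(), using IPv4Network.is_multicast as shown in the hunk above. A short stand-alone illustration, assuming the same semantics (helper name and ValueError are illustrative):

```python
# Stand-alone illustration of the multicast prefix validation: a configured
# multicast route must fall inside 224.0.0.0/4.
from ipaddress import IPv4Network

def check_multicast_routes(routes):
    for prefix in routes:
        if not IPv4Network(prefix).is_multicast:
            raise ValueError(f'{prefix} is not a multicast network!')

check_multicast_routes(['239.1.1.0/24'])   # accepted
# check_multicast_routes(['10.0.0.0/24'])  # would raise ValueError
```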
- - -from ipaddress import IPv4Address -from sys import exit - -from vyos import ConfigError -from vyos import frr -from vyos.config import Config -from vyos.template import render_to_string - -from vyos import airbag -airbag.enable() - -config_file = r'/tmp/static_mcast.frr' - -# Get configuration for static multicast route -def get_config(config=None): - if config: - conf = config - else: - conf = Config() - mroute = { - 'old_mroute' : {}, - 'mroute' : {} - } - - base_path = "protocols static multicast" - - if not (conf.exists(base_path) or conf.exists_effective(base_path)): - return None - - conf.set_level(base_path) - - # Get multicast effective routes - for route in conf.list_effective_nodes('route'): - mroute['old_mroute'][route] = {} - for next_hop in conf.list_effective_nodes('route {0} next-hop'.format(route)): - mroute['old_mroute'][route].update({ - next_hop : conf.return_value('route {0} next-hop {1} distance'.format(route, next_hop)) - }) - - # Get multicast effective interface-routes - for route in conf.list_effective_nodes('interface-route'): - if not route in mroute['old_mroute']: - mroute['old_mroute'][route] = {} - for next_hop in conf.list_effective_nodes('interface-route {0} next-hop-interface'.format(route)): - mroute['old_mroute'][route].update({ - next_hop : conf.return_value('interface-route {0} next-hop-interface {1} distance'.format(route, next_hop)) - }) - - # Get multicast routes - for route in conf.list_nodes('route'): - mroute['mroute'][route] = {} - for next_hop in conf.list_nodes('route {0} next-hop'.format(route)): - mroute['mroute'][route].update({ - next_hop : conf.return_value('route {0} next-hop {1} distance'.format(route, next_hop)) - }) - - # Get multicast interface-routes - for route in conf.list_nodes('interface-route'): - if not route in mroute['mroute']: - mroute['mroute'][route] = {} - for next_hop in conf.list_nodes('interface-route {0} next-hop-interface'.format(route)): - mroute['mroute'][route].update({ - next_hop : conf.return_value('interface-route {0} next-hop-interface {1} distance'.format(route, next_hop)) - }) - - return mroute - -def verify(mroute): - if mroute is None: - return None - - for route in mroute['mroute']: - route = route.split('/') - if IPv4Address(route[0]) < IPv4Address('224.0.0.0'): - raise ConfigError(route + " not a multicast network") - - -def generate(mroute): - if mroute is None: - return None - - mroute['new_frr_config'] = render_to_string('frr/static_mcast.frr.j2', mroute) - return None - - -def apply(mroute): - if mroute is None: - return None - static_daemon = 'staticd' - - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(static_daemon) - - if 'old_mroute' in mroute: - for route_gr in mroute['old_mroute']: - for nh in mroute['old_mroute'][route_gr]: - if mroute['old_mroute'][route_gr][nh]: - frr_cfg.modify_section(f'^ip mroute {route_gr} {nh} {mroute["old_mroute"][route_gr][nh]}') - else: - frr_cfg.modify_section(f'^ip mroute {route_gr} {nh}') - - if 'new_frr_config' in mroute: - frr_cfg.add_before(frr.default_add_before, mroute['new_frr_config']) - - frr_cfg.commit_configuration(static_daemon) - - return None - - -if __name__ == '__main__': - try: - c = get_config() - verify(c) - generate(c) - apply(c) - except ConfigError as e: - print(e) - exit(1) diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py index 7dfad3180..59e307a39 100755 --- a/src/conf_mode/qos.py +++ b/src/conf_mode/qos.py @@ -198,10 +198,16 @@ def get_config(config=None): def _verify_match(cls_config: dict) -> None: if 'match' in 
cls_config: for match, match_config in cls_config['match'].items(): - if {'ip', 'ipv6'} <= set(match_config): + filters = set(match_config) + if {'ip', 'ipv6'} <= filters: raise ConfigError( f'Can not use both IPv6 and IPv4 in one match ({match})!') + if {'interface', 'vif'} & filters: + if {'ip', 'ipv6', 'ether'} & filters: + raise ConfigError( + f'Can not combine protocol and interface or vlan tag match ({match})!') + def _verify_match_group_exist(cls_config, qos): if 'match_group' in cls_config: @@ -210,6 +216,46 @@ def _verify_match_group_exist(cls_config, qos): Warning(f'Match group "{group}" does not exist!') +def _verify_default_policy_exist(policy, policy_config): + if 'default' not in policy_config: + raise ConfigError(f'Policy {policy} misses "default" class!') + + +def _check_shaper_hfsc_rate(cls, cls_conf): + is_m2_exist = False + for crit in TrafficShaperHFSC.criteria: + if cls_conf.get(crit, {}).get('m2') is not None: + is_m2_exist = True + + if cls_conf.get(crit, {}).get('m1') is not None: + for crit_val in ['m2', 'd']: + if cls_conf.get(crit, {}).get(crit_val) is None: + raise ConfigError( + f'{cls} {crit} m1 value is set, but no {crit_val} was found!' + ) + + if not is_m2_exist: + raise ConfigError(f'At least one m2 value needs to be set for class: {cls}') + + if ( + cls_conf.get('upperlimit', {}).get('m2') is not None + and cls_conf.get('linkshare', {}).get('m2') is None + ): + raise ConfigError( + f'Linkshare m2 needs to be defined to use upperlimit m2 for class: {cls}' + ) + + +def _verify_shaper_hfsc(policy, policy_config): + _verify_default_policy_exist(policy, policy_config) + + _check_shaper_hfsc_rate('default', policy_config.get('default')) + + if 'class' in policy_config: + for cls, cls_conf in policy_config['class'].items(): + _check_shaper_hfsc_rate(cls, cls_conf) + + def verify(qos): if not qos or 'interface' not in qos: return None @@ -253,8 +299,13 @@ def verify(qos): if queue_lim < max_tr: raise ConfigError(f'Policy "{policy}" uses queue-limit "{queue_lim}" < max-threshold "{max_tr}"!') if policy_type in ['priority_queue']: - if 'default' not in policy_config: - raise ConfigError(f'Policy {policy} misses "default" class!') + _verify_default_policy_exist(policy, policy_config) + if policy_type in ['rate_control']: + if 'bandwidth' not in policy_config: + raise ConfigError('Bandwidth not defined') + if policy_type in ['shaper_hfsc']: + _verify_shaper_hfsc(policy, policy_config) + if 'default' in policy_config: if 'bandwidth' not in policy_config['default'] and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']: raise ConfigError('Bandwidth not defined for default traffic!') @@ -290,6 +341,7 @@ def generate(qos): return None + def apply(qos): # Always delete "old" shapers first for interface in interfaces(): diff --git a/src/conf_mode/service_console-server.py b/src/conf_mode/service_console-server.py index b112add3f..b83c6dfb1 100755 --- a/src/conf_mode/service_console-server.py +++ b/src/conf_mode/service_console-server.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2018-2021 VyOS maintainers and contributors +# Copyright (C) 2018-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -98,6 +98,12 @@ def generate(proxy): return None def apply(proxy): + if not os.path.exists('/etc/dropbear/dropbear_rsa_host_key'): + call('dropbearkey -t rsa -s 4096 -f /etc/dropbear/dropbear_rsa_host_key') + + if 
not os.path.exists('/etc/dropbear/dropbear_ecdsa_host_key'): + call('dropbearkey -t ecdsa -f /etc/dropbear/dropbear_ecdsa_host_key') + call('systemctl daemon-reload') call('systemctl stop dropbear@*.service conserver-server.service') diff --git a/src/conf_mode/service_dhcp-server.py b/src/conf_mode/service_dhcp-server.py index e89448e2d..99c7e6a1f 100755 --- a/src/conf_mode/service_dhcp-server.py +++ b/src/conf_mode/service_dhcp-server.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2018-2024 VyOS maintainers and contributors +# Copyright (C) 2018-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -38,20 +38,21 @@ from vyos.utils.network import is_subnet_connected from vyos.utils.network import is_addr_assigned from vyos import ConfigError from vyos import airbag + airbag.enable() -ctrl_config_file = '/run/kea/kea-ctrl-agent.conf' ctrl_socket = '/run/kea/dhcp4-ctrl-socket' config_file = '/run/kea/kea-dhcp4.conf' +config_file_d2 = '/run/kea/kea-dhcp-ddns.conf' lease_file = '/config/dhcp/dhcp4-leases.csv' lease_file_glob = '/config/dhcp/dhcp4-leases*' -systemd_override = r'/run/systemd/system/kea-ctrl-agent.service.d/10-override.conf' user_group = '_kea' ca_cert_file = '/run/kea/kea-failover-ca.pem' cert_file = '/run/kea/kea-failover.pem' cert_key_file = '/run/kea/kea-failover-key.pem' + def dhcp_slice_range(exclude_list, range_dict): """ This function is intended to slice a DHCP range. What does it mean? @@ -74,19 +75,21 @@ def dhcp_slice_range(exclude_list, range_dict): range_last_exclude = '' for e in exclude_list: - if (ip_address(e) >= ip_address(range_start)) and \ - (ip_address(e) <= ip_address(range_stop)): + if (ip_address(e) >= ip_address(range_start)) and ( + ip_address(e) <= ip_address(range_stop) + ): range_last_exclude = e for e in exclude_list: - if (ip_address(e) >= ip_address(range_start)) and \ - (ip_address(e) <= ip_address(range_stop)): - + if (ip_address(e) >= ip_address(range_start)) and ( + ip_address(e) <= ip_address(range_stop) + ): # Build new address range ending one address before exclude address - r = { - 'start' : range_start, - 'stop' : str(ip_address(e) -1) - } + r = {'start': range_start, 'stop': str(ip_address(e) - 1)} + + if 'option' in range_dict: + r['option'] = range_dict['option'] + # On the next run our address range will start one address after # the exclude address range_start = str(ip_address(e) + 1) @@ -100,21 +103,23 @@ def dhcp_slice_range(exclude_list, range_dict): # Take care of last IP address range spanning from the last exclude # address (+1) to the end of the initial configured range if ip_address(e) == ip_address(range_last_exclude): - r = { - 'start': str(ip_address(e) + 1), - 'stop': str(range_stop) - } + r = {'start': str(ip_address(e) + 1), 'stop': str(range_stop)} + + if 'option' in range_dict: + r['option'] = range_dict['option'] + if not (ip_address(r['start']) > ip_address(r['stop'])): output.append(r) else: - # if the excluded address was not part of the range, we simply return - # the entire ranga again - if not range_last_exclude: - if range_dict not in output: - output.append(range_dict) + # if the excluded address was not part of the range, we simply return + # the entire ranga again + if not range_last_exclude: + if range_dict not in output: + output.append(range_dict) return output + def get_config(config=None): if config: conf = config @@ -124,10 +129,13 @@ def 
get_config(config=None): if not conf.exists(base): return None - dhcp = conf.get_config_dict(base, key_mangling=('-', '_'), - no_tag_node_value_mangle=True, - get_first_key=True, - with_recursive_defaults=True) + dhcp = conf.get_config_dict( + base, + key_mangling=('-', '_'), + no_tag_node_value_mangle=True, + get_first_key=True, + with_recursive_defaults=True, + ) if 'shared_network_name' in dhcp: for network, network_config in dhcp['shared_network_name'].items(): @@ -139,22 +147,40 @@ def get_config(config=None): new_range_id = 0 new_range_dict = {} for r, r_config in subnet_config['range'].items(): - for slice in dhcp_slice_range(subnet_config['exclude'], r_config): - new_range_dict.update({new_range_id : slice}) - new_range_id +=1 + for slice in dhcp_slice_range( + subnet_config['exclude'], r_config + ): + new_range_dict.update({new_range_id: slice}) + new_range_id += 1 dhcp['shared_network_name'][network]['subnet'][subnet].update( - {'range' : new_range_dict}) + {'range': new_range_dict} + ) if len(dhcp['high_availability']) == 1: ## only default value for mode is set, need to remove ha node del dhcp['high_availability'] else: if dict_search('high_availability.certificate', dhcp): - dhcp['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) + dhcp['pki'] = conf.get_config_dict( + ['pki'], + key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + ) return dhcp +def verify_ddns_domain_servers(domain_type, domain): + if 'dns_server' in domain: + invalid_servers = [] + for server_no, server_config in domain['dns_server'].items(): + if 'address' not in server_config: + invalid_servers.append(server_no) + if len(invalid_servers) > 0: + raise ConfigError(f'{domain_type} DNS servers {", ".join(invalid_servers)} in DDNS configuration need to have an IP address') + return None + def verify(dhcp): # bail out early - looks like removal from running config if not dhcp or 'disable' in dhcp: @@ -162,13 +188,15 @@ def verify(dhcp): # If DHCP is enabled we need one share-network if 'shared_network_name' not in dhcp: - raise ConfigError('No DHCP shared networks configured.\n' \ - 'At least one DHCP shared network must be configured.') + raise ConfigError( + 'No DHCP shared networks configured.\n' + 'At least one DHCP shared network must be configured.' + ) # Inspect shared-network/subnet listen_ok = False subnets = [] - shared_networks = len(dhcp['shared_network_name']) + shared_networks = len(dhcp['shared_network_name']) disabled_shared_networks = 0 subnet_ids = [] @@ -179,12 +207,16 @@ def verify(dhcp): disabled_shared_networks += 1 if 'subnet' not in network_config: - raise ConfigError(f'No subnets defined for {network}. At least one\n' \ - 'lease subnet must be configured.') + raise ConfigError( + f'No subnets defined for {network}. At least one\n' + 'lease subnet must be configured.' 
+ ) for subnet, subnet_config in network_config['subnet'].items(): if 'subnet_id' not in subnet_config: - raise ConfigError(f'Unique subnet ID not specified for subnet "{subnet}"') + raise ConfigError( + f'Unique subnet ID not specified for subnet "{subnet}"' + ) if subnet_config['subnet_id'] in subnet_ids: raise ConfigError(f'Subnet ID for subnet "{subnet}" is not unique') @@ -195,32 +227,46 @@ def verify(dhcp): if 'static_route' in subnet_config: for route, route_option in subnet_config['static_route'].items(): if 'next_hop' not in route_option: - raise ConfigError(f'DHCP static-route "{route}" requires router to be defined!') + raise ConfigError( + f'DHCP static-route "{route}" requires router to be defined!' + ) # Check if DHCP address range is inside configured subnet declaration if 'range' in subnet_config: networks = [] for range, range_config in subnet_config['range'].items(): if not {'start', 'stop'} <= set(range_config): - raise ConfigError(f'DHCP range "{range}" start and stop address must be defined!') + raise ConfigError( + f'DHCP range "{range}" start and stop address must be defined!' + ) # Start/Stop address must be inside network for key in ['start', 'stop']: if ip_address(range_config[key]) not in ip_network(subnet): - raise ConfigError(f'DHCP range "{range}" {key} address not within shared-network "{network}, {subnet}"!') + raise ConfigError( + f'DHCP range "{range}" {key} address not within shared-network "{network}, {subnet}"!' + ) # Stop address must be greater or equal to start address - if ip_address(range_config['stop']) < ip_address(range_config['start']): - raise ConfigError(f'DHCP range "{range}" stop address must be greater or equal\n' \ - 'to the ranges start address!') + if ip_address(range_config['stop']) < ip_address( + range_config['start'] + ): + raise ConfigError( + f'DHCP range "{range}" stop address must be greater or equal\n' + 'to the ranges start address!' + ) for network in networks: start = range_config['start'] stop = range_config['stop'] if start in network: - raise ConfigError(f'Range "{range}" start address "{start}" already part of another range!') + raise ConfigError( + f'Range "{range}" start address "{start}" already part of another range!' + ) if stop in network: - raise ConfigError(f'Range "{range}" stop address "{stop}" already part of another range!') + raise ConfigError( + f'Range "{range}" stop address "{stop}" already part of another range!' + ) tmp = IPRange(range_config['start'], range_config['stop']) networks.append(tmp) @@ -229,12 +275,16 @@ def verify(dhcp): if 'exclude' in subnet_config: for exclude in subnet_config['exclude']: if ip_address(exclude) not in ip_network(subnet): - raise ConfigError(f'Excluded IP address "{exclude}" not within shared-network "{network}, {subnet}"!') + raise ConfigError( + f'Excluded IP address "{exclude}" not within shared-network "{network}, {subnet}"!' + ) # At least one DHCP address range or static-mapping required if 'range' not in subnet_config and 'static_mapping' not in subnet_config: - raise ConfigError(f'No DHCP address range or active static-mapping configured\n' \ - f'within shared-network "{network}, {subnet}"!') + raise ConfigError( + f'No DHCP address range or active static-mapping configured\n' + f'within shared-network "{network}, {subnet}"!' 
+ ) if 'static_mapping' in subnet_config: # Static mappings require just a MAC address (will use an IP from the dynamic pool if IP is not set) @@ -243,29 +293,42 @@ def verify(dhcp): used_duid = [] for mapping, mapping_config in subnet_config['static_mapping'].items(): if 'ip_address' in mapping_config: - if ip_address(mapping_config['ip_address']) not in ip_network(subnet): - raise ConfigError(f'Configured static lease address for mapping "{mapping}" is\n' \ - f'not within shared-network "{network}, {subnet}"!') - - if ('mac' not in mapping_config and 'duid' not in mapping_config) or \ - ('mac' in mapping_config and 'duid' in mapping_config): - raise ConfigError(f'Either MAC address or Client identifier (DUID) is required for ' - f'static mapping "{mapping}" within shared-network "{network}, {subnet}"!') + if ip_address(mapping_config['ip_address']) not in ip_network( + subnet + ): + raise ConfigError( + f'Configured static lease address for mapping "{mapping}" is\n' + f'not within shared-network "{network}, {subnet}"!' + ) + + if ( + 'mac' not in mapping_config and 'duid' not in mapping_config + ) or ('mac' in mapping_config and 'duid' in mapping_config): + raise ConfigError( + f'Either MAC address or Client identifier (DUID) is required for ' + f'static mapping "{mapping}" within shared-network "{network}, {subnet}"!' + ) if 'disable' not in mapping_config: if mapping_config['ip_address'] in used_ips: - raise ConfigError(f'Configured IP address for static mapping "{mapping}" already exists on another static mapping') + raise ConfigError( + f'Configured IP address for static mapping "{mapping}" already exists on another static mapping' + ) used_ips.append(mapping_config['ip_address']) if 'disable' not in mapping_config: if 'mac' in mapping_config: if mapping_config['mac'] in used_mac: - raise ConfigError(f'Configured MAC address for static mapping "{mapping}" already exists on another static mapping') + raise ConfigError( + f'Configured MAC address for static mapping "{mapping}" already exists on another static mapping' + ) used_mac.append(mapping_config['mac']) if 'duid' in mapping_config: if mapping_config['duid'] in used_duid: - raise ConfigError(f'Configured DUID for static mapping "{mapping}" already exists on another static mapping') + raise ConfigError( + f'Configured DUID for static mapping "{mapping}" already exists on another static mapping' + ) used_duid.append(mapping_config['duid']) # There must be one subnet connected to a listen interface. @@ -276,73 +339,118 @@ def verify(dhcp): # Subnets must be non overlapping if subnet in subnets: - raise ConfigError(f'Configured subnets must be unique! Subnet "{subnet}"\n' - 'defined multiple times!') + raise ConfigError( + f'Configured subnets must be unique! Subnet "{subnet}"\n' + 'defined multiple times!' + ) subnets.append(subnet) # Check for overlapping subnets net = ip_network(subnet) for n in subnets: net2 = ip_network(n) - if (net != net2): + if net != net2: if net.overlaps(net2): - raise ConfigError(f'Conflicting subnet ranges: "{net}" overlaps "{net2}"!') + raise ConfigError( + f'Conflicting subnet ranges: "{net}" overlaps "{net2}"!' 
+ ) # Prevent 'disable' for shared-network if only one network is configured if (shared_networks - disabled_shared_networks) < 1: - raise ConfigError(f'At least one shared network must be active!') + raise ConfigError('At least one shared network must be active!') if 'high_availability' in dhcp: for key in ['name', 'remote', 'source_address', 'status']: if key not in dhcp['high_availability']: tmp = key.replace('_', '-') - raise ConfigError(f'DHCP high-availability requires "{tmp}" to be specified!') + raise ConfigError( + f'DHCP high-availability requires "{tmp}" to be specified!' + ) if len({'certificate', 'ca_certificate'} & set(dhcp['high_availability'])) == 1: - raise ConfigError(f'DHCP secured high-availability requires both certificate and CA certificate') + raise ConfigError( + 'DHCP secured high-availability requires both certificate and CA certificate' + ) if 'certificate' in dhcp['high_availability']: cert_name = dhcp['high_availability']['certificate'] if cert_name not in dhcp['pki']['certificate']: - raise ConfigError(f'Invalid certificate specified for DHCP high-availability') - - if not dict_search_args(dhcp['pki']['certificate'], cert_name, 'certificate'): - raise ConfigError(f'Invalid certificate specified for DHCP high-availability') - - if not dict_search_args(dhcp['pki']['certificate'], cert_name, 'private', 'key'): - raise ConfigError(f'Missing private key on certificate specified for DHCP high-availability') + raise ConfigError( + 'Invalid certificate specified for DHCP high-availability' + ) + + if not dict_search_args( + dhcp['pki']['certificate'], cert_name, 'certificate' + ): + raise ConfigError( + 'Invalid certificate specified for DHCP high-availability' + ) + + if not dict_search_args( + dhcp['pki']['certificate'], cert_name, 'private', 'key' + ): + raise ConfigError( + 'Missing private key on certificate specified for DHCP high-availability' + ) if 'ca_certificate' in dhcp['high_availability']: ca_cert_name = dhcp['high_availability']['ca_certificate'] if ca_cert_name not in dhcp['pki']['ca']: - raise ConfigError(f'Invalid CA certificate specified for DHCP high-availability') + raise ConfigError( + 'Invalid CA certificate specified for DHCP high-availability' + ) if not dict_search_args(dhcp['pki']['ca'], ca_cert_name, 'certificate'): - raise ConfigError(f'Invalid CA certificate specified for DHCP high-availability') + raise ConfigError( + 'Invalid CA certificate specified for DHCP high-availability' + ) - for address in (dict_search('listen_address', dhcp) or []): + for address in dict_search('listen_address', dhcp) or []: if is_addr_assigned(address, include_vrf=True): listen_ok = True # no need to probe further networks, we have one that is valid continue else: - raise ConfigError(f'listen-address "{address}" not configured on any interface') + raise ConfigError( + f'listen-address "{address}" not configured on any interface' + ) if not listen_ok: - raise ConfigError('None of the configured subnets have an appropriate primary IP address on any\n' - 'broadcast interface configured, nor was there an explicit listen-address\n' - 'configured for serving DHCP relay packets!') + raise ConfigError( + 'None of the configured subnets have an appropriate primary IP address on any\n' + 'broadcast interface configured, nor was there an explicit listen-address\n' + 'configured for serving DHCP relay packets!' 
+ ) if 'listen_address' in dhcp and 'listen_interface' in dhcp: - raise ConfigError(f'Cannot define listen-address and listen-interface at the same time') + raise ConfigError( + 'Cannot define listen-address and listen-interface at the same time' + ) - for interface in (dict_search('listen_interface', dhcp) or []): + for interface in dict_search('listen_interface', dhcp) or []: if not interface_exists(interface): raise ConfigError(f'listen-interface "{interface}" does not exist') + if 'dynamic_dns_update' in dhcp: + ddns = dhcp['dynamic_dns_update'] + if 'tsig_key' in ddns: + invalid_keys = [] + for tsig_key_name, tsig_key_config in ddns['tsig_key'].items(): + if not ('algorithm' in tsig_key_config and 'secret' in tsig_key_config): + invalid_keys.append(tsig_key_name) + if len(invalid_keys) > 0: + raise ConfigError(f'Both algorithm and secret need to be set for TSIG keys: {", ".join(invalid_keys)}') + + if 'forward_domain' in ddns: + verify_ddns_domain_servers('Forward', ddns['forward_domain']) + + if 'reverse_domain' in ddns: + verify_ddns_domain_servers('Reverse', ddns['reverse_domain']) + return None + def generate(dhcp): # bail out early - looks like removal from running config if not dhcp or 'disable' in dhcp: @@ -374,8 +482,12 @@ def generate(dhcp): cert_name = dhcp['high_availability']['certificate'] cert_data = dhcp['pki']['certificate'][cert_name]['certificate'] key_data = dhcp['pki']['certificate'][cert_name]['private']['key'] - write_file(cert_file, wrap_certificate(cert_data), user=user_group, mode=0o600) - write_file(cert_key_file, wrap_private_key(key_data), user=user_group, mode=0o600) + write_file( + cert_file, wrap_certificate(cert_data), user=user_group, mode=0o600 + ) + write_file( + cert_key_file, wrap_private_key(key_data), user=user_group, mode=0o600 + ) dhcp['high_availability']['cert_file'] = cert_file dhcp['high_availability']['cert_key_file'] = cert_key_file @@ -383,19 +495,36 @@ def generate(dhcp): if 'ca_certificate' in dhcp['high_availability']: ca_cert_name = dhcp['high_availability']['ca_certificate'] ca_cert_data = dhcp['pki']['ca'][ca_cert_name]['certificate'] - write_file(ca_cert_file, wrap_certificate(ca_cert_data), user=user_group, mode=0o600) + write_file( + ca_cert_file, + wrap_certificate(ca_cert_data), + user=user_group, + mode=0o600, + ) dhcp['high_availability']['ca_cert_file'] = ca_cert_file - render(systemd_override, 'dhcp-server/10-override.conf.j2', dhcp) - - render(ctrl_config_file, 'dhcp-server/kea-ctrl-agent.conf.j2', dhcp, user=user_group, group=user_group) - render(config_file, 'dhcp-server/kea-dhcp4.conf.j2', dhcp, user=user_group, group=user_group) + render( + config_file, + 'dhcp-server/kea-dhcp4.conf.j2', + dhcp, + user=user_group, + group=user_group, + ) + if 'dynamic_dns_update' in dhcp: + render( + config_file_d2, + 'dhcp-server/kea-dhcp-ddns.conf.j2', + dhcp, + user=user_group, + group=user_group + ) return None + def apply(dhcp): - services = ['kea-ctrl-agent', 'kea-dhcp4-server', 'kea-dhcp-ddns-server'] + services = ['kea-dhcp4-server', 'kea-dhcp-ddns-server'] if not dhcp or 'disable' in dhcp: for service in services: @@ -412,13 +541,11 @@ def apply(dhcp): if service == 'kea-dhcp-ddns-server' and 'dynamic_dns_update' not in dhcp: action = 'stop' - if service == 'kea-ctrl-agent' and 'high_availability' not in dhcp: - action = 'stop' - call(f'systemctl {action} {service}.service') return None + if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/service_dns_forwarding.py 
b/src/conf_mode/service_dns_forwarding.py index e3bdbc9f8..5636d6f83 100755 --- a/src/conf_mode/service_dns_forwarding.py +++ b/src/conf_mode/service_dns_forwarding.py @@ -366,6 +366,13 @@ def apply(dns): hc.add_name_server_tags_recursor(['dhcp-' + interface, 'dhcpv6-' + interface ]) + # add dhcp interfaces + if 'dhcp' in dns: + for interface in dns['dhcp']: + if interface_exists(interface): + hc.add_name_server_tags_recursor(['dhcp-' + interface, + 'dhcpv6-' + interface ]) + # hostsd will generate the forward-zones file # the list and keys() are required as get returns a dict, not list hc.delete_forward_zones(list(hc.get_forward_zones().keys())) diff --git a/src/conf_mode/service_https.py b/src/conf_mode/service_https.py index 9e58b4c72..2123823f4 100755 --- a/src/conf_mode/service_https.py +++ b/src/conf_mode/service_https.py @@ -28,6 +28,7 @@ from vyos.configverify import verify_vrf from vyos.configverify import verify_pki_certificate from vyos.configverify import verify_pki_ca_certificate from vyos.configverify import verify_pki_dh_parameters +from vyos.configdiff import get_config_diff from vyos.defaults import api_config_state from vyos.pki import wrap_certificate from vyos.pki import wrap_private_key @@ -79,6 +80,14 @@ def get_config(config=None): # merge CLI and default dictionary https = config_dict_merge(default_values, https) + + # some settings affecting nginx will require a restart: + # for example, a reload will not suffice when binding the listen address + # after nginx has started and dropped privileges; add flag here + diff = get_config_diff(conf) + children_changed = diff.node_changed_children(base) + https['nginx_restart_required'] = bool(set(children_changed) != set(['api'])) + return https def verify(https): @@ -208,7 +217,10 @@ def apply(https): elif is_systemd_service_active(http_api_service_name): call(f'systemctl stop {http_api_service_name}') - call(f'systemctl reload-or-restart {https_service_name}') + if https['nginx_restart_required']: + call(f'systemctl restart {https_service_name}') + else: + call(f'systemctl reload-or-restart {https_service_name}') if __name__ == '__main__': try: diff --git a/src/conf_mode/service_ids_ddos-protection.py b/src/conf_mode/service_ids_ddos-protection.py deleted file mode 100755 index 276a71fcb..000000000 --- a/src/conf_mode/service_ids_ddos-protection.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2018-2023 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
- -import os - -from sys import exit - -from vyos.config import Config -from vyos.template import render -from vyos.utils.process import call -from vyos import ConfigError -from vyos import airbag -airbag.enable() - -config_file = r'/run/fastnetmon/fastnetmon.conf' -networks_list = r'/run/fastnetmon/networks_list' -excluded_networks_list = r'/run/fastnetmon/excluded_networks_list' -attack_dir = '/var/log/fastnetmon_attacks' - -def get_config(config=None): - if config: - conf = config - else: - conf = Config() - base = ['service', 'ids', 'ddos-protection'] - if not conf.exists(base): - return None - - fastnetmon = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - with_recursive_defaults=True) - - return fastnetmon - -def verify(fastnetmon): - if not fastnetmon: - return None - - if 'mode' not in fastnetmon: - raise ConfigError('Specify operating mode!') - - if fastnetmon.get('mode') == 'mirror' and 'listen_interface' not in fastnetmon: - raise ConfigError("Incorrect settings for 'mode mirror': must specify interface(s) for traffic mirroring") - - if fastnetmon.get('mode') == 'sflow' and 'listen_address' not in fastnetmon.get('sflow', {}): - raise ConfigError("Incorrect settings for 'mode sflow': must specify sFlow 'listen-address'") - - if 'alert_script' in fastnetmon: - if os.path.isfile(fastnetmon['alert_script']): - # Check script permissions - if not os.access(fastnetmon['alert_script'], os.X_OK): - raise ConfigError('Script "{alert_script}" is not executable!'.format(fastnetmon['alert_script'])) - else: - raise ConfigError('File "{alert_script}" does not exists!'.format(fastnetmon)) - -def generate(fastnetmon): - if not fastnetmon: - for file in [config_file, networks_list]: - if os.path.isfile(file): - os.unlink(file) - - return None - - # Create dir for log attack details - if not os.path.exists(attack_dir): - os.mkdir(attack_dir) - - render(config_file, 'ids/fastnetmon.j2', fastnetmon) - render(networks_list, 'ids/fastnetmon_networks_list.j2', fastnetmon) - render(excluded_networks_list, 'ids/fastnetmon_excluded_networks_list.j2', fastnetmon) - return None - -def apply(fastnetmon): - systemd_service = 'fastnetmon.service' - if not fastnetmon: - # Stop fastnetmon service if removed - call(f'systemctl stop {systemd_service}') - else: - call(f'systemctl reload-or-restart {systemd_service}') - - return None - -if __name__ == '__main__': - try: - c = get_config() - verify(c) - generate(c) - apply(c) - except ConfigError as e: - print(e) - exit(1) diff --git a/src/conf_mode/service_ipoe-server.py b/src/conf_mode/service_ipoe-server.py index c7e3ef033..a14d4b5b6 100755 --- a/src/conf_mode/service_ipoe-server.py +++ b/src/conf_mode/service_ipoe-server.py @@ -31,6 +31,7 @@ from vyos.accel_ppp_util import verify_accel_ppp_ip_pool from vyos.accel_ppp_util import verify_accel_ppp_authentication from vyos import ConfigError from vyos import airbag + airbag.enable() @@ -52,7 +53,9 @@ def get_config(config=None): if dict_search('client_ip_pool', ipoe): # Multiple named pools require ordered values T5099 - ipoe['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', ipoe)) + ipoe['ordered_named_pools'] = get_pools_in_order( + dict_search('client_ip_pool', ipoe) + ) ipoe['server_type'] = 'ipoe' return ipoe @@ -68,11 +71,23 @@ def verify(ipoe): for interface, iface_config in ipoe['interface'].items(): verify_interface_exists(ipoe, interface, warning_only=True) if 'client_subnet' in iface_config and 'vlan' in iface_config: - raise ConfigError('Option 
"client-subnet" and "vlan" are mutually exclusive, ' - 'use "client-ip-pool" instead!') - if 'vlan_mon' in iface_config and not 'vlan' in iface_config: + raise ConfigError( + 'Options "client-subnet" and "vlan" are mutually exclusive, ' + 'use "client-ip-pool" instead!' + ) + if 'vlan_mon' in iface_config and 'vlan' not in iface_config: raise ConfigError('Option "vlan-mon" requires "vlan" to be set!') + if 'lua_username' in iface_config: + if 'lua_file' not in ipoe: + raise ConfigError( + 'Option "lua-username" requires "lua-file" to be set!' + ) + if dict_search('authentication.mode', ipoe) != 'radius': + raise ConfigError( + 'Can configure username with Lua script only for RADIUS authentication' + ) + verify_accel_ppp_authentication(ipoe, local_users=False) verify_accel_ppp_ip_pool(ipoe) verify_accel_ppp_name_servers(ipoe) @@ -88,14 +103,15 @@ def generate(ipoe): render(ipoe_conf, 'accel-ppp/ipoe.config.j2', ipoe) if dict_search('authentication.mode', ipoe) == 'local': - render(ipoe_chap_secrets, 'accel-ppp/chap-secrets.ipoe.j2', - ipoe, permission=0o640) + render( + ipoe_chap_secrets, 'accel-ppp/chap-secrets.ipoe.j2', ipoe, permission=0o640 + ) return None def apply(ipoe): systemd_service = 'accel-ppp@ipoe.service' - if ipoe == None: + if ipoe is None: call(f'systemctl stop {systemd_service}') for file in [ipoe_conf, ipoe_chap_secrets]: if os.path.exists(file): @@ -103,7 +119,10 @@ def apply(ipoe): return None - call(f'systemctl reload-or-restart {systemd_service}') + # Accel-pppd does not do soft-reload correctly. + # Most of the changes require restarting the service + call(f'systemctl restart {systemd_service}') + if __name__ == '__main__': try: diff --git a/src/conf_mode/service_monitoring_network_event.py b/src/conf_mode/service_monitoring_network_event.py new file mode 100644 index 000000000..104e6ce23 --- /dev/null +++ b/src/conf_mode/service_monitoring_network_event.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os +import json + +from sys import exit + +from vyos.config import Config +from vyos.utils.file import write_file +from vyos.utils.process import call +from vyos import ConfigError +from vyos import airbag +airbag.enable() + +vyos_network_event_logger_config = r'/run/vyos-network-event-logger.conf' + + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + base = ['service', 'monitoring', 'network-event'] + if not conf.exists(base): + return None + + monitoring = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. 
+ monitoring = conf.merge_defaults(monitoring, recursive=True) + + return monitoring + + +def verify(monitoring): + if not monitoring: + return None + + return None + + +def generate(monitoring): + if not monitoring: + # Delete config + if os.path.exists(vyos_network_event_logger_config): + os.unlink(vyos_network_event_logger_config) + + return None + + # Create config + log_conf_json = json.dumps(monitoring, indent=4) + write_file(vyos_network_event_logger_config, log_conf_json) + + return None + + +def apply(monitoring): + # Reload systemd manager configuration + systemd_service = 'vyos-network-event-logger.service' + + if not monitoring: + call(f'systemctl stop {systemd_service}') + return + + call(f'systemctl restart {systemd_service}') + + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/service_monitoring_prometheus.py b/src/conf_mode/service_monitoring_prometheus.py new file mode 100755 index 000000000..9a07d8593 --- /dev/null +++ b/src/conf_mode/service_monitoring_prometheus.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
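Both service_https.py above and the exporters added below follow the same reload-versus-restart decision: get_config() records a restart flag when a node the daemon cannot pick up on reload has changed (the listen address for nginx, the VRF or blackbox config file here), and apply() turns that flag into a full restart. A schematic of just that decision, with hypothetical names (call() is the vyos process helper used throughout these scripts):

    # Schematic only: mirrors the *_restart_required handling in the hunks below.
    def systemd_action(config: dict, restart_flag: str) -> str:
        # A changed VRF means the daemon must re-bind its listener, so a plain
        # reload is not sufficient.
        return 'restart' if restart_flag in config else 'reload-or-restart'

    # e.g. systemd_action({'node_exporter_restart_required': {}}, 'node_exporter_restart_required')
    # returns 'restart'; without the flag it returns 'reload-or-restart'.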
+ +import os + +from sys import exit + +from vyos.config import Config +from vyos.configdict import is_node_changed +from vyos.configverify import verify_vrf +from vyos.template import render +from vyos.utils.process import call +from vyos import ConfigError +from vyos import airbag + +airbag.enable() + +node_exporter_service_file = '/etc/systemd/system/node_exporter.service' +node_exporter_systemd_service = 'node_exporter.service' +node_exporter_collector_path = '/run/node_exporter/collector' + +frr_exporter_service_file = '/etc/systemd/system/frr_exporter.service' +frr_exporter_systemd_service = 'frr_exporter.service' + +blackbox_exporter_service_file = '/etc/systemd/system/blackbox_exporter.service' +blackbox_exporter_systemd_service = 'blackbox_exporter.service' + + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + base = ['service', 'monitoring', 'prometheus'] + if not conf.exists(base): + return None + + monitoring = conf.get_config_dict( + base, key_mangling=('-', '_'), get_first_key=True, with_recursive_defaults=True + ) + + tmp = is_node_changed(conf, base + ['node-exporter', 'vrf']) + if tmp: + monitoring.update({'node_exporter_restart_required': {}}) + + tmp = is_node_changed(conf, base + ['frr-exporter', 'vrf']) + if tmp: + monitoring.update({'frr_exporter_restart_required': {}}) + + tmp = False + for node in ['vrf', 'config-file']: + tmp = tmp or is_node_changed(conf, base + ['blackbox-exporter', node]) + if tmp: + monitoring.update({'blackbox_exporter_restart_required': {}}) + + return monitoring + + +def verify(monitoring): + if not monitoring: + return None + + if 'node_exporter' in monitoring: + verify_vrf(monitoring['node_exporter']) + + if 'frr_exporter' in monitoring: + verify_vrf(monitoring['frr_exporter']) + + if 'blackbox_exporter' in monitoring: + verify_vrf(monitoring['blackbox_exporter']) + + if ( + 'modules' in monitoring['blackbox_exporter'] + and 'dns' in monitoring['blackbox_exporter']['modules'] + and 'name' in monitoring['blackbox_exporter']['modules']['dns'] + ): + for mod_name, mod_config in monitoring['blackbox_exporter']['modules'][ + 'dns' + ]['name'].items(): + if 'query_name' not in mod_config: + raise ConfigError( + f'query name not specified in dns module {mod_name}' + ) + + return None + + +def generate(monitoring): + if not monitoring or 'node_exporter' not in monitoring: + # Delete systemd files + if os.path.isfile(node_exporter_service_file): + os.unlink(node_exporter_service_file) + + if not monitoring or 'frr_exporter' not in monitoring: + # Delete systemd files + if os.path.isfile(frr_exporter_service_file): + os.unlink(frr_exporter_service_file) + + if not monitoring or 'blackbox_exporter' not in monitoring: + # Delete systemd files + if os.path.isfile(blackbox_exporter_service_file): + os.unlink(blackbox_exporter_service_file) + + if not monitoring: + return None + + if 'node_exporter' in monitoring: + # Render node_exporter node_exporter_service_file + render( + node_exporter_service_file, + 'prometheus/node_exporter.service.j2', + monitoring['node_exporter'], + ) + if ( + 'collectors' in monitoring['node_exporter'] + and 'textfile' in monitoring['node_exporter']['collectors'] + ): + # Create textcollector folder + if not os.path.isdir(node_exporter_collector_path): + os.makedirs(node_exporter_collector_path) + + if 'frr_exporter' in monitoring: + # Render frr_exporter service_file + render( + frr_exporter_service_file, + 'prometheus/frr_exporter.service.j2', + monitoring['frr_exporter'], + ) + + if 
'blackbox_exporter' in monitoring: + # Render blackbox_exporter service_file + render( + blackbox_exporter_service_file, + 'prometheus/blackbox_exporter.service.j2', + monitoring['blackbox_exporter'], + ) + # Render blackbox_exporter config file + render( + '/run/blackbox_exporter/config.yml', + 'prometheus/blackbox_exporter.yml.j2', + monitoring['blackbox_exporter'], + ) + + return None + + +def apply(monitoring): + # Reload systemd manager configuration + call('systemctl daemon-reload') + if not monitoring or 'node_exporter' not in monitoring: + call(f'systemctl stop {node_exporter_systemd_service}') + if not monitoring or 'frr_exporter' not in monitoring: + call(f'systemctl stop {frr_exporter_systemd_service}') + if not monitoring or 'blackbox_exporter' not in monitoring: + call(f'systemctl stop {blackbox_exporter_systemd_service}') + + if not monitoring: + return + + if 'node_exporter' in monitoring: + # we need to restart the service if e.g. the VRF name changed + systemd_action = 'reload-or-restart' + if 'node_exporter_restart_required' in monitoring: + systemd_action = 'restart' + + call(f'systemctl {systemd_action} {node_exporter_systemd_service}') + + if 'frr_exporter' in monitoring: + # we need to restart the service if e.g. the VRF name changed + systemd_action = 'reload-or-restart' + if 'frr_exporter_restart_required' in monitoring: + systemd_action = 'restart' + + call(f'systemctl {systemd_action} {frr_exporter_systemd_service}') + + if 'blackbox_exporter' in monitoring: + # we need to restart the service if e.g. the VRF name changed + systemd_action = 'reload-or-restart' + if 'blackbox_exporter_restart_required' in monitoring: + systemd_action = 'restart' + + call(f'systemctl {systemd_action} {blackbox_exporter_systemd_service}') + + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/service_monitoring_zabbix-agent.py b/src/conf_mode/service_monitoring_zabbix-agent.py index 98d8a32ca..f17146a8d 100755 --- a/src/conf_mode/service_monitoring_zabbix-agent.py +++ b/src/conf_mode/service_monitoring_zabbix-agent.py @@ -18,6 +18,8 @@ import os from vyos.config import Config from vyos.template import render +from vyos.utils.dict import dict_search +from vyos.utils.file import write_file from vyos.utils.process import call from vyos import ConfigError from vyos import airbag @@ -26,6 +28,7 @@ airbag.enable() service_name = 'zabbix-agent2' service_conf = f'/run/zabbix/{service_name}.conf' +service_psk_file = f'/run/zabbix/{service_name}.psk' systemd_override = r'/run/systemd/system/zabbix-agent2.service.d/10-override.conf' @@ -49,6 +52,8 @@ def get_config(config=None): if 'directory' in config and config['directory'].endswith('/'): config['directory'] = config['directory'][:-1] + config['service_psk_file'] = service_psk_file + return config @@ -60,18 +65,34 @@ def verify(config): if 'server' not in config: raise ConfigError('Server is required!') + if 'authentication' in config and dict_search("authentication.mode", + config) == 'pre_shared_secret': + if 'id' not in config['authentication']['psk']: + raise ConfigError( + 'PSK identity is required for pre-shared-secret authentication mode') + + if 'secret' not in config['authentication']['psk']: + raise ConfigError( + 'PSK secret is required for pre-shared-secret authentication mode') + def generate(config): # bail out early - looks like removal from running config if config is None: # Remove old config and return - 
config_files = [service_conf, systemd_override] + config_files = [service_conf, systemd_override, service_psk_file] for file in config_files: if os.path.isfile(file): os.unlink(file) return None + if not dict_search("authentication.psk.secret", config): + if os.path.isfile(service_psk_file): + os.unlink(service_psk_file) + else: + write_file(service_psk_file, config["authentication"]["psk"]["secret"]) + # Write configuration file render(service_conf, 'zabbix-agent/zabbix-agent.conf.j2', config) render(systemd_override, 'zabbix-agent/10-override.conf.j2', config) diff --git a/src/conf_mode/service_ntp.py b/src/conf_mode/service_ntp.py index 83880fd72..32563aa0e 100755 --- a/src/conf_mode/service_ntp.py +++ b/src/conf_mode/service_ntp.py @@ -17,6 +17,7 @@ import os from vyos.config import Config +from vyos.config import config_dict_merge from vyos.configdict import is_node_changed from vyos.configverify import verify_vrf from vyos.configverify import verify_interface_exists @@ -42,13 +43,21 @@ def get_config(config=None): if not conf.exists(base): return None - ntp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, with_defaults=True) + ntp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) ntp['config_file'] = config_file ntp['user'] = user_group tmp = is_node_changed(conf, base + ['vrf']) if tmp: ntp.update({'restart_required': {}}) + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + default_values = conf.get_config_defaults(**ntp.kwargs, recursive=True) + # Only defined PTP default port, if PTP feature is in use + if 'ptp' not in ntp: + del default_values['ptp'] + + ntp = config_dict_merge(default_values, ntp) return ntp def verify(ntp): @@ -87,6 +96,15 @@ def verify(ntp): if ipv6_addresses > 1: raise ConfigError(f'NTP Only admits one ipv6 value for listen-address parameter ') + if 'server' in ntp: + for host, server in ntp['server'].items(): + if 'ptp' in server: + if 'ptp' not in ntp: + raise ConfigError('PTP must be enabled for the NTP service '\ + f'before it can be used for server "{host}"') + else: + break + return None def generate(ntp): diff --git a/src/conf_mode/service_snmp.py b/src/conf_mode/service_snmp.py index c9c0ed9a0..c64c59af7 100755 --- a/src/conf_mode/service_snmp.py +++ b/src/conf_mode/service_snmp.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2018-2024 VyOS maintainers and contributors +# Copyright (C) 2018-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -22,6 +22,7 @@ from vyos.base import Warning from vyos.config import Config from vyos.configdict import dict_merge from vyos.configverify import verify_vrf +from vyos.defaults import systemd_services from vyos.snmpv3_hashgen import plaintext_to_md5 from vyos.snmpv3_hashgen import plaintext_to_sha1 from vyos.snmpv3_hashgen import random @@ -43,7 +44,7 @@ config_file_access = r'/usr/share/snmp/snmpd.conf' config_file_user = r'/var/lib/snmp/snmpd.conf' default_script_dir = r'/config/user-data/' systemd_override = r'/run/systemd/system/snmpd.service.d/override.conf' -systemd_service = 'snmpd.service' +systemd_service = systemd_services['snmpd'] def get_config(config=None): if config: @@ -146,6 +147,9 @@ def verify(snmp): return None if 'user' in snmp['v3']: + if 'engineid' not in snmp['v3']: + raise ConfigError(f'EngineID must 
be configured for SNMPv3!') + for user, user_config in snmp['v3']['user'].items(): if 'group' not in user_config: raise ConfigError(f'Group membership required for user "{user}"!') @@ -260,15 +264,6 @@ def apply(snmp): # start SNMP daemon call(f'systemctl reload-or-restart {systemd_service}') - - # Enable AgentX in FRR - # This should be done for each daemon individually because common command - # works only if all the daemons started with SNMP support - # Following daemons from FRR 9.0/stable have SNMP module compiled in VyOS - frr_daemons_list = ['zebra', 'bgpd', 'ospf6d', 'ospfd', 'ripd', 'isisd', 'ldpd'] - for frr_daemon in frr_daemons_list: - call(f'vtysh -c "configure terminal" -d {frr_daemon} -c "agentx" >/dev/null') - return None if __name__ == '__main__': diff --git a/src/conf_mode/service_ssh.py b/src/conf_mode/service_ssh.py index 9abdd33dc..759f87bb2 100755 --- a/src/conf_mode/service_ssh.py +++ b/src/conf_mode/service_ssh.py @@ -23,10 +23,16 @@ from syslog import LOG_INFO from vyos.config import Config from vyos.configdict import is_node_changed from vyos.configverify import verify_vrf +from vyos.configverify import verify_pki_ca_certificate from vyos.utils.process import call from vyos.template import render from vyos import ConfigError from vyos import airbag +from vyos.pki import find_chain +from vyos.pki import encode_certificate +from vyos.pki import load_certificate +from vyos.utils.file import write_file + airbag.enable() config_file = r'/run/sshd/sshd_config' @@ -38,6 +44,9 @@ key_rsa = '/etc/ssh/ssh_host_rsa_key' key_dsa = '/etc/ssh/ssh_host_dsa_key' key_ed25519 = '/etc/ssh/ssh_host_ed25519_key' +trusted_user_ca_key = '/etc/ssh/trusted_user_ca_key' + + def get_config(config=None): if config: conf = config @@ -47,10 +56,13 @@ def get_config(config=None): if not conf.exists(base): return None - ssh = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + ssh = conf.get_config_dict( + base, key_mangling=('-', '_'), get_first_key=True, with_pki=True + ) tmp = is_node_changed(conf, base + ['vrf']) - if tmp: ssh.update({'restart_required': {}}) + if tmp: + ssh.update({'restart_required': {}}) # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. 
@@ -62,20 +74,32 @@ def get_config(config=None): # Ignore default XML values if config doesn't exists # Delete key from dict if not conf.exists(base + ['dynamic-protection']): - del ssh['dynamic_protection'] + del ssh['dynamic_protection'] return ssh + def verify(ssh): if not ssh: return None if 'rekey' in ssh and 'data' not in ssh['rekey']: - raise ConfigError(f'Rekey data is required!') + raise ConfigError('Rekey data is required!') + + if 'trusted_user_ca_key' in ssh: + if 'ca_certificate' not in ssh['trusted_user_ca_key']: + raise ConfigError('CA certificate is required for TrustedUserCAKey') + + ca_key_name = ssh['trusted_user_ca_key']['ca_certificate'] + verify_pki_ca_certificate(ssh, ca_key_name) + pki_ca_cert = ssh['pki']['ca'][ca_key_name] + if 'certificate' not in pki_ca_cert or not pki_ca_cert['certificate']: + raise ConfigError(f"CA certificate '{ca_key_name}' is not valid or missing") verify_vrf(ssh) return None + def generate(ssh): if not ssh: if os.path.isfile(config_file): @@ -95,6 +119,24 @@ def generate(ssh): syslog(LOG_INFO, 'SSH ed25519 host key not found, generating new key!') call(f'ssh-keygen -q -N "" -t ed25519 -f {key_ed25519}') + if 'trusted_user_ca_key' in ssh: + ca_key_name = ssh['trusted_user_ca_key']['ca_certificate'] + pki_ca_cert = ssh['pki']['ca'][ca_key_name] + + loaded_ca_cert = load_certificate(pki_ca_cert['certificate']) + loaded_ca_certs = { + load_certificate(c['certificate']) + for c in ssh['pki']['ca'].values() + if 'certificate' in c + } + + ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs) + write_file( + trusted_user_ca_key, '\n'.join(encode_certificate(c) for c in ca_full_chain) + ) + elif os.path.exists(trusted_user_ca_key): + os.unlink(trusted_user_ca_key) + render(config_file, 'ssh/sshd_config.j2', ssh) if 'dynamic_protection' in ssh: @@ -103,12 +145,12 @@ def generate(ssh): return None + def apply(ssh): - systemd_service_ssh = 'ssh.service' systemd_service_sshguard = 'sshguard.service' if not ssh: # SSH access is removed in the commit - call(f'systemctl stop ssh@*.service') + call('systemctl stop ssh@*.service') call(f'systemctl stop {systemd_service_sshguard}') return None @@ -122,13 +164,14 @@ def apply(ssh): if 'restart_required' in ssh: # this is only true if something for the VRFs changed, thus we # stop all VRF services and only restart then new ones - call(f'systemctl stop ssh@*.service') + call('systemctl stop ssh@*.service') systemd_action = 'restart' for vrf in ssh['vrf']: call(f'systemctl {systemd_action} ssh@{vrf}.service') return None + if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/system_config-management.py b/src/conf_mode/system_config-management.py index c681a8405..a3ce66512 100755 --- a/src/conf_mode/system_config-management.py +++ b/src/conf_mode/system_config-management.py @@ -22,6 +22,7 @@ from vyos.config import Config from vyos.config_mgmt import ConfigMgmt from vyos.config_mgmt import commit_post_hook_dir, commit_hooks + def get_config(config=None): if config: conf = config @@ -36,22 +37,32 @@ def get_config(config=None): return mgmt -def verify(_mgmt): + +def verify(mgmt): + if mgmt is None: + return + + d = mgmt.config_dict + confirm = d.get('commit_confirm', {}) + if confirm.get('action', '') == 'reload' and 'commit_revisions' not in d: + raise ConfigError('commit-confirm reload requires non-zero commit-revisions') + return + def generate(mgmt): if mgmt is None: return mgmt.initialize_revision() + def apply(mgmt): if mgmt is None: return locations = mgmt.locations - 
archive_target = os.path.join(commit_post_hook_dir, - commit_hooks['commit_archive']) + archive_target = os.path.join(commit_post_hook_dir, commit_hooks['commit_archive']) if locations: try: os.symlink('/usr/bin/config-mgmt', archive_target) @@ -68,8 +79,9 @@ def apply(mgmt): raise ConfigError from exc revisions = mgmt.max_revisions - revision_target = os.path.join(commit_post_hook_dir, - commit_hooks['commit_revision']) + revision_target = os.path.join( + commit_post_hook_dir, commit_hooks['commit_revision'] + ) if revisions > 0: try: os.symlink('/usr/bin/config-mgmt', revision_target) @@ -85,6 +97,7 @@ def apply(mgmt): except OSError as exc: raise ConfigError from exc + if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/system_conntrack.py b/src/conf_mode/system_conntrack.py index 2529445bf..f25ed8d10 100755 --- a/src/conf_mode/system_conntrack.py +++ b/src/conf_mode/system_conntrack.py @@ -258,6 +258,8 @@ def apply(conntrack): if 'log' in conntrack: call(f'systemctl restart vyos-conntrack-logger.service') + else: + call(f'systemctl stop vyos-conntrack-logger.service') return None diff --git a/src/conf_mode/system_flow-accounting.py b/src/conf_mode/system_flow-accounting.py index a12ee363d..925c4a562 100755 --- a/src/conf_mode/system_flow-accounting.py +++ b/src/conf_mode/system_flow-accounting.py @@ -18,7 +18,6 @@ import os import re from sys import exit -from ipaddress import ip_address from vyos.config import Config from vyos.config import config_dict_merge @@ -159,9 +158,9 @@ def get_config(config=None): # delete individual flow type defaults - should only be added if user # sets this feature - for flow_type in ['sflow', 'netflow']: - if flow_type not in flow_accounting and flow_type in default_values: - del default_values[flow_type] + flow_type = 'netflow' + if flow_type not in flow_accounting and flow_type in default_values: + del default_values[flow_type] flow_accounting = config_dict_merge(default_values, flow_accounting) @@ -171,9 +170,9 @@ def verify(flow_config): if not flow_config: return None - # check if at least one collector is enabled - if 'sflow' not in flow_config and 'netflow' not in flow_config and 'disable_imt' in flow_config: - raise ConfigError('You need to configure at least sFlow or NetFlow, ' \ + # check if collector is enabled + if 'netflow' not in flow_config and 'disable_imt' in flow_config: + raise ConfigError('You need to configure NetFlow, ' \ 'or not set "disable-imt" for flow-accounting!') # Check if at least one interface is configured @@ -185,45 +184,7 @@ def verify(flow_config): for interface in flow_config['interface']: verify_interface_exists(flow_config, interface, warning_only=True) - # check sFlow configuration - if 'sflow' in flow_config: - # check if at least one sFlow collector is configured - if 'server' not in flow_config['sflow']: - raise ConfigError('You need to configure at least one sFlow server!') - - # check that all sFlow collectors use the same IP protocol version - sflow_collector_ipver = None - for server in flow_config['sflow']['server']: - if sflow_collector_ipver: - if sflow_collector_ipver != ip_address(server).version: - raise ConfigError("All sFlow servers must use the same IP protocol") - else: - sflow_collector_ipver = ip_address(server).version - - # check if vrf is defined for Sflow - verify_vrf(flow_config) - sflow_vrf = None - if 'vrf' in flow_config: - sflow_vrf = flow_config['vrf'] - - # check agent-id for sFlow: we should avoid mixing IPv4 agent-id with IPv6 collectors and vice-versa - for 
server in flow_config['sflow']['server']: - if 'agent_address' in flow_config['sflow']: - if ip_address(server).version != ip_address(flow_config['sflow']['agent_address']).version: - raise ConfigError('IPv4 and IPv6 addresses can not be mixed in "sflow agent-address" and "sflow '\ - 'server". You need to set the same IP version for both "agent-address" and '\ - 'all sFlow servers') - - if 'agent_address' in flow_config['sflow']: - tmp = flow_config['sflow']['agent_address'] - if not is_addr_assigned(tmp, sflow_vrf): - raise ConfigError(f'Configured "sflow agent-address {tmp}" does not exist in the system!') - - # Check if configured sflow source-address exist in the system - if 'source_address' in flow_config['sflow']: - if not is_addr_assigned(flow_config['sflow']['source_address'], sflow_vrf): - tmp = flow_config['sflow']['source_address'] - raise ConfigError(f'Configured "sflow source-address {tmp}" does not exist on the system!') + verify_vrf(flow_config) # check NetFlow configuration if 'netflow' in flow_config: diff --git a/src/conf_mode/system_host-name.py b/src/conf_mode/system_host-name.py index 3f245f166..de4accda2 100755 --- a/src/conf_mode/system_host-name.py +++ b/src/conf_mode/system_host-name.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2018-2024 VyOS maintainers and contributors +# Copyright (C) 2018-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -23,6 +23,7 @@ import vyos.hostsd_client from vyos.base import Warning from vyos.config import Config from vyos.configdict import leaf_node_changed +from vyos.defaults import systemd_services from vyos.ifconfig import Section from vyos.template import is_ip from vyos.utils.process import cmd @@ -174,11 +175,13 @@ def apply(config): # Restart services that use the hostname if hostname_new != hostname_old: - call("systemctl restart rsyslog.service") + tmp = systemd_services['syslog'] + call(f'systemctl restart {tmp}') # If SNMP is running, restart it too if process_named_running('snmpd') and config['snmpd_restart_reqired']: - call('systemctl restart snmpd.service') + tmp = systemd_services['snmpd'] + call(f'systemctl restart {tmp}') return None diff --git a/src/conf_mode/system_ip.py b/src/conf_mode/system_ip.py index c8a91fd2f..7f3796168 100755 --- a/src/conf_mode/system_ip.py +++ b/src/conf_mode/system_ip.py @@ -17,17 +17,17 @@ from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configdep import set_dependents +from vyos.configdep import call_dependents +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_route_map -from vyos.template import render_to_string +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.utils.dict import dict_search -from vyos.utils.file import write_file from vyos.utils.process import is_systemd_service_active +from vyos.utils.process import is_systemd_service_running from vyos.utils.system import sysctl_write -from vyos.configdep import set_dependents -from vyos.configdep import call_dependents from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -36,42 +36,36 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['system', 'ip'] - - opt = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - with_recursive_defaults=True) - - # When 
working with FRR we need to know the corresponding address-family - opt['afi'] = 'ip' - - # We also need the route-map information from the config - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], - get_first_key=True)}} - # Merge policy dict into "regular" config dict - opt = dict_merge(tmp, opt) # If IPv4 ARP table size is set here and also manually in sysctl, the more # fine grained value from sysctl must win set_dependents('sysctl', conf) + return get_frrender_dict(conf) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ip'): + return None - return opt + opt = config_dict['ip'] + opt['policy'] = config_dict['policy'] -def verify(opt): if 'protocol' in opt: for protocol, protocol_options in opt['protocol'].items(): if 'route_map' in protocol_options: verify_route_map(protocol_options['route_map'], opt) return -def generate(opt): - opt['frr_zebra_config'] = render_to_string('frr/zebra.route-map.frr.j2', opt) - return +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) + return None + +def apply(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ip'): + + return None + opt = config_dict['ip'] -def apply(opt): # Apply ARP threshold values # table_size has a default value - thus the key always exists size = int(dict_search('arp.table_size', opt)) @@ -82,11 +76,6 @@ def apply(opt): # Minimum number of stored records is indicated which is not cleared sysctl_write('net.ipv4.neigh.default.gc_thresh1', size // 8) - # enable/disable IPv4 forwarding - tmp = dict_search('disable_forwarding', opt) - value = '0' if (tmp != None) else '1' - write_file('/proc/sys/net/ipv4/conf/all/forwarding', value) - # configure multipath tmp = dict_search('multipath.ignore_unreachable_nexthops', opt) value = '1' if (tmp != None) else '0' @@ -121,19 +110,11 @@ def apply(opt): # running when this script is called first. 
Skip this part and wait for initial # commit of the configuration to trigger this statement if is_systemd_service_active('frr.service'): - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'no ip nht resolve-via-default') - frr_cfg.modify_section(r'ip protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - if 'frr_zebra_config' in opt: - frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() call_dependents() + return None if __name__ == '__main__': try: diff --git a/src/conf_mode/system_ipv6.py b/src/conf_mode/system_ipv6.py index a2442d009..309869b2f 100755 --- a/src/conf_mode/system_ipv6.py +++ b/src/conf_mode/system_ipv6.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019-2023 VyOS maintainers and contributors +# Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -18,17 +18,18 @@ import os from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configdep import set_dependents +from vyos.configdep import call_dependents +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_route_map -from vyos.template import render_to_string +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.utils.dict import dict_search from vyos.utils.file import write_file from vyos.utils.process import is_systemd_service_active +from vyos.utils.process import is_systemd_service_running from vyos.utils.system import sysctl_write -from vyos.configdep import set_dependents -from vyos.configdep import call_dependents from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -37,42 +38,35 @@ def get_config(config=None): conf = config else: conf = Config() - base = ['system', 'ipv6'] - - opt = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - with_recursive_defaults=True) - - # When working with FRR we need to know the corresponding address-family - opt['afi'] = 'ipv6' - - # We also need the route-map information from the config - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], - get_first_key=True)}} - # Merge policy dict into "regular" config dict - opt = dict_merge(tmp, opt) # If IPv6 neighbor table size is set here and also manually in sysctl, the more # fine grained value from sysctl must win set_dependents('sysctl', conf) + return get_frrender_dict(conf) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ipv6'): + return None - return opt + opt = config_dict['ipv6'] + opt['policy'] = config_dict['policy'] -def verify(opt): if 'protocol' in opt: for protocol, protocol_options in opt['protocol'].items(): if 'route_map' in protocol_options: verify_route_map(protocol_options['route_map'], opt) return -def generate(opt): - opt['frr_zebra_config'] = render_to_string('frr/zebra.route-map.frr.j2', opt) - return +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) + return None + +def apply(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ipv6'): + return None + opt = config_dict['ipv6'] -def apply(opt): # configure multipath tmp = dict_search('multipath.layer4_hashing', opt) value = '1' if (tmp != None) else '0' @@ -88,11 +82,6 @@ def apply(opt): # Minimum number of stored records is indicated which is not cleared sysctl_write('net.ipv6.neigh.default.gc_thresh1', size // 8) - # enable/disable IPv6 forwarding - tmp = dict_search('disable_forwarding', opt) - value = '0' if (tmp != None) else '1' - write_file('/proc/sys/net/ipv6/conf/all/forwarding', value) - # configure IPv6 strict-dad tmp = dict_search('strict_dad', opt) value = '2' if (tmp != None) else '1' @@ -105,19 +94,11 @@ def apply(opt): # running when this script is called first. 
Skip this part and wait for initial # commit of the configuration to trigger this statement if is_systemd_service_active('frr.service'): - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'no ipv6 nht resolve-via-default') - frr_cfg.modify_section(r'ipv6 protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - if 'frr_zebra_config' in opt: - frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() call_dependents() + return None if __name__ == '__main__': try: diff --git a/src/conf_mode/system_login.py b/src/conf_mode/system_login.py index 439fa645b..4febb6494 100755 --- a/src/conf_mode/system_login.py +++ b/src/conf_mode/system_login.py @@ -24,10 +24,13 @@ from pwd import getpwuid from sys import exit from time import sleep +from vyos.base import Warning from vyos.config import Config from vyos.configverify import verify_vrf from vyos.template import render from vyos.template import is_ipv4 +from vyos.utils.auth import EPasswdStrength +from vyos.utils.auth import evaluate_strength from vyos.utils.auth import get_current_user from vyos.utils.configfs import delete_cli_node from vyos.utils.configfs import add_cli_node @@ -58,20 +61,21 @@ MAX_RADIUS_TIMEOUT: int = 50 MAX_RADIUS_COUNT: int = 8 # Maximum number of supported TACACS servers MAX_TACACS_COUNT: int = 8 - +# Minimum USER id for TACACS users +MIN_TACACS_UID = 900 # List of local user accounts that must be preserved SYSTEM_USER_SKIP_LIST: list = ['radius_user', 'radius_priv_user', 'tacacs0', 'tacacs1', 'tacacs2', 'tacacs3', 'tacacs4', 'tacacs5', 'tacacs6', 'tacacs7', 'tacacs8', 'tacacs9', 'tacacs10',' tacacs11', 'tacacs12', 'tacacs13', 'tacacs14', 'tacacs15'] -def get_local_users(): +def get_local_users(min_uid=MIN_USER_UID, max_uid=MAX_USER_UID): """Return list of dynamically allocated users (see Debian Policy Manual)""" local_users = [] for s_user in getpwall(): - if getpwnam(s_user.pw_name).pw_uid < MIN_USER_UID: + if getpwnam(s_user.pw_name).pw_uid < min_uid: continue - if getpwnam(s_user.pw_name).pw_uid > MAX_USER_UID: + if getpwnam(s_user.pw_name).pw_uid > max_uid: continue if s_user.pw_name in SYSTEM_USER_SKIP_LIST: continue @@ -119,6 +123,12 @@ def get_config(config=None): rm_users = [tmp for tmp in all_users if tmp not in cli_users] if rm_users: login.update({'rm_users' : rm_users}) + # Build TACACS user mapping + if 'tacacs' in login: + login['exclude_users'] = get_local_users(min_uid=0, + max_uid=MIN_TACACS_UID) + cli_users + login['tacacs_min_uid'] = MIN_TACACS_UID + return login def verify(login): @@ -139,6 +149,19 @@ def verify(login): if s_user.pw_name == user and s_user.pw_uid < MIN_USER_UID: raise ConfigError(f'User "{user}" can not be created, conflict with local system account!') + # T6353: Check password for complexity using cracklib. 
+ # A user password should be sufficiently complex + plaintext_password = dict_search( + path='authentication.plaintext_password', + dict_object=user_config + ) or None + + failed_check_status = [EPasswdStrength.WEAK, EPasswdStrength.ERROR] + if plaintext_password is not None: + result = evaluate_strength(plaintext_password) + if result['strength'] in failed_check_status: + Warning(result['error']) + for pubkey, pubkey_options in (dict_search('authentication.public_keys', user_config) or {}).items(): if 'type' not in pubkey_options: raise ConfigError(f'Missing type for public-key "{pubkey}"!') diff --git a/src/conf_mode/system_login_banner.py b/src/conf_mode/system_login_banner.py index 923e1bf57..cdd066649 100755 --- a/src/conf_mode/system_login_banner.py +++ b/src/conf_mode/system_login_banner.py @@ -28,6 +28,7 @@ airbag.enable() PRELOGIN_FILE = r'/etc/issue' PRELOGIN_NET_FILE = r'/etc/issue.net' POSTLOGIN_FILE = r'/etc/motd' +POSTLOGIN_VYOS_FILE = r'/run/motd.d/01-vyos-nonproduction' default_config_data = { 'issue': 'Welcome to VyOS - \\n \\l\n\n', @@ -94,6 +95,13 @@ def apply(banner): render(POSTLOGIN_FILE, 'login/default_motd.j2', banner, permission=0o644, user='root', group='root') + if banner['version_data']['build_type'] != 'release': + render(POSTLOGIN_VYOS_FILE, 'login/motd_vyos_nonproduction.j2', + banner, + permission=0o644, + user='root', + group='root') + return None if __name__ == '__main__': diff --git a/src/conf_mode/system_option.py b/src/conf_mode/system_option.py index a84572f83..5acad6599 100755 --- a/src/conf_mode/system_option.py +++ b/src/conf_mode/system_option.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019-2024 VyOS maintainers and contributors +# Copyright (C) 2019-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -46,6 +46,13 @@ systemd_action_file = '/lib/systemd/system/ctrl-alt-del.target' usb_autosuspend = r'/etc/udev/rules.d/40-usb-autosuspend.rules' kernel_dynamic_debug = r'/sys/kernel/debug/dynamic_debug/control' time_format_to_locale = {'12-hour': 'en_US.UTF-8', '24-hour': 'en_GB.UTF-8'} +tuned_profiles = { + 'power-save': 'powersave', + 'network-latency': 'network-latency', + 'network-throughput': 'network-throughput', + 'virtual-guest': 'virtual-guest', + 'virtual-host': 'virtual-host', +} def get_config(config=None): @@ -79,7 +86,7 @@ def verify(options): if 'source_address' in config: if not is_addr_assigned(config['source_address']): - raise ConfigError('No interface with give address specified!') + raise ConfigError('No interface with given address specified!') if 'ssh_client' in options: config = options['ssh_client'] @@ -115,7 +122,14 @@ def generate(options): render(ssh_config, 'system/ssh_config.j2', options) render(usb_autosuspend, 'system/40_usb_autosuspend.j2', options) + # XXX: This code path and if statements must be kept in sync with the Kernel + # option handling in image_installer.py:get_cli_kernel_options(). This + # occurance is used for having the appropriate options passed to GRUB + # when re-configuring options on the CLI. 
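(Illustration only; the CLI values are hypothetical and simply combine the option strings emitted by the handlers that follow.) The list built up below is flattened into one space-separated string and passed to grub_util.update_kernel_cmdline_options(); a configuration that isolates CPUs 2-3 (isolate-cpus, nohz-full and rcu-no-cbs all set to 2-3) and reserves four 1G hugepages would end up as roughly:

    isolcpus=2-3 nohz_full=2-3 rcu_nocbs=2-3 default_hugepagesz=1G hugepagesz=1G hugepages=4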
cmdline_options = [] + kernel_opts = options.get('kernel', {}) + k_cpu_opts = kernel_opts.get('cpu', {}) + k_memory_opts = kernel_opts.get('memory', {}) if 'kernel' in options: if 'disable_mitigations' in options['kernel']: cmdline_options.append('mitigations=off') @@ -124,8 +138,51 @@ def generate(options): if 'amd_pstate_driver' in options['kernel']: mode = options['kernel']['amd_pstate_driver'] cmdline_options.append( - f'initcall_blacklist=acpi_cpufreq_init amd_pstate={mode}' - ) + f'initcall_blacklist=acpi_cpufreq_init amd_pstate={mode}') + if 'quiet' in options['kernel']: + cmdline_options.append('quiet') + + if 'disable_hpet' in kernel_opts: + cmdline_options.append('hpet=disable') + + if 'disable_mce' in kernel_opts: + cmdline_options.append('mce=off') + + if 'disable_softlockup' in kernel_opts: + cmdline_options.append('nosoftlockup') + + # CPU options + isol_cpus = k_cpu_opts.get('isolate_cpus') + if isol_cpus: + cmdline_options.append(f'isolcpus={isol_cpus}') + + nohz_full = k_cpu_opts.get('nohz_full') + if nohz_full: + cmdline_options.append(f'nohz_full={nohz_full}') + + rcu_nocbs = k_cpu_opts.get('rcu_no_cbs') + if rcu_nocbs: + cmdline_options.append(f'rcu_nocbs={rcu_nocbs}') + + if 'disable_nmi_watchdog' in k_cpu_opts: + cmdline_options.append('nmi_watchdog=0') + + # Memory options + if 'disable_numa_balancing' in k_memory_opts: + cmdline_options.append('numa_balancing=disable') + + default_hp_size = k_memory_opts.get('default_hugepage_size') + if default_hp_size: + cmdline_options.append(f'default_hugepagesz={default_hp_size}') + + hp_sizes = k_memory_opts.get('hugepage_size') + if hp_sizes: + for size, settings in hp_sizes.items(): + cmdline_options.append(f'hugepagesz={size}') + count = settings.get('hugepage_count') + if count: + cmdline_options.append(f'hugepages={count}') + grub_util.update_kernel_cmdline_options(' '.join(cmdline_options)) return None @@ -171,7 +228,10 @@ def apply(options): # wait until daemon has started before sending configuration while not is_systemd_service_running('tuned.service'): sleep(0.250) - cmd('tuned-adm profile network-{performance}'.format(**options)) + performance = ' '.join( + list(tuned_profiles[profile] for profile in options['performance']) + ) + cmd(f'tuned-adm profile {performance}') else: cmd('systemctl stop tuned.service') diff --git a/src/conf_mode/system_sflow.py b/src/conf_mode/system_sflow.py index 41119b494..a22dac36f 100755 --- a/src/conf_mode/system_sflow.py +++ b/src/conf_mode/system_sflow.py @@ -54,7 +54,7 @@ def verify(sflow): # Check if configured sflow agent-address exist in the system if 'agent_address' in sflow: tmp = sflow['agent_address'] - if not is_addr_assigned(tmp): + if not is_addr_assigned(tmp, include_vrf=True): raise ConfigError( f'Configured "sflow agent-address {tmp}" does not exist in the system!' 
) diff --git a/src/conf_mode/system_syslog.py b/src/conf_mode/system_syslog.py index 2497c5bb6..bdab09f3c 100755 --- a/src/conf_mode/system_syslog.py +++ b/src/conf_mode/system_syslog.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2018-2024 VyOS maintainers and contributors +# Copyright (C) 2018-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -20,17 +20,22 @@ from sys import exit from vyos.base import Warning from vyos.config import Config -from vyos.configdict import is_node_changed from vyos.configverify import verify_vrf +from vyos.defaults import systemd_services +from vyos.utils.network import is_addr_assigned from vyos.utils.process import call from vyos.template import render +from vyos.template import is_ipv4 +from vyos.template import is_ipv6 from vyos import ConfigError from vyos import airbag airbag.enable() -rsyslog_conf = '/etc/rsyslog.d/00-vyos.conf' +rsyslog_conf = '/run/rsyslog/rsyslog.conf' logrotate_conf = '/etc/logrotate.d/vyos-rsyslog' -systemd_override = r'/run/systemd/system/rsyslog.service.d/override.conf' + +systemd_socket = 'syslog.socket' +systemd_service = systemd_services['syslog'] def get_config(config=None): if config: @@ -46,12 +51,17 @@ def get_config(config=None): syslog.update({ 'logrotate' : logrotate_conf }) - tmp = is_node_changed(conf, base + ['vrf']) - if tmp: syslog.update({'restart_required': {}}) - syslog = conf.merge_defaults(syslog, recursive=True) - if syslog.from_defaults(['global']): - del syslog['global'] + if syslog.from_defaults(['local']): + del syslog['local'] + + if 'preserve_fqdn' in syslog: + if conf.exists(['system', 'host-name']): + tmp = conf.return_value(['system', 'host-name']) + syslog['preserve_fqdn']['host_name'] = tmp + if conf.exists(['system', 'domain-name']): + tmp = conf.return_value(['system', 'domain-name']) + syslog['preserve_fqdn']['domain_name'] = tmp return syslog @@ -59,13 +69,33 @@ def verify(syslog): if not syslog: return None - if 'host' in syslog: - for host, host_options in syslog['host'].items(): - if 'protocol' in host_options and host_options['protocol'] == 'udp': - if 'format' in host_options and 'octet_counted' in host_options['format']: - Warning(f'Syslog UDP transport for "{host}" should not use octet-counted format!') - - verify_vrf(syslog) + if 'preserve_fqdn' in syslog: + if 'host_name' not in syslog['preserve_fqdn']: + Warning('No "system host-name" defined - cannot set syslog FQDN!') + if 'domain_name' not in syslog['preserve_fqdn']: + Warning('No "system domain-name" defined - cannot set syslog FQDN!') + + if 'remote' in syslog: + for remote, remote_options in syslog['remote'].items(): + if 'protocol' in remote_options and remote_options['protocol'] == 'udp': + if 'format' in remote_options and 'octet_counted' in remote_options['format']: + Warning(f'Syslog UDP transport for "{remote}" should not use octet-counted format!') + + if 'vrf' in remote_options: + verify_vrf(remote_options) + + if 'source_address' in remote_options: + vrf = None + if 'vrf' in remote_options: + vrf = remote_options['vrf'] + if not is_addr_assigned(remote_options['source_address'], vrf): + raise ConfigError('No interface with given address specified!') + + source_address = remote_options['source_address'] + if ((is_ipv4(remote) and is_ipv6(source_address)) or + (is_ipv6(remote) and is_ipv4(source_address))): + raise ConfigError(f'Source-address "{source_address}" does 
not match '\ + f'address-family of remote "{remote}"!') def generate(syslog): if not syslog: @@ -77,26 +107,15 @@ def generate(syslog): return None render(rsyslog_conf, 'rsyslog/rsyslog.conf.j2', syslog) - render(systemd_override, 'rsyslog/override.conf.j2', syslog) render(logrotate_conf, 'rsyslog/logrotate.j2', syslog) - - # Reload systemd manager configuration - call('systemctl daemon-reload') return None def apply(syslog): - systemd_socket = 'syslog.socket' - systemd_service = 'syslog.service' if not syslog: call(f'systemctl stop {systemd_service} {systemd_socket}') return None - # we need to restart the service if e.g. the VRF name changed - systemd_action = 'reload-or-restart' - if 'restart_required' in syslog: - systemd_action = 'restart' - - call(f'systemctl {systemd_action} {systemd_service}') + call(f'systemctl reload-or-restart {systemd_service}') return None if __name__ == '__main__': diff --git a/src/conf_mode/vpn_ipsec.py b/src/conf_mode/vpn_ipsec.py index ca0c3657f..2754314f7 100755 --- a/src/conf_mode/vpn_ipsec.py +++ b/src/conf_mode/vpn_ipsec.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2024 VyOS maintainers and contributors +# Copyright (C) 2021-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -64,6 +64,7 @@ swanctl_dir = '/etc/swanctl' charon_conf = '/etc/strongswan.d/charon.conf' charon_dhcp_conf = '/etc/strongswan.d/charon/dhcp.conf' charon_radius_conf = '/etc/strongswan.d/charon/eap-radius.conf' +charon_systemd_conf = '/etc/strongswan.d/charon-systemd.conf' interface_conf = '/etc/strongswan.d/interfaces_use.conf' swanctl_conf = f'{swanctl_dir}/swanctl.conf' @@ -86,8 +87,6 @@ def get_config(config=None): conf = Config() base = ['vpn', 'ipsec'] l2tp_base = ['vpn', 'l2tp', 'remote-access', 'ipsec-settings'] - if not conf.exists(base): - return None # retrieve common dictionary keys ipsec = conf.get_config_dict(base, key_mangling=('-', '_'), @@ -95,6 +94,14 @@ def get_config(config=None): get_first_key=True, with_pki=True) + ipsec['nhrp_exists'] = conf.exists(['protocols', 'nhrp', 'tunnel']) + if ipsec['nhrp_exists']: + set_dependents('nhrp', conf) + + if not conf.exists(base): + ipsec.update({'deleted' : ''}) + return ipsec + # We have to cleanup the default dict, as default values could # enable features which are not explicitly enabled on the # CLI. E.g. 
dead-peer-detection defaults should not be injected @@ -115,7 +122,6 @@ def get_config(config=None): ipsec['dhcp_no_address'] = {} ipsec['install_routes'] = 'no' if conf.exists(base + ["options", "disable-route-autoinstall"]) else default_install_routes ipsec['interface_change'] = leaf_node_changed(conf, base + ['interface']) - ipsec['nhrp_exists'] = conf.exists(['protocols', 'nhrp', 'tunnel']) if ipsec['nhrp_exists']: set_dependents('nhrp', conf) @@ -151,6 +157,8 @@ def get_config(config=None): _, vti = get_interface_dict(conf, ['interfaces', 'vti'], vti_interface) ipsec['vti_interface_dicts'][vti_interface] = vti + ipsec['vpp_ipsec_exists'] = conf.exists(['vpp', 'settings', 'ipsec']) + return ipsec def get_dhcp_address(iface): @@ -196,8 +204,8 @@ def verify_pki_rsa(pki, rsa_conf): return True def verify(ipsec): - if not ipsec: - return None + if not ipsec or 'deleted' in ipsec: + return if 'authentication' in ipsec: if 'psk' in ipsec['authentication']: @@ -214,6 +222,19 @@ def verify(ipsec): else: verify_interface_exists(ipsec, interface) + # need to use a pseudo-random function (PRF) with an authenticated encryption algorithm. + # If a hash algorithm is defined then it will be mapped to an equivalent PRF + if 'ike_group' in ipsec: + for _, ike_config in ipsec['ike_group'].items(): + for proposal, proposal_config in ike_config.get('proposal', {}).items(): + if 'encryption' in proposal_config and 'prf' not in proposal_config: + # list of hash algorithms that cannot be mapped to an equivalent PRF + algs = ['aes128gmac', 'aes192gmac', 'aes256gmac', 'sha256_96'] + if 'hash' in proposal_config and proposal_config['hash'] in algs: + raise ConfigError( + f"A PRF algorithm is mandatory in IKE proposal {proposal}" + ) + if 'l2tp' in ipsec: if 'esp_group' in ipsec['l2tp']: if 'esp_group' not in ipsec or ipsec['l2tp']['esp_group'] not in ipsec['esp_group']: @@ -466,6 +487,17 @@ def verify(ipsec): else: raise ConfigError(f"Missing ike-group on site-to-site peer {peer}") + # verify encryption algorithm compatibility for IKE with VPP + if ipsec['vpp_ipsec_exists']: + ike_group = ipsec['ike_group'][peer_conf['ike_group']] + for proposal, proposal_config in ike_group.get('proposal', {}).items(): + algs = ['gmac', 'serpent', 'twofish'] + if any(alg in proposal_config['encryption'] for alg in algs): + raise ConfigError( + f'Encryption algorithm {proposal_config["encryption"]} cannot be used ' + f'for IKE proposal {proposal} for site-to-site peer {peer} with VPP' + ) + if 'authentication' not in peer_conf or 'mode' not in peer_conf['authentication']: raise ConfigError(f"Missing authentication on site-to-site peer {peer}") @@ -544,7 +576,7 @@ def verify(ipsec): esp_group_name = tunnel_conf['esp_group'] if 'esp_group' in tunnel_conf else peer_conf['default_esp_group'] - if esp_group_name not in ipsec['esp_group']: + if esp_group_name not in ipsec.get('esp_group'): raise ConfigError(f"Invalid esp-group on tunnel {tunnel} for site-to-site peer {peer}") esp_group = ipsec['esp_group'][esp_group_name] @@ -556,6 +588,18 @@ def verify(ipsec): if ('local' in tunnel_conf and 'prefix' in tunnel_conf['local']) or ('remote' in tunnel_conf and 'prefix' in tunnel_conf['remote']): raise ConfigError(f"Local/remote prefix cannot be used with ESP transport mode on tunnel {tunnel} for site-to-site peer {peer}") + # verify ESP encryption algorithm compatibility with VPP + # because Marvel plugin for VPP doesn't support all algorithms that Strongswan does + if ipsec['vpp_ipsec_exists']: + for proposal, proposal_config in 
esp_group.get('proposal', {}).items(): + algs = ['aes128', 'aes192', 'aes256', 'aes128gcm128', 'aes192gcm128', 'aes256gcm128'] + if proposal_config['encryption'] not in algs: + raise ConfigError( + f'Encryption algorithm {proposal_config["encryption"]} cannot be used ' + f'for ESP proposal {proposal} on tunnel {tunnel} for site-to-site peer {peer} with VPP' + ) + + def cleanup_pki_files(): for path in [CERT_PATH, CA_PATH, CRL_PATH, KEY_PATH, PUBKEY_PATH]: if not os.path.exists(path): @@ -611,7 +655,7 @@ def generate_pki_files_rsa(pki, rsa_conf): def generate(ipsec): cleanup_pki_files() - if not ipsec: + if not ipsec or 'deleted' in ipsec: for config_file in [charon_dhcp_conf, charon_radius_conf, interface_conf, swanctl_conf]: if os.path.isfile(config_file): os.unlink(config_file) @@ -702,21 +746,19 @@ def generate(ipsec): render(charon_conf, 'ipsec/charon.j2', ipsec) render(charon_dhcp_conf, 'ipsec/charon/dhcp.conf.j2', ipsec) render(charon_radius_conf, 'ipsec/charon/eap-radius.conf.j2', ipsec) + render(charon_systemd_conf, 'ipsec/charon_systemd.conf.j2', ipsec) render(interface_conf, 'ipsec/interfaces_use.conf.j2', ipsec) render(swanctl_conf, 'ipsec/swanctl.conf.j2', ipsec) def apply(ipsec): systemd_service = 'strongswan.service' - if not ipsec: + if not ipsec or 'deleted' in ipsec: call(f'systemctl stop {systemd_service}') - if vti_updown_db_exists(): remove_vti_updown_db() - else: call(f'systemctl reload-or-restart {systemd_service}') - if ipsec['enabled_vti_interfaces']: with open_vti_updown_db_for_create_or_update() as db: db.removeAllOtherInterfaces(ipsec['enabled_vti_interfaces']) @@ -724,7 +766,7 @@ def apply(ipsec): db.commit(lambda interface: ipsec['vti_interface_dicts'][interface]) elif vti_updown_db_exists(): remove_vti_updown_db() - + if ipsec: if ipsec.get('nhrp_exists', False): try: call_dependents() @@ -733,7 +775,6 @@ def apply(ipsec): # ConfigError("ConfigError('Interface ethN requires an IP address!')") pass - if __name__ == '__main__': try: ipsec = get_config() diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py index 72b178c89..8baf55857 100755 --- a/src/conf_mode/vrf.py +++ b/src/conf_mode/vrf.py @@ -19,23 +19,23 @@ from jmespath import search from json import loads from vyos.config import Config -from vyos.configdict import dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_route_map from vyos.firewall import conntrack_required +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos.ifconfig import Interface from vyos.template import render -from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.network import get_vrf_tableid from vyos.utils.network import get_vrf_members from vyos.utils.network import interface_exists from vyos.utils.process import call from vyos.utils.process import cmd +from vyos.utils.process import is_systemd_service_running from vyos.utils.process import popen from vyos.utils.system import sysctl_write from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() @@ -132,15 +132,9 @@ def get_config(config=None): if 'name' in vrf: vrf['conntrack'] = conntrack_required(conf) - # We also need the route-map information from the config - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], - get_first_key=True)}} - - # Merge policy dict into "regular" config dict - vrf = dict_merge(tmp, vrf) + # We need to merge the FRR rendering dict into the VRF dict + # this is required to get the route-map information to FRR + vrf.update({'frr_dict' : get_frrender_dict(conf)}) return vrf def verify(vrf): @@ -155,9 +149,11 @@ def verify(vrf): f'static routes installed!') if 'name' in vrf: - reserved_names = ["add", "all", "broadcast", "default", "delete", "dev", - "get", "inet", "mtu", "link", "type", "vrf"] + reserved_names = ['add', 'all', 'broadcast', 'default', 'delete', 'dev', + 'down', 'get', 'inet', 'link', 'mtu', 'type', 'up', 'vrf'] + table_ids = [] + vnis = [] for name, vrf_config in vrf['name'].items(): # Reserved VRF names if name in reserved_names: @@ -178,17 +174,24 @@ def verify(vrf): raise ConfigError(f'VRF "{name}" table id is not unique!') table_ids.append(vrf_config['table']) + # VRF VNIs must be unique on the system + if 'vni' in vrf_config: + vni = vrf_config['vni'] + if vni in vnis: + raise ConfigError(f'VRF "{name}" VNI "{vni}" is not unique!') + vnis.append(vni) + tmp = dict_search('ip.protocol', vrf_config) if tmp != None: for protocol, protocol_options in tmp.items(): if 'route_map' in protocol_options: - verify_route_map(protocol_options['route_map'], vrf) + verify_route_map(protocol_options['route_map'], vrf['frr_dict']) tmp = dict_search('ipv6.protocol', vrf_config) if tmp != None: for protocol, protocol_options in tmp.items(): if 'route_map' in protocol_options: - verify_route_map(protocol_options['route_map'], vrf) + verify_route_map(protocol_options['route_map'], vrf['frr_dict']) return None @@ -196,8 +199,9 @@ def verify(vrf): def generate(vrf): # Render iproute2 VR helper names render(config_file, 'iproute2/vrf.conf.j2', vrf) - # Render VRF Kernel/Zebra route-map filters - vrf['frr_zebra_config'] = render_to_string('frr/zebra.vrf.route-map.frr.j2', vrf) + + if 'frr_dict' in vrf and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(vrf['frr_dict']) return None @@ -339,17 +343,8 @@ def apply(vrf): if has_rule(afi, 2000, 'l3mdev'): call(f'ip {afi} rule del pref 2000 l3mdev unreachable') - # Apply FRR filters - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(f'^vrf .+', stop_pattern='^exit-vrf', remove_stop_mark=True) - if 'frr_zebra_config' in vrf: - frr_cfg.add_before(frr.default_add_before, vrf['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + if 'frr_dict' in vrf and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None diff --git a/src/etc/dhcp/dhclient-enter-hooks.d/06-vyos-nodefaultroute b/src/etc/dhcp/dhclient-enter-hooks.d/06-vyos-nodefaultroute new file mode 100644 index 000000000..38f674276 --- /dev/null +++ b/src/etc/dhcp/dhclient-enter-hooks.d/06-vyos-nodefaultroute @@ -0,0 +1,20 @@ +# Don't add default route if no-default-route is configured for interface + +# As configuration is not available to cli-shell-api at the first boot, we must use vyos.config, which contains a workaround for this +function get_no_default_route { +python3 - <<PYEND +from vyos.config import Config +import os + +config = Config() +if config.exists('interfaces'): + iface_types = 
config.list_nodes('interfaces') + for iface_type in iface_types: + if config.exists("interfaces {} {} dhcp-options no-default-route".format(iface_type, os.environ['interface'])): + print("True") +PYEND +} + +if [[ "$(get_no_default_route)" == 'True' ]]; then + new_routers="" +fi diff --git a/src/etc/netplug/vyos-netplug-dhcp-client b/src/etc/netplug/vyos-netplug-dhcp-client index 55d15a163..a230fe900 100755 --- a/src/etc/netplug/vyos-netplug-dhcp-client +++ b/src/etc/netplug/vyos-netplug-dhcp-client @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright 2023 VyOS maintainers and contributors <maintainers@vyos.io> +# Copyright 2023-2025 VyOS maintainers and contributors <maintainers@vyos.io> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -19,44 +19,53 @@ import sys from time import sleep -from vyos.configquery import ConfigTreeQuery +from vyos.config import Config from vyos.ifconfig import Section from vyos.utils.boot import boot_configuration_complete +from vyos.utils.process import cmd +from vyos.utils.process import is_systemd_service_active from vyos.utils.commit import commit_in_progress -from vyos.utils.process import call from vyos import airbag + airbag.enable() if len(sys.argv) < 3: - airbag.noteworthy("Must specify both interface and link status!") + airbag.noteworthy('Must specify both interface and link status!') sys.exit(1) if not boot_configuration_complete(): - airbag.noteworthy("System bootup not yet finished...") + airbag.noteworthy('System bootup not yet finished...') sys.exit(1) +interface = sys.argv[1] + while commit_in_progress(): - sleep(1) + sleep(0.250) -interface = sys.argv[1] in_out = sys.argv[2] -config = ConfigTreeQuery() +config = Config() interface_path = ['interfaces'] + Section.get_config_path(interface).split() -for _, interface_config in config.get_config_dict(interface_path).items(): - # Bail out early if we do not have an IP address configured - if 'address' not in interface_config: - continue - # Bail out early if interface ist administrative down - if 'disable' in interface_config: - continue - systemd_action = 'start' - if in_out == 'out': - systemd_action = 'stop' - # Start/Stop DHCP service - if 'dhcp' in interface_config['address']: - call(f'systemctl {systemd_action} dhclient@{interface}.service') - # Start/Stop DHCPv6 service - if 'dhcpv6' in interface_config['address']: - call(f'systemctl {systemd_action} dhcp6c@{interface}.service') +systemdV4_service = f'dhclient@{interface}.service' +systemdV6_service = f'dhcp6c@{interface}.service' +if in_out == 'out': + # Interface moved state to down + if is_systemd_service_active(systemdV4_service): + cmd(f'systemctl stop {systemdV4_service}') + if is_systemd_service_active(systemdV6_service): + cmd(f'systemctl stop {systemdV6_service}') +elif in_out == 'in': + if config.exists_effective(interface_path + ['address']): + tmp = config.return_effective_values(interface_path + ['address']) + # Always (re-)start the DHCP(v6) client service. If the DHCP(v6) client + # is already running - which could happen if the interface is re- + # configured in operational down state, it will have a backoff + # time increasing while not receiving a DHCP(v6) reply. + # + # To make the interface instantly available, and as for a DHCP(v6) lease + # we will re-start the service and thus cancel the backoff time. 
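(Side note, an illustration rather than part of the change, using a hypothetical interface eth0: "restart" also covers the case where the unit is not running yet, since systemd starts an inactive unit on restart, so one code path handles both a freshly attached link and an interface re-configured while the link was down.)

    systemctl restart dhclient@eth0.service   # v4 client: started if stopped, restarted (backoff reset) if running
    systemctl restart dhcp6c@eth0.service     # v6 client, same semantics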
+ if 'dhcp' in tmp: + cmd(f'systemctl restart {systemdV4_service}') + if 'dhcpv6' in tmp: + cmd(f'systemctl restart {systemdV6_service}') diff --git a/src/etc/ppp/ip-up.d/99-vyos-pppoe-wlb b/src/etc/ppp/ip-up.d/99-vyos-pppoe-wlb new file mode 100755 index 000000000..fff258afa --- /dev/null +++ b/src/etc/ppp/ip-up.d/99-vyos-pppoe-wlb @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +# This is a Python hook script which is invoked whenever a PPPoE session goes +# "ip-up". It will call into our vyos.ifconfig library and will then execute +# common tasks for the PPPoE interface. The reason we have to "hook" this is +# that we can not create a pppoeX interface in advance in linux and then connect +# pppd to this already existing interface. + +import os +import signal + +from sys import argv +from sys import exit + +from vyos.defaults import directories + +# When the ppp link comes up, this script is called with the following +# parameters +# $1 the interface name used by pppd (e.g. ppp3) +# $2 the tty device name +# $3 the tty device speed +# $4 the local IP address for the interface +# $5 the remote IP address +# $6 the parameter specified by the 'ipparam' option to pppd + +if (len(argv) < 7): + exit(1) + +wlb_pid_file = '/run/wlb_daemon.pid' + +interface = argv[6] +nexthop = argv[5] + +if not os.path.exists(directories['ppp_nexthop_dir']): + os.mkdir(directories['ppp_nexthop_dir']) + +nexthop_file = os.path.join(directories['ppp_nexthop_dir'], interface) + +with open(nexthop_file, 'w') as f: + f.write(nexthop) + +# Trigger WLB daemon update +if os.path.exists(wlb_pid_file): + with open(wlb_pid_file, 'r') as f: + pid = int(f.read()) + + os.kill(pid, signal.SIGUSR2) diff --git a/src/etc/rsyslog.conf b/src/etc/rsyslog.conf deleted file mode 100644 index b3f41acb6..000000000 --- a/src/etc/rsyslog.conf +++ /dev/null @@ -1,67 +0,0 @@ -################# -#### MODULES #### -################# - -$ModLoad imuxsock # provides support for local system logging -$ModLoad imklog # provides kernel logging support (previously done by rklogd) -#$ModLoad immark # provides --MARK-- message capability - -$OmitLocalLogging off -$SystemLogSocketName /run/systemd/journal/syslog - -$KLogPath /proc/kmsg - -########################### -#### GLOBAL DIRECTIVES #### -########################### - -# Use traditional timestamp format. -# To enable high precision timestamps, comment out the following line. -# A modern-style logfile format similar to TraditionalFileFormat, buth with high-precision timestamps and timezone information -#$ActionFileDefaultTemplate RSYSLOG_FileFormat -# The "old style" default log file format with low-precision timestamps -$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat - -# Filter duplicated messages -$RepeatedMsgReduction on - -# -# Set the default permissions for all log files. 
-# -$FileOwner root -$FileGroup adm -$FileCreateMode 0640 -$DirCreateMode 0755 -$Umask 0022 - -# -# Stop excessive logging of sudo -# -:msg, contains, " pam_unix(sudo:session): session opened for user root(uid=0) by" stop -:msg, contains, "pam_unix(sudo:session): session closed for user root" stop - -# -# Include all config files in /etc/rsyslog.d/ -# -$IncludeConfig /etc/rsyslog.d/*.conf - -# The lines below cause all listed daemons/processes to be logged into -# /var/log/auth.log, then drops the message so it does not also go to the -# regular syslog so that messages are not duplicated - -$outchannel auth_log,/var/log/auth.log -if $programname == 'CRON' or - $programname == 'sudo' or - $programname == 'su' - then :omfile:$auth_log - -if $programname == 'CRON' or - $programname == 'sudo' or - $programname == 'su' - then stop - -############### -#### RULES #### -############### -# Emergencies are sent to everybody logged in. -*.emerg :omusrmsg:*
\ No newline at end of file diff --git a/src/etc/skel/.bashrc b/src/etc/skel/.bashrc index ba7d50003..f807f0c72 100644 --- a/src/etc/skel/.bashrc +++ b/src/etc/skel/.bashrc @@ -92,6 +92,9 @@ fi #alias la='ls -A' #alias l='ls -CF' +# Disable iproute2 auto color +alias ip="ip --color=never" + # Alias definitions. # You may want to put all your additions into a separate file like # ~/.bash_aliases, instead of adding them here directly. diff --git a/src/etc/sudoers.d/vyos b/src/etc/sudoers.d/vyos index 67d7babc4..198b9b9aa 100644 --- a/src/etc/sudoers.d/vyos +++ b/src/etc/sudoers.d/vyos @@ -1,7 +1,8 @@ # # VyOS modifications to sudo configuration # -Defaults syslog_goodpri=info +Defaults !syslog +Defaults !pam_session Defaults env_keep+=VYATTA_* # diff --git a/src/etc/sysctl.d/30-vyos-router.conf b/src/etc/sysctl.d/30-vyos-router.conf index 76be41ddc..ef81cebac 100644 --- a/src/etc/sysctl.d/30-vyos-router.conf +++ b/src/etc/sysctl.d/30-vyos-router.conf @@ -83,6 +83,16 @@ net.ipv4.conf.default.ignore_routes_with_linkdown=1 net.ipv6.conf.all.ignore_routes_with_linkdown=1 net.ipv6.conf.default.ignore_routes_with_linkdown=1 +# Disable IPv6 interface autoconfigurationnable packet forwarding for IPv6 +net.ipv6.conf.all.autoconf=0 +net.ipv6.conf.default.autoconf=0 +net.ipv6.conf.*.autoconf=0 + +# Disable IPv6 router advertisements +net.ipv6.conf.all.accept_ra=0 +net.ipv6.conf.default.accept_ra=0 +net.ipv6.conf.*.accept_ra=0 + # Enable packet forwarding for IPv6 net.ipv6.conf.all.forwarding=1 diff --git a/src/etc/systemd/system/fastnetmon.service.d/override.conf b/src/etc/systemd/system/fastnetmon.service.d/override.conf deleted file mode 100644 index 841666070..000000000 --- a/src/etc/systemd/system/fastnetmon.service.d/override.conf +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -RequiresMountsFor=/run -ConditionPathExists=/run/fastnetmon/fastnetmon.conf -After= -After=vyos-router.service - -[Service] -Type=simple -WorkingDirectory=/run/fastnetmon -PIDFile=/run/fastnetmon.pid -ExecStart= -ExecStart=/usr/sbin/fastnetmon --configuration_file /run/fastnetmon/fastnetmon.conf diff --git a/src/etc/systemd/system/frr.service.d/override.conf b/src/etc/systemd/system/frr.service.d/override.conf index 614b4f7ed..a4a73ecd9 100644 --- a/src/etc/systemd/system/frr.service.d/override.conf +++ b/src/etc/systemd/system/frr.service.d/override.conf @@ -3,9 +3,11 @@ After=vyos-router.service [Service] LimitNOFILE=4096 -ExecStartPre=/bin/bash -c 'mkdir -p /run/frr/config; \ +ExecStartPre=/bin/bash -c 'if [ ! 
-f /run/frr/config/frr.conf ]; then \ + mkdir -p /run/frr/config; \ echo "log syslog" > /run/frr/config/frr.conf; \ echo "log facility local7" >> /run/frr/config/frr.conf; \ chown frr:frr /run/frr/config/frr.conf; \ chmod 664 /run/frr/config/frr.conf; \ - mount --bind /run/frr/config/frr.conf /etc/frr/frr.conf' + mount --bind /run/frr/config/frr.conf /etc/frr/frr.conf; \ +fi;' diff --git a/src/etc/systemd/system/kea-ctrl-agent.service.d/override.conf b/src/etc/systemd/system/kea-ctrl-agent.service.d/override.conf deleted file mode 100644 index 0f5bf801e..000000000 --- a/src/etc/systemd/system/kea-ctrl-agent.service.d/override.conf +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -After= -After=vyos-router.service - -[Service] -ExecStart= -ExecStart=/usr/sbin/kea-ctrl-agent -c /run/kea/kea-ctrl-agent.conf -AmbientCapabilities=CAP_NET_BIND_SERVICE -CapabilityBoundingSet=CAP_NET_BIND_SERVICE diff --git a/src/etc/systemd/system/kea-dhcp-ddns-server.service.d/override.conf b/src/etc/systemd/system/kea-dhcp-ddns-server.service.d/override.conf new file mode 100644 index 000000000..cdfdea8eb --- /dev/null +++ b/src/etc/systemd/system/kea-dhcp-ddns-server.service.d/override.conf @@ -0,0 +1,7 @@ +[Unit] +After= +After=vyos-router.service + +[Service] +ExecStart= +ExecStart=/usr/sbin/kea-dhcp-ddns -c /run/kea/kea-dhcp-ddns.conf diff --git a/src/etc/systemd/system/kea-dhcp4-server.service.d/override.conf b/src/etc/systemd/system/kea-dhcp4-server.service.d/override.conf index 682e5bbce..4a04892c0 100644 --- a/src/etc/systemd/system/kea-dhcp4-server.service.d/override.conf +++ b/src/etc/systemd/system/kea-dhcp4-server.service.d/override.conf @@ -5,3 +5,5 @@ After=vyos-router.service [Service] ExecStart= ExecStart=/usr/sbin/kea-dhcp4 -c /run/kea/kea-dhcp4.conf +ExecStartPost=!/usr/bin/python3 /usr/libexec/vyos/system/sync-dhcp-lease-to-hosts.py --inet +Restart=on-failure diff --git a/src/etc/systemd/system/rsyslog.service.d/override.conf b/src/etc/systemd/system/rsyslog.service.d/override.conf new file mode 100644 index 000000000..665b994d9 --- /dev/null +++ b/src/etc/systemd/system/rsyslog.service.d/override.conf @@ -0,0 +1,10 @@ +[Unit] +StartLimitIntervalSec=0 + +[Service] +ExecStart= +ExecStart=/usr/sbin/rsyslogd -n -iNONE -f /run/rsyslog/rsyslog.conf +Restart=always +RestartPreventExitStatus= +RestartSec=10 +RuntimeDirectoryPreserve=yes diff --git a/src/etc/udev/rules.d/90-vyos-serial.rules b/src/etc/udev/rules.d/90-vyos-serial.rules index 30c1d3170..f86b2258f 100644 --- a/src/etc/udev/rules.d/90-vyos-serial.rules +++ b/src/etc/udev/rules.d/90-vyos-serial.rules @@ -8,7 +8,7 @@ SUBSYSTEMS=="pci", IMPORT{builtin}="hwdb --subsystem=pci" SUBSYSTEMS=="usb", IMPORT{builtin}="usb_id", IMPORT{builtin}="hwdb --subsystem=usb" # /dev/serial/by-path/, /dev/serial/by-id/ for USB devices -KERNEL!="ttyUSB[0-9]*", GOTO="serial_end" +KERNEL!="ttyUSB[0-9]*|ttyACM[0-9]*", GOTO="serial_end" SUBSYSTEMS=="usb-serial", ENV{.ID_PORT}="$attr{port_number}" diff --git a/src/helpers/commit-confirm-notify.py b/src/helpers/commit-confirm-notify.py index 8d7626c78..af6167651 100755 --- a/src/helpers/commit-confirm-notify.py +++ b/src/helpers/commit-confirm-notify.py @@ -2,30 +2,56 @@ import os import sys import time +from argparse import ArgumentParser # Minutes before reboot to trigger notification. 
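(Worked example, derived from the interval list below and the loop at the end of this script: invoked with a 30-minute timeout, the 60-minute interval is skipped and wall broadcasts go out at roughly 15, 5 and 1 minute(s) before the rollback; a timeout that exactly matches one of the intervals is dropped from the list first, so no message fires immediately at invocation.)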
intervals = [1, 5, 15, 60] -def notify(interval): - s = "" if interval == 1 else "s" +parser = ArgumentParser() +parser.add_argument( + 'minutes', type=int, help='minutes before rollback to trigger notification' +) +parser.add_argument( + '--reboot', action='store_true', help="use 'soft' rollback instead of reboot" +) + + +def notify(interval, reboot=False): + s = '' if interval == 1 else 's' time.sleep((minutes - interval) * 60) - message = ('"[commit-confirm] System is going to reboot in ' - f'{interval} minute{s} to rollback the last commit.\n' - 'Confirm your changes to cancel the reboot."') - os.system("wall -n " + message) + if reboot: + message = ( + '"[commit-confirm] System will reboot in ' + f'{interval} minute{s}\nto rollback the last commit.\n' + 'Confirm your changes to cancel the reboot."' + ) + os.system('wall -n ' + message) + else: + message = ( + '"[commit-confirm] System will reload previous config in ' + f'{interval} minute{s}\nto rollback the last commit.\n' + 'Confirm your changes to cancel the reload."' + ) + os.system('wall -n ' + message) + -if __name__ == "__main__": +if __name__ == '__main__': # Must be run as root to call wall(1) without a banner. - if len(sys.argv) != 2 or os.getuid() != 0: + if os.getuid() != 0: print('This script requires superuser privileges.', file=sys.stderr) exit(1) - minutes = int(sys.argv[1]) + + args = parser.parse_args() + + minutes = args.minutes + reboot = args.reboot + # Drop the argument from the list so that the notification # doesn't kick in immediately. if minutes in intervals: intervals.remove(minutes) for interval in sorted(intervals, reverse=True): if minutes >= interval: - notify(interval) - minutes -= (minutes - interval) + notify(interval, reboot=reboot) + minutes -= minutes - interval exit(0) diff --git a/src/helpers/geoip-update.py b/src/helpers/geoip-update.py index 34accf2cc..061c95401 100755 --- a/src/helpers/geoip-update.py +++ b/src/helpers/geoip-update.py @@ -25,20 +25,19 @@ def get_config(config=None): conf = config else: conf = ConfigTreeQuery() - base = ['firewall'] - if not conf.exists(base): - return None - - return conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, - no_tag_node_value_mangle=True) + return ( + conf.get_config_dict(['firewall'], key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) if conf.exists(['firewall']) else None, + conf.get_config_dict(['policy'], key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) if conf.exists(['policy']) else None, + ) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--force", help="Force update", action="store_true") args = parser.parse_args() - firewall = get_config() - - if not geoip_update(firewall, force=args.force): + firewall, policy = get_config() + if not geoip_update(firewall=firewall, policy=policy, force=args.force): sys.exit(1) diff --git a/src/helpers/latest-image-url.py b/src/helpers/latest-image-url.py new file mode 100755 index 000000000..ea201ef7c --- /dev/null +++ b/src/helpers/latest-image-url.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 + +import sys + +from vyos.configquery import ConfigTreeQuery +from vyos.version import get_remote_version + + +if __name__ == '__main__': + image_path = '' + + config = ConfigTreeQuery() + if config.exists('system update-check url'): + configured_url_version = config.value('system update-check url') + remote_url_list = get_remote_version(configured_url_version) + if remote_url_list: + image_path = 
remote_url_list[0].get('url') + else: + sys.exit(1) + + print(image_path) diff --git a/src/helpers/show_commit_data.py b/src/helpers/show_commit_data.py new file mode 100755 index 000000000..d507ed9a4 --- /dev/null +++ b/src/helpers/show_commit_data.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2025 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +# +# This script is used to show the commit data of the configuration + +import sys +from pathlib import Path +from argparse import ArgumentParser + +from vyos.config_mgmt import ConfigMgmt +from vyos.configtree import ConfigTree +from vyos.configtree import show_commit_data + +cm = ConfigMgmt() + +parser = ArgumentParser( + description='Show commit priority queue; no options compares the last two commits' +) +parser.add_argument('--active-config', help='Path to the active configuration file') +parser.add_argument('--proposed-config', help='Path to the proposed configuration file') +args = parser.parse_args() + +active_arg = args.active_config +proposed_arg = args.proposed_config + +if active_arg and not proposed_arg: + print('--proposed-config is required when --active-config is specified') + sys.exit(1) + +if not active_arg and not proposed_arg: + active = cm.get_config_tree_revision(1) + proposed = cm.get_config_tree_revision(0) +else: + if active_arg: + active = ConfigTree(Path(active_arg).read_text()) + else: + active = cm.get_config_tree_revision(0) + + proposed = ConfigTree(Path(proposed_arg).read_text()) + +ret = show_commit_data(active, proposed) +print(ret) diff --git a/src/helpers/test_commit.py b/src/helpers/test_commit.py new file mode 100755 index 000000000..00a413687 --- /dev/null +++ b/src/helpers/test_commit.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2025 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# +# +# This script is used to test execution of the commit algorithm by vyos-commitd + +from pathlib import Path +from argparse import ArgumentParser +from datetime import datetime + +from vyos.configtree import ConfigTree +from vyos.configtree import test_commit + + +parser = ArgumentParser( + description='Execute commit priority queue' +) +parser.add_argument( + '--active-config', help='Path to the active configuration file', required=True +) +parser.add_argument( + '--proposed-config', help='Path to the proposed configuration file', required=True +) +args = parser.parse_args() + +active_arg = args.active_config +proposed_arg = args.proposed_config + +active = ConfigTree(Path(active_arg).read_text()) +proposed = ConfigTree(Path(proposed_arg).read_text()) + + +time_begin_commit = datetime.now() +test_commit(active, proposed) +time_end_commit = datetime.now() +print(f'commit time: {time_end_commit - time_begin_commit}') diff --git a/src/helpers/vyos-certbot-renew-pki.sh b/src/helpers/vyos-certbot-renew-pki.sh index d0b663f7b..1c273d2fa 100755 --- a/src/helpers/vyos-certbot-renew-pki.sh +++ b/src/helpers/vyos-certbot-renew-pki.sh @@ -1,3 +1,3 @@ -#!/bin/sh +#!/bin/vbash source /opt/vyatta/etc/functions/script-template /usr/libexec/vyos/conf_mode/pki.py certbot_renew diff --git a/src/helpers/vyos-domain-resolver.py b/src/helpers/vyos-domain-resolver.py deleted file mode 100755 index 57cfcabd7..000000000 --- a/src/helpers/vyos-domain-resolver.py +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2022-2024 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
- -import json -import time - -from vyos.configdict import dict_merge -from vyos.configquery import ConfigTreeQuery -from vyos.firewall import fqdn_config_parse -from vyos.firewall import fqdn_resolve -from vyos.utils.commit import commit_in_progress -from vyos.utils.dict import dict_search_args -from vyos.utils.process import cmd -from vyos.utils.process import run -from vyos.xml_ref import get_defaults - -base = ['firewall'] -timeout = 300 -cache = False - -domain_state = {} - -ipv4_tables = { - 'ip vyos_mangle', - 'ip vyos_filter', - 'ip vyos_nat', - 'ip raw' -} - -ipv6_tables = { - 'ip6 vyos_mangle', - 'ip6 vyos_filter', - 'ip6 raw' -} - -def get_config(conf): - firewall = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, - no_tag_node_value_mangle=True) - - default_values = get_defaults(base, get_first_key=True) - - firewall = dict_merge(default_values, firewall) - - global timeout, cache - - if 'resolver_interval' in firewall: - timeout = int(firewall['resolver_interval']) - - if 'resolver_cache' in firewall: - cache = True - - fqdn_config_parse(firewall) - - return firewall - -def resolve(domains, ipv6=False): - global domain_state - - ip_list = set() - - for domain in domains: - resolved = fqdn_resolve(domain, ipv6=ipv6) - - if resolved and cache: - domain_state[domain] = resolved - elif not resolved: - if domain not in domain_state: - continue - resolved = domain_state[domain] - - ip_list = ip_list | resolved - return ip_list - -def nft_output(table, set_name, ip_list): - output = [f'flush set {table} {set_name}'] - if ip_list: - ip_str = ','.join(ip_list) - output.append(f'add element {table} {set_name} {{ {ip_str} }}') - return output - -def nft_valid_sets(): - try: - valid_sets = [] - sets_json = cmd('nft --json list sets') - sets_obj = json.loads(sets_json) - - for obj in sets_obj['nftables']: - if 'set' in obj: - family = obj['set']['family'] - table = obj['set']['table'] - name = obj['set']['name'] - valid_sets.append((f'{family} {table}', name)) - - return valid_sets - except: - return [] - -def update(firewall): - conf_lines = [] - count = 0 - - valid_sets = nft_valid_sets() - - domain_groups = dict_search_args(firewall, 'group', 'domain_group') - if domain_groups: - for set_name, domain_config in domain_groups.items(): - if 'address' not in domain_config: - continue - - nft_set_name = f'D_{set_name}' - domains = domain_config['address'] - - ip_list = resolve(domains, ipv6=False) - for table in ipv4_tables: - if (table, nft_set_name) in valid_sets: - conf_lines += nft_output(table, nft_set_name, ip_list) - - ip6_list = resolve(domains, ipv6=True) - for table in ipv6_tables: - if (table, nft_set_name) in valid_sets: - conf_lines += nft_output(table, nft_set_name, ip6_list) - count += 1 - - for set_name, domain in firewall['ip_fqdn'].items(): - table = 'ip vyos_filter' - nft_set_name = f'FQDN_{set_name}' - - ip_list = resolve([domain], ipv6=False) - - if (table, nft_set_name) in valid_sets: - conf_lines += nft_output(table, nft_set_name, ip_list) - count += 1 - - for set_name, domain in firewall['ip6_fqdn'].items(): - table = 'ip6 vyos_filter' - nft_set_name = f'FQDN_{set_name}' - - ip_list = resolve([domain], ipv6=True) - if (table, nft_set_name) in valid_sets: - conf_lines += nft_output(table, nft_set_name, ip_list) - count += 1 - - nft_conf_str = "\n".join(conf_lines) + "\n" - code = run(f'nft --file -', input=nft_conf_str) - - print(f'Updated {count} sets - result: {code}') - -if __name__ == '__main__': - print(f'VyOS domain resolver') - - count = 1 
- while commit_in_progress(): - if ( count % 60 == 0 ): - print(f'Commit still in progress after {count}s - waiting') - count += 1 - time.sleep(1) - - conf = ConfigTreeQuery() - firewall = get_config(conf) - - print(f'interval: {timeout}s - cache: {cache}') - - while True: - update(firewall) - time.sleep(timeout) diff --git a/src/helpers/vyos-load-balancer.py b/src/helpers/vyos-load-balancer.py new file mode 100755 index 000000000..30329fd5c --- /dev/null +++ b/src/helpers/vyos-load-balancer.py @@ -0,0 +1,312 @@ +#!/usr/bin/python3 + +# Copyright 2024-2025 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. If not, see <http://www.gnu.org/licenses/>. + +import json +import os +import signal +import sys +import time + +from vyos.config import Config +from vyos.template import render +from vyos.utils.commit import commit_in_progress +from vyos.utils.network import get_interface_address +from vyos.utils.process import rc_cmd +from vyos.utils.process import run +from vyos.xml_ref import get_defaults +from vyos.wanloadbalance import health_ping_host +from vyos.wanloadbalance import health_ping_host_ttl +from vyos.wanloadbalance import parse_dhcp_nexthop +from vyos.wanloadbalance import parse_ppp_nexthop + +nftables_wlb_conf = '/run/nftables_wlb.conf' +wlb_status_file = '/run/wlb_status.json' +wlb_pid_file = '/run/wlb_daemon.pid' +sleep_interval = 5 # Main loop sleep interval + +def health_check(ifname, conf, state, test_defaults): + # Run health tests for interface + + if get_ipv4_address(ifname) is None: + return False + + if 'test' not in conf: + resp_time = test_defaults['resp-time'] + target = conf['nexthop'] + + if target == 'dhcp': + target = state['dhcp_nexthop'] + + if not target: + return False + + return health_ping_host(target, ifname, wait_time=resp_time) + + for test_id, test_conf in conf['test'].items(): + check_type = test_conf['type'] + + if check_type == 'ping': + resp_time = test_conf['resp_time'] + target = test_conf['target'] + if not health_ping_host(target, ifname, wait_time=resp_time): + return False + elif check_type == 'ttl': + target = test_conf['target'] + ttl_limit = test_conf['ttl_limit'] + if not health_ping_host_ttl(target, ifname, ttl_limit=ttl_limit): + return False + elif check_type == 'user-defined': + script = test_conf['test_script'] + rc = run(script) + if rc != 0: + return False + + return True + +def on_state_change(lb, ifname, state): + # Run hook on state change + if 'hook' in lb: + script_path = os.path.join('/config/scripts/', lb['hook']) + env = { + 'WLB_INTERFACE_NAME': ifname, + 'WLB_INTERFACE_STATE': 'ACTIVE' if state else 'FAILED' + } + + code = run(script_path, env=env) + if code != 0: + print('WLB hook returned non-zero error code') + + print(f'INFO: State change: {ifname} -> {state}') + +def get_ipv4_address(ifname): + # Get primary ipv4 address on interface (for source nat) + addr_json = get_interface_address(ifname) + if 
addr_json and 'addr_info' in addr_json and len(addr_json['addr_info']) > 0: + for addr_info in addr_json['addr_info']: + if addr_info['family'] == 'inet': + if 'local' in addr_info: + return addr_json['addr_info'][0]['local'] + return None + +def dynamic_nexthop_update(lb, ifname): + # Update on DHCP/PPP address/nexthop changes + # Return True if nftables needs to be updated - IP change + + if 'dhcp_nexthop' in lb['health_state'][ifname]: + if ifname[:5] == 'pppoe': + dhcp_nexthop_addr = parse_ppp_nexthop(ifname) + else: + dhcp_nexthop_addr = parse_dhcp_nexthop(ifname) + + table_num = lb['health_state'][ifname]['table_number'] + + if dhcp_nexthop_addr and lb['health_state'][ifname]['dhcp_nexthop'] != dhcp_nexthop_addr: + lb['health_state'][ifname]['dhcp_nexthop'] = dhcp_nexthop_addr + run(f'ip route replace table {table_num} default dev {ifname} via {dhcp_nexthop_addr}') + + if_addr = get_ipv4_address(ifname) + if if_addr and if_addr != lb['health_state'][ifname]['if_addr']: + lb['health_state'][ifname]['if_addr'] = if_addr + return True + + return False + +def nftables_update(lb): + # Atomically reload nftables table from template + if not os.path.exists(nftables_wlb_conf): + lb['first_install'] = True + elif 'first_install' in lb: + del lb['first_install'] + + render(nftables_wlb_conf, 'load-balancing/nftables-wlb.j2', lb) + + rc, out = rc_cmd(f'nft -f {nftables_wlb_conf}') + + if rc != 0: + print('ERROR: Failed to apply WLB nftables config') + print('Output:', out) + return False + + return True + +def cleanup(lb): + if 'interface_health' in lb: + index = 1 + for ifname, health_conf in lb['interface_health'].items(): + table_num = lb['mark_offset'] + index + run(f'ip route del table {table_num} default') + run(f'ip rule del fwmark {hex(table_num)} table {table_num}') + index += 1 + + run(f'nft delete table ip vyos_wanloadbalance') + +def get_config(): + conf = Config() + base = ['load-balancing', 'wan'] + lb = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True, with_recursive_defaults=True) + + lb['test_defaults'] = get_defaults(base + ['interface-health', 'A', 'test', 'B'], get_first_key=True) + + return lb + +if __name__ == '__main__': + while commit_in_progress(): + print("Notice: Waiting for commit to complete...") + time.sleep(1) + + lb = get_config() + + lb['health_state'] = {} + lb['mark_offset'] = 0xc8 + + # Create state dicts, interface address and nexthop, install routes and ip rules + if 'interface_health' in lb: + index = 1 + for ifname, health_conf in lb['interface_health'].items(): + table_num = lb['mark_offset'] + index + addr = get_ipv4_address(ifname) + lb['health_state'][ifname] = { + 'if_addr': addr, + 'failure_count': 0, + 'success_count': 0, + 'last_success': 0, + 'last_failure': 0, + 'state': addr is not None, + 'state_changed': False, + 'table_number': table_num, + 'mark': hex(table_num) + } + + if health_conf['nexthop'] == 'dhcp': + lb['health_state'][ifname]['dhcp_nexthop'] = None + + dynamic_nexthop_update(lb, ifname) + else: + run(f'ip route replace table {table_num} default dev {ifname} via {health_conf["nexthop"]}') + + run(f'ip rule add fwmark {hex(table_num)} table {table_num}') + + index += 1 + + nftables_update(lb) + + run('ip route flush cache') + + if 'flush_connections' in lb: + run('conntrack --delete') + run('conntrack -F expect') + + with open(wlb_status_file, 'w') as f: + f.write(json.dumps(lb['health_state'])) + + # Signal handler SIGUSR2 -> dhcpcd update + def handle_sigusr2(signum, frame): + for ifname, health_conf in 
lb['interface_health'].items(): + if 'nexthop' in health_conf and health_conf['nexthop'] == 'dhcp': + retval = dynamic_nexthop_update(lb, ifname) + + if retval: + nftables_update(lb) + + # Signal handler SIGTERM -> exit + def handle_sigterm(signum, frame): + if os.path.exists(wlb_status_file): + os.unlink(wlb_status_file) + + if os.path.exists(wlb_pid_file): + os.unlink(wlb_pid_file) + + if os.path.exists(nftables_wlb_conf): + os.unlink(nftables_wlb_conf) + + cleanup(lb) + sys.exit(0) + + signal.signal(signal.SIGUSR2, handle_sigusr2) + signal.signal(signal.SIGINT, handle_sigterm) + signal.signal(signal.SIGTERM, handle_sigterm) + + with open(wlb_pid_file, 'w') as f: + f.write(str(os.getpid())) + + # Main loop + + try: + while True: + ip_change = False + + if 'interface_health' in lb: + for ifname, health_conf in lb['interface_health'].items(): + state = lb['health_state'][ifname] + + result = health_check(ifname, health_conf, state=state, test_defaults=lb['test_defaults']) + + state_changed = result != state['state'] + state['state_changed'] = False + + if result: + state['failure_count'] = 0 + state['success_count'] += 1 + state['last_success'] = time.time() + if state_changed and state['success_count'] >= int(health_conf['success_count']): + state['state'] = True + state['state_changed'] = True + elif not result: + state['failure_count'] += 1 + state['success_count'] = 0 + state['last_failure'] = time.time() + if state_changed and state['failure_count'] >= int(health_conf['failure_count']): + state['state'] = False + state['state_changed'] = True + + if state['state_changed']: + state['if_addr'] = get_ipv4_address(ifname) + on_state_change(lb, ifname, state['state']) + + if dynamic_nexthop_update(lb, ifname): + ip_change = True + + if any(state['state_changed'] for ifname, state in lb['health_state'].items()): + if not nftables_update(lb): + break + + run('ip route flush cache') + + if 'flush_connections' in lb: + run('conntrack --delete') + run('conntrack -F expect') + + with open(wlb_status_file, 'w') as f: + f.write(json.dumps(lb['health_state'])) + elif ip_change: + nftables_update(lb) + + time.sleep(sleep_interval) + except Exception as e: + print('WLB ERROR:', e) + + if os.path.exists(wlb_status_file): + os.unlink(wlb_status_file) + + if os.path.exists(wlb_pid_file): + os.unlink(wlb_pid_file) + + if os.path.exists(nftables_wlb_conf): + os.unlink(nftables_wlb_conf) + + cleanup(lb) diff --git a/src/init/vyos-router b/src/init/vyos-router index 8825cc16a..6f1d386d6 100755 --- a/src/init/vyos-router +++ b/src/init/vyos-router @@ -24,6 +24,8 @@ declare action=$1; shift declare -x BOOTFILE=$vyatta_sysconfdir/config/config.boot declare -x DEFAULT_BOOTFILE=$vyatta_sysconfdir/config.boot.default +declare -x VYCONF_CONFIG_DIR=/usr/libexec/vyos/vyconf/config + # If vyos-config= boot option is present, use that file instead for x in $(cat /proc/cmdline); do [[ $x = vyos-config=* ]] || continue @@ -146,6 +148,10 @@ init_bootfile () { chgrp ${GROUP} $BOOTFILE chmod 660 $BOOTFILE fi + if [ -d $VYCONF_CONFIG_DIR ] ; then + cp -f $BOOTFILE $VYCONF_CONFIG_DIR/config.boot + cp -f $DEFAULT_BOOTFILE $VYCONF_CONFIG_DIR/config.failsafe + fi } # if necessary, migrate initial config @@ -154,6 +160,10 @@ migrate_bootfile () if [ -x $vyos_libexec_dir/run-config-migration.py ]; then log_progress_msg migrate sg ${GROUP} -c "$vyos_libexec_dir/run-config-migration.py $BOOTFILE" + # update vyconf copy after migration + if [ -d $VYCONF_CONFIG_DIR ] ; then + cp -f $BOOTFILE $VYCONF_CONFIG_DIR/config.boot + fi fi } 
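(Illustration of the layout these copies produce, assuming VYCONF_CONFIG_DIR exists on the image; that vyconfd consumes the files is an inference from the "systemctl start vyconfd.service" line added in the start() hunk below, not something stated here:)

    /usr/libexec/vyos/vyconf/config/config.boot      <- copy of $BOOTFILE, refreshed again after config migration
    /usr/libexec/vyos/vyconf/config/config.failsafe  <- copy of $DEFAULT_BOOTFILE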
@@ -449,8 +459,17 @@ start () nfct helper add tns inet6 tcp nft --file /usr/share/vyos/vyos-firewall-init.conf || log_failure_msg "could not initiate firewall rules" + # Ensure rsyslog is the default syslog daemon + SYSTEMD_SYSLOG="/etc/systemd/system/syslog.service" + SYSTEMD_RSYSLOG="/lib/systemd/system/rsyslog.service" + if [ ! -L ${SYSTEMD_SYSLOG} ] || [ "$(readlink -f ${SYSTEMD_SYSLOG})" != "${SYSTEMD_RSYSLOG}" ]; then + ln -sf ${SYSTEMD_RSYSLOG} ${SYSTEMD_SYSLOG} + systemctl daemon-reload + fi + # As VyOS does not execute commands that are not present in the CLI we call # the script by hand to have a single source for the login banner and MOTD + ${vyos_conf_scripts_dir}/system_syslog.py || log_failure_msg "could not reset syslog" ${vyos_conf_scripts_dir}/system_console.py || log_failure_msg "could not reset serial console" ${vyos_conf_scripts_dir}/system_login_banner.py || log_failure_msg "could not reset motd and issue files" ${vyos_conf_scripts_dir}/system_option.py || log_failure_msg "could not reset system option files" @@ -464,13 +483,20 @@ start () # enable some debugging before loading the configuration if grep -q vyos-debug /proc/cmdline; then log_action_begin_msg "Enable runtime debugging options" + FRR_DEBUG=$(python3 -c "from vyos.defaults import frr_debug_enable; print(frr_debug_enable)") + touch $FRR_DEBUG touch /tmp/vyos.container.debug touch /tmp/vyos.ifconfig.debug - touch /tmp/vyos.frr.debug touch /tmp/vyos.container.debug touch /tmp/vyos.smoketest.debug fi + # Cleanup PKI CAs + if [ -d /usr/local/share/ca-certificates/vyos ]; then + rm -f /usr/local/share/ca-certificates/vyos/*.crt + update-ca-certificates >/dev/null 2>&1 + fi + log_action_begin_msg "Mounting VyOS Config" # ensure the vyatta_configdir supports a large number of inodes since # the config hierarchy is often inode-bound (instead of size). @@ -512,6 +538,8 @@ start () disabled system_config || system_config + systemctl start vyconfd.service + for s in ${subinit[@]} ; do if ! disabled $s; then log_progress_msg $s @@ -537,6 +565,9 @@ start () if [[ ! -z "$tmp" ]]; then vtysh -c "rpki start" fi + + # Start netplug daemon + systemctl start netplug.service } stop() @@ -554,6 +585,8 @@ stop() umount ${vyatta_configdir} log_action_end_msg $? + systemctl stop netplug.service + systemctl stop vyconfd.service systemctl stop frr.service unmount_encrypted_config diff --git a/src/migration-scripts/bgp/5-to-6 b/src/migration-scripts/bgp/5-to-6 new file mode 100644 index 000000000..e6fea6574 --- /dev/null +++ b/src/migration-scripts/bgp/5-to-6 @@ -0,0 +1,39 @@ +# Copyright 2025 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# T7163: migrate "address-family ipv4|6-unicast redistribute table" from a multi +# leafNode to a tagNode. 
This is needed to support per table definition of a + route-map and/or metric + + from vyos.configtree import ConfigTree + + def migrate(config: ConfigTree) -> None: + bgp_base = ['protocols', 'bgp'] + if not config.exists(bgp_base): + return + + for address_family in ['ipv4-unicast', 'ipv6-unicast']: + # there is no non-main routing table being redistributed under this address family + # bail out early and continue with next AFI + table_path = bgp_base + ['address-family', address_family, 'redistribute', 'table'] + if not config.exists(table_path): + continue + + tables = config.return_values(table_path) + config.delete(table_path) + + for table in tables: + config.set(table_path + [table]) + config.set_tag(table_path) diff --git a/src/migration-scripts/dhcp-server/7-to-8 b/src/migration-scripts/dhcp-server/7-to-8 index 7fcb62e86..d0f9455bb 100644 --- a/src/migration-scripts/dhcp-server/7-to-8 +++ b/src/migration-scripts/dhcp-server/7-to-8 @@ -41,9 +41,6 @@ def migrate(config: ConfigTree) -> None: for network in config.list_nodes(base + ['shared-network-name']): base_network = base + ['shared-network-name', network] - if config.exists(base_network + ['ping-check']): - config.delete(base_network + ['ping-check']) - if config.exists(base_network + ['shared-network-parameters']): config.delete(base_network +['shared-network-parameters']) @@ -57,9 +54,6 @@ def migrate(config: ConfigTree) -> None: if config.exists(base_subnet + ['enable-failover']): config.delete(base_subnet + ['enable-failover']) - if config.exists(base_subnet + ['ping-check']): - config.delete(base_subnet + ['ping-check']) - if config.exists(base_subnet + ['subnet-parameters']): config.delete(base_subnet + ['subnet-parameters']) diff --git a/src/migration-scripts/dns-dynamic/1-to-2 b/src/migration-scripts/dns-dynamic/1-to-2 index 5dca9e32f..7f4938147 100644 --- a/src/migration-scripts/dns-dynamic/1-to-2 +++ b/src/migration-scripts/dns-dynamic/1-to-2 @@ -20,6 +20,10 @@ # - migrate "service dns dynamic address <interface> service <service> protocol dnsexit" # to "service dns dynamic address <interface> service <service> protocol dnsexit2" +# T6950: +# - add if statement to prevent processing of "service dns dynamic address" options if they don't exist +# due to the fact they are no longer valid syntax + from vyos.configtree import ConfigTree base_path = ['service', 'dns', 'dynamic'] @@ -36,16 +40,19 @@ def migrate(config: ConfigTree) -> None: if config.exists(timeout_path): config.rename(timeout_path, 'interval') - # Remove "service dns dynamic address <interface> web-options ..." when <interface> != "web" - for address in config.list_nodes(address_path): - if config.exists(address_path + [address, 'web-options']) and address != 'web': - config.delete(address_path + [address, 'web-options']) - - # Migrate "service dns dynamic address <interface> service <service> protocol dnsexit" - # to "service dns dynamic address <interface> service <service> protocol dnsexit2" - for address in config.list_nodes(address_path): - for svc_cfg in config.list_nodes(address_path + [address, 'service']): - if config.exists(address_path + [address, 'service', svc_cfg, 'protocol']): - protocol = config.return_value(address_path + [address, 'service', svc_cfg, 'protocol']) - if protocol == 'dnsexit': - config.set(address_path + [address, 'service', svc_cfg, 'protocol'], 'dnsexit2') + # T6950: Can't migrate address if it doesn't exist + if config.exists(address_path): + + # Remove "service dns dynamic address <interface> web-options ..." 
when <interface> != "web" + for address in config.list_nodes(address_path): + if config.exists(address_path + [address, 'web-options']) and address != 'web': + config.delete(address_path + [address, 'web-options']) + + # Migrate "service dns dynamic address <interface> service <service> protocol dnsexit" + # to "service dns dynamic address <interface> service <service> protocol dnsexit2" + for address in config.list_nodes(address_path): + for svc_cfg in config.list_nodes(address_path + [address, 'service']): + if config.exists(address_path + [address, 'service', svc_cfg, 'protocol']): + protocol = config.return_value(address_path + [address, 'service', svc_cfg, 'protocol']) + if protocol == 'dnsexit': + config.set(address_path + [address, 'service', svc_cfg, 'protocol'], 'dnsexit2') diff --git a/src/migration-scripts/firewall/16-to-17 b/src/migration-scripts/firewall/16-to-17 index ad0706f04..ad0706f04 100755..100644 --- a/src/migration-scripts/firewall/16-to-17 +++ b/src/migration-scripts/firewall/16-to-17 diff --git a/src/migration-scripts/firewall/17-to-18 b/src/migration-scripts/firewall/17-to-18 new file mode 100755 index 000000000..34ce6aa07 --- /dev/null +++ b/src/migration-scripts/firewall/17-to-18 @@ -0,0 +1,41 @@ +# Copyright (C) 2024-2025 VyOS maintainers and contributors +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# From +# set firewall zone <zone> interface RED +# set firewall zone <zone> interface eth0 +# To +# set firewall zone <zone> member vrf RED +# set firewall zone <zone> member interface eth0 + +from vyos.configtree import ConfigTree + +base = ['firewall', 'zone'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + # Nothing to do + return + + for zone in config.list_nodes(base): + zone_iface_base = base + [zone, 'interface'] + zone_member_base = base + [zone, 'member'] + if config.exists(zone_iface_base): + for iface in config.return_values(zone_iface_base): + if config.exists(['vrf', 'name', iface]): + config.set(zone_member_base + ['vrf'], value=iface, replace=False) + else: + config.set(zone_member_base + ['interface'], value=iface, replace=False) + config.delete(zone_iface_base) diff --git a/src/migration-scripts/flow-accounting/1-to-2 b/src/migration-scripts/flow-accounting/1-to-2 new file mode 100644 index 000000000..5ffb1eec8 --- /dev/null +++ b/src/migration-scripts/flow-accounting/1-to-2 @@ -0,0 +1,63 @@ +# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# migrate 'system flow-accounting sflow' to 'system sflow' + +from vyos.configtree import ConfigTree + +base = ['system', 'flow-accounting'] +base_fa_sflow = base + ['sflow'] +base_sflow = ['system', 'sflow'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base_fa_sflow): + # Nothing to do + return + + if not config.exists(base_sflow): + + for iface in config.return_values(base + ['interface']): + config.set(base_sflow + ['interface'], value=iface, replace=False) + + if config.exists(base + ['vrf']): + vrf = config.return_value(base + ['vrf']) + config.set(base_sflow + ['vrf'], value=vrf) + + if config.exists(base + ['enable-egress']): + config.set(base_sflow + ['enable-egress']) + + if config.exists(base_fa_sflow + ['agent-address']): + address = config.return_value(base_fa_sflow + ['agent-address']) + config.set(base_sflow + ['agent-address'], value=address) + + if config.exists(base_fa_sflow + ['sampling-rate']): + sr = config.return_value(base_fa_sflow + ['sampling-rate']) + config.set(base_sflow + ['sampling-rate'], value=sr) + + for server in config.list_nodes(base_fa_sflow + ['server']): + config.set(base_sflow + ['server']) + config.set_tag(base_sflow + ['server']) + config.set(base_sflow + ['server', server]) + tmp = base_fa_sflow + ['server', server] + if config.exists(tmp + ['port']): + port = config.return_value(tmp + ['port']) + config.set(base_sflow + ['server', server, 'port'], value=port) + + if config.exists(base + ['netflow']): + # delete only sflow from flow-accounting if netflow is set + config.delete(base_fa_sflow) + else: + # delete all flow-accounting config otherwise + config.delete(base) diff --git a/src/migration-scripts/https/6-to-7 b/src/migration-scripts/https/6-to-7 new file mode 100644 index 000000000..571f3b6ae --- /dev/null +++ b/src/migration-scripts/https/6-to-7 @@ -0,0 +1,43 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
+ +# T6736: move REST API to distinct node + + +from vyos.configtree import ConfigTree + + +base = ['service', 'https', 'api'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + # Nothing to do + return + + # Move REST API configuration to new node + # REST API was previously enabled if base path exists + config.set(['service', 'https', 'api', 'rest']) + for entry in ('debug', 'strict'): + if config.exists(base + [entry]): + config.set(base + ['rest', entry]) + config.delete(base + [entry]) + + # Move CORS settings under GraphQL + # CORS is not implemented for REST API + if config.exists(base + ['cors']): + old_base = base + ['cors'] + new_base = base + ['graphql', 'cors'] + config.copy(old_base, new_base) + config.delete(old_base) diff --git a/src/migration-scripts/ids/1-to-2 b/src/migration-scripts/ids/1-to-2 new file mode 100644 index 000000000..4c0333c88 --- /dev/null +++ b/src/migration-scripts/ids/1-to-2 @@ -0,0 +1,30 @@ +# Copyright 2025 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# T: Migrate threshold and add new threshold types + +from vyos.configtree import ConfigTree + +# The old 'service ids' path was only used for FastNetMon +# Suricata is in 'service suricata', +# so this isn't an overreach +base = ['service', 'ids'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + # Nothing to do + return + else: + config.delete(base) diff --git a/src/migration-scripts/lldp/2-to-3 b/src/migration-scripts/lldp/2-to-3 new file mode 100644 index 000000000..93090756c --- /dev/null +++ b/src/migration-scripts/lldp/2-to-3 @@ -0,0 +1,31 @@ +# Copyright 2025 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
+ +# T7165: Migrate LLDP interface disable to 'mode disable' + +from vyos.configtree import ConfigTree + +base = ['service', 'lldp'] + +def migrate(config: ConfigTree) -> None: + interface_base = base + ['interface'] + if not config.exists(interface_base): + # Nothing to do + return + + for interface in config.list_nodes(interface_base): + if config.exists(interface_base + [interface, 'disable']): + config.delete(interface_base + [interface, 'disable']) + config.set(interface_base + [interface, 'mode'], value='disable') diff --git a/src/migration-scripts/monitoring/1-to-2 b/src/migration-scripts/monitoring/1-to-2 new file mode 100644 index 000000000..8bdaebae9 --- /dev/null +++ b/src/migration-scripts/monitoring/1-to-2 @@ -0,0 +1,50 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# T6953: merge node and frr exporter under prometheus section + +from vyos.configtree import ConfigTree + +old_base = ['service', 'monitoring'] +new_base = ['service', 'monitoring', 'prometheus'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(old_base): + # Nothing to do + return + + if config.exists(old_base + ['node-exporter']): + if config.exists(old_base + ['node-exporter', 'listen-address']): + tmp = config.return_value(old_base + ['node-exporter', 'listen-address']) + config.set(new_base + ['node-exporter', 'listen-address'], value=tmp) + if config.exists(old_base + ['node-exporter', 'port']): + tmp = config.return_value(old_base + ['node-exporter', 'port']) + config.set(new_base + ['node-exporter', 'port'], value=tmp) + if config.exists(old_base + ['node-exporter', 'vrf']): + tmp = config.return_value(old_base + ['node-exporter', 'vrf']) + config.set(new_base + ['node-exporter', 'vrf'], value=tmp) + config.delete(old_base + ['node-exporter']) + + if config.exists(old_base + ['frr-exporter']): + if config.exists(old_base + ['frr-exporter', 'listen-address']): + tmp = config.return_value(old_base + ['frr-exporter', 'listen-address']) + config.set(new_base + ['frr-exporter', 'listen-address'], value=tmp) + if config.exists(old_base + ['frr-exporter', 'port']): + tmp = config.return_value(old_base + ['frr-exporter', 'port']) + config.set(new_base + ['frr-exporter', 'port'], value=tmp) + if config.exists(old_base + ['frr-exporter', 'vrf']): + tmp = config.return_value(old_base + ['frr-exporter', 'vrf']) + config.set(new_base + ['frr-exporter', 'vrf'], value=tmp) + config.delete(old_base + ['frr-exporter']) diff --git a/src/migration-scripts/nhrp/0-to-1 b/src/migration-scripts/nhrp/0-to-1 new file mode 100644 index 000000000..badd88e04 --- /dev/null +++ b/src/migration-scripts/nhrp/0-to-1 @@ -0,0 +1,129 @@ +# Copyright 2025 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General 
Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# Migration from Opennhrp to FRR NHRP +import ipaddress + +from vyos.configtree import ConfigTree + +base = ['protocols', 'nhrp', 'tunnel'] +interface_base = ['interfaces', 'tunnel'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + return + networkid = 1 + for tunnel_name in config.list_nodes(base): + ## Cisco Authentication migration + if config.exists(base + [tunnel_name,'cisco-authentication']): + auth = config.return_value(base + [tunnel_name,'cisco-authentication']) + config.delete(base + [tunnel_name,'cisco-authentication']) + config.set(base + [tunnel_name,'authentication'], value=auth) + ## Delete Dynamic-map to fqdn + if config.exists(base + [tunnel_name,'dynamic-map']): + config.delete(base + [tunnel_name,'dynamic-map']) + ## Holdtime migration + if config.exists(base + [tunnel_name,'holding-time']): + holdtime = config.return_value(base + [tunnel_name,'holding-time']) + config.delete(base + [tunnel_name,'holding-time']) + config.set(base + [tunnel_name,'holdtime'], value=holdtime) + ## Add network-id + config.set(base + [tunnel_name, 'network-id'], value=networkid) + networkid+=1 + ## Map and nhs migration + nhs_tunnelip_list = [] + nhs_nbmaip_list = [] + is_nhs = False + if config.exists(base + [tunnel_name,'map']): + is_map = False + for tunnel_ip in config.list_nodes(base + [tunnel_name, 'map']): + tunnel_ip_path = base + [tunnel_name, 'map', tunnel_ip] + tunnel_ip = tunnel_ip.split('/')[0] + if config.exists(tunnel_ip_path + ['cisco']): + config.delete(tunnel_ip_path + ['cisco']) + if config.exists(tunnel_ip_path + ['nbma-address']): + nbma = config.return_value(tunnel_ip_path + ['nbma-address']) + if config.exists (tunnel_ip_path + ['register']): + config.delete(tunnel_ip_path + ['register']) + config.delete(tunnel_ip_path + ['nbma-address']) + config.set(base + [tunnel_name, 'nhs', 'tunnel-ip', tunnel_ip, 'nbma'], value=nbma) + is_nhs = True + if tunnel_ip not in nhs_tunnelip_list: + nhs_tunnelip_list.append(tunnel_ip) + if nbma not in nhs_nbmaip_list: + nhs_nbmaip_list.append(nbma) + else: + config.delete(tunnel_ip_path + ['nbma-address']) + config.set(base + [tunnel_name, 'map_test', 'tunnel-ip', tunnel_ip, 'nbma'], value=nbma) + is_map = True + config.delete(base + [tunnel_name,'map']) + + if is_nhs: + config.set_tag(base + [tunnel_name, 'nhs', 'tunnel-ip']) + + if is_map: + config.copy(base + [tunnel_name, 'map_test'], base + [tunnel_name, 'map']) + config.delete(base + [tunnel_name, 'map_test']) + config.set_tag(base + [tunnel_name, 'map', 'tunnel-ip']) + + # + # Change netmask to /32 on tunnel interface + # If nhs is alone, add static route tunnel network to nhs + # + if config.exists(interface_base + [tunnel_name, 'address']): + tunnel_ip_list = [] + for tunnel_ip in config.return_values( + interface_base + [tunnel_name, 'address']): + tunnel_ip_ch = tunnel_ip.split('/')[0]+'/32' + if tunnel_ip_ch not in tunnel_ip_list: + tunnel_ip_list.append(tunnel_ip_ch) + for nhs in nhs_tunnelip_list: + 
config.set(['protocols', 'static', 'route', str(ipaddress.ip_network(tunnel_ip, strict=False)), 'next-hop', nhs, 'distance'], value='250') + if nhs_tunnelip_list: + if not config.is_tag(['protocols', 'static', 'route']): + config.set_tag(['protocols', 'static', 'route']) + if not config.is_tag(['protocols', 'static', 'route', str(ipaddress.ip_network(tunnel_ip, strict=False)), 'next-hop']): + config.set_tag(['protocols', 'static', 'route', str(ipaddress.ip_network(tunnel_ip, strict=False)), 'next-hop']) + + config.delete(interface_base + [tunnel_name, 'address']) + for tunnel_ip in tunnel_ip_list: + config.set( + interface_base + [tunnel_name, 'address'], value=tunnel_ip, replace=False) + + ## Map multicast migration + if config.exists(base + [tunnel_name, 'multicast']): + multicast_map = config.return_value( + base + [tunnel_name, 'multicast']) + if multicast_map == 'nhs': + config.delete(base + [tunnel_name, 'multicast']) + for nbma in nhs_nbmaip_list: + config.set(base + [tunnel_name, 'multicast'], value=nbma, + replace=False) + + ## Delete non-caching + if config.exists(base + [tunnel_name, 'non-caching']): + config.delete(base + [tunnel_name, 'non-caching']) + ## Delete shortcut-destination + if config.exists(base + [tunnel_name, 'shortcut-destination']): + if not config.exists(base + [tunnel_name, 'shortcut']): + config.set(base + [tunnel_name, 'shortcut']) + config.delete(base + [tunnel_name, 'shortcut-destination']) + ## Delete shortcut-target + if config.exists(base + [tunnel_name, 'shortcut-target']): + if not config.exists(base + [tunnel_name, 'shortcut']): + config.set(base + [tunnel_name, 'shortcut']) + config.delete(base + [tunnel_name, 'shortcut-target']) + ## Set registration-no-unique + config.set(base + [tunnel_name, 'registration-no-unique'])
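In the NHRP migration above, the static route toward each NHS is keyed on the tunnel network derived from the interface address via ipaddress.ip_network(tunnel_ip, strict=False). A short illustration of why strict=False is required (addresses are made up):

    from ipaddress import ip_network

    tunnel_ip = '10.100.0.2/24'                 # hypothetical 'interfaces tunnel tunX address'
    print(ip_network(tunnel_ip, strict=False))  # 10.100.0.0/24 - host bits are masked off
    # Without strict=False this raises ValueError because the interface
    # address still carries host bits set.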
\ No newline at end of file diff --git a/src/migration-scripts/ntp/1-to-2 b/src/migration-scripts/ntp/1-to-2 index fd7b08221..d5f800922 100644 --- a/src/migration-scripts/ntp/1-to-2 +++ b/src/migration-scripts/ntp/1-to-2 @@ -1,4 +1,4 @@ -# Copyright 2023-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# Copyright 2023-2025 VyOS maintainers and contributors <maintainers@vyos.io> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -25,6 +25,11 @@ def migrate(config: ConfigTree) -> None: # Nothing to do return + # T6911: do not migrate NTP configuration if mandatory server is missing + if not config.exists(base_path + ['server']): + config.delete(base_path) + return + # config.copy does not recursively create a path, so create ['service'] if # it doesn't yet exist, such as for config.boot.default if not config.exists(['service']): diff --git a/src/migration-scripts/policy/8-to-9 b/src/migration-scripts/policy/8-to-9 new file mode 100644 index 000000000..355e48e00 --- /dev/null +++ b/src/migration-scripts/policy/8-to-9 @@ -0,0 +1,49 @@ +# Copyright (C) 2025 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# T7116: Remove unsupported "internet" community following FRR removal +# From + # set policy route-map <name> rule <ord> set community [add | replace] internet + # set policy community-list <name> rule <ord> regex internet +# To + # set policy route-map <name> rule <ord> set community [add | replace] 0:0 + # set policy community-list <name> rule <ord> regex _0:0_ + +# NOTE: In FRR expanded community-lists, without the '_' delimiters, a regex of +# "0:0" will match "65000:0" as well as "0:0". This doesn't line up with what +# we want when replacing "internet". 
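The NOTE above is why the community-list migration writes _0:0_ rather than a bare 0:0: in an expanded community-list the regex is matched against the whole community string, so 0:0 also hits 65000:0. A rough illustration in plain Python regex terms, where the character class is only an approximation of FRR's treatment of '_' as a delimiter:

    import re

    communities = '65000:0 0:0 65001:100'

    print(re.findall(r'0:0', communities))                # ['0:0', '0:0'] - also matches inside 65000:0
    delimited = r'(?:^|[ ,])0:0(?:[ ,]|$)'                # stand-in for the _0:0_ form
    print(bool(re.search(delimited, communities)))        # True  - the standalone 0:0 is found
    print(bool(re.search(delimited, '65000:0 200:300')))  # False - 65000:0 alone no longer matches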
+ +from vyos.configtree import ConfigTree + +rm_base = ['policy', 'route-map'] +cl_base = ['policy', 'community-list'] + +def migrate(config: ConfigTree) -> None: + if config.exists(rm_base): + for policy_name in config.list_nodes(rm_base): + for rule_ord in config.list_nodes(rm_base + [policy_name, 'rule'], path_must_exist=False): + tmp_path = rm_base + [policy_name, 'rule', rule_ord, 'set', 'community'] + if config.exists(tmp_path + ['add']) and config.return_value(tmp_path + ['add']) == 'internet': + config.set(tmp_path + ['add'], '0:0') + if config.exists(tmp_path + ['replace']) and config.return_value(tmp_path + ['replace']) == 'internet': + config.set(tmp_path + ['replace'], '0:0') + + if config.exists(cl_base): + for policy_name in config.list_nodes(cl_base): + for rule_ord in config.list_nodes(cl_base + [policy_name, 'rule'], path_must_exist=False): + tmp_path = cl_base + [policy_name, 'rule', rule_ord, 'regex'] + if config.exists(tmp_path) and config.return_value(tmp_path) == 'internet': + config.set(tmp_path, '_0:0_') + diff --git a/src/migration-scripts/qos/2-to-3 b/src/migration-scripts/qos/2-to-3 new file mode 100644 index 000000000..284fe828e --- /dev/null +++ b/src/migration-scripts/qos/2-to-3 @@ -0,0 +1,34 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from vyos.configtree import ConfigTree + + +def migrate(config: ConfigTree) -> None: + base = ['qos', 'policy', 'cake'] + if config.exists(base): + for policy in config.list_nodes(base): + if config.exists(base + [policy, 'flow-isolation']): + isolation = None + for isol in config.list_nodes(base + [policy, 'flow-isolation']): + if isol == 'nat': + config.set(base + [policy, 'flow-isolation-nat']) + else: + isolation = isol + + config.delete(base + [policy, 'flow-isolation']) + + if isolation: + config.set(base + [policy, 'flow-isolation'], value=isolation) diff --git a/src/migration-scripts/quagga/11-to-12 b/src/migration-scripts/quagga/11-to-12 new file mode 100644 index 000000000..8ae2023a1 --- /dev/null +++ b/src/migration-scripts/quagga/11-to-12 @@ -0,0 +1,75 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
+ + # T6747: + # - Migrate static BFD configuration to match FRR possibilities + # - Consolidate static multicast routing configuration under a new node + + from vyos.configtree import ConfigTree + + static_base = ['protocols', 'static'] + + def migrate(config: ConfigTree) -> None: + # Check for static route/route6 configuration + # Migrate static BFD configuration to match FRR possibilities + for route_route6 in ['route', 'route6']: + route_route6_base = static_base + [route_route6] + if not config.exists(route_route6_base): + continue + + for prefix in config.list_nodes(route_route6_base): + next_hop_base = route_route6_base + [prefix, 'next-hop'] + if not config.exists(next_hop_base): + continue + + for next_hop in config.list_nodes(next_hop_base): + multi_hop_base = next_hop_base + [next_hop, 'bfd', 'multi-hop'] + + if not config.exists(multi_hop_base): + continue + + mh_source_base = multi_hop_base + ['source'] + source = None + profile = None + for src_ip in config.list_nodes(mh_source_base): + source = src_ip + if config.exists(mh_source_base + [source, 'profile']): + profile = config.return_value(mh_source_base + [source, 'profile']) + # FRR only supports one source, we will use the first one + break + + config.delete(multi_hop_base) + config.set(multi_hop_base + ['source-address'], value=source) + config.set(next_hop_base + [next_hop, 'bfd', 'profile'], value=profile) + + # Consolidate static multicast routing configuration under a new node + if config.exists(static_base + ['multicast']): + for mroute in ['interface-route', 'route']: + mroute_base = static_base + ['multicast', mroute] + if not config.exists(mroute_base): + continue + config.set(static_base + ['mroute']) + config.set_tag(static_base + ['mroute']) + for route in config.list_nodes(mroute_base): + config.copy(mroute_base + [route], static_base + ['mroute', route]) + + mroute_base = static_base + ['mroute'] + if config.exists(mroute_base): + for mroute in config.list_nodes(mroute_base): + interface_path = mroute_base + [mroute, 'next-hop-interface'] + if config.exists(interface_path): + config.rename(interface_path, 'interface') + + config.delete(static_base + ['multicast']) diff --git a/src/migration-scripts/reverse-proxy/1-to-2 b/src/migration-scripts/reverse-proxy/1-to-2 new file mode 100755 index 000000000..61612bc36 --- /dev/null +++ b/src/migration-scripts/reverse-proxy/1-to-2 @@ -0,0 +1,27 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
+ + # T6745: Rename base node to haproxy + + from vyos.configtree import ConfigTree + + base = ['load-balancing', 'reverse-proxy'] + + def migrate(config: ConfigTree) -> None: + if not config.exists(base): + # Nothing to do + return + + config.rename(base, 'haproxy') diff --git a/src/migration-scripts/reverse-proxy/2-to-3 b/src/migration-scripts/reverse-proxy/2-to-3 new file mode 100755 index 000000000..ac539618e --- /dev/null +++ b/src/migration-scripts/reverse-proxy/2-to-3 @@ -0,0 +1,66 @@ +# Copyright 2025 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# T7429: logging facility "all" unavailable in code + +from vyos.configtree import ConfigTree + +base = ['load-balancing', 'haproxy'] +unsupported_facilities = ['all', 'authpriv', 'mark'] + +def config_migrator(config, config_path: list) -> None: + if not config.exists(config_path): + return + # Remove unsupported backend HAProxy syslog facilities from CLI + # Works for both backend and service CLI nodes + for service_backend in config.list_nodes(config_path): + log_path = config_path + [service_backend, 'logging', 'facility'] + if not config.exists(log_path): + continue + # Remove unsupported syslog facilities from CLI + for facility in config.list_nodes(log_path): + if facility in unsupported_facilities: + config.delete(log_path + [facility]) + continue + # Remove unsupported facility log level from CLI. VyOS will fallback + # to default log level if not set + if config.exists(log_path + [facility, 'level']): + tmp = config.return_value(log_path + [facility, 'level']) + if tmp == 'all': + config.delete(log_path + [facility, 'level']) + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + # Nothing to do + return + + # Remove unsupported syslog facilities from CLI + global_path = base + ['global-parameters', 'logging', 'facility'] + if config.exists(global_path): + for facility in config.list_nodes(global_path): + if facility in unsupported_facilities: + config.delete(global_path + [facility]) + continue + # Remove unsupported facility log level from CLI. 
VyOS will fallback + # to default log level if not set + if config.exists(global_path + [facility, 'level']): + tmp = config.return_value(global_path + [facility, 'level']) + if tmp == 'all': + config.delete(global_path + [facility, 'level']) + + # Remove unsupported backend HAProxy syslog facilities from CLI + config_migrator(config, base + ['backend']) + # Remove unsupported service HAProxy syslog facilities from CLI + config_migrator(config, base + ['service']) diff --git a/src/migration-scripts/system/27-to-28 b/src/migration-scripts/system/27-to-28 new file mode 100644 index 000000000..0a5be48ab --- /dev/null +++ b/src/migration-scripts/system/27-to-28 @@ -0,0 +1,33 @@ +# Copyright 2023-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# rename 'system option performance' leaf nodes to new names + +from vyos.configtree import ConfigTree + +base = ['system', 'option', 'performance'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + return + + replace = { + 'throughput' : 'network-throughput', + 'latency' : 'network-latency' + } + + for old_name, new_name in replace.items(): + if config.return_value(base) == old_name: + config.set(base, new_name) diff --git a/src/migration-scripts/system/28-to-29 b/src/migration-scripts/system/28-to-29 new file mode 100644 index 000000000..ccf7056c4 --- /dev/null +++ b/src/migration-scripts/system/28-to-29 @@ -0,0 +1,71 @@ +# Copyright 2025 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
+ +# T6989: +# - remove syslog arbitrary file logging +# - remove syslog user console logging +# - move "global preserve-fqdn" one CLI level up +# - rename "host" to "remote" + +from vyos.configtree import ConfigTree + +base = ['system', 'syslog'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + return + # Drop support for custom file logging + if config.exists(base + ['file']): + config.delete(base + ['file']) + + # Drop support for logging to a user tty + # This should be dynamically added via an op-mode command like "terminal monitor" + if config.exists(base + ['user']): + config.delete(base + ['user']) + + # Move "global preserve-fqdn" one CLI level up, as it relates to all + # logging targets (console, global and remote) + preserve_fqdn_base = base + ['global', 'preserve-fqdn'] + if config.exists(preserve_fqdn_base): + config.delete(preserve_fqdn_base) + config.set(base + ['preserve-fqdn']) + + # Move "global marker" one CLI level up, as it relates to all + # logging targets (console, global and remote) + marker_base = base + ['global', 'marker'] + if config.exists(marker_base): + config.copy(marker_base, base + ['marker']) + config.delete(marker_base) + + # Rename "global" -> "local" as this describes what is logged locally + # on the router to a file on the filesystem + if config.exists(base + ['global']): + config.rename(base + ['global'], 'local') + + vrf = '' + if config.exists(base + ['vrf']): + vrf = config.return_value(base + ['vrf']) + config.delete(base + ['vrf']) + + # Rename host x.x.x.x -> remote x.x.x.x + if config.exists(base + ['host']): + config.set(base + ['remote']) + config.set_tag(base + ['remote']) + for remote in config.list_nodes(base + ['host']): + config.copy(base + ['host', remote], base + ['remote', remote]) + config.set_tag(base + ['remote']) + if vrf: + config.set(base + ['remote', remote, 'vrf'], value=vrf) + config.delete(base + ['host']) diff --git a/src/migration-scripts/vrf/1-to-2 b/src/migration-scripts/vrf/1-to-2 index 557a9ec58..89b0f708a 100644 --- a/src/migration-scripts/vrf/1-to-2 +++ b/src/migration-scripts/vrf/1-to-2 @@ -37,7 +37,10 @@ def migrate(config: ConfigTree) -> None: new_static_base = vrf_base + [vrf, 'protocols'] config.set(new_static_base) config.copy(static_base, new_static_base + ['static']) - config.set_tag(new_static_base + ['static', 'route']) + if config.exists(new_static_base + ['static', 'route']): + config.set_tag(new_static_base + ['static', 'route']) + if config.exists(new_static_base + ['static', 'route6']): + config.set_tag(new_static_base + ['static', 'route6']) # Now delete the old configuration config.delete(base) diff --git a/src/migration-scripts/vrf/2-to-3 b/src/migration-scripts/vrf/2-to-3 index acacffb41..5f396e7ed 100644 --- a/src/migration-scripts/vrf/2-to-3 +++ b/src/migration-scripts/vrf/2-to-3 @@ -76,7 +76,8 @@ def migrate(config: ConfigTree) -> None: # Get a list of all currently used VRFs and tables vrfs_current = {} for vrf in config.list_nodes(base): - vrfs_current[vrf] = int(config.return_value(base + [vrf, 'table'])) + if config.exists(base + [vrf, 'table']): + vrfs_current[vrf] = int(config.return_value(base + [vrf, 'table'])) # Check VRF names and table numbers name_regex = re.compile(r'^\d.*$') diff --git a/src/migration-scripts/wanloadbalance/3-to-4 b/src/migration-scripts/wanloadbalance/3-to-4 new file mode 100644 index 000000000..e49f46a5b --- /dev/null +++ b/src/migration-scripts/wanloadbalance/3-to-4 @@ -0,0 +1,33 @@ +# Copyright 2025 VyOS maintainers and 
contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from vyos.configtree import ConfigTree + +base = ['load-balancing', 'wan'] + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + # Nothing to do + return + + if config.exists(base + ['rule']): + for rule in config.list_nodes(base + ['rule']): + rule_base = base + ['rule', rule] + + if config.exists(rule_base + ['inbound-interface']): + ifname = config.return_value(rule_base + ['inbound-interface']) + + if ifname.endswith('+'): + config.set(rule_base + ['inbound-interface'], value=ifname.replace('+', '*')) diff --git a/src/op_mode/bridge.py b/src/op_mode/bridge.py index e80b1c21d..c4293a77c 100755 --- a/src/op_mode/bridge.py +++ b/src/op_mode/bridge.py @@ -23,10 +23,11 @@ from tabulate import tabulate from vyos.utils.process import cmd from vyos.utils.process import rc_cmd -from vyos.utils.process import call +from vyos.utils.process import call import vyos.opmode + def _get_json_data(): """ Get bridge data format JSON @@ -43,7 +44,7 @@ def _get_raw_data_summary(): return data_dict -def _get_raw_data_vlan(tunnel:bool=False): +def _get_raw_data_vlan(tunnel: bool = False): """ :returns dict """ @@ -54,14 +55,18 @@ def _get_raw_data_vlan(tunnel:bool=False): data_dict = json.loads(json_data) return data_dict + def _get_raw_data_vni() -> dict: """ :returns dict """ - json_data = cmd(f'bridge --json vni show') + code, json_data = rc_cmd(f'bridge --json vni show') + if code != 0: + raise vyos.opmode.UnconfiguredObject('VNI is not configured') data_dict = json.loads(json_data) return data_dict + def _get_raw_data_fdb(bridge): """Get MAC-address for the bridge brX :returns list @@ -70,7 +75,9 @@ def _get_raw_data_fdb(bridge): # From iproute2 fdb.c, fdb_show() will only exit(-1) in case of # non-existent bridge device; raise error. 
if code == 255: - raise vyos.opmode.UnconfiguredObject(f"bridge {bridge} does not exist in the system") + raise vyos.opmode.UnconfiguredObject( + f'bridge {bridge} does not exist in the system' + ) data_dict = json.loads(json_data) return data_dict @@ -116,8 +123,8 @@ def _get_formatted_output_summary(data): flags = ','.join(option.get('flags')).lower() prio = option.get('priority') member_entries.append([interface, state, mtu, flags, prio]) - member_headers = ["Member", "State", "MTU", "Flags", "Prio"] - output_members = tabulate(member_entries, member_headers, numalign="left") + member_headers = ['Member', 'State', 'MTU', 'Flags', 'Prio'] + output_members = tabulate(member_entries, member_headers, numalign='left') output_bridge = f"""Bridge interface {bridge}: {output_members} @@ -138,13 +145,14 @@ def _get_formatted_output_vlan(data): vlan_end = vlan_entry.get('vlanEnd') vlan = f'{vlan}-{vlan_end}' flags_raw = vlan_entry.get('flags') - flags = ', '.join(flags_raw if isinstance(flags_raw,list) else "").lower() + flags = ', '.join(flags_raw if isinstance(flags_raw, list) else '').lower() data_entries.append([interface, vlan, flags]) - headers = ["Interface", "VLAN", "Flags"] + headers = ['Interface', 'VLAN', 'Flags'] output = tabulate(data_entries, headers) return output + def _get_formatted_output_vlan_tunnel(data): data_entries = [] for entry in data: @@ -166,10 +174,11 @@ def _get_formatted_output_vlan_tunnel(data): # 200 200 data_entries.append(['', vlan, vni]) - headers = ["Interface", "VLAN", "VNI"] + headers = ['Interface', 'VLAN', 'VNI'] output = tabulate(data_entries, headers) return output + def _get_formatted_output_vni(data): data_entries = [] for entry in data: @@ -182,10 +191,11 @@ def _get_formatted_output_vni(data): vlan = f'{vlan}-{vlan_end}' data_entries.append([interface, vlan]) - headers = ["Interface", "VNI"] + headers = ['Interface', 'VNI'] output = tabulate(data_entries, headers) return output + def _get_formatted_output_fdb(data): data_entries = [] for entry in data: @@ -195,8 +205,8 @@ def _get_formatted_output_fdb(data): flags = ','.join(entry['flags']) data_entries.append([interface, mac, state, flags]) - headers = ["Interface", "Mac address", "State", "Flags"] - output = tabulate(data_entries, headers, numalign="left") + headers = ['Interface', 'Mac address', 'State', 'Flags'] + output = tabulate(data_entries, headers, numalign='left') return output @@ -209,28 +219,33 @@ def _get_formatted_output_mdb(data): state = mdb_entry.get('state') flags = ','.join(mdb_entry.get('flags')) data_entries.append([interface, group, state, flags]) - headers = ["Interface", "Group", "State", "Flags"] + headers = ['Interface', 'Group', 'State', 'Flags'] output = tabulate(data_entries, headers) return output + def _get_bridge_detail(iface): """Get interface detail statistics""" return call(f'vtysh -c "show interface {iface}"') + def _get_bridge_detail_nexthop_group(iface): """Get interface detail nexthop_group statistics""" return call(f'vtysh -c "show interface {iface} nexthop-group"') + def _get_bridge_detail_nexthop_group_raw(iface): out = cmd(f'vtysh -c "show interface {iface} nexthop-group"') return out + def _get_bridge_detail_raw(iface): """Get interface detail json statistics""" - data = cmd(f'vtysh -c "show interface {iface} json"') + data = cmd(f'vtysh -c "show interface {iface} json"') data_dict = json.loads(data) return data_dict + def show(raw: bool): bridge_data = _get_raw_data_summary() if raw: @@ -249,6 +264,7 @@ def show_vlan(raw: bool, tunnel: 
typing.Optional[bool]): else: return _get_formatted_output_vlan(bridge_vlan) + def show_vni(raw: bool): bridge_vni = _get_raw_data_vni() if raw: @@ -256,6 +272,7 @@ def show_vni(raw: bool): else: return _get_formatted_output_vni(bridge_vni) + def show_fdb(raw: bool, interface: str): fdb_data = _get_raw_data_fdb(interface) if raw: @@ -271,6 +288,7 @@ def show_mdb(raw: bool, interface: str): else: return _get_formatted_output_mdb(mdb_data) + def show_detail(raw: bool, nexthop_group: typing.Optional[bool], interface: str): if raw: if nexthop_group: @@ -283,6 +301,7 @@ def show_detail(raw: bool, nexthop_group: typing.Optional[bool], interface: str) else: return _get_bridge_detail(interface) + if __name__ == '__main__': try: res = vyos.opmode.run(sys.modules[__name__]) diff --git a/src/op_mode/dhcp.py b/src/op_mode/dhcp.py index e5455c8af..725bfc75b 100755 --- a/src/op_mode/dhcp.py +++ b/src/op_mode/dhcp.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2022-2024 VyOS maintainers and contributors +# Copyright (C) 2022-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -19,6 +19,7 @@ import sys import typing from datetime import datetime +from datetime import timezone from glob import glob from ipaddress import ip_address from tabulate import tabulate @@ -29,133 +30,72 @@ from vyos.base import Warning from vyos.configquery import ConfigTreeQuery from vyos.kea import kea_get_active_config +from vyos.kea import kea_get_dhcp_pools from vyos.kea import kea_get_leases -from vyos.kea import kea_get_pool_from_subnet_id +from vyos.kea import kea_get_server_leases +from vyos.kea import kea_get_static_mappings from vyos.kea import kea_delete_lease -from vyos.utils.process import is_systemd_service_running from vyos.utils.process import call +from vyos.utils.process import is_systemd_service_running -time_string = "%a %b %d %H:%M:%S %Z %Y" +time_string = '%a %b %d %H:%M:%S %Z %Y' config = ConfigTreeQuery() -lease_valid_states = ['all', 'active', 'free', 'expired', 'released', 'abandoned', 'reset', 'backup'] -sort_valid_inet = ['end', 'mac', 'hostname', 'ip', 'pool', 'remaining', 'start', 'state'] -sort_valid_inet6 = ['end', 'duid', 'ip', 'last_communication', 'pool', 'remaining', 'state', 'type'] +lease_valid_states = [ + 'all', + 'active', + 'free', + 'expired', + 'released', + 'abandoned', + 'reset', + 'backup', +] +sort_valid_inet = [ + 'end', + 'mac', + 'hostname', + 'ip', + 'pool', + 'remaining', + 'start', + 'state', +] +sort_valid_inet6 = [ + 'end', + 'duid', + 'ip', + 'last_communication', + 'pool', + 'remaining', + 'state', + 'type', +] mapping_sort_valid = ['mac', 'ip', 'pool', 'duid'] +stale_warn_msg = 'DHCP server is configured but not started. Data may be stale.' 
+ ArgFamily = typing.Literal['inet', 'inet6'] -ArgState = typing.Literal['all', 'active', 'free', 'expired', 'released', 'abandoned', 'reset', 'backup'] +ArgState = typing.Literal[ + 'all', 'active', 'free', 'expired', 'released', 'abandoned', 'reset', 'backup' +] ArgOrigin = typing.Literal['local', 'remote'] -def _utc_to_local(utc_dt): - return datetime.fromtimestamp((datetime.fromtimestamp(utc_dt) - datetime(1970, 1, 1)).total_seconds()) - - -def _format_hex_string(in_str): - out_str = "" - # if input is divisible by 2, add : every 2 chars - if len(in_str) > 0 and len(in_str) % 2 == 0: - out_str = ':'.join(a+b for a,b in zip(in_str[::2], in_str[1::2])) - else: - out_str = in_str - - return out_str - - -def _find_list_of_dict_index(lst, key='ip', value='') -> int: - """ - Find the index entry of list of dict matching the dict value - Exampe: - % lst = [{'ip': '192.0.2.1'}, {'ip': '192.0.2.2'}] - % _find_list_of_dict_index(lst, key='ip', value='192.0.2.2') - % 1 - """ - idx = next((index for (index, d) in enumerate(lst) if d[key] == value), None) - return idx - -def _get_raw_server_leases(family='inet', pool=None, sorted=None, state=[], origin=None) -> list: - """ - Get DHCP server leases - :return list - """ +def _get_raw_server_leases( + config, family='inet', pool=None, sorted=None, state=[], origin=None +) -> list: inet_suffix = '6' if family == 'inet6' else '4' - try: - leases = kea_get_leases(inet_suffix) - except: - raise vyos.opmode.DataUnavailable('Cannot fetch DHCP server lease information') - - if pool is None: - pool = _get_dhcp_pools(family=family) - else: - pool = [pool] - - try: - active_config = kea_get_active_config(inet_suffix) - except: - raise vyos.opmode.DataUnavailable('Cannot fetch DHCP server configuration') + pools = [pool] if pool else kea_get_dhcp_pools(config, inet_suffix) - data = [] - for lease in leases: - lifetime = lease['valid-lft'] - expiry = (lease['cltt'] + lifetime) - - lease['start_timestamp'] = datetime.utcfromtimestamp(expiry - lifetime) - lease['expire_timestamp'] = datetime.utcfromtimestamp(expiry) if expiry else None - - data_lease = {} - data_lease['ip'] = lease['ip-address'] - lease_state_long = {0: 'active', 1: 'rejected', 2: 'expired'} - data_lease['state'] = lease_state_long[lease['state']] - data_lease['pool'] = kea_get_pool_from_subnet_id(active_config, inet_suffix, lease['subnet-id']) if active_config else '-' - data_lease['end'] = lease['expire_timestamp'].timestamp() if lease['expire_timestamp'] else None - data_lease['origin'] = 'local' # TODO: Determine remote in HA - - if family == 'inet': - data_lease['mac'] = lease['hw-address'] - data_lease['start'] = lease['start_timestamp'].timestamp() - data_lease['hostname'] = lease['hostname'] - - if family == 'inet6': - data_lease['last_communication'] = lease['start_timestamp'].timestamp() - data_lease['duid'] = _format_hex_string(lease['duid']) - data_lease['type'] = lease['type'] - - if lease['type'] == 'IA_PD': - prefix_len = lease['prefix-len'] - data_lease['ip'] += f'/{prefix_len}' - - data_lease['remaining'] = '-' - - if lease['valid-lft'] > 0: - data_lease['remaining'] = lease['expire_timestamp'] - datetime.utcnow() - - if data_lease['remaining'].days >= 0: - # substraction gives us a timedelta object which can't be formatted with strftime - # so we use str(), split gets rid of the microseconds - data_lease['remaining'] = str(data_lease["remaining"]).split('.')[0] - - # Do not add old leases - if data_lease['remaining'] != '' and data_lease['pool'] in pool and data_lease['state'] != 
'free': - if not state or state == 'all' or data_lease['state'] in state: - data.append(data_lease) - - # deduplicate - checked = [] - for entry in data: - addr = entry.get('ip') - if addr not in checked: - checked.append(addr) - else: - idx = _find_list_of_dict_index(data, key='ip', value=addr) - data.pop(idx) + mappings = kea_get_server_leases(config, inet_suffix, pools, state, origin) if sorted: if sorted == 'ip': - data.sort(key = lambda x:ip_address(x['ip'])) + mappings.sort(key=lambda x: ip_address(x['ip'])) else: - data.sort(key = lambda x:x[sorted]) - return data + mappings.sort(key=lambda x: x[sorted]) + return mappings def _get_formatted_server_leases(raw_data, family='inet'): @@ -165,46 +105,67 @@ def _get_formatted_server_leases(raw_data, family='inet'): ipaddr = lease.get('ip') hw_addr = lease.get('mac') state = lease.get('state') - start = lease.get('start') - start = _utc_to_local(start).strftime('%Y/%m/%d %H:%M:%S') - end = lease.get('end') - end = _utc_to_local(end).strftime('%Y/%m/%d %H:%M:%S') if end else '-' + start = datetime.fromtimestamp(lease.get('start'), timezone.utc) + end = ( + datetime.fromtimestamp(lease.get('end'), timezone.utc) + if lease.get('end') + else '-' + ) remain = lease.get('remaining') pool = lease.get('pool') hostname = lease.get('hostname') origin = lease.get('origin') - data_entries.append([ipaddr, hw_addr, state, start, end, remain, pool, hostname, origin]) - - headers = ['IP Address', 'MAC address', 'State', 'Lease start', 'Lease expiration', 'Remaining', 'Pool', - 'Hostname', 'Origin'] + data_entries.append( + [ipaddr, hw_addr, state, start, end, remain, pool, hostname, origin] + ) + + headers = [ + 'IP Address', + 'MAC address', + 'State', + 'Lease start', + 'Lease expiration', + 'Remaining', + 'Pool', + 'Hostname', + 'Origin', + ] if family == 'inet6': for lease in raw_data: ipaddr = lease.get('ip') state = lease.get('state') - start = lease.get('last_communication') - start = _utc_to_local(start).strftime('%Y/%m/%d %H:%M:%S') - end = lease.get('end') - end = _utc_to_local(end).strftime('%Y/%m/%d %H:%M:%S') + start = datetime.fromtimestamp( + lease.get('last_communication'), timezone.utc + ) + end = ( + datetime.fromtimestamp(lease.get('end'), timezone.utc) + if lease.get('end') + else '-' + ) remain = lease.get('remaining') lease_type = lease.get('type') pool = lease.get('pool') host_identifier = lease.get('duid') - data_entries.append([ipaddr, state, start, end, remain, lease_type, pool, host_identifier]) - - headers = ['IPv6 address', 'State', 'Last communication', 'Lease expiration', 'Remaining', 'Type', 'Pool', - 'DUID'] + data_entries.append( + [ipaddr, state, start, end, remain, lease_type, pool, host_identifier] + ) + + headers = [ + 'IPv6 address', + 'State', + 'Last communication', + 'Lease expiration', + 'Remaining', + 'Type', + 'Pool', + 'DUID', + ] output = tabulate(data_entries, headers, numalign='left') return output -def _get_dhcp_pools(family='inet') -> list: - v = 'v6' if family == 'inet6' else '' - pools = config.list_nodes(f'service dhcp{v}-server shared-network-name') - return pools - - def _get_pool_size(pool, family='inet'): v = 'v6' if family == 'inet6' else '' base = f'service dhcp{v}-server shared-network-name {pool}' @@ -224,26 +185,27 @@ def _get_pool_size(pool, family='inet'): return size -def _get_raw_pool_statistics(family='inet', pool=None): - if pool is None: - pool = _get_dhcp_pools(family=family) - else: - pool = [pool] +def _get_raw_server_pool_statistics(config, family='inet', pool=None): + inet_suffix = 
'6' if family == 'inet6' else '4' + pools = [pool] if pool else kea_get_dhcp_pools(config, inet_suffix) - v = 'v6' if family == 'inet6' else '' stats = [] - for p in pool: - subnet = config.list_nodes(f'service dhcp{v}-server shared-network-name {p} subnet') + for p in pools: size = _get_pool_size(family=family, pool=p) - leases = len(_get_raw_server_leases(family=family, pool=p)) + leases = len(_get_raw_server_leases(config, family=family, pool=p)) use_percentage = round(leases / size * 100) if size != 0 else 0 - pool_stats = {'pool': p, 'size': size, 'leases': leases, - 'available': (size - leases), 'use_percentage': use_percentage, 'subnet': subnet} + pool_stats = { + 'pool': p, + 'size': size, + 'leases': leases, + 'available': (size - leases), + 'use_percentage': use_percentage, + } stats.append(pool_stats) return stats -def _get_formatted_pool_statistics(pool_data, family='inet'): +def _get_formatted_server_pool_statistics(pool_data): data_entries = [] for entry in pool_data: pool = entry.get('pool') @@ -254,53 +216,52 @@ def _get_formatted_pool_statistics(pool_data, family='inet'): use_percentage = f'{use_percentage}%' data_entries.append([pool, size, leases, available, use_percentage]) - headers = ['Pool', 'Size','Leases', 'Available', 'Usage'] + headers = ['Pool', 'Size', 'Leases', 'Available', 'Usage'] output = tabulate(data_entries, headers, numalign='left') return output -def _get_raw_server_static_mappings(family='inet', pool=None, sorted=None): - if pool is None: - pool = _get_dhcp_pools(family=family) - else: - pool = [pool] - v = 'v6' if family == 'inet6' else '' - mappings = [] - for p in pool: - pool_config = config.get_config_dict(['service', f'dhcp{v}-server', 'shared-network-name', p], - get_first_key=True) - if 'subnet' in pool_config: - for subnet, subnet_config in pool_config['subnet'].items(): - if 'static-mapping' in subnet_config: - for name, mapping_config in subnet_config['static-mapping'].items(): - mapping = {'pool': p, 'subnet': subnet, 'name': name} - mapping.update(mapping_config) - mappings.append(mapping) +def _get_raw_server_static_mappings(config, family='inet', pool=None, sorted=None): + inet_suffix = '6' if family == 'inet6' else '4' + pools = [pool] if pool else kea_get_dhcp_pools(config, inet_suffix) + + mappings = kea_get_static_mappings(config, inet_suffix, pools) if sorted: if sorted == 'ip': - data.sort(key = lambda x:ip_address(x['ip-address'])) + mappings.sort(key=lambda x: ip_address(x['ip'])) else: - data.sort(key = lambda x:x[sorted]) + mappings.sort(key=lambda x: x[sorted]) return mappings -def _get_formatted_server_static_mappings(raw_data, family='inet'): + +def _get_formatted_server_static_mappings(raw_data): data_entries = [] + for entry in raw_data: pool = entry.get('pool') subnet = entry.get('subnet') - name = entry.get('name') - ip_addr = entry.get('ip-address', 'N/A') + hostname = entry.get('hostname') + ip_addr = entry.get('ip', 'N/A') mac_addr = entry.get('mac', 'N/A') duid = entry.get('duid', 'N/A') - description = entry.get('description', 'N/A') - data_entries.append([pool, subnet, name, ip_addr, mac_addr, duid, description]) - - headers = ['Pool', 'Subnet', 'Name', 'IP Address', 'MAC Address', 'DUID', 'Description'] + desc = entry.get('description', 'N/A') + data_entries.append([pool, subnet, hostname, ip_addr, mac_addr, duid, desc]) + + headers = [ + 'Pool', + 'Subnet', + 'Hostname', + 'IP Address', + 'MAC Address', + 'DUID', + 'Description', + ] output = tabulate(data_entries, headers, numalign='left') return output -def 
_verify(func): + +def _verify_server(func): """Decorator checks if DHCP(v6) config exists""" from functools import wraps @@ -314,8 +275,10 @@ def _verify(func): if not config.exists(f'service dhcp{v}-server'): raise vyos.opmode.UnconfiguredSubsystem(unconf_message) return func(*args, **kwargs) + return _wrapper + def _verify_client(func): """Decorator checks if interface is configured as DHCP client""" from functools import wraps @@ -334,67 +297,124 @@ def _verify_client(func): if not config.exists(f'interfaces {interface_path} address dhcp{v}'): raise vyos.opmode.UnconfiguredObject(unconf_message) return func(*args, **kwargs) + return _wrapper -@_verify -def show_pool_statistics(raw: bool, family: ArgFamily, pool: typing.Optional[str]): - pool_data = _get_raw_pool_statistics(family=family, pool=pool) + +@_verify_server +def show_server_pool_statistics( + raw: bool, family: ArgFamily, pool: typing.Optional[str] +): + v = 'v6' if family == 'inet6' else '' + inet_suffix = '6' if family == 'inet6' else '4' + + if not is_systemd_service_running(f'kea-dhcp{inet_suffix}-server.service'): + Warning(stale_warn_msg) + + try: + active_config = kea_get_active_config(inet_suffix) + except Exception: + raise vyos.opmode.DataUnavailable('Cannot fetch DHCP server configuration') + + active_pools = kea_get_dhcp_pools(active_config, inet_suffix) + + if pool and active_pools and pool not in active_pools: + raise vyos.opmode.IncorrectValue(f'DHCP{v} pool "{pool}" does not exist!') + + pool_data = _get_raw_server_pool_statistics(active_config, family=family, pool=pool) if raw: return pool_data else: - return _get_formatted_pool_statistics(pool_data, family=family) + return _get_formatted_server_pool_statistics(pool_data) + + +@_verify_server +def show_server_leases( + raw: bool, + family: ArgFamily, + pool: typing.Optional[str], + sorted: typing.Optional[str], + state: typing.Optional[ArgState], + origin: typing.Optional[ArgOrigin], +): + v = 'v6' if family == 'inet6' else '' + inet_suffix = '6' if family == 'inet6' else '4' + + if not is_systemd_service_running(f'kea-dhcp{inet_suffix}-server.service'): + Warning(stale_warn_msg) + try: + active_config = kea_get_active_config(inet_suffix) + except Exception: + raise vyos.opmode.DataUnavailable('Cannot fetch DHCP server configuration') -@_verify -def show_server_leases(raw: bool, family: ArgFamily, pool: typing.Optional[str], - sorted: typing.Optional[str], state: typing.Optional[ArgState], - origin: typing.Optional[ArgOrigin] ): - # if dhcp server is down, inactive leases may still be shown as active, so warn the user. - v = '6' if family == 'inet6' else '4' - if not is_systemd_service_running(f'kea-dhcp{v}-server.service'): - Warning('DHCP server is configured but not started. 
Data may be stale.') + active_pools = kea_get_dhcp_pools(active_config, inet_suffix) - v = 'v6' if family == 'inet6' else '' - if pool and pool not in _get_dhcp_pools(family=family): + if pool and active_pools and pool not in active_pools: raise vyos.opmode.IncorrectValue(f'DHCP{v} pool "{pool}" does not exist!') - if state and state not in lease_valid_states: - raise vyos.opmode.IncorrectValue(f'DHCP{v} state "{state}" is invalid!') - sort_valid = sort_valid_inet6 if family == 'inet6' else sort_valid_inet if sorted and sorted not in sort_valid: raise vyos.opmode.IncorrectValue(f'DHCP{v} sort "{sorted}" is invalid!') - lease_data = _get_raw_server_leases(family=family, pool=pool, sorted=sorted, state=state, origin=origin) + if state and state not in lease_valid_states: + raise vyos.opmode.IncorrectValue(f'DHCP{v} state "{state}" is invalid!') + + lease_data = _get_raw_server_leases( + config=active_config, + family=family, + pool=pool, + sorted=sorted, + state=state, + origin=origin, + ) if raw: return lease_data else: return _get_formatted_server_leases(lease_data, family=family) -@_verify -def show_server_static_mappings(raw: bool, family: ArgFamily, pool: typing.Optional[str], - sorted: typing.Optional[str]): + +@_verify_server +def show_server_static_mappings( + raw: bool, + family: ArgFamily, + pool: typing.Optional[str], + sorted: typing.Optional[str], +): v = 'v6' if family == 'inet6' else '' - if pool and pool not in _get_dhcp_pools(family=family): + inet_suffix = '6' if family == 'inet6' else '4' + + if not is_systemd_service_running(f'kea-dhcp{inet_suffix}-server.service'): + Warning(stale_warn_msg) + + try: + active_config = kea_get_active_config(inet_suffix) + except Exception: + raise vyos.opmode.DataUnavailable('Cannot fetch DHCP server configuration') + + active_pools = kea_get_dhcp_pools(active_config, inet_suffix) + + if pool and active_pools and pool not in active_pools: raise vyos.opmode.IncorrectValue(f'DHCP{v} pool "{pool}" does not exist!') if sorted and sorted not in mapping_sort_valid: raise vyos.opmode.IncorrectValue(f'DHCP{v} sort "{sorted}" is invalid!') - static_mappings = _get_raw_server_static_mappings(family=family, pool=pool, sorted=sorted) + static_mappings = _get_raw_server_static_mappings( + config=active_config, family=family, pool=pool, sorted=sorted + ) if raw: return static_mappings else: - return _get_formatted_server_static_mappings(static_mappings, family=family) + return _get_formatted_server_static_mappings(static_mappings) + def _lease_valid(inet, address): leases = kea_get_leases(inet) - for lease in leases: - if address == lease['ip-address']: - return True - return False + return any(lease['ip-address'] == address for lease in leases) -@_verify + +@_verify_server def clear_dhcp_server_lease(family: ArgFamily, address: str): v = 'v6' if family == 'inet6' else '' inet = '6' if family == 'inet6' else '4' @@ -409,6 +429,7 @@ def clear_dhcp_server_lease(family: ArgFamily, address: str): print(f'Lease "{address}" has been cleared') + def _get_raw_client_leases(family='inet', interface=None): from time import mktime from datetime import datetime @@ -437,21 +458,29 @@ def _get_raw_client_leases(family='inet', interface=None): # format this makes less sense for an API and also the expiry # timestamp is provided in UNIX time. Convert string (e.g. 
Sun Jul # 30 18:13:44 CEST 2023) to UNIX time (1690733624) - tmp.update({'last_update' : int(mktime(datetime.strptime(line, time_string).timetuple()))}) + tmp.update( + { + 'last_update': int( + mktime(datetime.strptime(line, time_string).timetuple()) + ) + } + ) continue k, v = line.split('=') - tmp.update({k : v.replace("'", "")}) + tmp.update({k: v.replace("'", '')}) if 'interface' in tmp: vrf = get_interface_vrf(tmp['interface']) - if vrf: tmp.update({'vrf' : vrf}) + if vrf: + tmp.update({'vrf': vrf}) lease_data.append(tmp) return lease_data -def _get_formatted_client_leases(lease_data, family): + +def _get_formatted_client_leases(lease_data): from time import localtime from time import strftime @@ -461,30 +490,34 @@ def _get_formatted_client_leases(lease_data, family): for lease in lease_data: if not lease.get('new_ip_address'): continue - data_entries.append(["Interface", lease['interface']]) + data_entries.append(['Interface', lease['interface']]) if 'new_ip_address' in lease: - tmp = '[Active]' if is_intf_addr_assigned(lease['interface'], lease['new_ip_address']) else '[Inactive]' - data_entries.append(["IP address", lease['new_ip_address'], tmp]) + tmp = ( + '[Active]' + if is_intf_addr_assigned(lease['interface'], lease['new_ip_address']) + else '[Inactive]' + ) + data_entries.append(['IP address', lease['new_ip_address'], tmp]) if 'new_subnet_mask' in lease: - data_entries.append(["Subnet Mask", lease['new_subnet_mask']]) + data_entries.append(['Subnet Mask', lease['new_subnet_mask']]) if 'new_domain_name' in lease: - data_entries.append(["Domain Name", lease['new_domain_name']]) + data_entries.append(['Domain Name', lease['new_domain_name']]) if 'new_routers' in lease: - data_entries.append(["Router", lease['new_routers']]) + data_entries.append(['Router', lease['new_routers']]) if 'new_domain_name_servers' in lease: - data_entries.append(["Name Server", lease['new_domain_name_servers']]) + data_entries.append(['Name Server', lease['new_domain_name_servers']]) if 'new_dhcp_server_identifier' in lease: - data_entries.append(["DHCP Server", lease['new_dhcp_server_identifier']]) + data_entries.append(['DHCP Server', lease['new_dhcp_server_identifier']]) if 'new_dhcp_lease_time' in lease: - data_entries.append(["DHCP Server", lease['new_dhcp_lease_time']]) + data_entries.append(['DHCP Server', lease['new_dhcp_lease_time']]) if 'vrf' in lease: - data_entries.append(["VRF", lease['vrf']]) + data_entries.append(['VRF', lease['vrf']]) if 'last_update' in lease: tmp = strftime(time_string, localtime(int(lease['last_update']))) - data_entries.append(["Last Update", tmp]) + data_entries.append(['Last Update', tmp]) if 'new_expiry' in lease: tmp = strftime(time_string, localtime(int(lease['new_expiry']))) - data_entries.append(["Expiry", tmp]) + data_entries.append(['Expiry', tmp]) # Add empty marker data_entries.append(['']) @@ -493,12 +526,14 @@ def _get_formatted_client_leases(lease_data, family): return output + def show_client_leases(raw: bool, family: ArgFamily, interface: typing.Optional[str]): lease_data = _get_raw_client_leases(family=family, interface=interface) if raw: return lease_data else: - return _get_formatted_client_leases(lease_data, family=family) + return _get_formatted_client_leases(lease_data) + @_verify_client def renew_client_lease(raw: bool, family: ArgFamily, interface: str): @@ -510,6 +545,7 @@ def renew_client_lease(raw: bool, family: ArgFamily, interface: str): else: call(f'systemctl restart dhclient@{interface}.service') + @_verify_client def 
release_client_lease(raw: bool, family: ArgFamily, interface: str): if not raw: @@ -520,6 +556,7 @@ def release_client_lease(raw: bool, family: ArgFamily, interface: str): else: call(f'systemctl stop dhclient@{interface}.service') + if __name__ == '__main__': try: res = vyos.opmode.run(sys.modules[__name__]) diff --git a/src/op_mode/firewall.py b/src/op_mode/firewall.py index c197ca434..f3309ee34 100755 --- a/src/op_mode/firewall.py +++ b/src/op_mode/firewall.py @@ -18,6 +18,7 @@ import argparse import ipaddress import json import re +from signal import signal, SIGPIPE, SIG_DFL import tabulate import textwrap @@ -25,6 +26,9 @@ from vyos.config import Config from vyos.utils.process import cmd from vyos.utils.dict import dict_search_args +signal(SIGPIPE, SIG_DFL) + + def get_config_node(conf, node=None, family=None, hook=None, priority=None): if node == 'nat': if family == 'ipv6': @@ -148,6 +152,38 @@ def get_nftables_group_members(family, table, name): return out +def get_nftables_remote_group_members(family, table, name): + prefix = 'ip6' if family == 'ipv6' else 'ip' + out = [] + + try: + results_str = cmd(f'nft -j list set {prefix} {table} {name}') + results = json.loads(results_str) + except: + return out + + if 'nftables' not in results: + return out + + for obj in results['nftables']: + if 'set' not in obj: + continue + + set_obj = obj['set'] + if 'elem' in set_obj: + for elem in set_obj['elem']: + # search for single IP elements + if isinstance(elem, str): + out.append(elem) + # search for prefix elements + elif isinstance(elem, dict) and 'prefix' in elem: + out.append(f"{elem['prefix']['addr']}/{elem['prefix']['len']}") + # search for IP range elements + elif isinstance(elem, dict) and 'range' in elem: + out.append(f"{elem['range'][0]}-{elem['range'][1]}") + + return out + def output_firewall_vertical(rules, headers, adjust=True): for rule in rules: adjusted_rule = rule + [""] * (len(headers) - len(rule)) if adjust else rule # account for different header length, like default-action @@ -253,15 +289,17 @@ def output_firewall_name_statistics(family, hook, prior, prior_conf, single_rule if not source_addr: source_addr = dict_search_args(rule_conf, 'source', 'group', 'domain_group') if not source_addr: - source_addr = dict_search_args(rule_conf, 'source', 'fqdn') + source_addr = dict_search_args(rule_conf, 'source', 'group', 'remote_group') if not source_addr: - source_addr = dict_search_args(rule_conf, 'source', 'geoip', 'country_code') - if source_addr: - source_addr = str(source_addr)[1:-1].replace('\'','') - if 'inverse_match' in dict_search_args(rule_conf, 'source', 'geoip'): - source_addr = 'NOT ' + str(source_addr) + source_addr = dict_search_args(rule_conf, 'source', 'fqdn') if not source_addr: - source_addr = 'any' + source_addr = dict_search_args(rule_conf, 'source', 'geoip', 'country_code') + if source_addr: + source_addr = str(source_addr)[1:-1].replace('\'','') + if 'inverse_match' in dict_search_args(rule_conf, 'source', 'geoip'): + source_addr = 'NOT ' + str(source_addr) + if not source_addr: + source_addr = 'any' # Get destination dest_addr = dict_search_args(rule_conf, 'destination', 'address') @@ -272,15 +310,17 @@ def output_firewall_name_statistics(family, hook, prior, prior_conf, single_rule if not dest_addr: dest_addr = dict_search_args(rule_conf, 'destination', 'group', 'domain_group') if not dest_addr: - dest_addr = dict_search_args(rule_conf, 'destination', 'fqdn') + dest_addr = dict_search_args(rule_conf, 'destination', 'group', 'remote_group') if not 
dest_addr: - dest_addr = dict_search_args(rule_conf, 'destination', 'geoip', 'country_code') - if dest_addr: - dest_addr = str(dest_addr)[1:-1].replace('\'','') - if 'inverse_match' in dict_search_args(rule_conf, 'destination', 'geoip'): - dest_addr = 'NOT ' + str(dest_addr) + dest_addr = dict_search_args(rule_conf, 'destination', 'fqdn') if not dest_addr: - dest_addr = 'any' + dest_addr = dict_search_args(rule_conf, 'destination', 'geoip', 'country_code') + if dest_addr: + dest_addr = str(dest_addr)[1:-1].replace('\'','') + if 'inverse_match' in dict_search_args(rule_conf, 'destination', 'geoip'): + dest_addr = 'NOT ' + str(dest_addr) + if not dest_addr: + dest_addr = 'any' # Get inbound interface iiface = dict_search_args(rule_conf, 'inbound_interface', 'name') @@ -552,30 +592,8 @@ def show_firewall_group(name=None): header_tail = [] for group_type, group_type_conf in firewall['group'].items(): - ## - if group_type != 'dynamic_group': - - for group_name, group_conf in group_type_conf.items(): - if name and name != group_name: - continue - - references = find_references(group_type, group_name) - row = [group_name, textwrap.fill(group_conf.get('description') or '', 50), group_type, '\n'.join(references) or 'N/D'] - if 'address' in group_conf: - row.append("\n".join(sorted(group_conf['address']))) - elif 'network' in group_conf: - row.append("\n".join(sorted(group_conf['network'], key=ipaddress.ip_network))) - elif 'mac_address' in group_conf: - row.append("\n".join(sorted(group_conf['mac_address']))) - elif 'port' in group_conf: - row.append("\n".join(sorted(group_conf['port']))) - elif 'interface' in group_conf: - row.append("\n".join(sorted(group_conf['interface']))) - else: - row.append('N/D') - rows.append(row) - - else: + # interate over dynamic-groups + if group_type == 'dynamic_group': if not args.detail: header_tail = ['Timeout', 'Expires'] @@ -584,6 +602,9 @@ def show_firewall_group(name=None): prefix = 'DA_' if dynamic_type == 'address_group' else 'DA6_' if dynamic_type in firewall['group']['dynamic_group']: for dynamic_name, dynamic_conf in firewall['group']['dynamic_group'][dynamic_type].items(): + if name and name != dynamic_name: + continue + references = find_references(dynamic_type, dynamic_name) row = [dynamic_name, textwrap.fill(dynamic_conf.get('description') or '', 50), dynamic_type + '(dynamic)', '\n'.join(references) or 'N/D'] @@ -622,6 +643,68 @@ def show_firewall_group(name=None): header_tail += [""] * (len(members) - 1) rows.append(row) + # iterate over remote-groups + elif group_type == 'remote_group': + for remote_name, remote_conf in group_type_conf.items(): + if name and name != remote_name: + continue + + references = find_references(group_type, remote_name) + row = [remote_name, textwrap.fill(remote_conf.get('description') or '', 50), group_type, '\n'.join(references) or 'N/D'] + members = get_nftables_remote_group_members("ipv4", 'vyos_filter', f'R_{remote_name}') + members6 = get_nftables_remote_group_members("ipv6", 'vyos_filter', f'R6_{remote_name}') + + if 'url' in remote_conf: + # display only the url if no members are found for both views + if not members and not members6: + if args.detail: + header_tail = ['IPv6 Members', 'Remote URL'] + row.append('N/D') + row.append('N/D') + row.append(remote_conf['url']) + else: + row.append(remote_conf['url']) + rows.append(row) + else: + # display all table elements in detail view + if args.detail: + header_tail = ['IPv6 Members', 'Remote URL'] + if members: + row.append(' '.join(members)) + else: + 
row.append('N/D') + if members6: + row.append(' '.join(members6)) + else: + row.append('N/D') + row.append(remote_conf['url']) + rows.append(row) + else: + row.append(remote_conf['url']) + rows.append(row) + + # catch the rest of the group types + else: + for group_name, group_conf in group_type_conf.items(): + if name and name != group_name: + continue + + references = find_references(group_type, group_name) + row = [group_name, textwrap.fill(group_conf.get('description') or '', 50), group_type, '\n'.join(references) or 'N/D'] + if 'address' in group_conf: + row.append("\n".join(sorted(group_conf['address']))) + elif 'network' in group_conf: + row.append("\n".join(sorted(group_conf['network'], key=ipaddress.ip_network))) + elif 'mac_address' in group_conf: + row.append("\n".join(sorted(group_conf['mac_address']))) + elif 'port' in group_conf: + row.append("\n".join(sorted(group_conf['port']))) + elif 'interface' in group_conf: + row.append("\n".join(sorted(group_conf['interface']))) + else: + row.append('N/D') + rows.append(row) + if rows: print('Firewall Groups\n') if args.detail: diff --git a/src/op_mode/generate_psk.py b/src/op_mode/generate_psk.py new file mode 100644 index 000000000..d51293712 --- /dev/null +++ b/src/op_mode/generate_psk.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +import argparse + +from vyos.utils.process import cmd + + +def validate_hex_size(value): + """Validate that the hex_size is between 32 and 512.""" + try: + value = int(value) + except ValueError: + raise argparse.ArgumentTypeError("hex_size must be integer.") + + if value < 32 or value > 512: + raise argparse.ArgumentTypeError("hex_size must be between 32 and 512.") + return value + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + "--hex_size", + type=validate_hex_size, + help='PKS value size in hex format. Default is 32 bytes.', + default=32, + + required=False, + ) + args = parser.parse_args() + + print(cmd(f'openssl rand -hex {args.hex_size}'))
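# A minimal sketch of the same idea, assuming the standard-library secrets
# module is an acceptable stand-in for `openssl rand`: both return N random
# bytes encoded as 2*N hex characters, so the default of 32 gives a
# 64-character PSK and the accepted 32..512 range gives 64..1024 characters.
import secrets

def generate_psk_sketch(hex_size: int = 32) -> str:
    # secrets.token_hex(n) returns n random bytes as 2*n hex characters,
    # equivalent in shape to `openssl rand -hex n`
    return secrets.token_hex(hex_size)

# e.g. len(generate_psk_sketch(32)) == 64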
\ No newline at end of file diff --git a/src/op_mode/image_installer.py b/src/op_mode/image_installer.py index bdc16de15..ac5a84419 100755 --- a/src/op_mode/image_installer.py +++ b/src/op_mode/image_installer.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright 2023-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# Copyright 2023-2025 VyOS maintainers and contributors <maintainers@vyos.io> # # This file is part of VyOS. # @@ -24,7 +24,9 @@ from glob import glob from sys import exit from os import environ from os import readlink -from os import getpid, getppid +from os import getpid +from os import getppid +from json import loads from typing import Union from urllib.parse import urlparse from passlib.hosts import linux_context @@ -32,22 +34,41 @@ from errno import ENOSPC from psutil import disk_partitions +from vyos.base import Warning from vyos.configtree import ConfigTree -from vyos.configquery import ConfigTreeQuery from vyos.remote import download -from vyos.system import disk, grub, image, compat, raid, SYSTEM_CFG_VER +from vyos.system import disk +from vyos.system import grub +from vyos.system import image +from vyos.system import compat +from vyos.system import raid +from vyos.system import SYSTEM_CFG_VER +from vyos.system import grub_util from vyos.template import render +from vyos.utils.auth import ( + DEFAULT_PASSWORD, + EPasswdStrength, + evaluate_strength +) +from vyos.utils.dict import dict_search from vyos.utils.io import ask_input, ask_yes_no, select_entry from vyos.utils.file import chmod_2775 -from vyos.utils.process import cmd, run -from vyos.version import get_remote_version, get_version_data +from vyos.utils.file import read_file +from vyos.utils.process import cmd, run, rc_cmd +from vyos.version import get_version_data # define text messages MSG_ERR_NOT_LIVE: str = 'The system is already installed. Please use "add system image" instead.' MSG_ERR_LIVE: str = 'The system is in live-boot mode. Please use "install image" instead.' MSG_ERR_NO_DISK: str = 'No suitable disk was found. There must be at least one disk of 2GB or greater size.' MSG_ERR_IMPROPER_IMAGE: str = 'Missing sha256sum.txt.\nEither this image is corrupted, or of era 1.2.x (md5sum) and would downgrade image tools;\ndisallowed in either case.' -MSG_ERR_ARCHITECTURE_MISMATCH: str = 'Upgrading to a different image architecture will break your system.' +MSG_ERR_INCOMPATIBLE_IMAGE: str = 'Image compatibility check failed, aborting installation.' +MSG_ERR_ARCHITECTURE_MISMATCH: str = 'The current architecture is "{0}", the new image is for "{1}". Upgrading to a different image architecture will break your system.' +MSG_ERR_FLAVOR_MISMATCH: str = 'The current image flavor is "{0}", the new image is "{1}". Upgrading to a non-matching flavor can have unpredictable consequences.' +MSG_ERR_MISSING_ARCHITECTURE: str = 'The new image version data does not specify architecture, cannot check compatibility (is it a legacy release image?)' +MSG_ERR_MISSING_FLAVOR: str = 'The new image version data does not specify flavor, cannot check compatibility (is it a legacy release image?)' +MSG_ERR_CORRUPT_CURRENT_IMAGE: str = 'Version data in the current image is malformed: missing flavor and/or architecture fields. Upgrade compatibility cannot be checked.' +MSG_ERR_UNSUPPORTED_SIGNATURE_TYPE: str = 'Unsupported signature type, signature cannot be verified.' MSG_INFO_INSTALL_WELCOME: str = 'Welcome to VyOS installation!\nThis command will install VyOS to your permanent storage.' 
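# Illustrative use of the mismatch templates above (hypothetical values): the
# placeholders are filled by validate_compatibility() with the running image's
# value first and the new image's value second.
example = MSG_ERR_ARCHITECTURE_MISMATCH.format('amd64', 'arm64')
# example == 'The current architecture is "amd64", the new image is for "arm64". '
#            'Upgrading to a different image architecture will break your system.'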
MSG_INFO_INSTALL_EXIT: str = 'Exiting from VyOS installation' MSG_INFO_INSTALL_SUCCESS: str = 'The image installed successfully; please reboot now.' @@ -63,6 +84,7 @@ MSG_INPUT_CONFIG_FOUND: str = 'An active configuration was found. Would you like MSG_INPUT_CONFIG_CHOICE: str = 'The following config files are available for boot:' MSG_INPUT_CONFIG_CHOOSE: str = 'Which file would you like as boot config?' MSG_INPUT_IMAGE_NAME: str = 'What would you like to name this image?' +MSG_INPUT_IMAGE_NAME_TAKEN: str = 'There is already an installed image by that name; please choose again' MSG_INPUT_IMAGE_DEFAULT: str = 'Would you like to set the new image as the default one for boot?' MSG_INPUT_PASSWORD: str = 'Please enter a password for the "vyos" user:' MSG_INPUT_PASSWORD_CONFIRM: str = 'Please confirm password for the "vyos" user:' @@ -79,8 +101,10 @@ MSG_WARN_ROOT_SIZE_TOOBIG: str = 'The size is too big. Try again.' MSG_WARN_ROOT_SIZE_TOOSMALL: str = 'The size is too small. Try again' MSG_WARN_IMAGE_NAME_WRONG: str = 'The suggested name is unsupported!\n'\ 'It must be between 1 and 64 characters long and contains only the next characters: .+-_ a-z A-Z 0-9' + +MSG_WARN_CHANGE_PASSWORD: str = 'Default password used. Consider changing ' \ + 'it on next login.' MSG_WARN_PASSWORD_CONFIRM: str = 'The entered values did not match. Try again' -MSG_WARN_FLAVOR_MISMATCH: str = 'The running image flavor is "{0}". The new image flavor is "{1}".\n' \ 'Installing a different image flavor may cause functionality degradation or break your system.\n' \ 'Do you want to continue with installation?' CONST_MIN_DISK_SIZE: int = 2147483648 # 2 GB @@ -96,9 +120,10 @@ DIR_ISO_MOUNT: str = f'{DIR_INSTALLATION}/iso_src' DIR_DST_ROOT: str = f'{DIR_INSTALLATION}/disk_dst' DIR_KERNEL_SRC: str = '/boot/' FILE_ROOTFS_SRC: str = '/usr/lib/live/mount/medium/live/filesystem.squashfs' -ISO_DOWNLOAD_PATH: str = '/tmp/vyos_installation.iso' +ISO_DOWNLOAD_PATH: str = '' external_download_script = '/usr/libexec/vyos/simple-download.py' +external_latest_image_url_script = '/usr/libexec/vyos/latest-image-url.py' # default boot variables DEFAULT_BOOT_VARS: dict[str, str] = { @@ -462,6 +487,29 @@ def setup_grub(root_dir: str) -> None: render(grub_cfg_menu, grub.TMPL_GRUB_MENU, {}) render(grub_cfg_options, grub.TMPL_GRUB_OPTS, {}) +def get_cli_kernel_options(config_file: str) -> list: + config = ConfigTree(read_file(config_file)) + config_dict = loads(config.to_json()) + kernel_options = dict_search('system.option.kernel', config_dict) + if kernel_options is None: + kernel_options = {} + cmdline_options = [] + + # XXX: This code path and if statements must be kept in sync with the Kernel + # option handling in system_options.py:generate(). This occurance is used + # for having the appropriate options passed to GRUB after an image upgrade! 
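# Illustration, assuming a configuration with `system option kernel
# disable-mitigations` and `system option kernel quiet` set: the checks below
# collect ['mitigations=off', 'quiet'], which add_image() later joins with
# spaces and hands to grub_util.update_kernel_cmdline_options() so the freshly
# added image boots with the same kernel command line tweaks.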
+ if 'disable-mitigations' in kernel_options: + cmdline_options.append('mitigations=off') + if 'disable-power-saving' in kernel_options: + cmdline_options.append('intel_idle.max_cstate=0 processor.max_cstate=1') + if 'amd-pstate-driver' in kernel_options: + mode = kernel_options['amd-pstate-driver'] + cmdline_options.append( + f'initcall_blacklist=acpi_cpufreq_init amd_pstate={mode}') + if 'quiet' in kernel_options: + cmdline_options.append('quiet') + + return cmdline_options def configure_authentication(config_file: str, password: str) -> None: """Write encrypted password to config file @@ -476,10 +524,7 @@ def configure_authentication(config_file: str, password: str) -> None: plaintext exposed """ encrypted_password = linux_context.hash(password) - - with open(config_file) as f: - config_string = f.read() - + config_string = read_file(config_file) config = ConfigTree(config_string) config.set([ 'system', 'login', 'user', 'vyos', 'authentication', @@ -501,7 +546,6 @@ def validate_signature(file_path: str, sign_type: str) -> None: """ print('Validating signature') signature_valid: bool = False - # validate with minisig if sign_type == 'minisig': pub_key_list = glob('/usr/share/vyos/keys/*.minisign.pub') for pubkey in pub_key_list: @@ -510,11 +554,8 @@ def validate_signature(file_path: str, sign_type: str) -> None: signature_valid = True break Path(f'{file_path}.minisig').unlink() - # validate with GPG - if sign_type == 'asc': - if run(f'gpg --verify ${file_path}.asc ${file_path}') == 0: - signature_valid = True - Path(f'{file_path}.asc').unlink() + else: + exit(MSG_ERR_UNSUPPORTED_SIGNATURE_TYPE) # warn or pass if not signature_valid: @@ -524,21 +565,18 @@ def validate_signature(file_path: str, sign_type: str) -> None: print('Signature is valid') def download_file(local_file: str, remote_path: str, vrf: str, - username: str, password: str, progressbar: bool = False, check_space: bool = False): - environ['REMOTE_USERNAME'] = username - environ['REMOTE_PASSWORD'] = password + # Server credentials are implicitly passed in environment variables + # that are set by add_image if vrf is None: download(local_file, remote_path, progressbar=progressbar, check_space=check_space, raise_error=True) else: - vrf_cmd = f'REMOTE_USERNAME={username} REMOTE_PASSWORD={password} \ - ip vrf exec {vrf} {external_download_script} \ - --local-file {local_file} --remote-path {remote_path}' - cmd(vrf_cmd) + vrf_cmd = f'ip vrf exec {vrf} {external_download_script} \ + --local-file {local_file} --remote-path {remote_path}' + cmd(vrf_cmd, env=environ) def image_fetch(image_path: str, vrf: str = None, - username: str = '', password: str = '', no_prompt: bool = False) -> Path: """Fetch an ISO image @@ -548,34 +586,44 @@ def image_fetch(image_path: str, vrf: str = None, Returns: Path: a path to a local file """ + import os.path + from uuid import uuid4 + + global ISO_DOWNLOAD_PATH + # Latest version gets url from configured "system update-check url" if image_path == 'latest': - config = ConfigTreeQuery() - if config.exists('system update-check url'): - configured_url_version = config.value('system update-check url') - remote_url_list = get_remote_version(configured_url_version) - image_path = remote_url_list[0].get('url') + command = external_latest_image_url_script + if vrf: + command = f'ip vrf exec {vrf} {command}' + code, output = rc_cmd(command, env=environ) + if code: + print(output) + exit(MSG_INFO_INSTALL_EXIT) + image_path = output if output else image_path try: # check a type of path if 
urlparse(image_path).scheme: - # download an image + # Download the image file + ISO_DOWNLOAD_PATH = os.path.join(os.path.expanduser("~"), '{0}.iso'.format(uuid4())) download_file(ISO_DOWNLOAD_PATH, image_path, vrf, - username, password, progressbar=True, check_space=True) - # download a signature + # Download the image signature + # VyOS only supports minisign signatures at the moment, + # but we keep the logic for multiple signatures + # in case we add something new in the future sign_file = (False, '') - for sign_type in ['minisig', 'asc']: + for sign_type in ['minisig']: try: download_file(f'{ISO_DOWNLOAD_PATH}.{sign_type}', - f'{image_path}.{sign_type}', vrf, - username, password) + f'{image_path}.{sign_type}', vrf) sign_file = (True, sign_type) break except Exception: - print(f'{sign_type} signature is not available') - # validate a signature if it is available + print(f'Could not download {sign_type} signature') + # Validate the signature if it is available if sign_file[0]: validate_signature(ISO_DOWNLOAD_PATH, sign_file[1]) else: @@ -697,30 +745,48 @@ def is_raid_install(install_object: Union[disk.DiskDetails, raid.RaidDetails]) - return False -def validate_compatibility(iso_path: str) -> None: +def validate_compatibility(iso_path: str, force: bool = False) -> None: """Check architecture and flavor compatibility with the running image Args: iso_path (str): a path to the mounted ISO image """ - old_data = get_version_data() - old_flavor = old_data.get('flavor', '') - old_architecture = old_data.get('architecture') or cmd('dpkg --print-architecture') + current_data = get_version_data() + current_flavor = current_data.get('flavor') + current_architecture = current_data.get('architecture') or cmd('dpkg --print-architecture') new_data = get_version_data(f'{iso_path}/version.json') - new_flavor = new_data.get('flavor', '') - new_architecture = new_data.get('architecture', '') + new_flavor = new_data.get('flavor') + new_architecture = new_data.get('architecture') - if not old_architecture == new_architecture: - print(MSG_ERR_ARCHITECTURE_MISMATCH) + if not current_flavor or not current_architecture: + # This may only happen if someone modified the version file. + # Unlikely but not impossible. 
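# For reference, a well-formed version.json entry read by get_version_data()
# looks roughly like (hypothetical values):
#   {"version": "...", "architecture": "amd64", "flavor": "generic"}
# with both fields normally populated at image build time.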
+ print(MSG_ERR_CORRUPT_CURRENT_IMAGE) cleanup() exit(MSG_INFO_INSTALL_EXIT) - if not old_flavor == new_flavor: - if not ask_yes_no(MSG_WARN_FLAVOR_MISMATCH.format(old_flavor, new_flavor), default=False): - cleanup() - exit(MSG_INFO_INSTALL_EXIT) + success = True + if current_architecture != new_architecture: + success = False + if not new_architecture: + print(MSG_ERR_MISSING_ARCHITECTURE) + else: + print(MSG_ERR_ARCHITECTURE_MISMATCH.format(current_architecture, new_architecture)) + + if current_flavor != new_flavor: + if not force: + success = False + if not new_flavor: + print(MSG_ERR_MISSING_FLAVOR) + else: + print(MSG_ERR_FLAVOR_MISMATCH.format(current_flavor, new_flavor)) + + if not success: + print(MSG_ERR_INCOMPATIBLE_IMAGE) + cleanup() + exit(MSG_INFO_INSTALL_EXIT) def install_image() -> None: """Install an image to a disk @@ -742,14 +808,25 @@ def install_image() -> None: break print(MSG_WARN_IMAGE_NAME_WRONG) + failed_check_status = [EPasswdStrength.WEAK, EPasswdStrength.ERROR] # ask for password while True: user_password: str = ask_input(MSG_INPUT_PASSWORD, no_echo=True, non_empty=True) + + if user_password == DEFAULT_PASSWORD: + Warning(MSG_WARN_CHANGE_PASSWORD) + else: + result = evaluate_strength(user_password) + if result['strength'] in failed_check_status: + Warning(result['error']) + confirm: str = ask_input(MSG_INPUT_PASSWORD_CONFIRM, no_echo=True, non_empty=True) + if user_password == confirm: break + print(MSG_WARN_PASSWORD_CONFIRM) # ask for default console @@ -845,8 +922,7 @@ def install_image() -> None: for disk_target in l: disk.partition_mount(disk_target.partition['efi'], f'{DIR_DST_ROOT}/boot/efi') grub.install(disk_target.name, f'{DIR_DST_ROOT}/boot/', - f'{DIR_DST_ROOT}/boot/efi', - id=f'VyOS (RAID disk {l.index(disk_target) + 1})') + f'{DIR_DST_ROOT}/boot/efi') disk.partition_umount(disk_target.partition['efi']) else: print('Installing GRUB to the drive') @@ -889,7 +965,7 @@ def install_image() -> None: @compat.grub_cfg_update def add_image(image_path: str, vrf: str = None, username: str = '', - password: str = '', no_prompt: bool = False) -> None: + password: str = '', no_prompt: bool = False, force: bool = False) -> None: """Add a new image Args: @@ -898,15 +974,18 @@ def add_image(image_path: str, vrf: str = None, username: str = '', if image.is_live_boot(): exit(MSG_ERR_LIVE) + environ['REMOTE_USERNAME'] = username + environ['REMOTE_PASSWORD'] = password + # fetch an image - iso_path: Path = image_fetch(image_path, vrf, username, password, no_prompt) + iso_path: Path = image_fetch(image_path, vrf, no_prompt) try: # mount an ISO Path(DIR_ISO_MOUNT).mkdir(mode=0o755, parents=True) disk.partition_mount(iso_path, DIR_ISO_MOUNT, 'iso9660') print('Validating image compatibility') - validate_compatibility(DIR_ISO_MOUNT) + validate_compatibility(DIR_ISO_MOUNT, force=force) # check sums print('Validating image checksums') @@ -932,8 +1011,12 @@ def add_image(image_path: str, vrf: str = None, username: str = '', f'Adding image would downgrade image tools to v.{cfg_ver}; disallowed') if not no_prompt: + versions = grub.version_list() while True: image_name: str = ask_input(MSG_INPUT_IMAGE_NAME, version_name) + if image_name in versions: + print(MSG_INPUT_IMAGE_NAME_TAKEN) + continue if image.validate_name(image_name): break print(MSG_WARN_IMAGE_NAME_WRONG) @@ -955,7 +1038,7 @@ def add_image(image_path: str, vrf: str = None, username: str = '', Path(target_config_dir).mkdir(parents=True) chown(target_config_dir, group='vyattacfg') chmod_2775(target_config_dir) - 
copytree('/opt/vyatta/etc/config/', target_config_dir, + copytree('/opt/vyatta/etc/config/', target_config_dir, symlinks=True, copy_function=copy_preserve_owner, dirs_exist_ok=True) else: Path(target_config_dir).mkdir(parents=True) @@ -988,6 +1071,12 @@ def add_image(image_path: str, vrf: str = None, username: str = '', if set_as_default: grub.set_default(image_name, root_dir) + cmdline_options = get_cli_kernel_options( + f'{target_config_dir}/config.boot') + grub_util.update_kernel_cmdline_options(' '.join(cmdline_options), + root_dir=root_dir, + version=image_name) + except OSError as e: # if no space error, remove image dir and cleanup if e.errno == ENOSPC: @@ -1027,6 +1116,9 @@ def parse_arguments() -> Namespace: parser.add_argument('--image-path', help='a path (HTTP or local file) to an image that needs to be installed' ) + parser.add_argument('--force', action='store_true', + help='Ignore flavor compatibility requirements.' + ) # parser.add_argument('--image_new_name', help='a new name for image') args: Namespace = parser.parse_args() # Validate arguments @@ -1043,7 +1135,8 @@ if __name__ == '__main__': install_image() if args.action == 'add': add_image(args.image_path, args.vrf, - args.username, args.password, args.no_prompt) + args.username, args.password, + args.no_prompt, args.force) exit() diff --git a/src/op_mode/interfaces.py b/src/op_mode/interfaces.py index e7afc4caa..c97f3b129 100755 --- a/src/op_mode/interfaces.py +++ b/src/op_mode/interfaces.py @@ -29,6 +29,7 @@ from vyos.ifconfig import Section from vyos.ifconfig import Interface from vyos.ifconfig import VRRP from vyos.utils.process import cmd +from vyos.utils.network import interface_exists from vyos.utils.process import rc_cmd from vyos.utils.process import call @@ -84,6 +85,14 @@ def filtered_interfaces(ifnames: typing.Union[str, list], yield interface +def detailed_output(dataset, headers): + for data in dataset: + adjusted_rule = data + [""] * (len(headers) - len(data)) # account for different header length, like default-action + transformed_rule = [[header, adjusted_rule[i]] for i, header in enumerate(headers) if i < len(adjusted_rule)] # create key-pair list from headers and rules lists; wrap at 100 char + + print(tabulate(transformed_rule, tablefmt="presto")) + print() + def _split_text(text, used=0): """ take a string and attempt to split it to fit with the width of the screen @@ -296,6 +305,114 @@ def _get_counter_data(ifname: typing.Optional[str], return ret +def _get_kernel_data(raw, ifname = None, detail = False): + if ifname: + # Check if the interface exists + if not interface_exists(ifname): + raise vyos.opmode.IncorrectValue(f"{ifname} does not exist!") + int_name = f'dev {ifname}' + else: + int_name = '' + + kernel_interface = json.loads(cmd(f'ip -j -d -s address show {int_name}')) + + # Return early if raw + if raw: + return kernel_interface, None + + # Format the kernel data + kernel_interface_out = _format_kernel_data(kernel_interface, detail) + + return kernel_interface, kernel_interface_out + +def _format_kernel_data(data, detail): + output_list = [] + tmpInfo = {} + + # Sort interfaces by name + for interface in sorted(data, key=lambda x: x.get('ifname', '')): + if interface.get('linkinfo', {}).get('info_kind') == 'vrf': + continue + + # Get the device model; ex. 
Intel Corporation Ethernet Controller I225-V + dev_model = interface.get('parentdev', '') + if 'parentdev' in interface: + parentdev = interface['parentdev'] + if re.match(r'^[0-9a-fA-F]{4}:', parentdev): + dev_model = cmd(f'lspci -nn -s {parentdev}').split(']:')[1].strip() + + # Get the IP addresses on interface + ip_list = [] + has_global = False + + for ip in interface['addr_info']: + if ip.get('scope') in ('global', 'host'): + has_global = True + local = ip.get('local', '-') + prefixlen = ip.get('prefixlen', '') + ip_list.append(f"{local}/{prefixlen}") + + + # If no global IP address, add '-'; indicates no IP address on interface + if not has_global: + ip_list.append('-') + + sl_status = ('A' if not 'UP' in interface['flags'] else 'u') + '/' + ('D' if interface['operstate'] == 'DOWN' else 'u') + + # Generate temporary dict to hold data + tmpInfo['ifname'] = interface.get('ifname', '') + tmpInfo['ip'] = ip_list + tmpInfo['mac'] = interface.get('address', '') + tmpInfo['mtu'] = interface.get('mtu', '') + tmpInfo['vrf'] = interface.get('master', 'default') + tmpInfo['status'] = sl_status + tmpInfo['description'] = interface.get('ifalias', '') + tmpInfo['device'] = dev_model + tmpInfo['alternate_names'] = interface.get('altnames', '') + tmpInfo['minimum_mtu'] = interface.get('min_mtu', '') + tmpInfo['maximum_mtu'] = interface.get('max_mtu', '') + rx_stats = interface.get('stats64', {}).get('rx') + tx_stats = interface.get('stats64', {}).get('tx') + tmpInfo['rx_packets'] = rx_stats.get('packets', "") + tmpInfo['rx_bytes'] = rx_stats.get('bytes', "") + tmpInfo['rx_errors'] = rx_stats.get('errors', "") + tmpInfo['rx_dropped'] = rx_stats.get('dropped', "") + tmpInfo['rx_over_errors'] = rx_stats.get('over_errors', '') + tmpInfo['multicast'] = rx_stats.get('multicast', "") + tmpInfo['tx_packets'] = tx_stats.get('packets', "") + tmpInfo['tx_bytes'] = tx_stats.get('bytes', "") + tmpInfo['tx_errors'] = tx_stats.get('errors', "") + tmpInfo['tx_dropped'] = tx_stats.get('dropped', "") + tmpInfo['tx_carrier_errors'] = tx_stats.get('carrier_errors', "") + tmpInfo['tx_collisions'] = tx_stats.get('collisions', "") + + # Generate output list; detail adds more fields + output_list.append([tmpInfo['ifname'], + '\n'.join(tmpInfo['ip']), + tmpInfo['mac'], + tmpInfo['vrf'], + tmpInfo['mtu'], + tmpInfo['status'], + tmpInfo['description'], + *([tmpInfo['device']] if detail else []), + *(['\n'.join(tmpInfo['alternate_names'])] if detail else []), + *([tmpInfo['minimum_mtu']] if detail else []), + *([tmpInfo['maximum_mtu']] if detail else []), + *([tmpInfo['rx_packets']] if detail else []), + *([tmpInfo['rx_bytes']] if detail else []), + *([tmpInfo['rx_errors']] if detail else []), + *([tmpInfo['rx_dropped']] if detail else []), + *([tmpInfo['rx_over_errors']] if detail else []), + *([tmpInfo['multicast']] if detail else []), + *([tmpInfo['tx_packets']] if detail else []), + *([tmpInfo['tx_bytes']] if detail else []), + *([tmpInfo['tx_errors']] if detail else []), + *([tmpInfo['tx_dropped']] if detail else []), + *([tmpInfo['tx_carrier_errors']] if detail else []), + *([tmpInfo['tx_collisions']] if detail else [])]) + + return output_list + @catch_broken_pipe def _format_show_data(data: list): unhandled = [] @@ -445,6 +562,27 @@ def _format_show_counters(data: list): print (output) return output +def show_kernel(raw: bool, intf_name: typing.Optional[str], detail: bool): + raw_data, data = _get_kernel_data(raw, intf_name, detail) + + # Return early if raw + if raw: + return raw_data + + # Normal headers; show 
interfaces kernel + headers = ['Interface', 'IP Address', 'MAC', 'VRF', 'MTU', 'S/L', 'Description'] + + # Detail headers; show interfaces kernel detail + detail_header = ['Interface', 'IP Address', 'MAC', 'VRF', 'MTU', 'S/L', 'Description', + 'Device', 'Alternate Names','Minimum MTU', 'Maximum MTU', 'RX_Packets', + 'RX_Bytes', 'RX_Errors', 'RX_Dropped', 'Receive Overrun Errors', 'Received Multicast', + 'TX_Packets', 'TX_Bytes', 'TX_Errors', 'TX_Dropped', 'Transmit Carrier Errors', + 'Transmit Collisions'] + + if detail: + detailed_output(data, detail_header) + else: + print(tabulate(data, headers)) def _show_raw(data: list, intf_name: str): if intf_name is not None and len(data) <= 1: diff --git a/src/op_mode/interfaces_wireguard.py b/src/op_mode/interfaces_wireguard.py new file mode 100644 index 000000000..627af0579 --- /dev/null +++ b/src/op_mode/interfaces_wireguard.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import sys +import vyos.opmode + +from vyos.ifconfig import WireGuardIf +from vyos.configquery import ConfigTreeQuery + + +def _verify(func): + """Decorator checks if WireGuard interface config exists""" + from functools import wraps + + @wraps(func) + def _wrapper(*args, **kwargs): + config = ConfigTreeQuery() + interface = kwargs.get('intf_name') + if not config.exists(['interfaces', 'wireguard', interface]): + unconf_message = f'WireGuard interface {interface} is not configured' + raise vyos.opmode.UnconfiguredSubsystem(unconf_message) + return func(*args, **kwargs) + + return _wrapper + + +@_verify +def show_summary(raw: bool, intf_name: str): + intf = WireGuardIf(intf_name, create=False, debug=False) + return intf.operational.show_interface() + + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/op_mode/ipsec.py b/src/op_mode/ipsec.py index 02ba126b4..1ab50b105 100755 --- a/src/op_mode/ipsec.py +++ b/src/op_mode/ipsec.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2022-2024 VyOS maintainers and contributors +# Copyright (C) 2022-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -700,15 +700,6 @@ def reset_profile_dst(profile: str, tunnel: str, nbma_dst: str): ] ) ) - # initiate IKE SAs - for ike in sa_nbma_list: - if ike_sa_name in ike: - vyos.ipsec.vici_initiate( - ike_sa_name, - 'dmvpn', - ike[ike_sa_name]['local-host'], - ike[ike_sa_name]['remote-host'], - ) print( f'Profile {profile} tunnel {tunnel} remote-host {nbma_dst} reset result: success' ) @@ -732,18 +723,6 @@ def reset_profile_all(profile: str, tunnel: str): ) # terminate IKE SAs vyos.ipsec.terminate_vici_by_name(ike_sa_name, None) - # initiate IKE SAs - for ike in sa_list: - if 
ike_sa_name in ike: - vyos.ipsec.vici_initiate( - ike_sa_name, - 'dmvpn', - ike[ike_sa_name]['local-host'], - ike[ike_sa_name]['remote-host'], - ) - print( - f'Profile {profile} tunnel {tunnel} remote-host {ike[ike_sa_name]["remote-host"]} reset result: success' - ) print(f'Profile {profile} tunnel {tunnel} reset result: success') except vyos.ipsec.ViciInitiateError as err: raise vyos.opmode.UnconfiguredSubsystem(err) diff --git a/src/op_mode/reverseproxy.py b/src/op_mode/load-balancing_haproxy.py index 19704182a..ae6734e16 100755 --- a/src/op_mode/reverseproxy.py +++ b/src/op_mode/load-balancing_haproxy.py @@ -217,8 +217,8 @@ def _get_formatted_output(data): def show(raw: bool): config = ConfigTreeQuery() - if not config.exists('load-balancing reverse-proxy'): - raise vyos.opmode.UnconfiguredSubsystem('Reverse-proxy is not configured') + if not config.exists('load-balancing haproxy'): + raise vyos.opmode.UnconfiguredSubsystem('Haproxy is not configured') data = _get_raw_data() if raw: diff --git a/src/op_mode/load-balancing_wan.py b/src/op_mode/load-balancing_wan.py new file mode 100755 index 000000000..9fa473802 --- /dev/null +++ b/src/op_mode/load-balancing_wan.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
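# _get_raw_data() below parses /run/wlb_status.json, which is keyed by
# interface name; a hypothetical entry:
#   {"eth0": {"state": true, "last_success": 1718000000,
#             "last_failure": 0, "failure_count": 0}}
# show_summary() maps "state" to active/failed, prints the newer of
# last_success/last_failure as an absolute timestamp and the individual
# success/failure times as deltas from the current time.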
+ +import json +import re +import sys + +from datetime import datetime + +from vyos.config import Config +from vyos.utils.process import cmd + +import vyos.opmode + +wlb_status_file = '/run/wlb_status.json' + +status_format = '''Interface: {ifname} +Status: {status} +Last Status Change: {last_change} +Last Interface Success: {last_success} +Last Interface Failure: {last_failure} +Interface Failures: {failures} +''' + +def _verify(func): + """Decorator checks if WLB config exists""" + from functools import wraps + + @wraps(func) + def _wrapper(*args, **kwargs): + config = Config() + if not config.exists(['load-balancing', 'wan']): + unconf_message = 'WAN load-balancing is not configured' + raise vyos.opmode.UnconfiguredSubsystem(unconf_message) + return func(*args, **kwargs) + return _wrapper + +def _get_raw_data(): + with open(wlb_status_file, 'r') as f: + data = json.loads(f.read()) + if not data: + return {} + return data + +def _get_formatted_output(raw_data): + for ifname, if_data in raw_data.items(): + latest_change = if_data['last_success'] if if_data['last_success'] > if_data['last_failure'] else if_data['last_failure'] + + change_dt = datetime.fromtimestamp(latest_change) if latest_change > 0 else None + success_dt = datetime.fromtimestamp(if_data['last_success']) if if_data['last_success'] > 0 else None + failure_dt = datetime.fromtimestamp(if_data['last_failure']) if if_data['last_failure'] > 0 else None + now = datetime.utcnow() + + fmt_data = { + 'ifname': ifname, + 'status': "active" if if_data['state'] else "failed", + 'last_change': change_dt.strftime("%Y-%m-%d %H:%M:%S") if change_dt else 'N/A', + 'last_success': str(now - success_dt) if success_dt else 'N/A', + 'last_failure': str(now - failure_dt) if failure_dt else 'N/A', + 'failures': if_data['failure_count'] + } + print(status_format.format(**fmt_data)) + +@_verify +def show_summary(raw: bool): + data = _get_raw_data() + + if raw: + return data + else: + return _get_formatted_output(data) + +@_verify +def show_connection(raw: bool): + res = cmd('sudo conntrack -L -n') + lines = res.split("\n") + filtered_lines = [line for line in lines if re.search(r' mark=[1-9]', line)] + + if raw: + return filtered_lines + + for line in lines: + print(line) + +@_verify +def show_status(raw: bool): + res = cmd('sudo nft list chain ip vyos_wanloadbalance wlb_mangle_prerouting') + lines = res.split("\n") + filtered_lines = [line.replace("\t", "") for line in lines[3:-2] if 'meta mark set' not in line] + + if raw: + return filtered_lines + + for line in filtered_lines: + print(line) + +if __name__ == "__main__": + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/op_mode/mtr.py b/src/op_mode/mtr.py index de139f2fa..522cbe008 100644 --- a/src/op_mode/mtr.py +++ b/src/op_mode/mtr.py @@ -23,161 +23,162 @@ from vyos.utils.network import vrf_list from vyos.utils.process import call options = { - 'report': { + 'report-mode': { 'mtr': '{command} --report', 'type': 'noarg', - 'help': 'This option puts mtr into report mode. When in this mode, mtr will run for the number of cycles specified by the -c option, and then print statistics and exit.' + 'help': 'This option puts mtr into report mode. 
When in this mode, mtr will run for the number of cycles specified by the -c option, and then print statistics and exit.', }, 'report-wide': { 'mtr': '{command} --report-wide', 'type': 'noarg', - 'help': 'This option puts mtr into wide report mode. When in this mode, mtr will not cut hostnames in the report.' + 'help': 'This option puts mtr into wide report mode. When in this mode, mtr will not cut hostnames in the report.', }, 'raw': { 'mtr': '{command} --raw', 'type': 'noarg', - 'help': 'Use the raw output format. This format is better suited for archival of the measurement results.' + 'help': 'Use the raw output format. This format is better suited for archival of the measurement results.', }, 'json': { 'mtr': '{command} --json', 'type': 'noarg', - 'help': 'Use this option to tell mtr to use the JSON output format.' + 'help': 'Use this option to tell mtr to use the JSON output format.', }, 'split': { 'mtr': '{command} --split', 'type': 'noarg', - 'help': 'Use this option to set mtr to spit out a format that is suitable for a split-user interface.' + 'help': 'Use this option to set mtr to spit out a format that is suitable for a split-user interface.', }, 'no-dns': { 'mtr': '{command} --no-dns', 'type': 'noarg', - 'help': 'Use this option to force mtr to display numeric IP numbers and not try to resolve the host names.' + 'help': 'Use this option to force mtr to display numeric IP numbers and not try to resolve the host names.', }, 'show-ips': { 'mtr': '{command} --show-ips {value}', 'type': '<num>', - 'help': 'Use this option to tell mtr to display both the host names and numeric IP numbers.' + 'help': 'Use this option to tell mtr to display both the host names and numeric IP numbers.', }, 'ipinfo': { 'mtr': '{command} --ipinfo {value}', 'type': '<num>', - 'help': 'Displays information about each IP hop.' + 'help': 'Displays information about each IP hop.', }, 'aslookup': { 'mtr': '{command} --aslookup', 'type': 'noarg', - 'help': 'Displays the Autonomous System (AS) number alongside each hop. Equivalent to --ipinfo 0.' + 'help': 'Displays the Autonomous System (AS) number alongside each hop. Equivalent to --ipinfo 0.', }, 'interval': { 'mtr': '{command} --interval {value}', 'type': '<num>', - 'help': 'Use this option to specify the positive number of seconds between ICMP ECHO requests. The default value for this parameter is one second. The root user may choose values between zero and one.' + 'help': 'Use this option to specify the positive number of seconds between ICMP ECHO requests. The default value for this parameter is one second. The root user may choose values between zero and one.', }, 'report-cycles': { 'mtr': '{command} --report-cycles {value}', 'type': '<num>', - 'help': 'Use this option to set the number of pings sent to determine both the machines on the network and the reliability of those machines. Each cycle lasts one second.' + 'help': 'Use this option to set the number of pings sent to determine both the machines on the network and the reliability of those machines. Each cycle lasts one second.', }, 'psize': { 'mtr': '{command} --psize {value}', 'type': '<num>', - 'help': 'This option sets the packet size used for probing. It is in bytes, inclusive IP and ICMP headers. If set to a negative number, every iteration will use a different, random packet size up to that number.' + 'help': 'This option sets the packet size used for probing. It is in bytes, inclusive IP and ICMP headers. 
If set to a negative number, every iteration will use a different, random packet size up to that number.', }, 'bitpattern': { 'mtr': '{command} --bitpattern {value}', 'type': '<num>', - 'help': 'Specifies bit pattern to use in payload. Should be within range 0 - 255. If NUM is greater than 255, a random pattern is used.' + 'help': 'Specifies bit pattern to use in payload. Should be within range 0 - 255. If NUM is greater than 255, a random pattern is used.', }, 'gracetime': { 'mtr': '{command} --gracetime {value}', 'type': '<num>', - 'help': 'Use this option to specify the positive number of seconds to wait for responses after the final request. The default value is five seconds.' + 'help': 'Use this option to specify the positive number of seconds to wait for responses after the final request. The default value is five seconds.', }, 'tos': { 'mtr': '{command} --tos {value}', 'type': '<tos>', - 'help': 'Specifies value for type of service field in IP header. Should be within range 0 - 255.' + 'help': 'Specifies value for type of service field in IP header. Should be within range 0 - 255.', }, 'mpls': { 'mtr': '{command} --mpls {value}', 'type': 'noarg', - 'help': 'Use this option to tell mtr to display information from ICMP extensions for MPLS (RFC 4950) that are encoded in the response packets.' + 'help': 'Use this option to tell mtr to display information from ICMP extensions for MPLS (RFC 4950) that are encoded in the response packets.', }, 'interface': { 'mtr': '{command} --interface {value}', 'type': '<interface>', 'helpfunction': interface_list, - 'help': 'Use the network interface with a specific name for sending network probes. This can be useful when you have multiple network interfaces with routes to your destination, for example both wired Ethernet and WiFi, and wish to test a particular interface.' + 'help': 'Use the network interface with a specific name for sending network probes. This can be useful when you have multiple network interfaces with routes to your destination, for example both wired Ethernet and WiFi, and wish to test a particular interface.', }, 'address': { 'mtr': '{command} --address {value}', 'type': '<x.x.x.x> <h:h:h:h:h:h:h:h>', - 'help': 'Use this option to bind the outgoing socket to ADDRESS, so that all packets will be sent with ADDRESS as source address.' + 'help': 'Use this option to bind the outgoing socket to ADDRESS, so that all packets will be sent with ADDRESS as source address.', }, 'first-ttl': { 'mtr': '{command} --first-ttl {value}', 'type': '<num>', - 'help': 'Specifies with what TTL to start. Defaults to 1.' + 'help': 'Specifies with what TTL to start. Defaults to 1.', }, 'max-ttl': { 'mtr': '{command} --max-ttl {value}', 'type': '<num>', - 'help': 'Specifies the maximum number of hops or max time-to-live value mtr will probe. Default is 30.' + 'help': 'Specifies the maximum number of hops or max time-to-live value mtr will probe. Default is 30.', }, 'max-unknown': { 'mtr': '{command} --max-unknown {value}', 'type': '<num>', - 'help': 'Specifies the maximum unknown host. Default is 5.' + 'help': 'Specifies the maximum unknown host. Default is 5.', }, 'udp': { 'mtr': '{command} --udp', 'type': 'noarg', - 'help': 'Use UDP datagrams instead of ICMP ECHO.' + 'help': 'Use UDP datagrams instead of ICMP ECHO.', }, 'tcp': { 'mtr': '{command} --tcp', 'type': 'noarg', - 'help': ' Use TCP SYN packets instead of ICMP ECHO. PACKETSIZE is ignored, since SYN packets can not contain data.' + 'help': ' Use TCP SYN packets instead of ICMP ECHO. 
PACKETSIZE is ignored, since SYN packets can not contain data.', }, 'sctp': { 'mtr': '{command} --sctp', 'type': 'noarg', - 'help': 'Use Stream Control Transmission Protocol packets instead of ICMP ECHO.' + 'help': 'Use Stream Control Transmission Protocol packets instead of ICMP ECHO.', }, 'port': { 'mtr': '{command} --port {value}', 'type': '<port>', - 'help': 'The target port number for TCP/SCTP/UDP traces.' + 'help': 'The target port number for TCP/SCTP/UDP traces.', }, 'localport': { 'mtr': '{command} --localport {value}', 'type': '<port>', - 'help': 'The source port number for UDP traces.' + 'help': 'The source port number for UDP traces.', }, 'timeout': { 'mtr': '{command} --timeout {value}', 'type': '<num>', - 'help': ' The number of seconds to keep probe sockets open before giving up on the connection.' + 'help': ' The number of seconds to keep probe sockets open before giving up on the connection.', }, 'mark': { 'mtr': '{command} --mark {value}', 'type': '<num>', - 'help': ' Set the mark for each packet sent through this socket similar to the netfilter MARK target but socket-based. MARK is 32 unsigned integer.' + 'help': ' Set the mark for each packet sent through this socket similar to the netfilter MARK target but socket-based. MARK is 32 unsigned integer.', }, 'vrf': { 'mtr': 'sudo ip vrf exec {value} {command}', 'type': '<vrf>', 'help': 'Use specified VRF table', 'helpfunction': vrf_list, - 'dflt': 'default' - } - } + 'dflt': 'default', + }, +} mtr = { 4: '/bin/mtr -4', 6: '/bin/mtr -6', } + class List(list): def first(self): return self.pop(0) if self else '' @@ -203,8 +204,8 @@ def completion_failure(option: str) -> None: def expension_failure(option, completions): reason = 'Ambiguous' if completions else 'Invalid' sys.stderr.write( - '\n\n {} command: {} [{}]\n\n'.format(reason, ' '.join(sys.argv), - option)) + '\n\n {} command: {} [{}]\n\n'.format(reason, ' '.join(sys.argv), option) + ) if completions: sys.stderr.write(' Possible completions:\n ') sys.stderr.write('\n '.join(completions)) @@ -218,21 +219,24 @@ def complete(prefix): def convert(command, args): + to_json = False while args: shortname = args.first() longnames = complete(shortname) if len(longnames) != 1: expension_failure(shortname, longnames) longname = longnames[0] + if longname == 'json': + to_json = True if options[longname]['type'] == 'noarg': - command = options[longname]['mtr'].format( - command=command, value='') + command = options[longname]['mtr'].format(command=command, value='') elif not args: sys.exit(f'mtr: missing argument for {longname} option') else: command = options[longname]['mtr'].format( - command=command, value=args.first()) - return command + command=command, value=args.first() + ) + return command, to_json if __name__ == '__main__': @@ -240,8 +244,7 @@ if __name__ == '__main__': host = args.first() if not host: - sys.exit("mtr: Missing host") - + sys.exit('mtr: Missing host') if host == '--get-options' or host == '--get-options-nested': if host == '--get-options-nested': @@ -302,5 +305,8 @@ if __name__ == '__main__': except ValueError: sys.exit(f'mtr: Unknown host: {host}') - command = convert(mtr[version], args) - call(f'{command} --curses --displaymode 0 {host}') + command, to_json = convert(mtr[version], args) + if to_json: + call(f'{command} {host}') + else: + call(f'{command} --curses --displaymode 0 {host}') diff --git a/src/op_mode/mtr_execute.py b/src/op_mode/mtr_execute.py new file mode 100644 index 000000000..2585a7ee4 --- /dev/null +++ b/src/op_mode/mtr_execute.py @@ -0,0 
+1,217 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import ipaddress +import socket +import sys +import typing + +from json import loads + +from vyos.utils.network import interface_list +from vyos.utils.network import vrf_list +from vyos.utils.process import cmd +from vyos.utils.process import call + +import vyos.opmode + +ArgProtocol = typing.Literal['tcp', 'udp', 'sctp'] +noargs_list = [ + 'report_mode', + 'json', + 'report_wide', + 'split', + 'raw', + 'no_dns', + 'aslookup', +] + + +def vrf_list_default(): + return vrf_list() + ['default'] + + +options = { + 'report_mode': { + 'mtr': '{command} --report', + }, + 'protocol': { + 'mtr': '{command} --{value}', + }, + 'json': { + 'mtr': '{command} --json', + }, + 'report_wide': { + 'mtr': '{command} --report-wide', + }, + 'raw': { + 'mtr': '{command} --raw', + }, + 'split': { + 'mtr': '{command} --split', + }, + 'no_dns': { + 'mtr': '{command} --no-dns', + }, + 'show_ips': { + 'mtr': '{command} --show-ips {value}', + }, + 'ipinfo': { + 'mtr': '{command} --ipinfo {value}', + }, + 'aslookup': { + 'mtr': '{command} --aslookup', + }, + 'interval': { + 'mtr': '{command} --interval {value}', + }, + 'report_cycles': { + 'mtr': '{command} --report-cycles {value}', + }, + 'psize': { + 'mtr': '{command} --psize {value}', + }, + 'bitpattern': { + 'mtr': '{command} --bitpattern {value}', + }, + 'gracetime': { + 'mtr': '{command} --gracetime {value}', + }, + 'tos': { + 'mtr': '{command} --tos {value}', + }, + 'mpls': { + 'mtr': '{command} --mpls {value}', + }, + 'interface': { + 'mtr': '{command} --interface {value}', + 'helpfunction': interface_list, + }, + 'address': { + 'mtr': '{command} --address {value}', + }, + 'first_ttl': { + 'mtr': '{command} --first-ttl {value}', + }, + 'max_ttl': { + 'mtr': '{command} --max-ttl {value}', + }, + 'max_unknown': { + 'mtr': '{command} --max-unknown {value}', + }, + 'port': { + 'mtr': '{command} --port {value}', + }, + 'localport': { + 'mtr': '{command} --localport {value}', + }, + 'timeout': { + 'mtr': '{command} --timeout {value}', + }, + 'mark': { + 'mtr': '{command} --mark {value}', + }, + 'vrf': { + 'mtr': 'sudo ip vrf exec {value} {command}', + 'helpfunction': vrf_list_default, + 'dflt': 'default', + }, +} + +mtr_command = { + 4: '/bin/mtr -4', + 6: '/bin/mtr -6', +} + + +def mtr( + host: str, + for_api: typing.Optional[bool], + report_mode: typing.Optional[bool], + protocol: typing.Optional[ArgProtocol], + report_wide: typing.Optional[bool], + raw: typing.Optional[bool], + json: typing.Optional[bool], + split: typing.Optional[bool], + no_dns: typing.Optional[bool], + show_ips: typing.Optional[str], + ipinfo: typing.Optional[str], + aslookup: typing.Optional[bool], + interval: typing.Optional[str], + report_cycles: typing.Optional[str], + psize: typing.Optional[str], + bitpattern: typing.Optional[str], + gracetime: typing.Optional[str], + tos: typing.Optional[str], + mpl: 
typing.Optional[bool], + interface: typing.Optional[str], + address: typing.Optional[str], + first_ttl: typing.Optional[str], + max_ttl: typing.Optional[str], + max_unknown: typing.Optional[str], + port: typing.Optional[str], + localport: typing.Optional[str], + timeout: typing.Optional[str], + mark: typing.Optional[str], + vrf: typing.Optional[str], +): + args = locals() + for name, option in options.items(): + if 'dflt' in option and not args[name]: + args[name] = option['dflt'] + + try: + ip = socket.gethostbyname(host) + except UnicodeError: + raise vyos.opmode.InternalError(f'Unknown host: {host}') + except socket.gaierror: + ip = host + + try: + version = ipaddress.ip_address(ip).version + except ValueError: + raise vyos.opmode.InternalError(f'Unknown host: {host}') + + command = mtr_command[version] + + for key, val in args.items(): + if key in options and val: + if 'helpfunction' in options[key]: + allowed_values = options[key]['helpfunction']() + if val not in allowed_values: + raise vyos.opmode.InternalError( + f'Invalid argument for option {key} - {val}' + ) + value = '' if key in noargs_list else val + command = options[key]['mtr'].format(command=command, value=val) + + if json: + output = cmd(f'{command} {host}') + if for_api: + output = loads(output) + print(output) + else: + call(f'{command} --curses --displaymode 0 {host}') + + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/op_mode/nhrp.py b/src/op_mode/nhrp.py deleted file mode 100755 index e66f33079..000000000 --- a/src/op_mode/nhrp.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2023 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
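
# A minimal, self-contained sketch (names and sample values are hypothetical,
# not part of this change set) of the option templating that the new
# mtr_execute.py above relies on: each populated keyword argument is looked up
# in the options table and its 'mtr' template is folded into the growing
# command string; options listed in noargs_list carry no {value} placeholder.
def build_mtr_command(base: str, args: dict, options: dict, noargs: list) -> str:
    command = base
    for key, val in args.items():
        if key in options and val:
            value = '' if key in noargs else val
            command = options[key]['mtr'].format(command=command, value=value)
    return command

# Usage sketch:
#   build_mtr_command('/bin/mtr -4',
#                     {'report_cycles': '5', 'no_dns': True},
#                     {'report_cycles': {'mtr': '{command} --report-cycles {value}'},
#                      'no_dns': {'mtr': '{command} --no-dns'}},
#                     ['no_dns'])
# returns '/bin/mtr -4 --report-cycles 5 --no-dns'.
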
- -import sys -import tabulate -import vyos.opmode - -from vyos.utils.process import cmd -from vyos.utils.process import process_named_running -from vyos.utils.dict import colon_separated_to_dict - - -def _get_formatted_output(output_dict: dict) -> str: - """ - Create formatted table for CLI output - :param output_dict: dictionary for API - :type output_dict: dict - :return: tabulate string - :rtype: str - """ - print(f"Status: {output_dict['Status']}") - output: str = tabulate.tabulate(output_dict['routes'], headers='keys', - numalign="left") - return output - - -def _get_formatted_dict(output_string: str) -> dict: - """ - Format string returned from CMD to API list - :param output_string: String received by CMD - :type output_string: str - :return: dictionary for API - :rtype: dict - """ - formatted_dict: dict = { - 'Status': '', - 'routes': [] - } - output_list: list = output_string.split('\n\n') - for list_a in output_list: - output_dict = colon_separated_to_dict(list_a, True) - if 'Status' in output_dict: - formatted_dict['Status'] = output_dict['Status'] - else: - formatted_dict['routes'].append(output_dict) - return formatted_dict - - -def show_interface(raw: bool): - """ - Command 'show nhrp interface' - :param raw: if API - :type raw: bool - """ - if not process_named_running('opennhrp'): - raise vyos.opmode.UnconfiguredSubsystem('OpenNHRP is not running.') - interface_string: str = cmd('sudo opennhrpctl interface show') - interface_dict: dict = _get_formatted_dict(interface_string) - if raw: - return interface_dict - else: - return _get_formatted_output(interface_dict) - - -def show_tunnel(raw: bool): - """ - Command 'show nhrp tunnel' - :param raw: if API - :type raw: bool - """ - if not process_named_running('opennhrp'): - raise vyos.opmode.UnconfiguredSubsystem('OpenNHRP is not running.') - tunnel_string: str = cmd('sudo opennhrpctl show') - tunnel_dict: list = _get_formatted_dict(tunnel_string) - if raw: - return tunnel_dict - else: - return _get_formatted_output(tunnel_dict) - - -if __name__ == '__main__': - try: - res = vyos.opmode.run(sys.modules[__name__]) - if res: - print(res) - except (ValueError, vyos.opmode.Error) as e: - print(e) - sys.exit(1) diff --git a/src/op_mode/pki.py b/src/op_mode/pki.py index ab613e5c4..49a461e9e 100755 --- a/src/op_mode/pki.py +++ b/src/op_mode/pki.py @@ -14,25 +14,36 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
-import argparse import ipaddress import os import re import sys import tabulate +import typing from cryptography import x509 from cryptography.x509.oid import ExtendedKeyUsageOID +import vyos.opmode + from vyos.config import Config from vyos.config import config_dict_mangle_acme -from vyos.pki import encode_certificate, encode_public_key, encode_private_key, encode_dh_parameters +from vyos.pki import encode_certificate +from vyos.pki import encode_public_key +from vyos.pki import encode_private_key +from vyos.pki import encode_dh_parameters from vyos.pki import get_certificate_fingerprint -from vyos.pki import create_certificate, create_certificate_request, create_certificate_revocation_list +from vyos.pki import create_certificate +from vyos.pki import create_certificate_request +from vyos.pki import create_certificate_revocation_list from vyos.pki import create_private_key from vyos.pki import create_dh_parameters -from vyos.pki import load_certificate, load_certificate_request, load_private_key -from vyos.pki import load_crl, load_dh_parameters, load_public_key +from vyos.pki import load_certificate +from vyos.pki import load_certificate_request +from vyos.pki import load_private_key +from vyos.pki import load_crl +from vyos.pki import load_dh_parameters +from vyos.pki import load_public_key from vyos.pki import verify_certificate from vyos.utils.io import ask_input from vyos.utils.io import ask_yes_no @@ -42,18 +53,50 @@ from vyos.utils.process import cmd CERT_REQ_END = '-----END CERTIFICATE REQUEST-----' auth_dir = '/config/auth' +ArgsPkiType = typing.Literal['ca', 'certificate', 'dh', 'key-pair', 'openvpn', 'crl'] +ArgsPkiTypeGen = typing.Literal[ArgsPkiType, typing.Literal['ssh', 'wireguard']] +ArgsFingerprint = typing.Literal['sha256', 'sha384', 'sha512'] + # Helper Functions conf = Config() + + +def _verify(target): + """Decorator checks if config for PKI exists""" + from functools import wraps + + if target not in ['ca', 'certificate']: + raise ValueError('Invalid PKI') + + def _verify_target(func): + @wraps(func) + def _wrapper(*args, **kwargs): + name = kwargs.get('name') + unconf_message = f'PKI {target} "{name}" does not exist!' 
+ if name: + if not conf.exists(['pki', target, name]): + raise vyos.opmode.UnconfiguredSubsystem(unconf_message) + return func(*args, **kwargs) + + return _wrapper + + return _verify_target + + def get_default_values(): # Fetch default x509 values base = ['pki', 'x509', 'default'] - x509_defaults = conf.get_config_dict(base, key_mangling=('-', '_'), - no_tag_node_value_mangle=True, - get_first_key=True, - with_recursive_defaults=True) + x509_defaults = conf.get_config_dict( + base, + key_mangling=('-', '_'), + no_tag_node_value_mangle=True, + get_first_key=True, + with_recursive_defaults=True, + ) return x509_defaults + def get_config_ca_certificate(name=None): # Fetch ca certificates from config base = ['pki', 'ca'] @@ -62,12 +105,15 @@ def get_config_ca_certificate(name=None): if name: base = base + [name] - if not conf.exists(base + ['private', 'key']) or not conf.exists(base + ['certificate']): + if not conf.exists(base + ['private', 'key']) or not conf.exists( + base + ['certificate'] + ): return False - return conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) + return conf.get_config_dict( + base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True + ) + def get_config_certificate(name=None): # Get certificates from config @@ -77,18 +123,21 @@ def get_config_certificate(name=None): if name: base = base + [name] - if not conf.exists(base + ['private', 'key']) or not conf.exists(base + ['certificate']): + if not conf.exists(base + ['private', 'key']) or not conf.exists( + base + ['certificate'] + ): return False - pki = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) + pki = conf.get_config_dict( + base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True + ) if pki: for certificate in pki: pki[certificate] = config_dict_mangle_acme(certificate, pki[certificate]) return pki + def get_certificate_ca(cert, ca_certs): # Find CA certificate for given certificate if not ca_certs: @@ -107,6 +156,7 @@ def get_certificate_ca(cert, ca_certs): return ca_name return None + def get_config_revoked_certificates(): # Fetch revoked certificates from config ca_base = ['pki', 'ca'] @@ -115,19 +165,26 @@ def get_config_revoked_certificates(): certs = [] if conf.exists(ca_base): - ca_certificates = conf.get_config_dict(ca_base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) + ca_certificates = conf.get_config_dict( + ca_base, + key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + ) certs.extend(ca_certificates.values()) if conf.exists(cert_base): - certificates = conf.get_config_dict(cert_base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) + certificates = conf.get_config_dict( + cert_base, + key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + ) certs.extend(certificates.values()) return [cert_dict for cert_dict in certs if 'revoke' in cert_dict] + def get_revoked_by_serial_numbers(serial_numbers=[]): # Return serial numbers of revoked certificates certs_out = [] @@ -151,113 +208,153 @@ def get_revoked_by_serial_numbers(serial_numbers=[]): certs_out.append(cert_name) return certs_out -def install_certificate(name, cert='', private_key=None, key_type=None, key_passphrase=None, is_ca=False): + +def install_certificate( + name, cert='', private_key=None, key_type=None, key_passphrase=None, is_ca=False +): # Show/install conf 
commands for certificate prefix = 'ca' if is_ca else 'certificate' - base = f"pki {prefix} {name}" + base = f'pki {prefix} {name}' config_paths = [] if cert: - cert_pem = "".join(encode_certificate(cert).strip().split("\n")[1:-1]) + cert_pem = ''.join(encode_certificate(cert).strip().split('\n')[1:-1]) config_paths.append(f"{base} certificate '{cert_pem}'") if private_key: - key_pem = "".join(encode_private_key(private_key, passphrase=key_passphrase).strip().split("\n")[1:-1]) + key_pem = ''.join( + encode_private_key(private_key, passphrase=key_passphrase) + .strip() + .split('\n')[1:-1] + ) config_paths.append(f"{base} private key '{key_pem}'") if key_passphrase: - config_paths.append(f"{base} private password-protected") + config_paths.append(f'{base} private password-protected') install_into_config(conf, config_paths) + def install_crl(ca_name, crl): # Show/install conf commands for crl - crl_pem = "".join(encode_certificate(crl).strip().split("\n")[1:-1]) + crl_pem = ''.join(encode_certificate(crl).strip().split('\n')[1:-1]) install_into_config(conf, [f"pki ca {ca_name} crl '{crl_pem}'"]) + def install_dh_parameters(name, params): # Show/install conf commands for dh params - dh_pem = "".join(encode_dh_parameters(params).strip().split("\n")[1:-1]) + dh_pem = ''.join(encode_dh_parameters(params).strip().split('\n')[1:-1]) install_into_config(conf, [f"pki dh {name} parameters '{dh_pem}'"]) + def install_ssh_key(name, public_key, private_key, passphrase=None): # Show/install conf commands for ssh key - key_openssh = encode_public_key(public_key, encoding='OpenSSH', key_format='OpenSSH') + key_openssh = encode_public_key( + public_key, encoding='OpenSSH', key_format='OpenSSH' + ) username = os.getlogin() - type_key_split = key_openssh.split(" ") - - base = f"system login user {username} authentication public-keys {name}" - install_into_config(conf, [ - f"{base} key '{type_key_split[1]}'", - f"{base} type '{type_key_split[0]}'" - ]) - print(encode_private_key(private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase)) - -def install_keypair(name, key_type, private_key=None, public_key=None, passphrase=None, prompt=True): + type_key_split = key_openssh.split(' ') + + base = f'system login user {username} authentication public-keys {name}' + install_into_config( + conf, + [f"{base} key '{type_key_split[1]}'", f"{base} type '{type_key_split[0]}'"], + ) + print( + encode_private_key( + private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase + ) + ) + + +def install_keypair( + name, key_type, private_key=None, public_key=None, passphrase=None, prompt=True +): # Show/install conf commands for key-pair config_paths = [] if public_key: - install_public_key = not prompt or ask_yes_no('Do you want to install the public key?', default=True) + install_public_key = not prompt or ask_yes_no( + 'Do you want to install the public key?', default=True + ) public_key_pem = encode_public_key(public_key) if install_public_key: - install_public_pem = "".join(public_key_pem.strip().split("\n")[1:-1]) - config_paths.append(f"pki key-pair {name} public key '{install_public_pem}'") + install_public_pem = ''.join(public_key_pem.strip().split('\n')[1:-1]) + config_paths.append( + f"pki key-pair {name} public key '{install_public_pem}'" + ) else: - print("Public key:") + print('Public key:') print(public_key_pem) if private_key: - install_private_key = not prompt or ask_yes_no('Do you want to install the private key?', default=True) + install_private_key = not prompt or ask_yes_no( + 'Do 
you want to install the private key?', default=True + ) private_key_pem = encode_private_key(private_key, passphrase=passphrase) if install_private_key: - install_private_pem = "".join(private_key_pem.strip().split("\n")[1:-1]) - config_paths.append(f"pki key-pair {name} private key '{install_private_pem}'") + install_private_pem = ''.join(private_key_pem.strip().split('\n')[1:-1]) + config_paths.append( + f"pki key-pair {name} private key '{install_private_pem}'" + ) if passphrase: - config_paths.append(f"pki key-pair {name} private password-protected") + config_paths.append(f'pki key-pair {name} private password-protected') else: - print("Private key:") + print('Private key:') print(private_key_pem) install_into_config(conf, config_paths) + def install_openvpn_key(name, key_data, key_version='1'): config_paths = [ f"pki openvpn shared-secret {name} key '{key_data}'", - f"pki openvpn shared-secret {name} version '{key_version}'" + f"pki openvpn shared-secret {name} version '{key_version}'", ] install_into_config(conf, config_paths) + def install_wireguard_key(interface, private_key, public_key): # Show conf commands for installing wireguard key pairs from vyos.ifconfig import Section + if Section.section(interface) != 'wireguard': print(f'"{interface}" is not a WireGuard interface name!') exit(1) # Check if we are running in a config session - if yes, we can directly write to the CLI - install_into_config(conf, [f"interfaces wireguard {interface} private-key '{private_key}'"]) + install_into_config( + conf, [f"interfaces wireguard {interface} private-key '{private_key}'"] + ) print(f"Corresponding public-key to use on peer system is: '{public_key}'") + def install_wireguard_psk(interface, peer, psk): from vyos.ifconfig import Section + if Section.section(interface) != 'wireguard': print(f'"{interface}" is not a WireGuard interface name!') exit(1) # Check if we are running in a config session - if yes, we can directly write to the CLI - install_into_config(conf, [f"interfaces wireguard {interface} peer {peer} preshared-key '{psk}'"]) + install_into_config( + conf, [f"interfaces wireguard {interface} peer {peer} preshared-key '{psk}'"] + ) + def ask_passphrase(): passphrase = None - print("Note: If you plan to use the generated key on this router, do not encrypt the private key.") + print( + 'Note: If you plan to use the generated key on this router, do not encrypt the private key.' + ) if ask_yes_no('Do you want to encrypt the private key with a passphrase?'): passphrase = ask_input('Enter passphrase:') return passphrase + def write_file(filename, contents): full_path = os.path.join(auth_dir, filename) directory = os.path.dirname(full_path) @@ -266,7 +363,9 @@ def write_file(filename, contents): print('Failed to write file: directory does not exist') return False - if os.path.exists(full_path) and not ask_yes_no('Do you want to overwrite the existing file?'): + if os.path.exists(full_path) and not ask_yes_no( + 'Do you want to overwrite the existing file?' 
+ ): return False with open(full_path, 'w') as f: @@ -274,10 +373,14 @@ def write_file(filename, contents): print(f'File written to {full_path}') -# Generation functions +# Generation functions def generate_private_key(): - key_type = ask_input('Enter private key type: [rsa, dsa, ec]', default='rsa', valid_responses=['rsa', 'dsa', 'ec']) + key_type = ask_input( + 'Enter private key type: [rsa, dsa, ec]', + default='rsa', + valid_responses=['rsa', 'dsa', 'ec'], + ) size_valid = [] size_default = 0 @@ -289,28 +392,43 @@ def generate_private_key(): size_default = 256 size_valid = [224, 256, 384, 521] - size = ask_input('Enter private key bits:', default=size_default, numeric_only=True, valid_responses=size_valid) + size = ask_input( + 'Enter private key bits:', + default=size_default, + numeric_only=True, + valid_responses=size_valid, + ) return create_private_key(key_type, size), key_type + def parse_san_string(san_string): if not san_string: return None output = [] - san_split = san_string.strip().split(",") + san_split = san_string.strip().split(',') for pair_str in san_split: - tag, value = pair_str.strip().split(":", 1) + tag, value = pair_str.strip().split(':', 1) if tag == 'ipv4': output.append(ipaddress.IPv4Address(value)) elif tag == 'ipv6': output.append(ipaddress.IPv6Address(value)) elif tag == 'dns' or tag == 'rfc822': output.append(value) - return output - -def generate_certificate_request(private_key=None, key_type=None, return_request=False, name=None, install=False, file=False, ask_san=True): + return + + +def generate_certificate_request( + private_key=None, + key_type=None, + return_request=False, + name=None, + install=False, + file=False, + ask_san=True, +): if not private_key: private_key, key_type = generate_private_key() @@ -319,18 +437,24 @@ def generate_certificate_request(private_key=None, key_type=None, return_request while True: country = ask_input('Enter country code:', default=default_values['country']) if len(country) != 2: - print("Country name must be a 2 character country code") + print('Country name must be a 2 character country code') continue subject['country'] = country break subject['state'] = ask_input('Enter state:', default=default_values['state']) - subject['locality'] = ask_input('Enter locality:', default=default_values['locality']) - subject['organization'] = ask_input('Enter organization name:', default=default_values['organization']) + subject['locality'] = ask_input( + 'Enter locality:', default=default_values['locality'] + ) + subject['organization'] = ask_input( + 'Enter organization name:', default=default_values['organization'] + ) subject['common_name'] = ask_input('Enter common name:', default='vyos.io') subject_alt_names = None if ask_san and ask_yes_no('Do you want to configure Subject Alternative Names?'): - print("Enter alternative names in a comma separate list, example: ipv4:1.1.1.1,ipv6:fe80::1,dns:vyos.net,rfc822:user@vyos.net") + print( + 'Enter alternative names in a comma separate list, example: ipv4:1.1.1.1,ipv6:fe80::1,dns:vyos.net,rfc822:user@vyos.net' + ) san_string = ask_input('Enter Subject Alternative Names:') subject_alt_names = parse_san_string(san_string) @@ -347,24 +471,48 @@ def generate_certificate_request(private_key=None, key_type=None, return_request return None if install: - print("Certificate request:") - print(encode_certificate(cert_req) + "\n") - install_certificate(name, private_key=private_key, key_type=key_type, key_passphrase=passphrase, is_ca=False) + print('Certificate request:') + 
print(encode_certificate(cert_req) + '\n') + install_certificate( + name, + private_key=private_key, + key_type=key_type, + key_passphrase=passphrase, + is_ca=False, + ) if file: write_file(f'{name}.csr', encode_certificate(cert_req)) - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) - -def generate_certificate(cert_req, ca_cert, ca_private_key, is_ca=False, is_sub_ca=False): - valid_days = ask_input('Enter how many days certificate will be valid:', default='365' if not is_ca else '1825', numeric_only=True) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + + +def generate_certificate( + cert_req, ca_cert, ca_private_key, is_ca=False, is_sub_ca=False +): + valid_days = ask_input( + 'Enter how many days certificate will be valid:', + default='365' if not is_ca else '1825', + numeric_only=True, + ) cert_type = None if not is_ca: - cert_type = ask_input('Enter certificate type: (client, server)', default='server', valid_responses=['client', 'server']) - return create_certificate(cert_req, ca_cert, ca_private_key, valid_days, cert_type, is_ca, is_sub_ca) + cert_type = ask_input( + 'Enter certificate type: (client, server)', + default='server', + valid_responses=['client', 'server'], + ) + return create_certificate( + cert_req, ca_cert, ca_private_key, valid_days, cert_type, is_ca, is_sub_ca + ) + def generate_ca_certificate(name, install=False, file=False): private_key, key_type = generate_private_key() - cert_req = generate_certificate_request(private_key, key_type, return_request=True, ask_san=False) + cert_req = generate_certificate_request( + private_key, key_type, return_request=True, ask_san=False + ) cert = generate_certificate(cert_req, cert_req, private_key, is_ca=True) passphrase = ask_passphrase() @@ -374,11 +522,16 @@ def generate_ca_certificate(name, install=False, file=False): return None if install: - install_certificate(name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=True) + install_certificate( + name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=True + ) if file: write_file(f'{name}.pem', encode_certificate(cert)) - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def generate_ca_certificate_sign(name, ca_name, install=False, file=False): ca_dict = get_config_ca_certificate(ca_name) @@ -390,17 +543,19 @@ def generate_ca_certificate_sign(name, ca_name, install=False, file=False): ca_cert = load_certificate(ca_dict['certificate']) if not ca_cert: - print("Failed to load signing CA certificate, aborting") + print('Failed to load signing CA certificate, aborting') return None ca_private = ca_dict['private'] ca_private_passphrase = None if 'password_protected' in ca_private: ca_private_passphrase = ask_input('Enter signing CA private key passphrase:') - ca_private_key = load_private_key(ca_private['key'], passphrase=ca_private_passphrase) + ca_private_key = load_private_key( + ca_private['key'], passphrase=ca_private_passphrase + ) if not ca_private_key: - print("Failed to load signing CA private key, aborting") + print('Failed to load signing CA private key, aborting') return None private_key = None @@ -409,9 +564,11 @@ def generate_ca_certificate_sign(name, ca_name, install=False, file=False): cert_req = None if not ask_yes_no('Do you already have a certificate request?'): private_key, key_type = generate_private_key() - cert_req = 
generate_certificate_request(private_key, key_type, return_request=True, ask_san=False) + cert_req = generate_certificate_request( + private_key, key_type, return_request=True, ask_san=False + ) else: - print("Paste certificate request and press enter:") + print('Paste certificate request and press enter:') lines = [] curr_line = '' while True: @@ -421,17 +578,21 @@ def generate_ca_certificate_sign(name, ca_name, install=False, file=False): lines.append(curr_line) if not lines: - print("Aborted") + print('Aborted') return None - wrap = lines[0].find('-----') < 0 # Only base64 pasted, add the CSR tags for parsing - cert_req = load_certificate_request("\n".join(lines), wrap) + wrap = ( + lines[0].find('-----') < 0 + ) # Only base64 pasted, add the CSR tags for parsing + cert_req = load_certificate_request('\n'.join(lines), wrap) if not cert_req: - print("Invalid certificate request") + print('Invalid certificate request') return None - cert = generate_certificate(cert_req, ca_cert, ca_private_key, is_ca=True, is_sub_ca=True) + cert = generate_certificate( + cert_req, ca_cert, ca_private_key, is_ca=True, is_sub_ca=True + ) passphrase = None if private_key is not None: @@ -444,12 +605,17 @@ def generate_ca_certificate_sign(name, ca_name, install=False, file=False): return None if install: - install_certificate(name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=True) + install_certificate( + name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=True + ) if file: write_file(f'{name}.pem', encode_certificate(cert)) if private_key is not None: - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def generate_certificate_sign(name, ca_name, install=False, file=False): ca_dict = get_config_ca_certificate(ca_name) @@ -461,17 +627,19 @@ def generate_certificate_sign(name, ca_name, install=False, file=False): ca_cert = load_certificate(ca_dict['certificate']) if not ca_cert: - print("Failed to load CA certificate, aborting") + print('Failed to load CA certificate, aborting') return None ca_private = ca_dict['private'] ca_private_passphrase = None if 'password_protected' in ca_private: ca_private_passphrase = ask_input('Enter CA private key passphrase:') - ca_private_key = load_private_key(ca_private['key'], passphrase=ca_private_passphrase) + ca_private_key = load_private_key( + ca_private['key'], passphrase=ca_private_passphrase + ) if not ca_private_key: - print("Failed to load CA private key, aborting") + print('Failed to load CA private key, aborting') return None private_key = None @@ -480,9 +648,11 @@ def generate_certificate_sign(name, ca_name, install=False, file=False): cert_req = None if not ask_yes_no('Do you already have a certificate request?'): private_key, key_type = generate_private_key() - cert_req = generate_certificate_request(private_key, key_type, return_request=True) + cert_req = generate_certificate_request( + private_key, key_type, return_request=True + ) else: - print("Paste certificate request and press enter:") + print('Paste certificate request and press enter:') lines = [] curr_line = '' while True: @@ -492,18 +662,20 @@ def generate_certificate_sign(name, ca_name, install=False, file=False): lines.append(curr_line) if not lines: - print("Aborted") + print('Aborted') return None - wrap = lines[0].find('-----') < 0 # Only base64 pasted, add the CSR tags for parsing - cert_req = load_certificate_request("\n".join(lines), 
wrap) + wrap = ( + lines[0].find('-----') < 0 + ) # Only base64 pasted, add the CSR tags for parsing + cert_req = load_certificate_request('\n'.join(lines), wrap) if not cert_req: - print("Invalid certificate request") + print('Invalid certificate request') return None cert = generate_certificate(cert_req, ca_cert, ca_private_key, is_ca=False) - + passphrase = None if private_key is not None: passphrase = ask_passphrase() @@ -515,12 +687,17 @@ def generate_certificate_sign(name, ca_name, install=False, file=False): return None if install: - install_certificate(name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=False) + install_certificate( + name, cert, private_key, key_type, key_passphrase=passphrase, is_ca=False + ) if file: write_file(f'{name}.pem', encode_certificate(cert)) if private_key is not None: - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def generate_certificate_selfsign(name, install=False, file=False): private_key, key_type = generate_private_key() @@ -534,11 +711,21 @@ def generate_certificate_selfsign(name, install=False, file=False): return None if install: - install_certificate(name, cert, private_key=private_key, key_type=key_type, key_passphrase=passphrase, is_ca=False) + install_certificate( + name, + cert, + private_key=private_key, + key_type=key_type, + key_passphrase=passphrase, + is_ca=False, + ) if file: write_file(f'{name}.pem', encode_certificate(cert)) - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def generate_certificate_revocation_list(ca_name, install=False, file=False): ca_dict = get_config_ca_certificate(ca_name) @@ -550,17 +737,19 @@ def generate_certificate_revocation_list(ca_name, install=False, file=False): ca_cert = load_certificate(ca_dict['certificate']) if not ca_cert: - print("Failed to load CA certificate, aborting") + print('Failed to load CA certificate, aborting') return None ca_private = ca_dict['private'] ca_private_passphrase = None if 'password_protected' in ca_private: ca_private_passphrase = ask_input('Enter CA private key passphrase:') - ca_private_key = load_private_key(ca_private['key'], passphrase=ca_private_passphrase) + ca_private_key = load_private_key( + ca_private['key'], passphrase=ca_private_passphrase + ) if not ca_private_key: - print("Failed to load CA private key, aborting") + print('Failed to load CA private key, aborting') return None revoked_certs = get_config_revoked_certificates() @@ -581,13 +770,13 @@ def generate_certificate_revocation_list(ca_name, install=False, file=False): continue if not to_revoke: - print("No revoked certificates to add to the CRL") + print('No revoked certificates to add to the CRL') return None crl = create_certificate_revocation_list(ca_cert, ca_private_key, to_revoke) if not crl: - print("Failed to create CRL") + print('Failed to create CRL') return None if not install and not file: @@ -598,7 +787,8 @@ def generate_certificate_revocation_list(ca_name, install=False, file=False): install_crl(ca_name, crl) if file: - write_file(f'{name}.crl', encode_certificate(crl)) + write_file(f'{ca_name}.crl', encode_certificate(crl)) + def generate_ssh_keypair(name, install=False, file=False): private_key, key_type = generate_private_key() @@ -607,29 +797,42 @@ def generate_ssh_keypair(name, install=False, file=False): if not 
install and not file: print(encode_public_key(public_key, encoding='OpenSSH', key_format='OpenSSH')) - print("") - print(encode_private_key(private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase)) + print('') + print( + encode_private_key( + private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase + ) + ) return None if install: install_ssh_key(name, public_key, private_key, passphrase) if file: - write_file(f'{name}.pem', encode_public_key(public_key, encoding='OpenSSH', key_format='OpenSSH')) - write_file(f'{name}.key', encode_private_key(private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase)) + write_file( + f'{name}.pem', + encode_public_key(public_key, encoding='OpenSSH', key_format='OpenSSH'), + ) + write_file( + f'{name}.key', + encode_private_key( + private_key, encoding='PEM', key_format='OpenSSH', passphrase=passphrase + ), + ) + def generate_dh_parameters(name, install=False, file=False): bits = ask_input('Enter DH parameters key size:', default=2048, numeric_only=True) - print("Generating parameters...") + print('Generating parameters...') dh_params = create_dh_parameters(bits) if not dh_params: - print("Failed to create DH parameters") + print('Failed to create DH parameters') return None if not install and not file: - print("DH Parameters:") + print('DH Parameters:') print(encode_dh_parameters(dh_params)) if install: @@ -638,6 +841,7 @@ def generate_dh_parameters(name, install=False, file=False): if file: write_file(f'{name}.pem', encode_dh_parameters(dh_params)) + def generate_keypair(name, install=False, file=False): private_key, key_type = generate_private_key() public_key = private_key.public_key() @@ -645,7 +849,7 @@ def generate_keypair(name, install=False, file=False): if not install and not file: print(encode_public_key(public_key)) - print("") + print('') print(encode_private_key(private_key, passphrase=passphrase)) return None @@ -654,13 +858,16 @@ def generate_keypair(name, install=False, file=False): if file: write_file(f'{name}.pem', encode_public_key(public_key)) - write_file(f'{name}.key', encode_private_key(private_key, passphrase=passphrase)) + write_file( + f'{name}.key', encode_private_key(private_key, passphrase=passphrase) + ) + def generate_openvpn_key(name, install=False, file=False): result = cmd('openvpn --genkey secret /dev/stdout | grep -o "^[^#]*"') if not result: - print("Failed to generate OpenVPN key") + print('Failed to generate OpenVPN key') return None if not install and not file: @@ -668,11 +875,13 @@ def generate_openvpn_key(name, install=False, file=False): return None if install: - key_lines = result.split("\n") - key_data = "".join(key_lines[1:-1]) # Remove wrapper tags and line endings + key_lines = result.split('\n') + key_data = ''.join(key_lines[1:-1]) # Remove wrapper tags and line endings key_version = '1' - version_search = re.search(r'BEGIN OpenVPN Static key V(\d+)', result) # Future-proofing (hopefully) + version_search = re.search( + r'BEGIN OpenVPN Static key V(\d+)', result + ) # Future-proofing (hopefully) if version_search: key_version = version_search[1] @@ -681,6 +890,7 @@ def generate_openvpn_key(name, install=False, file=False): if file: write_file(f'{name}.key', result) + def generate_wireguard_key(interface=None, install=False): private_key = cmd('wg genkey') public_key = cmd('wg pubkey', input=private_key) @@ -691,6 +901,7 @@ def generate_wireguard_key(interface=None, install=False): print(f'Private key: {private_key}') print(f'Public key: {public_key}', 
end='\n\n') + def generate_wireguard_psk(interface=None, peer=None, install=False): psk = cmd('wg genpsk') if interface and peer and install: @@ -698,8 +909,11 @@ def generate_wireguard_psk(interface=None, peer=None, install=False): else: print(f'Pre-shared key: {psk}') + # Import functions -def import_ca_certificate(name, path=None, key_path=None, no_prompt=False, passphrase=None): +def import_ca_certificate( + name, path=None, key_path=None, no_prompt=False, passphrase=None +): if path: if not os.path.exists(path): print(f'File not found: {path}') @@ -736,7 +950,10 @@ def import_ca_certificate(name, path=None, key_path=None, no_prompt=False, passp install_certificate(name, private_key=key, is_ca=True) -def import_certificate(name, path=None, key_path=None, no_prompt=False, passphrase=None): + +def import_certificate( + name, path=None, key_path=None, no_prompt=False, passphrase=None +): if path: if not os.path.exists(path): print(f'File not found: {path}') @@ -773,6 +990,7 @@ def import_certificate(name, path=None, key_path=None, no_prompt=False, passphra install_certificate(name, private_key=key, is_ca=False) + def import_crl(name, path): if not os.path.exists(path): print(f'File not found: {path}') @@ -790,6 +1008,7 @@ def import_crl(name, path): install_crl(name, crl) + def import_dh_parameters(name, path): if not os.path.exists(path): print(f'File not found: {path}') @@ -807,6 +1026,7 @@ def import_dh_parameters(name, path): install_dh_parameters(name, dh) + def import_keypair(name, path=None, key_path=None, no_prompt=False, passphrase=None): if path: if not os.path.exists(path): @@ -844,6 +1064,7 @@ def import_keypair(name, path=None, key_path=None, no_prompt=False, passphrase=N install_keypair(name, None, private_key=key, prompt=False) + def import_openvpn_secret(name, path): if not os.path.exists(path): print(f'File not found: {path}') @@ -853,19 +1074,134 @@ def import_openvpn_secret(name, path): key_version = '1' with open(path) as f: - key_lines = f.read().strip().split("\n") - key_lines = list(filter(lambda line: not line.strip().startswith('#'), key_lines)) # Remove commented lines - key_data = "".join(key_lines[1:-1]) # Remove wrapper tags and line endings - - version_search = re.search(r'BEGIN OpenVPN Static key V(\d+)', key_lines[0]) # Future-proofing (hopefully) + key_lines = f.read().strip().split('\n') + key_lines = list( + filter(lambda line: not line.strip().startswith('#'), key_lines) + ) # Remove commented lines + key_data = ''.join(key_lines[1:-1]) # Remove wrapper tags and line endings + + version_search = re.search( + r'BEGIN OpenVPN Static key V(\d+)', key_lines[0] + ) # Future-proofing (hopefully) if version_search: key_version = version_search[1] install_openvpn_key(name, key_data, key_version) -# Show functions -def show_certificate_authority(name=None, pem=False): - headers = ['Name', 'Subject', 'Issuer CN', 'Issued', 'Expiry', 'Private Key', 'Parent'] + +def generate_pki( + raw: bool, + pki_type: ArgsPkiTypeGen, + name: typing.Optional[str], + file: typing.Optional[bool], + install: typing.Optional[bool], + sign: typing.Optional[str], + self_sign: typing.Optional[bool], + key: typing.Optional[bool], + psk: typing.Optional[bool], + interface: typing.Optional[str], + peer: typing.Optional[str], +): + try: + if pki_type == 'ca': + if sign: + generate_ca_certificate_sign(name, sign, install=install, file=file) + else: + generate_ca_certificate(name, install=install, file=file) + elif pki_type == 'certificate': + if sign: + generate_certificate_sign(name, sign, 
install=install, file=file) + elif self_sign: + generate_certificate_selfsign(name, install=install, file=file) + else: + generate_certificate_request(name=name, install=install, file=file) + + elif pki_type == 'crl': + generate_certificate_revocation_list(name, install=install, file=file) + + elif pki_type == 'ssh': + generate_ssh_keypair(name, install=install, file=file) + + elif pki_type == 'dh': + generate_dh_parameters(name, install=install, file=file) + + elif pki_type == 'key-pair': + generate_keypair(name, install=install, file=file) + + elif pki_type == 'openvpn': + generate_openvpn_key(name, install=install, file=file) + + elif pki_type == 'wireguard': + # WireGuard supports writing key directly into the CLI, but this + # requires the vyos_libexec_dir environment variable to be set + os.environ['vyos_libexec_dir'] = '/usr/libexec/vyos' + + if key: + generate_wireguard_key(interface, install=install) + if psk: + generate_wireguard_psk(interface, peer=peer, install=install) + except KeyboardInterrupt: + print('Aborted') + sys.exit(0) + + +def import_pki( + name: str, + pki_type: ArgsPkiType, + filename: typing.Optional[str], + key_filename: typing.Optional[str], + no_prompt: typing.Optional[bool], + passphrase: typing.Optional[str], +): + try: + if pki_type == 'ca': + import_ca_certificate( + name, + path=filename, + key_path=key_filename, + no_prompt=no_prompt, + passphrase=passphrase, + ) + elif pki_type == 'certificate': + import_certificate( + name, + path=filename, + key_path=key_filename, + no_prompt=no_prompt, + passphrase=passphrase, + ) + elif pki_type == 'crl': + import_crl(name, filename) + elif pki_type == 'dh': + import_dh_parameters(name, filename) + elif pki_type == 'key-pair': + import_keypair( + name, + path=filename, + key_path=key_filename, + no_prompt=no_prompt, + passphrase=passphrase, + ) + elif pki_type == 'openvpn': + import_openvpn_secret(name, filename) + except KeyboardInterrupt: + print('Aborted') + sys.exit(0) + + +@_verify('ca') +def show_certificate_authority( + raw: bool, name: typing.Optional[str] = None, pem: typing.Optional[bool] = False +): + headers = [ + 'Name', + 'Subject', + 'Issuer CN', + 'Issued', + 'Expiry', + 'Private Key', + 'Parent', + ] data = [] certs = get_config_ca_certificate() if certs: @@ -882,7 +1218,7 @@ def show_certificate_authority(name=None, pem=False): return parent_ca_name = get_certificate_ca(cert, certs) - cert_issuer_cn = cert.issuer.rfc4514_string().split(",")[0] + cert_issuer_cn = cert.issuer.rfc4514_string().split(',')[0] if not parent_ca_name or parent_ca_name == cert_name: parent_ca_name = 'N/A' @@ -890,14 +1226,45 @@ def show_certificate_authority(name=None, pem=False): if not cert: continue - have_private = 'Yes' if 'private' in cert_dict and 'key' in cert_dict['private'] else 'No' - data.append([cert_name, cert.subject.rfc4514_string(), cert_issuer_cn, cert.not_valid_before, cert.not_valid_after, have_private, parent_ca_name]) - - print("Certificate Authorities:") + have_private = ( + 'Yes' + if 'private' in cert_dict and 'key' in cert_dict['private'] + else 'No' + ) + data.append( + [ + cert_name, + cert.subject.rfc4514_string(), + cert_issuer_cn, + cert.not_valid_before, + cert.not_valid_after, + have_private, + parent_ca_name, + ] + ) + + print('Certificate Authorities:') print(tabulate.tabulate(data, headers)) -def show_certificate(name=None, pem=False, fingerprint_hash=None): - headers = ['Name', 'Type', 'Subject CN', 'Issuer CN', 'Issued', 'Expiry', 'Revoked', 'Private Key', 'CA Present'] + 
+@_verify('certificate') +def show_certificate( + raw: bool, + name: typing.Optional[str] = None, + pem: typing.Optional[bool] = False, + fingerprint: typing.Optional[ArgsFingerprint] = None, +): + headers = [ + 'Name', + 'Type', + 'Subject CN', + 'Issuer CN', + 'Issued', + 'Expiry', + 'Revoked', + 'Private Key', + 'CA Present', + ] data = [] certs = get_config_certificate() if certs: @@ -917,13 +1284,13 @@ def show_certificate(name=None, pem=False, fingerprint_hash=None): if name and pem: print(encode_certificate(cert)) return - elif name and fingerprint_hash: - print(get_certificate_fingerprint(cert, fingerprint_hash)) + elif name and fingerprint: + print(get_certificate_fingerprint(cert, fingerprint)) return ca_name = get_certificate_ca(cert, ca_certs) - cert_subject_cn = cert.subject.rfc4514_string().split(",")[0] - cert_issuer_cn = cert.issuer.rfc4514_string().split(",")[0] + cert_subject_cn = cert.subject.rfc4514_string().split(',')[0] + cert_issuer_cn = cert.issuer.rfc4514_string().split(',')[0] cert_type = 'Unknown' try: @@ -932,21 +1299,37 @@ def show_certificate(name=None, pem=False, fingerprint_hash=None): cert_type = 'Server' elif ext and ExtendedKeyUsageOID.CLIENT_AUTH in ext.value: cert_type = 'Client' - except: + except Exception: pass revoked = 'Yes' if 'revoke' in cert_dict else 'No' - have_private = 'Yes' if 'private' in cert_dict and 'key' in cert_dict['private'] else 'No' + have_private = ( + 'Yes' + if 'private' in cert_dict and 'key' in cert_dict['private'] + else 'No' + ) have_ca = f'Yes ({ca_name})' if ca_name else 'No' - data.append([ - cert_name, cert_type, cert_subject_cn, cert_issuer_cn, - cert.not_valid_before, cert.not_valid_after, - revoked, have_private, have_ca]) - - print("Certificates:") + data.append( + [ + cert_name, + cert_type, + cert_subject_cn, + cert_issuer_cn, + cert.not_valid_before, + cert.not_valid_after, + revoked, + have_private, + have_ca, + ] + ) + + print('Certificates:') print(tabulate.tabulate(data, headers)) -def show_crl(name=None, pem=False): + +def show_crl( + raw: bool, name: typing.Optional[str] = None, pem: typing.Optional[bool] = False +): headers = ['CA Name', 'Updated', 'Revokes'] data = [] certs = get_config_ca_certificate() @@ -971,141 +1354,31 @@ def show_crl(name=None, pem=False): print(encode_certificate(crl)) continue - certs = get_revoked_by_serial_numbers([revoked.serial_number for revoked in crl]) - data.append([cert_name, crl.last_update, ", ".join(certs)]) + certs = get_revoked_by_serial_numbers( + [revoked.serial_number for revoked in crl] + ) + data.append([cert_name, crl.last_update, ', '.join(certs)]) if name and pem: return - print("Certificate Revocation Lists:") + print('Certificate Revocation Lists:') print(tabulate.tabulate(data, headers)) -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--action', help='PKI action', required=True) - - # X509 - parser.add_argument('--ca', help='Certificate Authority', required=False) - parser.add_argument('--certificate', help='Certificate', required=False) - parser.add_argument('--crl', help='Certificate Revocation List', required=False) - parser.add_argument('--sign', help='Sign certificate with specified CA', required=False) - parser.add_argument('--self-sign', help='Self-sign the certificate', action='store_true') - parser.add_argument('--pem', help='Output using PEM encoding', action='store_true') - parser.add_argument('--fingerprint', help='Show fingerprint and exit', action='store') - # SSH - parser.add_argument('--ssh', 
help='SSH Key', required=False) +def show_all(raw: bool): + show_certificate_authority(raw) + print('\n') + show_certificate(raw) + print('\n') + show_crl(raw) - # DH - parser.add_argument('--dh', help='DH Parameters', required=False) - - # Key pair - parser.add_argument('--keypair', help='Key pair', required=False) - - # OpenVPN - parser.add_argument('--openvpn', help='OpenVPN TLS key', required=False) - - # WireGuard - parser.add_argument('--wireguard', help='Wireguard', action='store_true') - group = parser.add_mutually_exclusive_group() - group.add_argument('--key', help='Wireguard key pair', action='store_true', required=False) - group.add_argument('--psk', help='Wireguard pre shared key', action='store_true', required=False) - parser.add_argument('--interface', help='Install generated keys into running-config for named interface', action='store') - parser.add_argument('--peer', help='Install generated keys into running-config for peer', action='store') - - # Global - parser.add_argument('--file', help='Write generated keys into specified filename', action='store_true') - parser.add_argument('--install', help='Install generated keys into running-config', action='store_true') - - parser.add_argument('--filename', help='Write certificate into specified filename', action='store') - parser.add_argument('--key-filename', help='Write key into specified filename', action='store') - - parser.add_argument('--no-prompt', action='store_true', help='Perform action non-interactively') - parser.add_argument('--passphrase', help='A passphrase to decrypt the private key') - - args = parser.parse_args() +if __name__ == '__main__': try: - if args.action == 'generate': - if args.ca: - if args.sign: - generate_ca_certificate_sign(args.ca, args.sign, install=args.install, file=args.file) - else: - generate_ca_certificate(args.ca, install=args.install, file=args.file) - elif args.certificate: - if args.sign: - generate_certificate_sign(args.certificate, args.sign, install=args.install, file=args.file) - elif args.self_sign: - generate_certificate_selfsign(args.certificate, install=args.install, file=args.file) - else: - generate_certificate_request(name=args.certificate, install=args.install, file=args.file) - - elif args.crl: - generate_certificate_revocation_list(args.crl, install=args.install, file=args.file) - - elif args.ssh: - generate_ssh_keypair(args.ssh, install=args.install, file=args.file) - - elif args.dh: - generate_dh_parameters(args.dh, install=args.install, file=args.file) - - elif args.keypair: - generate_keypair(args.keypair, install=args.install, file=args.file) - - elif args.openvpn: - generate_openvpn_key(args.openvpn, install=args.install, file=args.file) - - elif args.wireguard: - # WireGuard supports writing key directly into the CLI, but this - # requires the vyos_libexec_dir environment variable to be set - os.environ["vyos_libexec_dir"] = "/usr/libexec/vyos" - - if args.key: - generate_wireguard_key(args.interface, install=args.install) - if args.psk: - generate_wireguard_psk(args.interface, peer=args.peer, install=args.install) - elif args.action == 'import': - if args.ca: - import_ca_certificate(args.ca, path=args.filename, key_path=args.key_filename, - no_prompt=args.no_prompt, passphrase=args.passphrase) - elif args.certificate: - import_certificate(args.certificate, path=args.filename, key_path=args.key_filename, - no_prompt=args.no_prompt, passphrase=args.passphrase) - elif args.crl: - import_crl(args.crl, args.filename) - elif args.dh: - import_dh_parameters(args.dh, 
args.filename) - elif args.keypair: - import_keypair(args.keypair, path=args.filename, key_path=args.key_filename, - no_prompt=args.no_prompt, passphrase=args.passphrase) - elif args.openvpn: - import_openvpn_secret(args.openvpn, args.filename) - elif args.action == 'show': - if args.ca: - ca_name = None if args.ca == 'all' else args.ca - if ca_name: - if not conf.exists(['pki', 'ca', ca_name]): - print(f'CA "{ca_name}" does not exist!') - exit(1) - show_certificate_authority(ca_name, args.pem) - elif args.certificate: - cert_name = None if args.certificate == 'all' else args.certificate - if cert_name: - if not conf.exists(['pki', 'certificate', cert_name]): - print(f'Certificate "{cert_name}" does not exist!') - exit(1) - if args.fingerprint is None: - show_certificate(None if args.certificate == 'all' else args.certificate, args.pem) - else: - show_certificate(args.certificate, fingerprint_hash=args.fingerprint) - elif args.crl: - show_crl(None if args.crl == 'all' else args.crl, args.pem) - else: - show_certificate_authority() - print('\n') - show_certificate() - print('\n') - show_crl() - except KeyboardInterrupt: - print("Aborted") - sys.exit(0) + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/op_mode/qos.py b/src/op_mode/qos.py index b8ca149a0..464b552ee 100755 --- a/src/op_mode/qos.py +++ b/src/op_mode/qos.py @@ -38,7 +38,7 @@ def get_tc_info(interface_dict, interface_name, policy_type): if not policy_name: return None, None - class_dict = op_mode_config_dict(['qos', 'policy', policy_type, policy_name], key_mangling=('-', '_'), + class_dict = op_mode_config_dict(['qos', 'policy', policy_type, policy_name], get_first_key=True) if not class_dict: return None, None diff --git a/src/op_mode/reset_wireguard.py b/src/op_mode/reset_wireguard.py new file mode 100755 index 000000000..1fcfb31b5 --- /dev/null +++ b/src/op_mode/reset_wireguard.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2025 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import sys +import typing + +import vyos.opmode + +from vyos.ifconfig import WireGuardIf +from vyos.configquery import ConfigTreeQuery + + +def _verify(func): + """Decorator checks if WireGuard interface config exists""" + from functools import wraps + + @wraps(func) + def _wrapper(*args, **kwargs): + config = ConfigTreeQuery() + interface = kwargs.get('interface') + if not config.exists(['interfaces', 'wireguard', interface]): + unconf_message = f'WireGuard interface {interface} is not configured' + raise vyos.opmode.UnconfiguredSubsystem(unconf_message) + return func(*args, **kwargs) + + return _wrapper + + +@_verify +def reset_peer(interface: str, peer: typing.Optional[str] = None): + intf = WireGuardIf(interface, create=False, debug=False) + return intf.operational.reset_peer(peer) + + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/op_mode/restart.py b/src/op_mode/restart.py index a83c8b9d8..efa835485 100755 --- a/src/op_mode/restart.py +++ b/src/op_mode/restart.py @@ -41,6 +41,10 @@ service_map = { 'systemd_service': 'pdns-recursor', 'path': ['service', 'dns', 'forwarding'], }, + 'haproxy': { + 'systemd_service': 'haproxy', + 'path': ['load-balancing', 'haproxy'], + }, 'igmp_proxy': { 'systemd_service': 'igmpproxy', 'path': ['protocols', 'igmp-proxy'], @@ -49,14 +53,14 @@ service_map = { 'systemd_service': 'strongswan', 'path': ['vpn', 'ipsec'], }, + 'load-balancing_wan': { + 'systemd_service': 'vyos-wan-load-balance', + 'path': ['load-balancing', 'wan'], + }, 'mdns_repeater': { 'systemd_service': 'avahi-daemon', 'path': ['service', 'mdns', 'repeater'], }, - 'reverse_proxy': { - 'systemd_service': 'haproxy', - 'path': ['load-balancing', 'reverse-proxy'], - }, 'router_advert': { 'systemd_service': 'radvd', 'path': ['service', 'router-advert'], @@ -83,10 +87,11 @@ services = typing.Literal[ 'dhcpv6', 'dns_dynamic', 'dns_forwarding', + 'haproxy', 'igmp_proxy', 'ipsec', + 'load-balancing_wan', 'mdns_repeater', - 'reverse_proxy', 'router_advert', 'snmp', 'ssh', diff --git a/src/op_mode/show_configuration_files.sh b/src/op_mode/show_configuration_files.sh deleted file mode 100755 index ad8e0747c..000000000 --- a/src/op_mode/show_configuration_files.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Wrapper script for the show configuration files command -find ${vyatta_sysconfdir}/config/ \ - -type f \ - -not -name ".*" \ - -not -name "config.boot.*" \ - -printf "%f\t(%Tc)\t%T@\n" \ - | sort -r -k3 \ - | awk -F"\t" '{printf ("%-20s\t%s\n", $1,$2) ;}' diff --git a/src/op_mode/stp.py b/src/op_mode/stp.py new file mode 100755 index 000000000..fb57bd7ee --- /dev/null +++ b/src/op_mode/stp.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2025 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
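The new reset_wireguard.py above, like the stp.py and vrrp.py scripts later in this diff, ends by handing its module to vyos.opmode.run(), which exposes the module's public functions as CLI subcommands. The snippet below is only a rough illustration of that dispatch idea, not the actual vyos.opmode implementation; every name in it is hypothetical.

import argparse
import inspect

def run_module(module):
    # Build one subcommand per public function; keyword arguments become
    # flags, and boolean parameters such as 'raw' become on/off switches.
    funcs = {name: obj for name, obj in inspect.getmembers(module, inspect.isfunction)
             if not name.startswith('_')}
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='subcommand', required=True)
    for name, func in funcs.items():
        sub = subparsers.add_parser(name)
        for pname, param in inspect.signature(func).parameters.items():
            if param.annotation is bool:
                sub.add_argument(f'--{pname}', action='store_true')
            else:
                sub.add_argument(f'--{pname}',
                                 required=param.default is inspect.Parameter.empty)
    args = vars(parser.parse_args())
    return funcs[args.pop('subcommand')](**args)

# e.g. "reset_wireguard.py reset_peer --interface wg01" would end up
# calling reset_peer(interface='wg01', peer=None)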
+
+import sys
+import typing
+import json
+from tabulate import tabulate
+
+import vyos.opmode
+from vyos.utils.process import cmd
+from vyos.utils.network import interface_exists
+
+def detailed_output(dataset, headers):
+    for data in dataset:
+        adjusted_rule = data + [""] * (len(headers) - len(data)) # account for different header length, like default-action
+        transformed_rule = [[header, adjusted_rule[i]] for i, header in enumerate(headers) if i < len(adjusted_rule)] # create key-pair list from headers and rules lists; wrap at 100 char
+
+        print(tabulate(transformed_rule, tablefmt="presto"))
+        print()
+
+def _get_bridge_vlan_data(iface):
+    allowed_vlans = []
+    native_vlan = None
+    vlanData = json.loads(cmd(f"bridge -j -d vlan show"))
+    for vlans in vlanData:
+        if vlans['ifname'] == iface:
+            for allowed in vlans['vlans']:
+                if "flags" in allowed and "PVID" in allowed["flags"]:
+                    native_vlan = allowed['vlan']
+                elif allowed.get('vlanEnd', None):
+                    allowed_vlans.append(f"{allowed['vlan']}-{allowed['vlanEnd']}")
+                else:
+                    allowed_vlans.append(str(allowed['vlan']))
+
+    if not allowed_vlans:
+        allowed_vlans = ["none"]
+    if not native_vlan:
+        native_vlan = "none"
+
+    return ",".join(allowed_vlans), native_vlan
+
+def _get_stp_data(ifname, brInfo, brStatus):
+    tmpInfo = {}
+
+    tmpInfo['bridge_name'] = brInfo.get('ifname')
+    tmpInfo['up_state'] = brInfo.get('operstate')
+    tmpInfo['priority'] = brInfo.get('linkinfo').get('info_data').get('priority')
+    tmpInfo['vlan_filtering'] = "Enabled" if brInfo.get('linkinfo').get('info_data').get('vlan_filtering') == 1 else "Disabled"
+    tmpInfo['vlan_protocol'] = brInfo.get('linkinfo').get('info_data').get('vlan_protocol')
+
+    # The version of VyOS I tested had an issue with the "ip -d link show type bridge"
+    # output. The root_id was always the local bridge, even though the underlying system
+    # understood when it wasn't. Could be an upstream bug. I pull from the "/sys/class/net"
+    # structure instead. This can be changed later if the "ip link" behavior is corrected.
+
+    #tmpInfo['bridge_id'] = brInfo.get('linkinfo').get('info_data').get('bridge_id')
+    #tmpInfo['root_id'] = brInfo.get('linkinfo').get('info_data').get('root_id')
+
+    tmpInfo['bridge_id'] = cmd(f"cat /sys/class/net/{brInfo.get('ifname')}/bridge/bridge_id").split('.')
+    tmpInfo['root_id'] = cmd(f"cat /sys/class/net/{brInfo.get('ifname')}/bridge/root_id").split('.')
+
+    # The "/sys/class/net" structure stores the IDs without separators like ':' or '.'
+ # This adds a ':' after every 2 characters to make it resemble a MAC Address + tmpInfo['bridge_id'][1] = ':'.join(tmpInfo['bridge_id'][1][i:i+2] for i in range(0, len(tmpInfo['bridge_id'][1]), 2)) + tmpInfo['root_id'][1] = ':'.join(tmpInfo['root_id'][1][i:i+2] for i in range(0, len(tmpInfo['root_id'][1]), 2)) + + tmpInfo['stp_state'] = "Enabled" if brInfo.get('linkinfo', {}).get('info_data', {}).get('stp_state') == 1 else "Disabled" + + # I don't call any of these values, but I created them to be called within raw output if desired + + tmpInfo['mcast_snooping'] = "Enabled" if brInfo.get('linkinfo').get('info_data').get('mcast_snooping') == 1 else "Disabled" + tmpInfo['rxbytes'] = brInfo.get('stats64').get('rx').get('bytes') + tmpInfo['rxpackets'] = brInfo.get('stats64').get('rx').get('packets') + tmpInfo['rxerrors'] = brInfo.get('stats64').get('rx').get('errors') + tmpInfo['rxdropped'] = brInfo.get('stats64').get('rx').get('dropped') + tmpInfo['rxover_errors'] = brInfo.get('stats64').get('rx').get('over_errors') + tmpInfo['rxmulticast'] = brInfo.get('stats64').get('rx').get('multicast') + tmpInfo['txbytes'] = brInfo.get('stats64').get('tx').get('bytes') + tmpInfo['txpackets'] = brInfo.get('stats64').get('tx').get('packets') + tmpInfo['txerrors'] = brInfo.get('stats64').get('tx').get('errors') + tmpInfo['txdropped'] = brInfo.get('stats64').get('tx').get('dropped') + tmpInfo['txcarrier_errors'] = brInfo.get('stats64').get('tx').get('carrier_errors') + tmpInfo['txcollosions'] = brInfo.get('stats64').get('tx').get('collisions') + + tmpStatus = [] + for members in brStatus: + if members.get('master') == brInfo.get('ifname'): + allowed_vlans, native_vlan = _get_bridge_vlan_data(members['ifname']) + tmpStatus.append({'interface': members.get('ifname'), + 'state': members.get('state').capitalize(), + 'mtu': members.get('mtu'), + 'pathcost': members.get('cost'), + 'bpduguard': "Enabled" if members.get('guard') == True else "Disabled", + 'rootguard': "Enabled" if members.get('root_block') == True else "Disabled", + 'mac_learning': "Enabled" if members.get('learning') == True else "Disabled", + 'neigh_suppress': "Enabled" if members.get('neigh_suppress') == True else "Disabled", + 'vlan_tunnel': "Enabled" if members.get('vlan_tunnel') == True else "Disabled", + 'isolated': "Enabled" if members.get('isolated') == True else "Disabled", + **({'allowed_vlans': allowed_vlans} if allowed_vlans else {}), + **({'native_vlan': native_vlan} if native_vlan else {})}) + + tmpInfo['members'] = tmpStatus + return tmpInfo + +def show_stp(raw: bool, ifname: typing.Optional[str], detail: bool): + rawList = [] + rawDict = {'stp': []} + + if ifname: + if not interface_exists(ifname): + raise vyos.opmode.Error(f"{ifname} does not exist!") + else: + ifname = "" + + bridgeInfo = json.loads(cmd(f"ip -j -d -s link show type bridge {ifname}")) + + if not bridgeInfo: + raise vyos.opmode.Error(f"No Bridges configured!") + + bridgeStatus = json.loads(cmd(f"bridge -j -s -d link show")) + + for bridges in bridgeInfo: + output_list = [] + amRoot = "" + bridgeDict = _get_stp_data(ifname, bridges, bridgeStatus) + + if bridgeDict['bridge_id'][1] == bridgeDict['root_id'][1]: + amRoot = " (This bridge is the root)" + + print('-' * 80) + print(f"Bridge interface {bridgeDict['bridge_name']} ({bridgeDict['up_state']}):\n") + print(f"Spanning Tree is {bridgeDict['stp_state']}") + print(f"Bridge ID {bridgeDict['bridge_id'][1]}, Priority {int(bridgeDict['bridge_id'][0], 16)}") + print(f"Root ID {bridgeDict['root_id'][1]}, Priority 
{int(bridgeDict['root_id'][0], 16)}{amRoot}") + print(f"VLANs {bridgeDict['vlan_filtering'].capitalize()}, Protocol {bridgeDict['vlan_protocol']}") + print() + + for members in bridgeDict['members']: + output_list.append([members['interface'], + members['state'], + *([members['pathcost']] if detail else []), + members['bpduguard'], + members['rootguard'], + members['mac_learning'], + *([members['neigh_suppress']] if detail else []), + *([members['vlan_tunnel']] if detail else []), + *([members['isolated']] if detail else []), + *([members['allowed_vlans']] if detail else []), + *([members['native_vlan']] if detail else [])]) + + if raw: + rawList.append(bridgeDict) + elif detail: + headers = ['Interface', 'State', 'Pathcost', 'BPDU_Guard', 'Root_Guard', 'Learning', 'Neighbor_Suppression', 'Q-in-Q', 'Port_Isolation', 'Allowed VLANs', 'Native VLAN'] + detailed_output(output_list, headers) + else: + headers = ['Interface', 'State', 'BPDU_Guard', 'Root_Guard', 'Learning'] + print(tabulate(output_list, headers)) + print() + + if raw: + rawDict['stp'] = rawList + return rawDict + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/op_mode/tech_support.py b/src/op_mode/tech_support.py index f60bb87ff..c4496dfa3 100644 --- a/src/op_mode/tech_support.py +++ b/src/op_mode/tech_support.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2024 VyOS maintainers and contributors +# Copyright (C) 2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -20,6 +20,7 @@ import json import vyos.opmode from vyos.utils.process import cmd +from vyos.base import Warning def _get_version_data(): from vyos.version import get_version_data @@ -51,7 +52,12 @@ def _get_storage(): def _get_devices(): devices = {} devices["pci"] = cmd("lspci") - devices["usb"] = cmd("lsusb") + + try: + devices["usb"] = cmd("lsusb") + except OSError: + Warning("Could not retrieve information about USB devices") + devices["usb"] = {} return devices @@ -97,21 +103,22 @@ def _get_boot_config(): return strip_config_source(config) def _get_config_scripts(): - from os import listdir + from os import walk from os.path import join from vyos.utils.file import read_file scripts = [] dir = '/config/scripts' - for f in listdir(dir): - script = {} - path = join(dir, f) - data = read_file(path) - script["path"] = path - script["data"] = data - - scripts.append(script) + for dirpath, _, filenames in walk(dir): + for filename in filenames: + script = {} + path = join(dirpath, filename) + data = read_file(path) + script["path"] = path + script["data"] = data + + scripts.append(script) return scripts diff --git a/src/op_mode/vrrp.py b/src/op_mode/vrrp.py index 60be86065..ef1338e23 100755 --- a/src/op_mode/vrrp.py +++ b/src/op_mode/vrrp.py @@ -13,47 +13,324 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
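For reference, _get_bridge_vlan_data() in stp.py above only relies on a handful of keys from the iproute2 JSON. The sample below is illustrative data, not output captured from a real system:

# Shape of `bridge -j -d vlan show` as consumed by _get_bridge_vlan_data()
sample = [
    {
        'ifname': 'eth1',
        'vlans': [
            {'vlan': 1, 'flags': ['PVID', 'Egress Untagged']},  # native VLAN
            {'vlan': 10, 'vlanEnd': 20},                        # range -> "10-20"
            {'vlan': 30},                                       # single -> "30"
        ],
    }
]
# _get_bridge_vlan_data('eth1') would return ("10-20,30", 1)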
- +import json import sys -import argparse +import typing + +from jinja2 import Template -from vyos.configquery import ConfigTreeQuery -from vyos.ifconfig.vrrp import VRRP +import vyos.opmode +from vyos.ifconfig import VRRP from vyos.ifconfig.vrrp import VRRPNoData -parser = argparse.ArgumentParser() -group = parser.add_mutually_exclusive_group() -group.add_argument("-s", "--summary", action="store_true", help="Print VRRP summary") -group.add_argument("-t", "--statistics", action="store_true", help="Print VRRP statistics") -group.add_argument("-d", "--data", action="store_true", help="Print detailed VRRP data") - -args = parser.parse_args() - -def is_configured(): - """ Check if VRRP is configured """ - config = ConfigTreeQuery() - if not config.exists(['high-availability', 'vrrp', 'group']): - return False - return True - -# Exit early if VRRP is dead or not configured -if is_configured() == False: - print('VRRP not configured!') - exit(0) -if not VRRP.is_running(): - print('VRRP is not running') - sys.exit(0) - -try: - if args.summary: - print(VRRP.format(VRRP.collect('json'))) - elif args.statistics: - print(VRRP.collect('stats')) - elif args.data: - print(VRRP.collect('state')) - else: - parser.print_help() + +stat_template = Template(""" +{% for rec in instances %} +VRRP Instance: {{rec.instance}} + Advertisements: + Received: {{rec.advert_rcvd}} + Sent: {{rec.advert_sent}} + Became master: {{rec.become_master}} + Released master: {{rec.release_master}} + Packet Errors: + Length: {{rec.packet_len_err}} + TTL: {{rec.ip_ttl_err}} + Invalid Type: {{rec.invalid_type_rcvd}} + Advertisement Interval: {{rec.advert_interval_err}} + Address List: {{rec.addr_list_err}} + Authentication Errors: + Invalid Type: {{rec.invalid_authtype}} + Type Mismatch: {{rec.authtype_mismatch}} + Failure: {{rec.auth_failure}} + Priority Zero: + Received: {{rec.pri_zero_rcvd}} + Sent: {{rec.pri_zero_sent}} +{% endfor %} +""") + +detail_template = Template(""" +{%- for rec in instances %} + VRRP Instance: {{rec.iname}} + VRRP Version: {{rec.version}} + State: {{rec.state}} + {% if rec.state == 'BACKUP' -%} + Master priority: {{ rec.master_priority }} + {% if rec.version == 3 -%} + Master advert interval: {{ rec.master_adver_int }} + {% endif -%} + {% endif -%} + Wantstate: {{rec.wantstate}} + Last transition: {{rec.last_transition}} + Interface: {{rec.ifp_ifname}} + {% if rec.dont_track_primary > 0 -%} + VRRP interface tracking disabled + {% endif -%} + {% if rec.skip_check_adv_addr > 0 -%} + Skip checking advert IP addresses + {% endif -%} + {% if rec.strict_mode > 0 -%} + Enforcing strict VRRP compliance + {% endif -%} + Gratuitous ARP delay: {{rec.garp_delay}} + Gratuitous ARP repeat: {{rec.garp_rep}} + Gratuitous ARP refresh: {{rec.garp_refresh}} + Gratuitous ARP refresh repeat: {{rec.garp_refresh_rep}} + Gratuitous ARP lower priority delay: {{rec.garp_lower_prio_delay}} + Gratuitous ARP lower priority repeat: {{rec.garp_lower_prio_rep}} + Send advert after receive lower priority advert: {{rec.lower_prio_no_advert}} + Send advert after receive higher priority advert: {{rec.higher_prio_send_advert}} + Virtual Router ID: {{rec.vrid}} + Priority: {{rec.base_priority}} + Effective priority: {{rec.effective_priority}} + Advert interval: {{rec.adver_int}} sec + Accept: {{rec.accept}} + Preempt: {{rec.nopreempt}} + {% if rec.preempt_delay -%} + Preempt delay: {{rec.preempt_delay}} + {% endif -%} + Promote secondaries: {{rec.promote_secondaries}} + Authentication type: {{rec.auth_type}} + {% if rec.vips %} + Virtual IP 
({{ rec.vips | length }}): + {% for ip in rec.vips -%} + {{ip}} + {% endfor -%} + {% endif -%} + {% if rec.evips %} + Virtual IP Excluded: + {% for ip in rec.evips -%} + {{ip}} + {% endfor -%} + {% endif -%} + {% if rec.vroutes %} + Virtual Routes: + {% for route in rec.vroutes -%} + {{route}} + {% endfor -%} + {% endif -%} + {% if rec.vrules %} + Virtual Rules: + {% for rule in rec.vrules -%} + {{rule}} + {% endfor -%} + {% endif -%} + {% if rec.track_ifp %} + Tracked interfaces: + {% for ifp in rec.track_ifp -%} + {{ifp}} + {% endfor -%} + {% endif -%} + {% if rec.track_script %} + Tracked scripts: + {% for script in rec.track_script -%} + {{script}} + {% endfor -%} + {% endif %} + Using smtp notification: {{rec.smtp_alert}} + Notify deleted: {{rec.notify_deleted}} +{% endfor %} +""") + +# https://github.com/acassen/keepalived/blob/59c39afe7410f927c9894a1bafb87e398c6f02be/keepalived/include/vrrp.h#L126 +VRRP_AUTH_NONE = 0 +VRRP_AUTH_PASS = 1 +VRRP_AUTH_AH = 2 + +# https://github.com/acassen/keepalived/blob/59c39afe7410f927c9894a1bafb87e398c6f02be/keepalived/include/vrrp.h#L417 +VRRP_STATE_INIT = 0 +VRRP_STATE_BACK = 1 +VRRP_STATE_MAST = 2 +VRRP_STATE_FAULT = 3 + +VRRP_AUTH_TO_NAME = { + VRRP_AUTH_NONE: 'NONE', + VRRP_AUTH_PASS: 'SIMPLE_PASSWORD', + VRRP_AUTH_AH: 'IPSEC_AH', +} + +VRRP_STATE_TO_NAME = { + VRRP_STATE_INIT: 'INIT', + VRRP_STATE_BACK: 'BACKUP', + VRRP_STATE_MAST: 'MASTER', + VRRP_STATE_FAULT: 'FAULT', +} + + +def _get_raw_data(group_name: str = None) -> list: + """ + Retrieve raw JSON data for all VRRP groups. + + Args: + group_name (str, optional): If provided, filters the data to only + include the specified vrrp group. + + Returns: + list: A list of raw JSON data for VRRP groups, filtered by group_name + if specified. + """ + try: + output = VRRP.collect('json') + except VRRPNoData as e: + raise vyos.opmode.DataUnavailable(f'{e}') + + data = json.loads(output) + + if not data: + return [] + + if group_name is not None: + for rec in data: + if rec['data'].get('iname') == group_name: + return [rec] + return [] + return data + + +def _get_formatted_statistics_output(data: list) -> str: + """ + Prepare formatted statistics output from the given data. + + Args: + data (list): A list of dictionaries containing vrrp grop information + and statistics. + + Returns: + str: Rendered statistics output based on the provided data. + """ + instances = list() + for instance in data: + instances.append( + {'instance': instance['data'].get('iname'), **instance['stats']} + ) + + return stat_template.render(instances=instances) + + +def _process_field(data: dict, field: str, true_value: str, false_value: str): + """ + Updates the given field in the data dictionary with a specified value based + on its truthiness. + + Args: + data (dict): The dictionary containing the field to be processed. + field (str): The key representing the field in the dictionary. + true_value (str): The value to set if the field's value is truthy. + false_value (str): The value to set if the field's value is falsy. + + Returns: + None: The function modifies the dictionary in place. + """ + data[field] = true_value if data.get(field) else false_value + + +def _get_formatted_detail_output(data: list) -> str: + """ + Prepare formatted detail information output from the given data. + + Args: + data (list): A list of dictionaries containing vrrp grop information + and statistics. + + Returns: + str: Rendered detail info output based on the provided data. 
+ """ + instances = list() + for instance in data: + instance['data']['state'] = VRRP_STATE_TO_NAME.get( + instance['data'].get('state'), 'unknown' + ) + instance['data']['wantstate'] = VRRP_STATE_TO_NAME.get( + instance['data'].get('wantstate'), 'unknown' + ) + instance['data']['auth_type'] = VRRP_AUTH_TO_NAME.get( + instance['data'].get('auth_type'), 'unknown' + ) + _process_field(instance['data'], 'lower_prio_no_advert', 'false', 'true') + _process_field(instance['data'], 'higher_prio_send_advert', 'true', 'false') + _process_field(instance['data'], 'accept', 'Enabled', 'Disabled') + _process_field(instance['data'], 'notify_deleted', 'Deleted', 'Fault') + _process_field(instance['data'], 'smtp_alert', 'yes', 'no') + _process_field(instance['data'], 'nopreempt', 'Disabled', 'Enabled') + _process_field(instance['data'], 'promote_secondaries', 'Enabled', 'Disabled') + instance['data']['vips'] = instance['data'].get('vips', False) + instance['data']['evips'] = instance['data'].get('evips', False) + instance['data']['vroutes'] = instance['data'].get('vroutes', False) + instance['data']['vrules'] = instance['data'].get('vrules', False) + + instances.append(instance['data']) + + return detail_template.render(instances=instances) + + +def show_detail( + raw: bool, group_name: typing.Optional[str] = None +) -> typing.Union[list, str]: + """ + Display detailed information about the VRRP group. + + Args: + raw (bool): If True, return raw data instead of formatted output. + group_name (str, optional): Filter the data by a specific group name, + if provided. + + Returns: + list or str: Raw data if `raw` is True, otherwise a formatted detail + output. + """ + data = _get_raw_data(group_name) + + if raw: + return data + + return _get_formatted_detail_output(data) + + +def show_statistics( + raw: bool, group_name: typing.Optional[str] = None +) -> typing.Union[list, str]: + """ + Display VRRP group statistics. + + Args: + raw (bool): If True, return raw data instead of formatted output. + group_name (str, optional): Filter the data by a specific group name, + if provided. + + Returns: + list or str: Raw data if `raw` is True, otherwise a formatted statistic + output. + """ + data = _get_raw_data(group_name) + + if raw: + return data + + return _get_formatted_statistics_output(data) + + +def show_summary(raw: bool) -> typing.Union[list, str]: + """ + Display a summary of VRRP group. + + Args: + raw (bool): If True, return raw data instead of formatted output. + + Returns: + list or str: Raw data if `raw` is True, otherwise a formatted summary output. 
+ """ + data = _get_raw_data() + + if raw: + return data + + return VRRP.format(data) + + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) sys.exit(1) -except VRRPNoData as e: - print(e) - sys.exit(1) diff --git a/src/op_mode/vtysh_wrapper.sh b/src/op_mode/vtysh_wrapper.sh index 25d09ce77..bc472f7bb 100755 --- a/src/op_mode/vtysh_wrapper.sh +++ b/src/op_mode/vtysh_wrapper.sh @@ -2,5 +2,5 @@ declare -a tmp # FRR uses ospf6 where we use ospfv3, and we use reset over clear for BGP, # thus alter the commands -tmp=$(echo $@ | sed -e "s/ospfv3/ospf6/" | sed -e "s/^reset bgp/clear bgp/" | sed -e "s/^reset ip bgp/clear ip bgp/") +tmp=$(echo $@ | sed -e "s/ospfv3/ospf6/" | sed -e "s/^reset bgp/clear bgp/" | sed -e "s/^reset ip bgp/clear ip bgp/"| sed -e "s/^reset ip nhrp/clear ip nhrp/") vtysh -c "$tmp" diff --git a/src/op_mode/zone.py b/src/op_mode/zone.py index 49fecdf28..df39549d2 100644 --- a/src/op_mode/zone.py +++ b/src/op_mode/zone.py @@ -56,10 +56,15 @@ def _convert_one_zone_data(zone: str, zone_config: dict) -> dict: from_zone_dict['firewall_v6'] = dict_search( 'firewall.ipv6_name', from_zone_config) list_of_rules.append(from_zone_dict) + zone_members =[] + interface_members = dict_search('member.interface', zone_config) + vrf_members = dict_search('member.vrf', zone_config) + zone_members += interface_members if interface_members is not None else [] + zone_members += vrf_members if vrf_members is not None else [] zone_dict = { 'name': zone, - 'interface': dict_search('interface', zone_config), + 'members': zone_members, 'type': 'LOCAL' if dict_search('local_zone', zone_config) is not None else None, } @@ -126,7 +131,7 @@ def output_zone_list(zone_conf: dict) -> list: if zone_conf['type'] == 'LOCAL': zone_info.append('LOCAL') else: - zone_info.append("\n".join(zone_conf['interface'])) + zone_info.append("\n".join(zone_conf['members'])) from_zone = [] firewall = [] @@ -175,7 +180,7 @@ def get_formatted_output(zone_policy: list) -> str: :rtype: str """ headers = ["Zone", - "Interfaces", + "Members", "From Zone", "Firewall IPv4", "Firewall IPv6" diff --git a/src/services/api/__init__.py b/src/services/api/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/src/services/api/__init__.py diff --git a/src/services/api/graphql/bindings.py b/src/services/api/graphql/bindings.py index ef4966466..ebf745f32 100644 --- a/src/services/api/graphql/bindings.py +++ b/src/services/api/graphql/bindings.py @@ -1,4 +1,4 @@ -# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -13,24 +13,40 @@ # You should have received a copy of the GNU Lesser General Public License # along with this library. If not, see <http://www.gnu.org/licenses/>. + import vyos.defaults -from . graphql.queries import query -from . graphql.mutations import mutation -from . graphql.directives import directives_dict -from . graphql.errors import op_mode_error -from . graphql.auth_token_mutation import auth_token_mutation -from . libs.token_auth import init_secret -from . 
import state -from ariadne import make_executable_schema, load_schema_from_path, snake_case_fallback_resolvers + +from ariadne import make_executable_schema +from ariadne import load_schema_from_path +from ariadne import snake_case_fallback_resolvers + +from .graphql.queries import query +from .graphql.mutations import mutation +from .graphql.directives import directives_dict +from .graphql.errors import op_mode_error +from .graphql.auth_token_mutation import auth_token_mutation +from .libs.token_auth import init_secret + +from ..session import SessionState + def generate_schema(): + state = SessionState() api_schema_dir = vyos.defaults.directories['api_schema'] - if state.settings['app'].state.vyos_auth_type == 'token': + if state.auth_type == 'token': init_secret() type_defs = load_schema_from_path(api_schema_dir) - schema = make_executable_schema(type_defs, query, op_mode_error, mutation, auth_token_mutation, snake_case_fallback_resolvers, directives=directives_dict) + schema = make_executable_schema( + type_defs, + query, + op_mode_error, + mutation, + auth_token_mutation, + snake_case_fallback_resolvers, + directives=directives_dict, + ) return schema diff --git a/src/services/api/graphql/graphql/auth_token_mutation.py b/src/services/api/graphql/graphql/auth_token_mutation.py index a53fa4d60..c74364603 100644 --- a/src/services/api/graphql/graphql/auth_token_mutation.py +++ b/src/services/api/graphql/graphql/auth_token_mutation.py @@ -19,11 +19,12 @@ from typing import Dict from ariadne import ObjectType from graphql import GraphQLResolveInfo -from .. libs.token_auth import generate_token -from .. session.session import get_user_info -from .. import state +from ..libs.token_auth import generate_token +from ..session.session import get_user_info +from ...session import SessionState + +auth_token_mutation = ObjectType('Mutation') -auth_token_mutation = ObjectType("Mutation") @auth_token_mutation.field('AuthToken') def auth_token_resolver(obj: Any, info: GraphQLResolveInfo, data: Dict): @@ -31,10 +32,13 @@ def auth_token_resolver(obj: Any, info: GraphQLResolveInfo, data: Dict): user = data['username'] passwd = data['password'] - secret = state.settings['secret'] - exp_interval = int(state.settings['app'].state.vyos_token_exp) - expiration = (datetime.datetime.now(tz=datetime.timezone.utc) + - datetime.timedelta(seconds=exp_interval)) + state = SessionState() + + secret = getattr(state, 'secret', '') + exp_interval = int(state.token_exp) + expiration = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta( + seconds=exp_interval + ) res = generate_token(user, passwd, secret, expiration) try: @@ -44,18 +48,9 @@ def auth_token_resolver(obj: Any, info: GraphQLResolveInfo, data: Dict): pass if 'token' in res: data['result'] = res - return { - "success": True, - "data": data - } + return {'success': True, 'data': data} if 'errors' in res: - return { - "success": False, - "errors": res['errors'] - } - - return { - "success": False, - "errors": ['token generation failed'] - } + return {'success': False, 'errors': res['errors']} + + return {'success': False, 'errors': ['token generation failed']} diff --git a/src/services/api/graphql/graphql/mutations.py b/src/services/api/graphql/graphql/mutations.py index d115a8e94..0b391c070 100644 --- a/src/services/api/graphql/graphql/mutations.py +++ b/src/services/api/graphql/graphql/mutations.py @@ -14,20 +14,23 @@ # along with this library. If not, see <http://www.gnu.org/licenses/>. 
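From here on, the GraphQL modules swap the old state.settings['app'].state.vyos_* plumbing for plain attribute access on SessionState (imported from ../session, added elsewhere in this changeset). Judging only from how it is used here, SessionState behaves like a process-wide singleton; a minimal sketch of that idea, not the actual implementation:

class SessionState:
    _instance = None

    def __new__(cls):
        # every caller gets the same object, so settings written at startup
        # (auth_type, keys, session, ...) are visible to the resolvers later
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

state = SessionState()
state.auth_type = 'token'
assert SessionState().auth_type == 'token'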
from importlib import import_module -from ariadne import ObjectType, convert_camel_case_to_snake -from makefun import with_signature # used below by func_sig -from typing import Any, Dict, Optional # pylint: disable=W0611 -from graphql import GraphQLResolveInfo # pylint: disable=W0611 +from typing import Any, Dict, Optional # pylint: disable=W0611 # noqa: F401 +from graphql import GraphQLResolveInfo # pylint: disable=W0611 # noqa: F401 + +from ariadne import ObjectType, convert_camel_case_to_snake +from makefun import with_signature -from .. import state -from .. libs import key_auth -from api.graphql.session.session import Session -from api.graphql.session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code from vyos.opmode import Error as OpModeError -mutation = ObjectType("Mutation") +from ...session import SessionState +from ..libs import key_auth +from ..session.session import Session +from ..session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code + +mutation = ObjectType('Mutation') + def make_mutation_resolver(mutation_name, class_name, session_func): """Dynamically generate a resolver for the mutation named in the @@ -45,12 +48,13 @@ def make_mutation_resolver(mutation_name, class_name, session_func): func_base_name = convert_camel_case_to_snake(class_name) resolver_name = f'resolve_{func_base_name}' func_sig = '(obj: Any, info: GraphQLResolveInfo, data: Optional[Dict]=None)' + state = SessionState() @mutation.field(mutation_name) @with_signature(func_sig, func_name=resolver_name) async def func_impl(*args, **kwargs): try: - auth_type = state.settings['app'].state.vyos_auth_type + auth_type = state.auth_type if auth_type == 'key': data = kwargs['data'] @@ -58,10 +62,7 @@ def make_mutation_resolver(mutation_name, class_name, session_func): auth = key_auth.auth_required(key) if auth is None: - return { - "success": False, - "errors": ['invalid API key'] - } + return {'success': False, 'errors': ['invalid API key']} # We are finished with the 'key' entry, and may remove so as to # pass the rest of data (if any) to function. @@ -76,21 +77,15 @@ def make_mutation_resolver(mutation_name, class_name, session_func): if user is None: error = info.context.get('error') if error is not None: - return { - "success": False, - "errors": [error] - } - return { - "success": False, - "errors": ['not authenticated'] - } + return {'success': False, 'errors': [error]} + return {'success': False, 'errors': ['not authenticated']} else: # AtrributeError will have already been raised if no - # vyos_auth_type; validation and defaultValue ensure it is + # auth_type; validation and defaultValue ensure it is # one of the previous cases, so this is never reached. 
                pass
 
-        session = state.settings['app'].state.vyos_session
+        session = state.session
 
         # one may override the session functions with a local subclass
         try:
@@ -105,35 +100,36 @@ def make_mutation_resolver(mutation_name, class_name, session_func):
                 result = method()
                 data['result'] = result
 
-            return {
-                "success": True,
-                "data": data
-            }
+            return {'success': True, 'data': data}
         except OpModeError as e:
             typename = type(e).__name__
             msg = str(e)
             return {
-                "success": False,
-                "errore": ['op_mode_error'],
-                "op_mode_error": {"name": f"{typename}",
-                                  "message": msg if msg else op_mode_err_msg.get(typename, "Unknown"),
-                                  "vyos_code": op_mode_err_code.get(typename, 9999)}
+                'success': False,
+                'errors': ['op_mode_error'],
+                'op_mode_error': {
+                    'name': f'{typename}',
+                    'message': msg if msg else op_mode_err_msg.get(typename, 'Unknown'),
+                    'vyos_code': op_mode_err_code.get(typename, 9999),
+                },
             }
         except Exception as error:
-            return {
-                "success": False,
-                "errors": [repr(error)]
-            }
+            return {'success': False, 'errors': [repr(error)]}
 
     return func_impl
 
+
 def make_config_session_mutation_resolver(mutation_name):
-    return make_mutation_resolver(mutation_name, mutation_name,
-                                  convert_camel_case_to_snake(mutation_name))
+    return make_mutation_resolver(
+        mutation_name, mutation_name, convert_camel_case_to_snake(mutation_name)
+    )
+
 
 def make_gen_op_mutation_resolver(mutation_name):
     return make_mutation_resolver(mutation_name, mutation_name, 'gen_op_mutation')
 
+
 def make_composite_mutation_resolver(mutation_name):
-    return make_mutation_resolver(mutation_name, mutation_name,
-                                  convert_camel_case_to_snake(mutation_name))
+    return make_mutation_resolver(
+        mutation_name, mutation_name, convert_camel_case_to_snake(mutation_name)
+    )
diff --git a/src/services/api/graphql/graphql/queries.py b/src/services/api/graphql/graphql/queries.py
index 717098259..9303fe909 100644
--- a/src/services/api/graphql/graphql/queries.py
+++ b/src/services/api/graphql/graphql/queries.py
@@ -14,20 +14,23 @@
 # along with this library. If not, see <http://www.gnu.org/licenses/>.
 
 from importlib import import_module
-from ariadne import ObjectType, convert_camel_case_to_snake
-from makefun import with_signature # used below by func_sig
-from typing import Any, Dict, Optional # pylint: disable=W0611
-from graphql import GraphQLResolveInfo # pylint: disable=W0611
+from typing import Any, Dict, Optional # pylint: disable=W0611 # noqa: F401
+from graphql import GraphQLResolveInfo # pylint: disable=W0611 # noqa: F401
+
+from ariadne import ObjectType, convert_camel_case_to_snake
+from makefun import with_signature
 
-from .. import state
-from .. 
libs import key_auth -from api.graphql.session.session import Session -from api.graphql.session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code from vyos.opmode import Error as OpModeError -query = ObjectType("Query") +from ...session import SessionState +from ..libs import key_auth +from ..session.session import Session +from ..session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code + +query = ObjectType('Query') + def make_query_resolver(query_name, class_name, session_func): """Dynamically generate a resolver for the query named in the @@ -45,12 +48,13 @@ def make_query_resolver(query_name, class_name, session_func): func_base_name = convert_camel_case_to_snake(class_name) resolver_name = f'resolve_{func_base_name}' func_sig = '(obj: Any, info: GraphQLResolveInfo, data: Optional[Dict]=None)' + state = SessionState() @query.field(query_name) @with_signature(func_sig, func_name=resolver_name) async def func_impl(*args, **kwargs): try: - auth_type = state.settings['app'].state.vyos_auth_type + auth_type = state.auth_type if auth_type == 'key': data = kwargs['data'] @@ -58,10 +62,7 @@ def make_query_resolver(query_name, class_name, session_func): auth = key_auth.auth_required(key) if auth is None: - return { - "success": False, - "errors": ['invalid API key'] - } + return {'success': False, 'errors': ['invalid API key']} # We are finished with the 'key' entry, and may remove so as to # pass the rest of data (if any) to function. @@ -76,21 +77,15 @@ def make_query_resolver(query_name, class_name, session_func): if user is None: error = info.context.get('error') if error is not None: - return { - "success": False, - "errors": [error] - } - return { - "success": False, - "errors": ['not authenticated'] - } + return {'success': False, 'errors': [error]} + return {'success': False, 'errors': ['not authenticated']} else: # AtrributeError will have already been raised if no - # vyos_auth_type; validation and defaultValue ensure it is + # auth_type; validation and defaultValue ensure it is # one of the previous cases, so this is never reached. 
pass - session = state.settings['app'].state.vyos_session + session = state.session # one may override the session functions with a local subclass try: @@ -105,35 +100,36 @@ def make_query_resolver(query_name, class_name, session_func): result = method() data['result'] = result - return { - "success": True, - "data": data - } + return {'success': True, 'data': data} except OpModeError as e: typename = type(e).__name__ msg = str(e) return { - "success": False, - "errors": ['op_mode_error'], - "op_mode_error": {"name": f"{typename}", - "message": msg if msg else op_mode_err_msg.get(typename, "Unknown"), - "vyos_code": op_mode_err_code.get(typename, 9999)} + 'success': False, + 'errors': ['op_mode_error'], + 'op_mode_error': { + 'name': f'{typename}', + 'message': msg if msg else op_mode_err_msg.get(typename, 'Unknown'), + 'vyos_code': op_mode_err_code.get(typename, 9999), + }, } except Exception as error: - return { - "success": False, - "errors": [repr(error)] - } + return {'success': False, 'errors': [repr(error)]} return func_impl + def make_config_session_query_resolver(query_name): - return make_query_resolver(query_name, query_name, - convert_camel_case_to_snake(query_name)) + return make_query_resolver( + query_name, query_name, convert_camel_case_to_snake(query_name) + ) + def make_gen_op_query_resolver(query_name): return make_query_resolver(query_name, query_name, 'gen_op_query') + def make_composite_query_resolver(query_name): - return make_query_resolver(query_name, query_name, - convert_camel_case_to_snake(query_name)) + return make_query_resolver( + query_name, query_name, convert_camel_case_to_snake(query_name) + ) diff --git a/src/services/api/graphql/libs/__init__.py b/src/services/api/graphql/libs/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/src/services/api/graphql/libs/__init__.py diff --git a/src/services/api/graphql/libs/key_auth.py b/src/services/api/graphql/libs/key_auth.py index 2db0f7d48..ffd7f32b2 100644 --- a/src/services/api/graphql/libs/key_auth.py +++ b/src/services/api/graphql/libs/key_auth.py @@ -1,5 +1,21 @@ +# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + + +from ...session import SessionState -from .. 
import state def check_auth(key_list, key): if not key_list: @@ -10,9 +26,11 @@ def check_auth(key_list, key): key_id = k['id'] return key_id + def auth_required(key): + state = SessionState() api_keys = None - api_keys = state.settings['app'].state.vyos_keys + api_keys = state.keys key_id = check_auth(api_keys, key) - state.settings['app'].state.vyos_id = key_id + state.id = key_id return key_id diff --git a/src/services/api/graphql/libs/token_auth.py b/src/services/api/graphql/libs/token_auth.py index 8585485c9..4f743a096 100644 --- a/src/services/api/graphql/libs/token_auth.py +++ b/src/services/api/graphql/libs/token_auth.py @@ -1,46 +1,67 @@ +# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + + import jwt import uuid import pam from secrets import token_hex -from .. import state +from ...session import SessionState + def _check_passwd_pam(username: str, passwd: str) -> bool: if pam.authenticate(username, passwd): return True return False + def init_secret(): - length = int(state.settings['app'].state.vyos_secret_len) + state = SessionState() + length = int(state.secret_len) secret = token_hex(length) - state.settings['secret'] = secret + state.secret = secret + def generate_token(user: str, passwd: str, secret: str, exp: int) -> dict: if user is None or passwd is None: return {} + state = SessionState() if _check_passwd_pam(user, passwd): - app = state.settings['app'] try: - users = app.state.vyos_token_users + users = state.token_users except AttributeError: - app.state.vyos_token_users = {} - users = app.state.vyos_token_users + users = state.token_users = {} user_id = uuid.uuid1().hex payload_data = {'iss': user, 'sub': user_id, 'exp': exp} - secret = state.settings.get('secret') + secret = getattr(state, 'secret', None) if secret is None: - return {"errors": ['missing secret']} - token = jwt.encode(payload=payload_data, key=secret, algorithm="HS256") + return {'errors': ['missing secret']} + token = jwt.encode(payload=payload_data, key=secret, algorithm='HS256') users |= {user_id: user} return {'token': token} else: - return {"errors": ['failed pam authentication']} + return {'errors': ['failed pam authentication']} + def get_user_context(request): context = {} context['request'] = request context['user'] = None + state = SessionState() if 'Authorization' in request.headers: auth = request.headers['Authorization'] scheme, token = auth.split() @@ -48,8 +69,8 @@ def get_user_context(request): return context try: - secret = state.settings.get('secret') - payload = jwt.decode(token, secret, algorithms=["HS256"]) + secret = getattr(state, 'secret', None) + payload = jwt.decode(token, secret, algorithms=['HS256']) user_id: str = payload.get('sub') if user_id is None: return context @@ -59,7 +80,7 @@ def get_user_context(request): except jwt.PyJWTError: return context try: - users = 
state.settings['app'].state.vyos_token_users + users = state.token_users except AttributeError: return context diff --git a/src/services/api/graphql/routers.py b/src/services/api/graphql/routers.py new file mode 100644 index 000000000..ed3ee1e8c --- /dev/null +++ b/src/services/api/graphql/routers.py @@ -0,0 +1,77 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# pylint: disable=import-outside-toplevel + + +import typing + +from ariadne.asgi import GraphQL +from starlette.middleware.cors import CORSMiddleware + + +if typing.TYPE_CHECKING: + from fastapi import FastAPI + + +def graphql_init(app: 'FastAPI'): + from ..session import SessionState + from .libs.token_auth import get_user_context + + state = SessionState() + + # import after initializaion of state + from .bindings import generate_schema + + schema = generate_schema() + + in_spec = state.introspection + + # remove route and reinstall below, for any changes; alternatively, test + # for config_diff before proceeding + graphql_clear(app) + + if state.origins: + origins = state.origins + app.add_route( + '/graphql', + CORSMiddleware( + GraphQL( + schema, + context_value=get_user_context, + debug=True, + introspection=in_spec, + ), + allow_origins=origins, + allow_methods=('GET', 'POST', 'OPTIONS'), + allow_headers=('Authorization',), + ), + ) + else: + app.add_route( + '/graphql', + GraphQL( + schema, + context_value=get_user_context, + debug=True, + introspection=in_spec, + ), + ) + + +def graphql_clear(app: 'FastAPI'): + for r in app.routes: + if r.path == '/graphql': + app.routes.remove(r) diff --git a/src/services/api/graphql/session/session.py b/src/services/api/graphql/session/session.py index 6ae44b9bf..619534f43 100644 --- a/src/services/api/graphql/session/session.py +++ b/src/services/api/graphql/session/session.py @@ -28,34 +28,45 @@ from api.graphql.libs.op_mode import normalize_output op_mode_include_file = os.path.join(directories['data'], 'op-mode-standardized.json') -def get_config_dict(path=[], effective=False, key_mangling=None, - get_first_key=False, no_multi_convert=False, - no_tag_node_value_mangle=False): + +def get_config_dict( + path=[], + effective=False, + key_mangling=None, + get_first_key=False, + no_multi_convert=False, + no_tag_node_value_mangle=False, +): config = Config() - return config.get_config_dict(path=path, effective=effective, - key_mangling=key_mangling, - get_first_key=get_first_key, - no_multi_convert=no_multi_convert, - no_tag_node_value_mangle=no_tag_node_value_mangle) + return config.get_config_dict( + path=path, + effective=effective, + key_mangling=key_mangling, + get_first_key=get_first_key, + no_multi_convert=no_multi_convert, + no_tag_node_value_mangle=no_tag_node_value_mangle, + ) + def get_user_info(user): user_info = {} - info = get_config_dict(['system', 'login', 'user', user], - 
get_first_key=True) + info = get_config_dict(['system', 'login', 'user', user], get_first_key=True) if not info: - raise ValueError("No such user") + raise ValueError('No such user') user_info['user'] = user user_info['full_name'] = info.get('full-name', '') return user_info + class Session: """ Wrapper for calling configsession functions based on GraphQL requests. Non-nullable fields in the respective schema allow avoiding a key check in 'data'. """ + def __init__(self, session, data): self._session = session self._data = data @@ -138,7 +149,6 @@ class Session: return res def show_user_info(self): - session = self._session data = self._data user_info = {} @@ -151,10 +161,9 @@ class Session: return user_info def system_status(self): - import api.graphql.session.composite.system_status as system_status + from api.graphql.session.composite import system_status session = self._session - data = self._data status = {} status['host_name'] = session.show(['host', 'name']).strip() @@ -165,7 +174,6 @@ class Session: return status def gen_op_query(self): - session = self._session data = self._data name = self._name op_mode_list = self._op_mode_list @@ -189,7 +197,6 @@ class Session: return res def gen_op_mutation(self): - session = self._session data = self._data name = self._name op_mode_list = self._op_mode_list diff --git a/src/services/api/graphql/state.py b/src/services/api/graphql/state.py deleted file mode 100644 index 63db9f4ef..000000000 --- a/src/services/api/graphql/state.py +++ /dev/null @@ -1,4 +0,0 @@ - -def init(): - global settings - settings = {} diff --git a/src/services/api/rest/__init__.py b/src/services/api/rest/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/src/services/api/rest/__init__.py diff --git a/src/services/api/rest/models.py b/src/services/api/rest/models.py new file mode 100644 index 000000000..dda50010f --- /dev/null +++ b/src/services/api/rest/models.py @@ -0,0 +1,320 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
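The token_auth.py changes earlier in this diff keep the PyJWT-based HS256 flow; the round trip below is only a usage sketch with a placeholder secret and claims:

import datetime
import jwt  # PyJWT

secret = 'example-secret'
exp = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(hours=1)
token = jwt.encode(payload={'iss': 'vyos', 'sub': 'abc123', 'exp': exp},
                   key=secret, algorithm='HS256')
claims = jwt.decode(token, secret, algorithms=['HS256'])
print(claims['sub'])  # 'abc123'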
+ + +# pylint: disable=too-few-public-methods + +import json +from html import escape +from enum import Enum +from typing import List +from typing import Union +from typing import Dict +from typing import Self + +from pydantic import BaseModel +from pydantic import StrictStr +from pydantic import field_validator +from pydantic import model_validator +from fastapi.responses import HTMLResponse + + +def error(code, msg): + msg = escape(msg, quote=False) + resp = {'success': False, 'error': msg, 'data': None} + resp = json.dumps(resp) + return HTMLResponse(resp, status_code=code) + + +def success(data): + resp = {'success': True, 'data': data, 'error': None} + resp = json.dumps(resp) + return HTMLResponse(resp) + + +# Pydantic models for validation +# Pydantic will cast when possible, so use StrictStr validators added as +# needed for additional constraints +# json_schema_extra adds anotations to OpenAPI to add examples + + +class ApiModel(BaseModel): + key: StrictStr + + +class BasePathModel(BaseModel): + op: StrictStr + path: List[StrictStr] + + @field_validator('path') + @classmethod + def check_non_empty(cls, path: str) -> str: + if not len(path) > 0: + raise ValueError('path must be non-empty') + return path + + +class BaseConfigureModel(BasePathModel): + value: StrictStr = None + + +class ConfigureModel(ApiModel, BaseConfigureModel): + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'set | delete | comment', + 'path': ['config', 'mode', 'path'], + } + } + + +class ConfigureListModel(ApiModel): + commands: List[BaseConfigureModel] + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'commands': 'list of commands', + } + } + + +class BaseConfigSectionModel(BasePathModel): + section: Dict + + +class ConfigSectionModel(ApiModel, BaseConfigSectionModel): + pass + + +class ConfigSectionListModel(ApiModel): + commands: List[BaseConfigSectionModel] + + +class BaseConfigSectionTreeModel(BaseModel): + op: StrictStr + mask: Dict + config: Dict + + +class ConfigSectionTreeModel(ApiModel, BaseConfigSectionTreeModel): + pass + + +class RetrieveModel(ApiModel): + op: StrictStr + path: List[StrictStr] + configFormat: StrictStr = None + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'returnValue | returnValues | exists | showConfig', + 'path': ['config', 'mode', 'path'], + 'configFormat': 'json (default) | json_ast | raw', + } + } + + +class ConfigFileModel(ApiModel): + op: StrictStr + file: StrictStr = None + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'save | load', + 'file': 'filename', + } + } + + +class ImageOp(str, Enum): + add = 'add' + delete = 'delete' + show = 'show' + set_default = 'set_default' + + +class ImageModel(ApiModel): + op: ImageOp + url: StrictStr = None + name: StrictStr = None + + @model_validator(mode='after') + def check_data(self) -> Self: + if self.op == 'add': + if not self.url: + raise ValueError('Missing required field "url"') + elif self.op in ['delete', 'set_default']: + if not self.name: + raise ValueError('Missing required field "name"') + + return self + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'add | delete | show | set_default', + 'url': 'imagelocation', + 'name': 'imagename', + } + } + + +class ImportPkiModel(ApiModel): + op: StrictStr + path: List[StrictStr] + passphrase: StrictStr = None + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'import_pki', + 'path': 
['op', 'mode', 'path'], + 'passphrase': 'passphrase', + } + } + + +class ContainerImageModel(ApiModel): + op: StrictStr + name: StrictStr = None + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'add | delete | show', + 'name': 'imagename', + } + } + + +class GenerateModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'generate', + 'path': ['op', 'mode', 'path'], + } + } + + +class ShowModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'show', + 'path': ['op', 'mode', 'path'], + } + } + + +class RebootModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'reboot', + 'path': ['op', 'mode', 'path'], + } + } + + +class ResetModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'reset', + 'path': ['op', 'mode', 'path'], + } + } + + +class PoweroffModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + json_schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'poweroff', + 'path': ['op', 'mode', 'path'], + } + } + + +class TracerouteModel(ApiModel): + op: StrictStr + host: StrictStr + + class Config: + schema_extra = { + 'example': { + 'key': 'id_key', + 'op': 'traceroute', + 'host': 'host', + } + } + + +class InfoQueryParams(BaseModel): + model_config = {"extra": "forbid"} + + version: bool = True + hostname: bool = True + + +class Success(BaseModel): + success: bool + data: Union[str, bool, Dict] + error: str + + +class Error(BaseModel): + success: bool = False + data: Union[str, bool, Dict] + error: str + + +responses = { + 200: {'model': Success}, + 400: {'model': Error}, + 422: {'model': Error, 'description': 'Validation Error'}, + 500: {'model': Error}, +} diff --git a/src/services/api/rest/routers.py b/src/services/api/rest/routers.py new file mode 100644 index 000000000..e52c77fda --- /dev/null +++ b/src/services/api/rest/routers.py @@ -0,0 +1,778 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
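As a quick illustration of the request validation the models above provide (assuming pydantic v2, which the field_validator/model_validator imports imply), an empty path is rejected before any handler runs:

from pydantic import ValidationError
# ConfigureModel as defined in models.py above

try:
    ConfigureModel(key='id_key', op='set', path=[])
except ValidationError as err:
    print(err)  # check_non_empty(): 'path must be non-empty'

ok = ConfigureModel(
    key='id_key',
    op='set',
    path=['interfaces', 'ethernet', 'eth0', 'address'],
    value='192.0.2.1/24',
)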
+ + +# pylint: disable=line-too-long,raise-missing-from,invalid-name +# pylint: disable=wildcard-import,unused-wildcard-import +# pylint: disable=broad-exception-caught + +import json +import copy +import logging +import traceback +from threading import Lock +from typing import Union +from typing import Callable +from typing import TYPE_CHECKING + +from fastapi import Depends +from fastapi import Request +from fastapi import Response +from fastapi import HTTPException +from fastapi import APIRouter +from fastapi import BackgroundTasks +from fastapi.routing import APIRoute +from starlette.datastructures import FormData +from starlette.formparsers import FormParser +from starlette.formparsers import MultiPartParser +from starlette.formparsers import MultiPartException +from multipart.multipart import parse_options_header + +from vyos.config import Config +from vyos.configtree import ConfigTree +from vyos.configdiff import get_config_diff +from vyos.configsession import ConfigSessionError + +from ..session import SessionState +from .models import success +from .models import error +from .models import responses +from .models import ApiModel +from .models import ConfigureModel +from .models import ConfigureListModel +from .models import ConfigSectionModel +from .models import ConfigSectionListModel +from .models import ConfigSectionTreeModel +from .models import BaseConfigSectionTreeModel +from .models import BaseConfigureModel +from .models import BaseConfigSectionModel +from .models import RetrieveModel +from .models import ConfigFileModel +from .models import ImageModel +from .models import ContainerImageModel +from .models import GenerateModel +from .models import ShowModel +from .models import RebootModel +from .models import ResetModel +from .models import ImportPkiModel +from .models import PoweroffModel +from .models import TracerouteModel + + +if TYPE_CHECKING: + from fastapi import FastAPI + + +LOG = logging.getLogger('http_api.routers') + +lock = Lock() + + +def check_auth(key_list, key): + key_id = None + for k in key_list: + if k['key'] == key: + key_id = k['id'] + return key_id + + +def auth_required(data: ApiModel): + session = SessionState() + key = data.key + api_keys = session.keys + key_id = check_auth(api_keys, key) + if not key_id: + raise HTTPException(status_code=401, detail='Valid API key is required') + session.id = key_id + + +# override Request and APIRoute classes in order to convert form request to json; +# do all explicit validation here, for backwards compatability of error messages; +# the explicit validation may be dropped, if desired, in favor of native +# validation by FastAPI/Pydantic, as is used for application/json requests +class MultipartRequest(Request): + """Override Request class to convert form request to json""" + + # pylint: disable=attribute-defined-outside-init + # pylint: disable=too-many-branches,too-many-statements + + _form_err = () + + @property + def form_err(self): + return self._form_err + + @form_err.setter + def form_err(self, val): + if not self._form_err: + self._form_err = val + + @property + def orig_headers(self): + self._orig_headers = super().headers + return self._orig_headers + + @property + def headers(self): + self._headers = super().headers.mutablecopy() + self._headers['content-type'] = 'application/json' + return self._headers + + async def _get_form( + self, *, max_files: int | float = 1000, max_fields: int | float = 1000 + ) -> FormData: + if self._form is None: + assert ( + parse_options_header is not None + ), 'The 
`python-multipart` library must be installed to use form parsing.' + content_type_header = self.orig_headers.get('Content-Type') + content_type: bytes + content_type, _ = parse_options_header(content_type_header) + if content_type == b'multipart/form-data': + try: + multipart_parser = MultiPartParser( + self.orig_headers, + self.stream(), + max_files=max_files, + max_fields=max_fields, + ) + self._form = await multipart_parser.parse() + except MultiPartException as exc: + if 'app' in self.scope: + raise HTTPException(status_code=400, detail=exc.message) + raise exc + elif content_type == b'application/x-www-form-urlencoded': + form_parser = FormParser(self.orig_headers, self.stream()) + self._form = await form_parser.parse() + else: + self._form = FormData() + return self._form + + async def body(self) -> bytes: + if not hasattr(self, '_body'): + forms = {} + merge = {} + body = await super().body() + self._body = body + + form_data = await self.form() + if form_data: + endpoint = self.url.path + LOG.debug('processing form data') + for k, v in form_data.multi_items(): + forms[k] = v + + if 'data' not in forms: + self.form_err = (422, 'Non-empty data field is required') + return self._body + try: + tmp = json.loads(forms['data']) + except json.JSONDecodeError as e: + self.form_err = (400, f'Failed to parse JSON: {e}') + return self._body + if isinstance(tmp, list): + merge['commands'] = tmp + else: + merge = tmp + + if 'commands' in merge: + cmds = merge['commands'] + else: + cmds = copy.deepcopy(merge) + cmds = [cmds] + + for c in cmds: + if not isinstance(c, dict): + self.form_err = ( + 400, + f"Malformed command '{c}': any command must be JSON of dict", + ) + return self._body + if 'op' not in c: + self.form_err = ( + 400, + f"Malformed command '{c}': missing 'op' field", + ) + if endpoint not in ( + '/config-file', + '/container-image', + '/image', + '/configure-section', + '/traceroute', + ): + if 'path' not in c: + self.form_err = ( + 400, + f"Malformed command '{c}': missing 'path' field", + ) + elif not isinstance(c['path'], list): + self.form_err = ( + 400, + f"Malformed command '{c}': 'path' field must be a list", + ) + elif not all(isinstance(el, str) for el in c['path']): + self.form_err = ( + 400, + f"Malformed command '{0}': 'path' field must be a list of strings", + ) + if endpoint in ('/configure'): + if not c['path']: + self.form_err = ( + 400, + f"Malformed command '{c}': 'path' list must be non-empty", + ) + if 'value' in c and not isinstance(c['value'], str): + self.form_err = ( + 400, + f"Malformed command '{c}': 'value' field must be a string", + ) + if endpoint in ('/configure-section'): + if 'section' not in c and 'config' not in c: + self.form_err = ( + 400, + f"Malformed command '{c}': missing 'section' or 'config' field", + ) + + if 'key' not in forms and 'key' not in merge: + self.form_err = (401, 'Valid API key is required') + if 'key' in forms and 'key' not in merge: + merge['key'] = forms['key'] + + new_body = json.dumps(merge) + new_body = new_body.encode() + self._body = new_body + + return self._body + + +class MultipartRoute(APIRoute): + """Override APIRoute class to convert form request to json""" + + def get_route_handler(self) -> Callable: + original_route_handler = super().get_route_handler() + + async def custom_route_handler(request: Request) -> Response: + request = MultipartRequest(request.scope, request.receive) + try: + response: Response = await original_route_handler(request) + except HTTPException as e: + return error(e.status_code, e.detail) 
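+ # form-data requests that failed the explicit checks in MultipartRequest.body()
+ # reach the generic handler below, which answers with the recorded form_err
+ # instead of a bare validation error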
+ except Exception as e: + form_err = request.form_err + if form_err: + return error(*form_err) + raise e + + return response + + return custom_route_handler + + +router = APIRouter( + route_class=MultipartRoute, + responses={**responses}, + dependencies=[Depends(auth_required)], +) + + +self_ref_msg = 'Requested HTTP API server configuration change; commit will be called in the background' + + +def call_commit(s: SessionState): + try: + s.session.commit() + except ConfigSessionError as e: + s.session.discard() + if s.debug: + LOG.warning(f'ConfigSessionError:\n {traceback.format_exc()}') + else: + LOG.warning(f'ConfigSessionError: {e}') + + +def _configure_op( + data: Union[ + ConfigureModel, + ConfigureListModel, + ConfigSectionModel, + ConfigSectionListModel, + ConfigSectionTreeModel, + ], + _request: Request, + background_tasks: BackgroundTasks, +): + # pylint: disable=too-many-branches,too-many-locals,too-many-nested-blocks,too-many-statements + # pylint: disable=consider-using-with + + state = SessionState() + session = state.session + env = session.get_session_env() + + # Allow users to pass just one command + if not isinstance(data, (ConfigureListModel, ConfigSectionListModel)): + data = [data] + else: + data = data.commands + + # We don't want multiple people/apps to be able to commit at once, + # or modify the shared session while someone else is doing the same, + # so the lock is really global + lock.acquire() + + config = Config(session_env=env) + + status = 200 + msg = None + error_msg = None + try: + for c in data: + op = c.op + if not isinstance(c, BaseConfigSectionTreeModel): + path = c.path + + if isinstance(c, BaseConfigureModel): + if c.value: + value = c.value + else: + value = '' + # For vyos.configsession calls that have no separate value arguments, + # and for type checking too + cfg_path = ' '.join(path + [value]).strip() + + elif isinstance(c, BaseConfigSectionModel): + section = c.section + + elif isinstance(c, BaseConfigSectionTreeModel): + mask = c.mask + config = c.config + + if isinstance(c, BaseConfigureModel): + if op == 'set': + session.set(path, value=value) + elif op == 'delete': + if state.strict and not config.exists(cfg_path): + raise ConfigSessionError( + f'Cannot delete [{cfg_path}]: path/value does not exist' + ) + session.delete(path, value=value) + elif op == 'comment': + session.comment(path, value=value) + else: + raise ConfigSessionError(f"'{op}' is not a valid operation") + + elif isinstance(c, BaseConfigSectionModel): + if op == 'set': + session.set_section(path, section) + elif op == 'load': + session.load_section(path, section) + else: + raise ConfigSessionError(f"'{op}' is not a valid operation") + + elif isinstance(c, BaseConfigSectionTreeModel): + if op == 'set': + session.set_section_tree(config) + elif op == 'load': + session.load_section_tree(mask, config) + else: + raise ConfigSessionError(f"'{op}' is not a valid operation") + # end for + config = Config(session_env=env) + d = get_config_diff(config) + + if d.is_node_changed(['service', 'https']): + background_tasks.add_task(call_commit, state) + msg = self_ref_msg + else: + # capture non-fatal warnings + out = session.commit() + msg = out if out else msg + + LOG.info(f"Configuration modified via HTTP API using key '{state.id}'") + except ConfigSessionError as e: + session.discard() + status = 400 + if state.debug: + LOG.critical(f'ConfigSessionError:\n {traceback.format_exc()}') + error_msg = str(e) + except Exception: + session.discard() + LOG.critical(traceback.format_exc()) + 
status = 500 + + # Don't give the details away to the outer world + error_msg = 'An internal error occured. Check the logs for details.' + finally: + lock.release() + + if status != 200: + return error(status, error_msg) + + return success(msg) + + +def create_path_import_pki_no_prompt(path): + correct_paths = ['ca', 'certificate', 'key-pair'] + if path[1] not in correct_paths: + return False + path[3] = '--key-filename' + path.insert(2, '--name') + return ['--pki-type'] + path[1:] + + +@router.post('/configure') +def configure_op( + data: Union[ConfigureModel, ConfigureListModel], + request: Request, + background_tasks: BackgroundTasks, +): + return _configure_op(data, request, background_tasks) + + +@router.post('/configure-section') +def configure_section_op( + data: Union[ConfigSectionModel, ConfigSectionListModel, ConfigSectionTreeModel], + request: Request, + background_tasks: BackgroundTasks, +): + return _configure_op(data, request, background_tasks) + + +@router.post('/retrieve') +async def retrieve_op(data: RetrieveModel): + state = SessionState() + session = state.session + env = session.get_session_env() + config = Config(session_env=env) + + op = data.op + path = ' '.join(data.path) + + try: + if op == 'returnValue': + res = config.return_value(path) + elif op == 'returnValues': + res = config.return_values(path) + elif op == 'exists': + res = config.exists(path) + elif op == 'showConfig': + config_format = 'json' + if data.configFormat: + config_format = data.configFormat + + res = session.show_config(path=data.path) + if config_format == 'json': + config_tree = ConfigTree(res) + res = json.loads(config_tree.to_json()) + elif config_format == 'json_ast': + config_tree = ConfigTree(res) + res = json.loads(config_tree.to_json_ast()) + elif config_format == 'raw': + pass + else: + return error(400, f"'{config_format}' is not a valid config format") + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. Check the logs for details.') + + return success(res) + + +@router.post('/config-file') +def config_file_op(data: ConfigFileModel, background_tasks: BackgroundTasks): + state = SessionState() + session = state.session + env = session.get_session_env() + op = data.op + msg = None + + try: + if op == 'save': + if data.file: + path = data.file + else: + path = '/config/config.boot' + msg = session.save_config(path) + elif op == 'load': + if data.file: + path = data.file + else: + return error(400, 'Missing required field "file"') + + session.migrate_and_load_config(path) + + config = Config(session_env=env) + d = get_config_diff(config) + + if d.is_node_changed(['service', 'https']): + background_tasks.add_task(call_commit, state) + msg = self_ref_msg + else: + session.commit() + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. 
Check the logs for details.') + + return success(msg) + + +@router.post('/image') +def image_op(data: ImageModel): + state = SessionState() + session = state.session + + op = data.op + + try: + if op == 'add': + res = session.install_image(data.url) + elif op == 'delete': + res = session.remove_image(data.name) + elif op == 'show': + res = session.show(['system', 'image']) + elif op == 'set_default': + res = session.set_default_image(data.name) + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. Check the logs for details.') + + return success(res) + + +@router.post('/container-image') +def container_image_op(data: ContainerImageModel): + state = SessionState() + session = state.session + + op = data.op + + try: + if op == 'add': + if data.name: + name = data.name + else: + return error(400, 'Missing required field "name"') + res = session.add_container_image(name) + elif op == 'delete': + if data.name: + name = data.name + else: + return error(400, 'Missing required field "name"') + res = session.delete_container_image(name) + elif op == 'show': + res = session.show_container_image() + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. Check the logs for details.') + + return success(res) + + +@router.post('/generate') +def generate_op(data: GenerateModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'generate': + res = session.generate(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. Check the logs for details.') + + return success(res) + + +@router.post('/show') +def show_op(data: ShowModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'show': + res = session.show(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. Check the logs for details.') + + return success(res) + + +@router.post('/reboot') +def reboot_op(data: RebootModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'reboot': + res = session.reboot(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. Check the logs for details.') + + return success(res) + + +@router.post('/reset') +def reset_op(data: ResetModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'reset': + res = session.reset(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. 
Check the logs for details.') + + return success(res) + + +@router.post('/import-pki') +def import_pki(data: ImportPkiModel): + # pylint: disable=consider-using-with + + state = SessionState() + session = state.session + + op = data.op + path = data.path + + lock.acquire() + + try: + if op == 'import-pki': + # need to get rid or interactive mode for private key + if len(path) == 5 and path[3] in ['key-file', 'private-key']: + path_no_prompt = create_path_import_pki_no_prompt(path) + if not path_no_prompt: + return error(400, f"Invalid command: {' '.join(path)}") + if data.passphrase: + path_no_prompt += ['--passphrase', data.passphrase] + res = session.import_pki_no_prompt(path_no_prompt) + else: + res = session.import_pki(path) + if not res[0].isdigit(): + return error(400, res) + # commit changes + session.commit() + res = res.split('. ')[0] + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. Check the logs for details.') + finally: + lock.release() + + return success(res) + + +@router.post('/poweroff') +def poweroff_op(data: PoweroffModel): + state = SessionState() + session = state.session + + op = data.op + path = data.path + + try: + if op == 'poweroff': + res = session.poweroff(path) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. Check the logs for details.') + + return success(res) + + +@router.post('/traceroute') +def traceroute_op(data: TracerouteModel): + state = SessionState() + session = state.session + + op = data.op + host = data.host + + try: + if op == 'traceroute': + res = session.traceroute(host) + else: + return error(400, f"'{op}' is not a valid operation") + except ConfigSessionError as e: + return error(400, str(e)) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occurred. Check the logs for details.') + + return success(res) + + +def rest_init(app: 'FastAPI'): + if all(r in app.routes for r in router.routes): + return + app.include_router(router) + + +def rest_clear(app: 'FastAPI'): + for r in router.routes: + if r in app.routes: + app.routes.remove(r) diff --git a/src/services/api/session.py b/src/services/api/session.py new file mode 100644 index 000000000..ad3ef660c --- /dev/null +++ b/src/services/api/session.py @@ -0,0 +1,41 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
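+
+# Singleton holding the state shared by the API daemon: every instantiation of
+# SessionState returns the same object, so the REST routers and the main
+# vyos-http-api-server process operate on one ConfigSession, one API key list
+# and one set of feature flags (rest, graphql, debug, strict, ...).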
+ + +class SessionState: + # pylint: disable=attribute-defined-outside-init + # pylint: disable=too-many-instance-attributes,too-few-public-methods + + _instance = None + + def __new__(cls): + if cls._instance is None: + cls._instance = super(SessionState, cls).__new__(cls) + cls._instance._initialize() + return cls._instance + + def _initialize(self): + self.session = None + self.keys = [] + self.id = None + self.rest = False + self.debug = False + self.strict = False + self.graphql = False + self.origins = [] + self.introspection = False + self.auth_type = None + self.token_exp = None + self.secret_len = None diff --git a/src/services/vyos-commitd b/src/services/vyos-commitd new file mode 100755 index 000000000..e7f2d82c7 --- /dev/null +++ b/src/services/vyos-commitd @@ -0,0 +1,457 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2025 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +# +import os +import sys +import grp +import json +import signal +import socket +import typing +import logging +import traceback +import importlib.util +import io +from contextlib import redirect_stdout +from dataclasses import dataclass +from dataclasses import fields +from dataclasses import field +from dataclasses import asdict +from pathlib import Path + +import tomli + +from google.protobuf.json_format import MessageToDict +from google.protobuf.json_format import ParseDict + +from vyos.defaults import directories +from vyos.utils.boot import boot_configuration_complete +from vyos.configsource import ConfigSourceCache +from vyos.configsource import ConfigSourceError +from vyos.config import Config +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict +from vyos import ConfigError + +from vyos.proto import vycall_pb2 + + +@dataclass +class Status: + success: bool = False + out: str = '' + + +@dataclass +class Call: + script_name: str = '' + tag_value: str = None + arg_value: str = None + reply: Status = None + + def set_reply(self, success: bool, out: str): + self.reply = Status(success=success, out=out) + + +@dataclass +class Session: + # pylint: disable=too-many-instance-attributes + + session_id: str = '' + dry_run: bool = False + atomic: bool = False + background: bool = False + config: Config = None + init: Status = None + calls: list[Call] = field(default_factory=list) + + def set_init(self, success: bool, out: str): + self.init = Status(success=success, out=out) + + +@dataclass +class ServerConf: + commitd_socket: str = '' + session_dir: str = '' + running_cache: str = '' + session_cache: str = '' + + +server_conf = None +SOCKET_PATH = None +conf_mode_scripts = None +frr = None + +CFG_GROUP = 'vyattacfg' + +script_stdout_log = '/tmp/vyos-commitd-script-stdout' + +debug = True + +logger = logging.getLogger(__name__) +logs_handler = logging.StreamHandler() +logger.addHandler(logs_handler) + +if debug: + logger.setLevel(logging.DEBUG) +else: + logger.setLevel(logging.INFO) + + +vyos_conf_scripts_dir = 
directories['conf_mode'] +commitd_include_file = os.path.join(directories['data'], 'configd-include.json') + + +def key_name_from_file_name(f): + return os.path.splitext(f)[0] + + +def module_name_from_key(k): + return k.replace('-', '_') + + +def path_from_file_name(f): + return os.path.join(vyos_conf_scripts_dir, f) + + +def load_conf_mode_scripts(): + with open(commitd_include_file) as f: + try: + include = json.load(f) + except OSError as e: + logger.critical(f'configd include file error: {e}') + sys.exit(1) + except json.JSONDecodeError as e: + logger.critical(f'JSON load error: {e}') + sys.exit(1) + + # import conf_mode scripts + (_, _, filenames) = next(iter(os.walk(vyos_conf_scripts_dir))) + filenames.sort() + + # this is redundant, as all scripts are currently in the include file; + # leave it as an inexpensive check for future changes + load_filenames = [f for f in filenames if f in include] + imports = [key_name_from_file_name(f) for f in load_filenames] + module_names = [module_name_from_key(k) for k in imports] + paths = [path_from_file_name(f) for f in load_filenames] + to_load = list(zip(module_names, paths)) + + modules = [] + + for x in to_load: + spec = importlib.util.spec_from_file_location(x[0], x[1]) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + modules.append(module) + + scripts = dict(zip(imports, modules)) + + return scripts + + +def get_session_out(session: Session) -> str: + out = '' + if session.init and session.init.out: + out = f'{out} + init: {session.init.out} + \n' + for call in session.calls: + reply = call.reply + if reply and reply.out: + out = f'{out} + {call.script_name}: {reply.out} + \n' + return out + + +def write_stdout_log(file_name, session): + if boot_configuration_complete(): + return + with open(file_name, 'a') as f: + f.write(get_session_out(session)) + + +def msg_to_commit_data(msg: vycall_pb2.Commit) -> Session: + # pylint: disable=no-member + + d = MessageToDict(msg, preserving_proto_field_name=True) + + # wrap in dataclasses + session = Session(**d) + session.init = Status(**session.init) if session.init else None + session.calls = list(map(lambda x: Call(**x), session.calls)) + for call in session.calls: + call.reply = Status(**call.reply) if call.reply else None + + return session + + +def commit_data_to_msg(obj: Session) -> vycall_pb2.Commit: + # pylint: disable=no-member + + # avoid asdict attempt of deepcopy on Config obj + obj.config = None + + msg = vycall_pb2.Commit() + msg = ParseDict(asdict(obj), msg, ignore_unknown_fields=True) + + return msg + + +def initialization(session: Session) -> Session: + running_cache = os.path.join(server_conf.session_dir, server_conf.running_cache) + session_cache = os.path.join(server_conf.session_dir, server_conf.session_cache) + try: + configsource = ConfigSourceCache( + running_config_cache=running_cache, + session_config_cache=session_cache, + ) + except ConfigSourceError as e: + fail_msg = f'Failed to read config caches: {e}' + logger.critical(fail_msg) + session.set_init(False, fail_msg) + return session + + session.set_init(True, '') + + config = Config(config_source=configsource) + + dependent_func: dict[str, list[typing.Callable]] = {} + setattr(config, 'dependent_func', dependent_func) + + scripts_called = [] + setattr(config, 'scripts_called', scripts_called) + + dry_run = session.dry_run + config.set_bool_attr('dry_run', dry_run) + logger.debug(f'commit dry_run is {dry_run}') + + session.config = config + + return session + + +def 
run_script(script_name: str, config: Config, args: list) -> tuple[bool, str]: + # pylint: disable=broad-exception-caught + + script = conf_mode_scripts[script_name] + script.argv = args + config.set_level([]) + dry_run = config.get_bool_attr('dry_run') + try: + c = script.get_config(config) + script.verify(c) + if not dry_run: + script.generate(c) + script.apply(c) + else: + if hasattr(script, 'call_dependents'): + script.call_dependents() + except ConfigError as e: + logger.error(e) + return False, str(e) + except Exception: + tb = traceback.format_exc() + logger.error(tb) + return False, tb + + return True, '' + + +def process_call_data(call: Call, config: Config, last: bool = False) -> None: + # pylint: disable=too-many-locals + + script_name = key_name_from_file_name(call.script_name) + + if script_name not in conf_mode_scripts: + fail_msg = f'No such script: {call.script_name}' + logger.critical(fail_msg) + call.set_reply(False, fail_msg) + return + + config.dependency_list.clear() + + tag_value = call.tag_value if call.tag_value is not None else '' + os.environ['VYOS_TAGNODE_VALUE'] = tag_value + + args = call.arg_value.split() if call.arg_value else [] + args.insert(0, f'{script_name}.py') + + tag_ext = f'_{tag_value}' if tag_value else '' + script_record = f'{script_name}{tag_ext}' + scripts_called = getattr(config, 'scripts_called', []) + scripts_called.append(script_record) + + with redirect_stdout(io.StringIO()) as o: + success, err_out = run_script(script_name, config, args) + amb_out = o.getvalue() + o.close() + + out = amb_out + err_out + + call.set_reply(success, out) + + logger.info(f'[{script_name}] {out}') + + if last: + scripts_called = getattr(config, 'scripts_called', []) + logger.debug(f'scripts_called: {scripts_called}') + + if last and success: + tmp = get_frrender_dict(config) + if frr.generate(tmp): + # only apply a new FRR configuration if anything changed + # in comparison to the previous applied configuration + frr.apply() + + +def process_session_data(session: Session) -> Session: + if session.init is None or not session.init.success: + return session + + config = session.config + len_calls = len(session.calls) + for index, call in enumerate(session.calls): + process_call_data(call, config, last=len_calls == index + 1) + + return session + + +def read_message(msg: bytes) -> Session: + """Read message into Session instance""" + + message = vycall_pb2.Commit() # pylint: disable=no-member + message.ParseFromString(msg) + session = msg_to_commit_data(message) + + session = initialization(session) + session = process_session_data(session) + + write_stdout_log(script_stdout_log, session) + + return session + + +def write_reply(session: Session) -> bytearray: + """Serialize modified object to bytearray, prepending data length + header""" + + reply = commit_data_to_msg(session) + encoded_data = reply.SerializeToString() + byte_size = reply.ByteSize() + length_bytes = byte_size.to_bytes(4) + arr = bytearray(length_bytes) + arr.extend(encoded_data) + + return arr + + +def load_server_conf() -> ServerConf: + # pylint: disable=import-outside-toplevel + # pylint: disable=broad-exception-caught + from vyos.defaults import vyconfd_conf + + try: + with open(vyconfd_conf, 'rb') as f: + vyconfd_conf_d = tomli.load(f) + + except Exception as e: + logger.critical(f'Failed to open the vyconfd.conf file {vyconfd_conf}: {e}') + sys.exit(1) + + app = vyconfd_conf_d.get('appliance', {}) + + conf_data = { + k: v for k, v in app.items() if k in [_.name for _ in fields(ServerConf)] + } + 
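+ # only the appliance settings that correspond to ServerConf fields are kept;
+ # any other keys in vyconfd.conf are ignored here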
+ conf = ServerConf(**conf_data) + + return conf + + +def remove_if_exists(f: str): + try: + os.unlink(f) + except FileNotFoundError: + pass + + +def sig_handler(_signum, _frame): + logger.info('stopping server') + raise KeyboardInterrupt + + +def run_server(): + # pylint: disable=global-statement + + global server_conf + global SOCKET_PATH + global conf_mode_scripts + global frr + + signal.signal(signal.SIGTERM, sig_handler) + signal.signal(signal.SIGINT, sig_handler) + + logger.info('starting server') + + server_conf = load_server_conf() + SOCKET_PATH = server_conf.commitd_socket + conf_mode_scripts = load_conf_mode_scripts() + + cfg_group = grp.getgrnam(CFG_GROUP) + os.setgid(cfg_group.gr_gid) + + server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + + remove_if_exists(SOCKET_PATH) + server_socket.bind(SOCKET_PATH) + Path(SOCKET_PATH).chmod(0o775) + + # We only need one long-lived instance of FRRender + frr = FRRender() + + server_socket.listen(2) + while True: + try: + conn, _ = server_socket.accept() + logger.debug('connection accepted') + while True: + # receive size of data + data_length = conn.recv(4) + if not data_length: + logger.debug('no data') + # if no data break + break + + length = int.from_bytes(data_length) + # receive data + data = conn.recv(length) + + session = read_message(data) + reply = write_reply(session) + conn.sendall(reply) + + conn.close() + logger.debug('connection closed') + + except KeyboardInterrupt: + break + + server_socket.close() + sys.exit(0) + + +if __name__ == '__main__': + run_server() diff --git a/src/services/vyos-configd b/src/services/vyos-configd index 3674d9627..28acccd2c 100755 --- a/src/services/vyos-configd +++ b/src/services/vyos-configd @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# pylint: disable=redefined-outer-name + import os import sys import grp @@ -22,9 +24,13 @@ import json import typing import logging import signal +import traceback import importlib.util +import io +from contextlib import redirect_stdout +from enum import Enum + import zmq -from contextlib import contextmanager from vyos.defaults import directories from vyos.utils.boot import boot_configuration_complete @@ -32,6 +38,8 @@ from vyos.configsource import ConfigSourceString from vyos.configsource import ConfigSourceError from vyos.configdiff import get_commit_scripts from vyos.config import Config +from vyos.frrender import FRRender +from vyos.frrender import get_frrender_dict from vyos import ConfigError CFG_GROUP = 'vyattacfg' @@ -49,13 +57,18 @@ if debug: else: logger.setLevel(logging.INFO) -SOCKET_PATH = "ipc:///run/vyos-configd.sock" +SOCKET_PATH = 'ipc:///run/vyos-configd.sock' +MAX_MSG_SIZE = 65535 +PAD_MSG_SIZE = 6 + # Response error codes -R_SUCCESS = 1 -R_ERROR_COMMIT = 2 -R_ERROR_DAEMON = 4 -R_PASS = 8 +class Response(Enum): + SUCCESS = 1 + ERROR_COMMIT = 2 + ERROR_DAEMON = 4 + PASS = 8 + vyos_conf_scripts_dir = directories['conf_mode'] configd_include_file = os.path.join(directories['data'], 'configd-include.json') @@ -64,29 +77,31 @@ configd_env_unset_file = os.path.join(directories['data'], 'vyos-configd-env-uns # sourced on entering config session configd_env_file = '/etc/default/vyos-configd-env' -session_out = None -session_mode = None def key_name_from_file_name(f): return os.path.splitext(f)[0] + def module_name_from_key(k): return k.replace('-', '_') + def path_from_file_name(f): return os.path.join(vyos_conf_scripts_dir, f) + # opt-in to be run by daemon with open(configd_include_file) as f: try: include = json.load(f) except OSError as e: - logger.critical(f"configd include file error: {e}") + logger.critical(f'configd include file error: {e}') sys.exit(1) except json.JSONDecodeError as e: - logger.critical(f"JSON load error: {e}") + logger.critical(f'JSON load error: {e}') sys.exit(1) + # import conf_mode scripts (_, _, filenames) = next(iter(os.walk(vyos_conf_scripts_dir))) filenames.sort() @@ -110,31 +125,17 @@ conf_mode_scripts = dict(zip(imports, modules)) exclude_set = {key_name_from_file_name(f) for f in filenames if f not in include} include_set = {key_name_from_file_name(f) for f in filenames if f in include} -@contextmanager -def stdout_redirected(filename, mode): - saved_stdout_fd = None - destination_file = None - try: - sys.stdout.flush() - saved_stdout_fd = os.dup(sys.stdout.fileno()) - destination_file = open(filename, mode) - os.dup2(destination_file.fileno(), sys.stdout.fileno()) - yield - finally: - if saved_stdout_fd is not None: - os.dup2(saved_stdout_fd, sys.stdout.fileno()) - os.close(saved_stdout_fd) - if destination_file is not None: - destination_file.close() - -def explicit_print(path, mode, msg): - try: - with open(path, mode) as f: - f.write(f"\n{msg}\n\n") - except OSError: - logger.critical("error explicit_print") -def run_script(script_name, config, args) -> int: +def write_stdout_log(file_name, msg): + if boot_configuration_complete(): + return + with open(file_name, 'a') as f: + f.write(msg) + + +def run_script(script_name, config, args) -> tuple[Response, str]: + # pylint: disable=broad-exception-caught + script = conf_mode_scripts[script_name] script.argv = args config.set_level([]) @@ -145,64 +146,54 @@ def run_script(script_name, config, args) -> int: script.apply(c) except ConfigError as e: logger.error(e) - 
explicit_print(session_out, session_mode, str(e)) - return R_ERROR_COMMIT - except Exception as e: - logger.critical(e) - return R_ERROR_DAEMON + return Response.ERROR_COMMIT, str(e) + except Exception: + tb = traceback.format_exc() + logger.error(tb) + return Response.ERROR_COMMIT, tb + + return Response.SUCCESS, '' - return R_SUCCESS def initialization(socket): - global session_out - global session_mode + # pylint: disable=broad-exception-caught,too-many-locals + # Reset config strings: active_string = '' session_string = '' # check first for resent init msg, in case of client timeout while True: - msg = socket.recv().decode("utf-8", "ignore") + msg = socket.recv().decode('utf-8', 'ignore') try: message = json.loads(msg) - if message["type"] == "init": - resp = "init" + if message['type'] == 'init': + resp = 'init' socket.send(resp.encode()) - except: + except Exception: break # zmq synchronous for ipc from single client: active_string = msg - resp = "active" + resp = 'active' socket.send(resp.encode()) - session_string = socket.recv().decode("utf-8", "ignore") - resp = "session" + session_string = socket.recv().decode('utf-8', 'ignore') + resp = 'session' socket.send(resp.encode()) - pid_string = socket.recv().decode("utf-8", "ignore") - resp = "pid" + pid_string = socket.recv().decode('utf-8', 'ignore') + resp = 'pid' socket.send(resp.encode()) - sudo_user_string = socket.recv().decode("utf-8", "ignore") - resp = "sudo_user" + sudo_user_string = socket.recv().decode('utf-8', 'ignore') + resp = 'sudo_user' socket.send(resp.encode()) - temp_config_dir_string = socket.recv().decode("utf-8", "ignore") - resp = "temp_config_dir" + temp_config_dir_string = socket.recv().decode('utf-8', 'ignore') + resp = 'temp_config_dir' socket.send(resp.encode()) - changes_only_dir_string = socket.recv().decode("utf-8", "ignore") - resp = "changes_only_dir" + changes_only_dir_string = socket.recv().decode('utf-8', 'ignore') + resp = 'changes_only_dir' socket.send(resp.encode()) - logger.debug(f"config session pid is {pid_string}") - logger.debug(f"config session sudo_user is {sudo_user_string}") - - try: - session_out = os.readlink(f"/proc/{pid_string}/fd/1") - session_mode = 'w' - except FileNotFoundError: - session_out = None - - # if not a 'live' session, for example on boot, write to file - if not session_out or not boot_configuration_complete(): - session_out = script_stdout_log - session_mode = 'a' + logger.debug(f'config session pid is {pid_string}') + logger.debug(f'config session sudo_user is {sudo_user_string}') os.environ['SUDO_USER'] = sudo_user_string if temp_config_dir_string: @@ -211,8 +202,9 @@ def initialization(socket): os.environ['VYATTA_CHANGES_ONLY_DIR'] = changes_only_dir_string try: - configsource = ConfigSourceString(running_config_text=active_string, - session_config_text=session_string) + configsource = ConfigSourceString( + running_config_text=active_string, session_config_text=session_string + ) except ConfigSourceError as e: logger.debug(e) return None @@ -229,10 +221,12 @@ def initialization(socket): return config -def process_node_data(config, data, last: bool = False) -> int: + +def process_node_data(config, data, _last: bool = False) -> tuple[Response, str]: if not config: - logger.critical(f"Empty config") - return R_ERROR_DAEMON + out = 'Empty config' + logger.critical(out) + return Response.ERROR_DAEMON, out script_name = None os.environ['VYOS_TAGNODE_VALUE'] = '' @@ -246,8 +240,9 @@ def process_node_data(config, data, last: bool = False) -> int: if res.group(2): 
script_name = res.group(2) if not script_name: - logger.critical(f"Missing script_name") - return R_ERROR_DAEMON + out = 'Missing script_name' + logger.critical(out) + return Response.ERROR_DAEMON, out if res.group(3): args = res.group(3).split() args.insert(0, f'{script_name}.py') @@ -259,26 +254,46 @@ def process_node_data(config, data, last: bool = False) -> int: scripts_called.append(script_record) if script_name not in include_set: - return R_PASS + return Response.PASS, '' + + with redirect_stdout(io.StringIO()) as o: + result, err_out = run_script(script_name, config, args) + amb_out = o.getvalue() + o.close() + + out = amb_out + err_out + + return result, out + + +def send_result(sock, err, msg): + err_no = err.value + err_name = err.name + msg = msg if msg else '' + msg_size = min(MAX_MSG_SIZE, len(msg)) - with stdout_redirected(session_out, session_mode): - result = run_script(script_name, config, args) + err_rep = err_no.to_bytes(1) + msg_size_rep = f'{msg_size:#0{PAD_MSG_SIZE}x}' + + logger.debug(f'Sending reply: {err_name} with output') + sock.send_multipart([err_rep, msg_size_rep.encode(), msg.encode()]) + + write_stdout_log(script_stdout_log, msg) - return result def remove_if_file(f: str): try: os.remove(f) except FileNotFoundError: pass - except OSError: - raise + def shutdown(): remove_if_file(configd_env_file) os.symlink(configd_env_unset_file, configd_env_file) sys.exit(0) + if __name__ == '__main__': context = zmq.Context() socket = context.socket(zmq.REP) @@ -294,6 +309,7 @@ if __name__ == '__main__': os.environ['VYOS_CONFIGD'] = 't' def sig_handler(signum, frame): + # pylint: disable=unused-argument shutdown() signal.signal(signal.SIGTERM, sig_handler) @@ -303,25 +319,33 @@ if __name__ == '__main__': remove_if_file(configd_env_file) os.symlink(configd_env_set_file, configd_env_file) - config = None + # We only need one long-lived instance of FRRender + frr = FRRender() + config = None while True: # Wait for next request from client msg = socket.recv().decode() - logger.debug(f"Received message: {msg}") + logger.debug(f'Received message: {msg}') message = json.loads(msg) - if message["type"] == "init": - resp = "init" + if message['type'] == 'init': + resp = 'init' socket.send(resp.encode()) config = initialization(socket) - elif message["type"] == "node": - res = process_node_data(config, message["data"], message["last"]) - response = res.to_bytes(1, byteorder=sys.byteorder) - logger.debug(f"Sending response {res}") - socket.send(response) - if message["last"] and config: + elif message['type'] == 'node': + res, out = process_node_data(config, message['data'], message['last']) + send_result(socket, res, out) + + if message['last'] and config: scripts_called = getattr(config, 'scripts_called', []) logger.debug(f'scripts_called: {scripts_called}') + + if res == Response.SUCCESS: + tmp = get_frrender_dict(config) + if frr.generate(tmp): + # only apply a new FRR configuration if anything changed + # in comparison to the previous applied configuration + frr.apply() else: - logger.critical(f"Unexpected message: {message}") + logger.critical(f'Unexpected message: {message}') diff --git a/src/services/vyos-conntrack-logger b/src/services/vyos-conntrack-logger index 9c31b465f..ec0e1f717 100755 --- a/src/services/vyos-conntrack-logger +++ b/src/services/vyos-conntrack-logger @@ -15,10 +15,8 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import argparse -import grp import logging import multiprocessing -import os import queue import signal import socket diff --git a/src/services/vyos-domain-resolver b/src/services/vyos-domain-resolver new file mode 100755 index 000000000..fb18724af --- /dev/null +++ b/src/services/vyos-domain-resolver @@ -0,0 +1,313 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022-2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +import json +import time +import logging +import os + +from vyos.configdict import dict_merge +from vyos.configquery import ConfigTreeQuery +from vyos.firewall import fqdn_config_parse +from vyos.firewall import fqdn_resolve +from vyos.ifconfig import WireGuardIf +from vyos.remote import download +from vyos.utils.commit import commit_in_progress +from vyos.utils.dict import dict_search_args +from vyos.utils.kernel import WIREGUARD_REKEY_AFTER_TIME +from vyos.utils.file import makedir, chmod_775, write_file, read_file +from vyos.utils.network import is_valid_ipv4_address_or_range, is_valid_ipv6_address_or_range +from vyos.utils.process import cmd +from vyos.utils.process import run +from vyos.xml_ref import get_defaults + +base = ['firewall'] +timeout = 300 +cache = False +base_firewall = ['firewall'] +base_nat = ['nat'] +base_interfaces = ['interfaces'] + +firewall_config_dir = "/config/firewall" + +domain_state = {} + +ipv4_tables = { + 'ip vyos_mangle', + 'ip vyos_filter', + 'ip vyos_nat', + 'ip raw' +} + +ipv6_tables = { + 'ip6 vyos_mangle', + 'ip6 vyos_filter', + 'ip6 raw' +} + +logger = logging.getLogger(__name__) +logs_handler = logging.StreamHandler() +logger.addHandler(logs_handler) +logger.setLevel(logging.INFO) + +def get_config(conf, node): + node_config = conf.get_config_dict(node, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + default_values = get_defaults(node, get_first_key=True) + + node_config = dict_merge(default_values, node_config) + + if node == base_firewall and 'global_options' in node_config: + global_config = node_config['global_options'] + global timeout, cache + + if 'resolver_interval' in global_config: + timeout = int(global_config['resolver_interval']) + + if 'resolver_cache' in global_config: + cache = True + + fqdn_config_parse(node_config, node[0]) + + return node_config + +def resolve(domains, ipv6=False): + global domain_state + + ip_list = set() + + for domain in domains: + resolved = fqdn_resolve(domain, ipv6=ipv6) + + cache_key = f'{domain}_ipv6' if ipv6 else domain + + if resolved and cache: + domain_state[cache_key] = resolved + elif not resolved: + if cache_key not in domain_state: + continue + resolved = domain_state[cache_key] + + ip_list = ip_list | resolved + return ip_list + +def nft_output(table, set_name, ip_list): + output = [f'flush set {table} {set_name}'] + if ip_list: + ip_str = ','.join(ip_list) + output.append(f'add element {table} {set_name} {{ {ip_str} }}') + return output + +def nft_valid_sets(): + try: + valid_sets 
= [] + sets_json = cmd('nft --json list sets') + sets_obj = json.loads(sets_json) + + for obj in sets_obj['nftables']: + if 'set' in obj: + family = obj['set']['family'] + table = obj['set']['table'] + name = obj['set']['name'] + valid_sets.append((f'{family} {table}', name)) + + return valid_sets + except: + return [] + +def update_remote_group(config): + conf_lines = [] + count = 0 + valid_sets = nft_valid_sets() + + remote_groups = dict_search_args(config, 'group', 'remote_group') + if remote_groups: + # Create directory for list files if necessary + if not os.path.isdir(firewall_config_dir): + makedir(firewall_config_dir, group='vyattacfg') + chmod_775(firewall_config_dir) + + for set_name, remote_config in remote_groups.items(): + if 'url' not in remote_config: + continue + nft_ip_set_name = f'R_{set_name}' + nft_ip6_set_name = f'R6_{set_name}' + + # Create list file if necessary + list_file = os.path.join(firewall_config_dir, f"{nft_ip_set_name}.txt") + if not os.path.exists(list_file): + write_file(list_file, '', user="root", group="vyattacfg", mode=0o644) + + # Attempt to download file, use cached version if download fails + try: + download(list_file, remote_config['url'], raise_error=True) + except: + logger.error(f'Failed to download list-file for {set_name} remote group') + logger.info(f'Using cached list-file for {set_name} remote group') + + # Read list file + ip_list = [] + ip6_list = [] + invalid_list = [] + for line in read_file(list_file).splitlines(): + line_first_word = line.strip().partition(' ')[0] + + if is_valid_ipv4_address_or_range(line_first_word): + ip_list.append(line_first_word) + elif is_valid_ipv6_address_or_range(line_first_word): + ip6_list.append(line_first_word) + else: + if line_first_word[0].isalnum(): + invalid_list.append(line_first_word) + + # Load ip tables + for table in ipv4_tables: + if (table, nft_ip_set_name) in valid_sets: + conf_lines += nft_output(table, nft_ip_set_name, ip_list) + + # Load ip6 tables + for table in ipv6_tables: + if (table, nft_ip6_set_name) in valid_sets: + conf_lines += nft_output(table, nft_ip6_set_name, ip6_list) + + invalid_str = ", ".join(invalid_list) + if invalid_str: + logger.info(f'Invalid address for set {set_name}: {invalid_str}') + + count += 1 + + nft_conf_str = "\n".join(conf_lines) + "\n" + code = run(f'nft --file -', input=nft_conf_str) + + logger.info(f'Updated {count} remote-groups in firewall - result: {code}') + + +def update_fqdn(config, node): + conf_lines = [] + count = 0 + valid_sets = nft_valid_sets() + + if node == 'firewall': + domain_groups = dict_search_args(config, 'group', 'domain_group') + if domain_groups: + for set_name, domain_config in domain_groups.items(): + if 'address' not in domain_config: + continue + nft_set_name = f'D_{set_name}' + domains = domain_config['address'] + + ip_list = resolve(domains, ipv6=False) + for table in ipv4_tables: + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip_list) + ip6_list = resolve(domains, ipv6=True) + for table in ipv6_tables: + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip6_list) + count += 1 + + for set_name, domain in config['ip_fqdn'].items(): + table = 'ip vyos_filter' + nft_set_name = f'FQDN_{set_name}' + ip_list = resolve([domain], ipv6=False) + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip_list) + count += 1 + + for set_name, domain in config['ip6_fqdn'].items(): + table = 'ip6 vyos_filter' + nft_set_name = 
f'FQDN_{set_name}' + ip_list = resolve([domain], ipv6=True) + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip_list) + count += 1 + + else: + # It's NAT + for set_name, domain in config['ip_fqdn'].items(): + table = 'ip vyos_nat' + nft_set_name = f'FQDN_nat_{set_name}' + ip_list = resolve([domain], ipv6=False) + if (table, nft_set_name) in valid_sets: + conf_lines += nft_output(table, nft_set_name, ip_list) + count += 1 + + nft_conf_str = "\n".join(conf_lines) + "\n" + code = run(f'nft --file -', input=nft_conf_str) + + logger.info(f'Updated {count} sets in {node} - result: {code}') + +def update_interfaces(config, node): + if node == 'interfaces': + wg_interfaces = dict_search_args(config, 'wireguard') + if wg_interfaces: + + peer_public_keys = {} + # for each wireguard interfaces + for interface, wireguard in wg_interfaces.items(): + peer_public_keys[interface] = [] + for peer, peer_config in wireguard['peer'].items(): + # check peer if peer host-name or address is set + if 'host_name' in peer_config or 'address' in peer_config: + # check latest handshake + peer_public_keys[interface].append( + peer_config['public_key'] + ) + + now_time = time.time() + for (interface, check_peer_public_keys) in peer_public_keys.items(): + if len(check_peer_public_keys) == 0: + continue + + intf = WireGuardIf(interface, create=False, debug=False) + handshakes = intf.operational.get_latest_handshakes() + + # WireGuard performs a handshake every WIREGUARD_REKEY_AFTER_TIME + # if data is being transmitted between the peers. If no data is + # transmitted, the handshake will not be initiated unless new + # data begins to flow. Each handshake generates a new session + # key, and the key is rotated at least every 120 seconds or + # upon data transmission after a prolonged silence. 
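+ # A peer that never completed a handshake, or whose last handshake is older
+ # than three rekey intervals, is treated as stale and reset so its endpoint
+ # (address or resolved host-name) is applied again.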
+ for public_key, handshake_time in handshakes.items(): + if public_key in check_peer_public_keys and ( + handshake_time == 0 + or (now_time - handshake_time > 3*WIREGUARD_REKEY_AFTER_TIME) + ): + intf.operational.reset_peer(public_key=public_key) + +if __name__ == '__main__': + logger.info('VyOS domain resolver') + + count = 1 + while commit_in_progress(): + if ( count % 60 == 0 ): + logger.info(f'Commit still in progress after {count}s - waiting') + count += 1 + time.sleep(1) + + conf = ConfigTreeQuery() + firewall = get_config(conf, base_firewall) + nat = get_config(conf, base_nat) + interfaces = get_config(conf, base_interfaces) + + logger.info(f'interval: {timeout}s - cache: {cache}') + + while True: + update_fqdn(firewall, 'firewall') + update_fqdn(nat, 'nat') + update_remote_group(firewall) + update_interfaces(interfaces, 'interfaces') + time.sleep(timeout) diff --git a/src/services/vyos-hostsd b/src/services/vyos-hostsd index 1ba90471e..44f03586c 100755 --- a/src/services/vyos-hostsd +++ b/src/services/vyos-hostsd @@ -233,10 +233,7 @@ # } import os -import sys -import time import json -import signal import traceback import re import logging @@ -245,7 +242,6 @@ import zmq from voluptuous import Schema, MultipleInvalid, Required, Any from collections import OrderedDict from vyos.utils.file import makedir -from vyos.utils.permission import chown from vyos.utils.permission import chmod_755 from vyos.utils.process import popen from vyos.utils.process import process_named_running diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server index 97633577d..be3dd5051 100755 --- a/src/services/vyos-http-api-server +++ b/src/services/vyos-http-api-server @@ -17,946 +17,135 @@ import os import sys import grp -import copy import json import logging import signal import traceback -import threading -from enum import Enum - from time import sleep -from typing import List, Union, Callable, Dict, Self +from typing import Annotated -from fastapi import FastAPI, Depends, Request, Response, HTTPException -from fastapi import BackgroundTasks -from fastapi.responses import HTMLResponse +from fastapi import FastAPI, Query from fastapi.exceptions import RequestValidationError -from fastapi.routing import APIRoute -from pydantic import BaseModel, StrictStr, validator, model_validator -from starlette.middleware.cors import CORSMiddleware -from starlette.datastructures import FormData -from starlette.formparsers import FormParser, MultiPartParser -from multipart.multipart import parse_options_header from uvicorn import Config as UvicornConfig from uvicorn import Server as UvicornServer -from ariadne.asgi import GraphQL - -from vyos.config import Config -from vyos.configtree import ConfigTree -from vyos.configdiff import get_config_diff from vyos.configsession import ConfigSession -from vyos.configsession import ConfigSessionError from vyos.defaults import api_config_state +from vyos.utils.file import read_file +from vyos.version import get_version -import api.graphql.state +from api.session import SessionState +from api.rest.models import error, InfoQueryParams, success CFG_GROUP = 'vyattacfg' debug = True -logger = logging.getLogger(__name__) +LOG = logging.getLogger('http_api') logs_handler = logging.StreamHandler() -logger.addHandler(logs_handler) +LOG.addHandler(logs_handler) if debug: - logger.setLevel(logging.DEBUG) + LOG.setLevel(logging.DEBUG) else: - logger.setLevel(logging.INFO) + LOG.setLevel(logging.INFO) -# Giant lock! 
-lock = threading.Lock() def load_server_config(): with open(api_config_state) as f: config = json.load(f) return config -def check_auth(key_list, key): - key_id = None - for k in key_list: - if k['key'] == key: - key_id = k['id'] - return key_id - -def error(code, msg): - resp = {"success": False, "error": msg, "data": None} - resp = json.dumps(resp) - return HTMLResponse(resp, status_code=code) - -def success(data): - resp = {"success": True, "data": data, "error": None} - resp = json.dumps(resp) - return HTMLResponse(resp) - -# Pydantic models for validation -# Pydantic will cast when possible, so use StrictStr -# validators added as needed for additional constraints -# schema_extra adds anotations to OpenAPI, to add examples - -class ApiModel(BaseModel): - key: StrictStr - -class BasePathModel(BaseModel): - op: StrictStr - path: List[StrictStr] - - @validator("path") - def check_non_empty(cls, path): - if not len(path) > 0: - raise ValueError('path must be non-empty') - return path - -class BaseConfigureModel(BasePathModel): - value: StrictStr = None - -class ConfigureModel(ApiModel, BaseConfigureModel): - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "set | delete | comment", - "path": ['config', 'mode', 'path'], - } - } - -class ConfigureListModel(ApiModel): - commands: List[BaseConfigureModel] - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "commands": "list of commands", - } - } - -class BaseConfigSectionModel(BasePathModel): - section: Dict - -class ConfigSectionModel(ApiModel, BaseConfigSectionModel): - pass - -class ConfigSectionListModel(ApiModel): - commands: List[BaseConfigSectionModel] - -class BaseConfigSectionTreeModel(BaseModel): - op: StrictStr - mask: Dict - config: Dict - -class ConfigSectionTreeModel(ApiModel, BaseConfigSectionTreeModel): - pass - -class RetrieveModel(ApiModel): - op: StrictStr - path: List[StrictStr] - configFormat: StrictStr = None - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "returnValue | returnValues | exists | showConfig", - "path": ['config', 'mode', 'path'], - "configFormat": "json (default) | json_ast | raw", - - } - } - -class ConfigFileModel(ApiModel): - op: StrictStr - file: StrictStr = None - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "save | load", - "file": "filename", - } - } - - -class ImageOp(str, Enum): - add = "add" - delete = "delete" - show = "show" - set_default = "set_default" - - -class ImageModel(ApiModel): - op: ImageOp - url: StrictStr = None - name: StrictStr = None - - @model_validator(mode='after') - def check_data(self) -> Self: - if self.op == 'add': - if not self.url: - raise ValueError("Missing required field \"url\"") - elif self.op in ['delete', 'set_default']: - if not self.name: - raise ValueError("Missing required field \"name\"") - - return self - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "add | delete | show | set_default", - "url": "imagelocation", - "name": "imagename", - } - } - -class ImportPkiModel(ApiModel): - op: StrictStr - path: List[StrictStr] - passphrase: StrictStr = None - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "import_pki", - "path": ["op", "mode", "path"], - "passphrase": "passphrase", - } - } - - -class ContainerImageModel(ApiModel): - op: StrictStr - name: StrictStr = None - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "add | delete | show", - "name": "imagename", - } - 
} - -class GenerateModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "generate", - "path": ["op", "mode", "path"], - } - } - -class ShowModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "show", - "path": ["op", "mode", "path"], - } - } - -class RebootModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "reboot", - "path": ["op", "mode", "path"], - } - } - -class ResetModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "reset", - "path": ["op", "mode", "path"], - } - } - -class PoweroffModel(ApiModel): - op: StrictStr - path: List[StrictStr] - - class Config: - schema_extra = { - "example": { - "key": "id_key", - "op": "poweroff", - "path": ["op", "mode", "path"], - } - } - - -class Success(BaseModel): - success: bool - data: Union[str, bool, Dict] - error: str - -class Error(BaseModel): - success: bool = False - data: Union[str, bool, Dict] - error: str - -responses = { - 200: {'model': Success}, - 400: {'model': Error}, - 422: {'model': Error, 'description': 'Validation Error'}, - 500: {'model': Error} -} - -def auth_required(data: ApiModel): - key = data.key - api_keys = app.state.vyos_keys - key_id = check_auth(api_keys, key) - if not key_id: - raise HTTPException(status_code=401, detail="Valid API key is required") - app.state.vyos_id = key_id - -# override Request and APIRoute classes in order to convert form request to json; -# do all explicit validation here, for backwards compatability of error messages; -# the explicit validation may be dropped, if desired, in favor of native -# validation by FastAPI/Pydantic, as is used for application/json requests -class MultipartRequest(Request): - _form_err = () - @property - def form_err(self): - return self._form_err - - @form_err.setter - def form_err(self, val): - if not self._form_err: - self._form_err = val - - @property - def orig_headers(self): - self._orig_headers = super().headers - return self._orig_headers - - @property - def headers(self): - self._headers = super().headers.mutablecopy() - self._headers['content-type'] = 'application/json' - return self._headers - - async def form(self) -> FormData: - if self._form is None: - assert ( - parse_options_header is not None - ), "The `python-multipart` library must be installed to use form parsing." 
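For illustration (not part of this patch): the request models above all describe JSON bodies of the shape {key, op, path, ...}. A minimal client-side call to the /configure endpoint, matching the ConfigureModel example, could look like the following sketch; the router URL and API key are placeholders.

    import json
    from urllib import request

    payload = {
        'key': 'id_key',  # placeholder API key
        'op': 'set',
        'path': ['interfaces', 'dummy', 'dum0', 'address', '192.0.2.1/32'],
    }
    req = request.Request(
        'https://router.example.com/configure',  # hypothetical address
        data=json.dumps(payload).encode(),
        headers={'Content-Type': 'application/json'},
        method='POST',
    )
    # request.urlopen(req) would return the envelope built by success()/error():
    # {"success": true, "data": ..., "error": null}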
- content_type_header = self.orig_headers.get("Content-Type") - content_type, options = parse_options_header(content_type_header) - if content_type == b"multipart/form-data": - multipart_parser = MultiPartParser(self.orig_headers, self.stream()) - self._form = await multipart_parser.parse() - elif content_type == b"application/x-www-form-urlencoded": - form_parser = FormParser(self.orig_headers, self.stream()) - self._form = await form_parser.parse() - else: - self._form = FormData() - return self._form - - async def body(self) -> bytes: - if not hasattr(self, "_body"): - forms = {} - merge = {} - body = await super().body() - self._body = body - - form_data = await self.form() - if form_data: - endpoint = self.url.path - logger.debug("processing form data") - for k, v in form_data.multi_items(): - forms[k] = v - - if 'data' not in forms: - self.form_err = (422, "Non-empty data field is required") - return self._body - else: - try: - tmp = json.loads(forms['data']) - except json.JSONDecodeError as e: - self.form_err = (400, f'Failed to parse JSON: {e}') - return self._body - if isinstance(tmp, list): - merge['commands'] = tmp - else: - merge = tmp - - if 'commands' in merge: - cmds = merge['commands'] - else: - cmds = copy.deepcopy(merge) - cmds = [cmds] - - for c in cmds: - if not isinstance(c, dict): - self.form_err = (400, - f"Malformed command '{c}': any command must be JSON of dict") - return self._body - if 'op' not in c: - self.form_err = (400, - f"Malformed command '{c}': missing 'op' field") - if endpoint not in ('/config-file', '/container-image', - '/image', '/configure-section'): - if 'path' not in c: - self.form_err = (400, - f"Malformed command '{c}': missing 'path' field") - elif not isinstance(c['path'], list): - self.form_err = (400, - f"Malformed command '{c}': 'path' field must be a list") - elif not all(isinstance(el, str) for el in c['path']): - self.form_err = (400, - f"Malformed command '{0}': 'path' field must be a list of strings") - if endpoint in ('/configure'): - if not c['path']: - self.form_err = (400, - f"Malformed command '{c}': 'path' list must be non-empty") - if 'value' in c and not isinstance(c['value'], str): - self.form_err = (400, - f"Malformed command '{c}': 'value' field must be a string") - if endpoint in ('/configure-section'): - if 'section' not in c and 'config' not in c: - self.form_err = (400, - f"Malformed command '{c}': missing 'section' or 'config' field") - - if 'key' not in forms and 'key' not in merge: - self.form_err = (401, "Valid API key is required") - if 'key' in forms and 'key' not in merge: - merge['key'] = forms['key'] - - new_body = json.dumps(merge) - new_body = new_body.encode() - self._body = new_body - - return self._body - -class MultipartRoute(APIRoute): - def get_route_handler(self) -> Callable: - original_route_handler = super().get_route_handler() - - async def custom_route_handler(request: Request) -> Response: - request = MultipartRequest(request.scope, request.receive) - try: - response: Response = await original_route_handler(request) - except HTTPException as e: - return error(e.status_code, e.detail) - except Exception as e: - form_err = request.form_err - if form_err: - return error(*form_err) - raise e - - return response - - return custom_route_handler app = FastAPI(debug=True, title="VyOS API", - version="0.1.0", - responses={**responses}, - dependencies=[Depends(auth_required)]) + version="0.1.0") -app.router.route_class = MultipartRoute @app.exception_handler(RequestValidationError) -async def 
validation_exception_handler(request, exc): +async def validation_exception_handler(_request, exc): return error(400, str(exc.errors()[0])) -self_ref_msg = "Requested HTTP API server configuration change; commit will be called in the background" - -def call_commit(s: ConfigSession): - try: - s.commit() - except ConfigSessionError as e: - s.discard() - if app.state.vyos_debug: - logger.warning(f"ConfigSessionError:\n {traceback.format_exc()}") - else: - logger.warning(f"ConfigSessionError: {e}") - -def _configure_op(data: Union[ConfigureModel, ConfigureListModel, - ConfigSectionModel, ConfigSectionListModel, - ConfigSectionTreeModel], - request: Request, background_tasks: BackgroundTasks): - session = app.state.vyos_session - env = session.get_session_env() - - endpoint = request.url.path - - # Allow users to pass just one command - if not isinstance(data, (ConfigureListModel, ConfigSectionListModel)): - data = [data] - else: - data = data.commands - - # We don't want multiple people/apps to be able to commit at once, - # or modify the shared session while someone else is doing the same, - # so the lock is really global - lock.acquire() - - config = Config(session_env=env) - - status = 200 - msg = None - error_msg = None - try: - for c in data: - op = c.op - if not isinstance(c, BaseConfigSectionTreeModel): - path = c.path - - if isinstance(c, BaseConfigureModel): - if c.value: - value = c.value - else: - value = "" - # For vyos.configsession calls that have no separate value arguments, - # and for type checking too - cfg_path = " ".join(path + [value]).strip() - - elif isinstance(c, BaseConfigSectionModel): - section = c.section - - elif isinstance(c, BaseConfigSectionTreeModel): - mask = c.mask - config = c.config - - if isinstance(c, BaseConfigureModel): - if op == 'set': - session.set(path, value=value) - elif op == 'delete': - if app.state.vyos_strict and not config.exists(cfg_path): - raise ConfigSessionError(f"Cannot delete [{cfg_path}]: path/value does not exist") - session.delete(path, value=value) - elif op == 'comment': - session.comment(path, value=value) - else: - raise ConfigSessionError(f"'{op}' is not a valid operation") - - elif isinstance(c, BaseConfigSectionModel): - if op == 'set': - session.set_section(path, section) - elif op == 'load': - session.load_section(path, section) - else: - raise ConfigSessionError(f"'{op}' is not a valid operation") - - elif isinstance(c, BaseConfigSectionTreeModel): - if op == 'set': - session.set_section_tree(config) - elif op == 'load': - session.load_section_tree(mask, config) - else: - raise ConfigSessionError(f"'{op}' is not a valid operation") - # end for - config = Config(session_env=env) - d = get_config_diff(config) - - if d.is_node_changed(['service', 'https']): - background_tasks.add_task(call_commit, session) - msg = self_ref_msg - else: - session.commit() - - logger.info(f"Configuration modified via HTTP API using key '{app.state.vyos_id}'") - except ConfigSessionError as e: - session.discard() - status = 400 - if app.state.vyos_debug: - logger.critical(f"ConfigSessionError:\n {traceback.format_exc()}") - error_msg = str(e) - except Exception as e: - session.discard() - logger.critical(traceback.format_exc()) - status = 500 - - # Don't give the details away to the outer world - error_msg = "An internal error occured. Check the logs for details." 
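For illustration (not part of this patch): _configure_op above serializes all configuration changes behind one process-wide lock so concurrent API calls cannot interleave set/delete/commit on the shared session. A condensed sketch of that pattern, with illustrative names:

    import threading

    _commit_lock = threading.Lock()  # counterpart of the module-level lock above

    def apply_commands(session, commands):
        # commands: iterable of (op, path, value) tuples, as in the models above
        with _commit_lock:
            for op, path, value in commands:
                if op == 'set':
                    session.set(path, value=value)
                elif op == 'delete':
                    session.delete(path, value=value)
                elif op == 'comment':
                    session.comment(path, value=value)
            session.commit()  # still holding the lock, as in the handler above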
- finally: - lock.release() - - if status != 200: - return error(status, error_msg) - - return success(msg) - -def create_path_import_pki_no_prompt(path): - correct_paths = ['ca', 'certificate', 'key-pair'] - if path[1] not in correct_paths: - return False - path[1] = '--' + path[1].replace('-', '') - path[3] = '--key-filename' - return path[1:] - -@app.post('/configure') -def configure_op(data: Union[ConfigureModel, - ConfigureListModel], - request: Request, background_tasks: BackgroundTasks): - return _configure_op(data, request, background_tasks) - -@app.post('/configure-section') -def configure_section_op(data: Union[ConfigSectionModel, - ConfigSectionListModel, - ConfigSectionTreeModel], - request: Request, background_tasks: BackgroundTasks): - return _configure_op(data, request, background_tasks) -@app.post("/retrieve") -async def retrieve_op(data: RetrieveModel): - session = app.state.vyos_session - env = session.get_session_env() - config = Config(session_env=env) +@app.get('/info') +def info(q: Annotated[InfoQueryParams, Query()]): + show_version = q.version + show_hostname = q.hostname - op = data.op - path = " ".join(data.path) + prelogin_file = r'/etc/issue' + hostname_file = r'/etc/hostname' + default = 'Welcome to VyOS' try: - if op == 'returnValue': - res = config.return_value(path) - elif op == 'returnValues': - res = config.return_values(path) - elif op == 'exists': - res = config.exists(path) - elif op == 'showConfig': - config_format = 'json' - if data.configFormat: - config_format = data.configFormat - - res = session.show_config(path=data.path) - if config_format == 'json': - config_tree = ConfigTree(res) - res = json.loads(config_tree.to_json()) - elif config_format == 'json_ast': - config_tree = ConfigTree(res) - res = json.loads(config_tree.to_json_ast()) - elif config_format == 'raw': - pass - else: - return error(400, f"'{config_format}' is not a valid config format") - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/config-file') -def config_file_op(data: ConfigFileModel, background_tasks: BackgroundTasks): - session = app.state.vyos_session - env = session.get_session_env() - op = data.op - msg = None - - try: - if op == 'save': - if data.file: - path = data.file - else: - path = '/config/config.boot' - msg = session.save_config(path) - elif op == 'load': - if data.file: - path = data.file - else: - return error(400, "Missing required field \"file\"") - - session.migrate_and_load_config(path) - - config = Config(session_env=env) - d = get_config_diff(config) - - if d.is_node_changed(['service', 'https']): - background_tasks.add_task(call_commit, session) - msg = self_ref_msg - else: - session.commit() - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. 
Check the logs for details.") - - return success(msg) - -@app.post('/image') -def image_op(data: ImageModel): - session = app.state.vyos_session - - op = data.op - - try: - if op == 'add': - res = session.install_image(data.url) - elif op == 'delete': - res = session.remove_image(data.name) - elif op == 'show': - res = session.show(["system", "image"]) - elif op == 'set_default': - res = session.set_default_image(data.name) - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/container-image') -def container_image_op(data: ContainerImageModel): - session = app.state.vyos_session - - op = data.op - - try: - if op == 'add': - if data.name: - name = data.name - else: - return error(400, "Missing required field \"name\"") - res = session.add_container_image(name) - elif op == 'delete': - if data.name: - name = data.name - else: - return error(400, "Missing required field \"name\"") - res = session.delete_container_image(name) - elif op == 'show': - res = session.show_container_image() - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/generate') -def generate_op(data: GenerateModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - try: - if op == 'generate': - res = session.generate(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/show') -def show_op(data: ShowModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - try: - if op == 'show': - res = session.show(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/reboot') -def reboot_op(data: RebootModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - try: - if op == 'reboot': - res = session.reboot(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - - return success(res) - -@app.post('/reset') -def reset_op(data: ResetModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - try: - if op == 'reset': - res = session.reset(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. 
Check the logs for details.") - - return success(res) - -@app.post('/import-pki') -def import_pki(data: ImportPkiModel): - session = app.state.vyos_session - - op = data.op - path = data.path - - lock.acquire() - - try: - if op == 'import-pki': - # need to get rid or interactive mode for private key - if len(path) == 5 and path[3] in ['key-file', 'private-key']: - path_no_prompt = create_path_import_pki_no_prompt(path) - if not path_no_prompt: - return error(400, f"Invalid command: {' '.join(path)}") - if data.passphrase: - path_no_prompt += ['--passphrase', data.passphrase] - res = session.import_pki_no_prompt(path_no_prompt) - else: - res = session.import_pki(path) - if not res[0].isdigit(): - return error(400, res) - # commit changes - session.commit() - res = res.split('. ')[0] - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") - finally: - lock.release() + res = { + 'banner': '', + 'hostname': '', + 'version': '' + } + if show_version: + res.update(version=get_version()) - return success(res) + if show_hostname: + try: + hostname = read_file(hostname_file) + except Exception: + hostname = 'vyos' + res.update(hostname=hostname) -@app.post('/poweroff') -def poweroff_op(data: PoweroffModel): - session = app.state.vyos_session + banner = read_file(prelogin_file, defaultonfailure=default) + if banner == f'{default} - \\n \\l': + banner = banner.partition(default)[1] - op = data.op - path = data.path - - try: - if op == 'poweroff': - res = session.poweroff(path) - else: - return error(400, f"'{op}' is not a valid operation") - except ConfigSessionError as e: - return error(400, str(e)) - except Exception as e: - logger.critical(traceback.format_exc()) - return error(500, "An internal error occured. Check the logs for details.") + res.update(banner=banner) + except Exception: + LOG.critical(traceback.format_exc()) + return error(500, 'An internal error occured. 
Check the logs for details.') return success(res) ### -# GraphQL integration -### - -def graphql_init(app: FastAPI = app): - from api.graphql.libs.token_auth import get_user_context - api.graphql.state.init() - api.graphql.state.settings['app'] = app - - # import after initializaion of state - from api.graphql.bindings import generate_schema - schema = generate_schema() - - in_spec = app.state.vyos_introspection - - if app.state.vyos_origins: - origins = app.state.vyos_origins - app.add_route('/graphql', CORSMiddleware(GraphQL(schema, - context_value=get_user_context, - debug=True, - introspection=in_spec), - allow_origins=origins, - allow_methods=("GET", "POST", "OPTIONS"), - allow_headers=("Authorization",))) - else: - app.add_route('/graphql', GraphQL(schema, - context_value=get_user_context, - debug=True, - introspection=in_spec)) -### # Modify uvicorn to allow reloading server within the configsession ### server = None shutdown = False + class ApiServerConfig(UvicornConfig): pass + class ApiServer(UvicornServer): def install_signal_handlers(self): pass + def reload_handler(signum, frame): + # pylint: disable=global-statement + global server - logger.debug('Reload signal received...') + LOG.debug('Reload signal received...') if server is not None: server.handle_exit(signum, frame) server = None - logger.info('Server stopping for reload...') + LOG.info('Server stopping for reload...') else: - logger.warning('Reload called for non-running server...') + LOG.warning('Reload called for non-running server...') + def shutdown_handler(signum, frame): + # pylint: disable=global-statement + global shutdown - logger.debug('Shutdown signal received...') + LOG.debug('Shutdown signal received...') server.handle_exit(signum, frame) - logger.info('Server shutdown...') + LOG.info('Server shutdown...') shutdown = True +# end modify uvicorn + + def flatten_keys(d: dict) -> list[dict]: keys_list = [] for el in list(d['keys'].get('id', {})): @@ -965,49 +154,87 @@ def flatten_keys(d: dict) -> list[dict]: keys_list.append({'id': el, 'key': key}) return keys_list -def initialization(session: ConfigSession, app: FastAPI = app): + +def regenerate_docs(app: FastAPI) -> None: + docs = ('/openapi.json', '/docs', '/docs/oauth2-redirect', '/redoc') + remove = [] + for r in app.routes: + if r.path in docs: + remove.append(r) + for r in remove: + app.routes.remove(r) + + app.openapi_schema = None + app.setup() + + +def initialization(session: SessionState, app: FastAPI = app): + # pylint: disable=global-statement,broad-exception-caught,import-outside-toplevel + global server try: server_config = load_server_config() except Exception as e: - logger.critical(f'Failed to load the HTTP API server config: {e}') + LOG.critical(f'Failed to load the HTTP API server config: {e}') sys.exit(1) - app.state.vyos_session = session - app.state.vyos_keys = [] - if 'keys' in server_config: - app.state.vyos_keys = flatten_keys(server_config) + session.keys = flatten_keys(server_config) + + rest_config = server_config.get('rest', {}) + session.debug = bool('debug' in rest_config) + session.strict = bool('strict' in rest_config) + + graphql_config = server_config.get('graphql', {}) + session.origins = graphql_config.get('cors', {}).get('allow_origin', []) + + if 'rest' in server_config: + session.rest = True + else: + session.rest = False - app.state.vyos_debug = bool('debug' in server_config) - app.state.vyos_strict = bool('strict' in server_config) - app.state.vyos_origins = server_config.get('cors', {}).get('allow_origin', []) if 
'graphql' in server_config: - app.state.vyos_graphql = True + session.graphql = True if isinstance(server_config['graphql'], dict): if 'introspection' in server_config['graphql']: - app.state.vyos_introspection = True + session.introspection = True else: - app.state.vyos_introspection = False + session.introspection = False # default values if not set explicitly - app.state.vyos_auth_type = server_config['graphql']['authentication']['type'] - app.state.vyos_token_exp = server_config['graphql']['authentication']['expiration'] - app.state.vyos_secret_len = server_config['graphql']['authentication']['secret_length'] + session.auth_type = server_config['graphql']['authentication']['type'] + session.token_exp = server_config['graphql']['authentication']['expiration'] + session.secret_len = server_config['graphql']['authentication']['secret_length'] else: - app.state.vyos_graphql = False + session.graphql = False + + # pass session state + app.state = session - if app.state.vyos_graphql: + # add REST routes + if session.rest: + from api.rest.routers import rest_init + rest_init(app) + else: + from api.rest.routers import rest_clear + rest_clear(app) + + # add GraphQL route + if session.graphql: + from api.graphql.routers import graphql_init graphql_init(app) + else: + from api.graphql.routers import graphql_clear + graphql_clear(app) + + regenerate_docs(app) + + LOG.debug('Active routes are:') + for r in app.routes: + LOG.debug(f'{r.path}') config = ApiServerConfig(app, uds="/run/api.sock", proxy_headers=True) server = ApiServer(config) -def run_server(): - try: - server.run() - except OSError as e: - logger.critical(e) - sys.exit(1) if __name__ == '__main__': # systemd's user and group options don't work, do it by hand here, @@ -1022,13 +249,14 @@ if __name__ == '__main__': signal.signal(signal.SIGHUP, reload_handler) signal.signal(signal.SIGTERM, shutdown_handler) - config_session = ConfigSession(os.getpid()) + session_state = SessionState() + session_state.session = ConfigSession(os.getpid()) while True: - logger.debug('Enter main loop...') + LOG.debug('Enter main loop...') if shutdown: break if server is None: - initialization(config_session) + initialization(session_state) server.run() sleep(1) diff --git a/src/services/vyos-network-event-logger b/src/services/vyos-network-event-logger new file mode 100644 index 000000000..840ff3cda --- /dev/null +++ b/src/services/vyos-network-event-logger @@ -0,0 +1,1218 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2025 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
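For illustration (not part of this patch): the API server's main loop above rebuilds the uvicorn server after a reload signal. A condensed sketch of that reload-in-place pattern, with illustrative names:

    import signal
    from time import sleep

    server = None
    shutdown = False

    def on_sighup(signum, frame):
        global server
        if server is not None:
            server.handle_exit(signum, frame)  # stop the running uvicorn server
            server = None                      # main loop re-creates it

    def on_sigterm(signum, frame):
        global shutdown
        shutdown = True

    def main_loop(build_server):
        global server
        signal.signal(signal.SIGHUP, on_sighup)
        signal.signal(signal.SIGTERM, on_sigterm)
        while not shutdown:
            if server is None:
                server = build_server()  # re-read config, register routes
                server.run()             # blocks until handle_exit() is called
            sleep(1)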
+ +import argparse +import logging +import multiprocessing +import queue +import signal +import socket +import threading +from pathlib import Path +from time import sleep +from typing import Dict, AnyStr, List, Union + +from pyroute2.common import AF_MPLS +from pyroute2.iproute import IPRoute +from pyroute2.netlink import rtnl, nlmsg +from pyroute2.netlink.nfnetlink.nfctsocket import nfct_msg +from pyroute2.netlink.rtnl import (rt_proto as RT_PROTO, rt_type as RT_TYPES, + rtypes as RTYPES + ) +from pyroute2.netlink.rtnl.fibmsg import FR_ACT_GOTO, FR_ACT_NOP, FR_ACT_TO_TBL, \ + fibmsg +from pyroute2.netlink.rtnl import ifaddrmsg +from pyroute2.netlink.rtnl import ifinfmsg +from pyroute2.netlink.rtnl import ndmsg +from pyroute2.netlink.rtnl import rtmsg +from pyroute2.netlink.rtnl.rtmsg import nh, rtmsg_base + +from vyos.include.uapi.linux.fib_rules import * +from vyos.include.uapi.linux.icmpv6 import * +from vyos.include.uapi.linux.if_arp import * +from vyos.include.uapi.linux.lwtunnel import * +from vyos.include.uapi.linux.neighbour import * +from vyos.include.uapi.linux.rtnetlink import * + +from vyos.utils.file import read_json + + +manager = multiprocessing.Manager() +cache = manager.dict() + + +class UnsupportedMessageType(Exception): + pass + +shutdown_event = multiprocessing.Event() + +logging.basicConfig(level=logging.INFO, format='%(message)s') +logger = logging.getLogger(__name__) + + +class DebugFormatter(logging.Formatter): + def format(self, record): + self._style._fmt = '[%(asctime)s] %(levelname)s: %(message)s' + return super().format(record) + + +def set_log_level(level: str) -> None: + if level == 'debug': + logger.setLevel(logging.DEBUG) + logger.parent.handlers[0].setFormatter(DebugFormatter()) + else: + logger.setLevel(logging.INFO) + +IFF_FLAGS = { + 'RUNNING': ifinfmsg.IFF_RUNNING, + 'LOOPBACK': ifinfmsg.IFF_LOOPBACK, + 'BROADCAST': ifinfmsg.IFF_BROADCAST, + 'POINTOPOINT': ifinfmsg.IFF_POINTOPOINT, + 'MULTICAST': ifinfmsg.IFF_MULTICAST, + 'NOARP': ifinfmsg.IFF_NOARP, + 'ALLMULTI': ifinfmsg.IFF_ALLMULTI, + 'PROMISC': ifinfmsg.IFF_PROMISC, + 'MASTER': ifinfmsg.IFF_MASTER, + 'SLAVE': ifinfmsg.IFF_SLAVE, + 'DEBUG': ifinfmsg.IFF_DEBUG, + 'DYNAMIC': ifinfmsg.IFF_DYNAMIC, + 'AUTOMEDIA': ifinfmsg.IFF_AUTOMEDIA, + 'PORTSEL': ifinfmsg.IFF_PORTSEL, + 'NOTRAILERS': ifinfmsg.IFF_NOTRAILERS, + 'UP': ifinfmsg.IFF_UP, + 'LOWER_UP': ifinfmsg.IFF_LOWER_UP, + 'DORMANT': ifinfmsg.IFF_DORMANT, + 'ECHO': ifinfmsg.IFF_ECHO, +} + +NEIGH_STATE_FLAGS = { + 'INCOMPLETE': ndmsg.NUD_INCOMPLETE, + 'REACHABLE': ndmsg.NUD_REACHABLE, + 'STALE': ndmsg.NUD_STALE, + 'DELAY': ndmsg.NUD_DELAY, + 'PROBE': ndmsg.NUD_PROBE, + 'FAILED': ndmsg.NUD_FAILED, + 'NOARP': ndmsg.NUD_NOARP, + 'PERMANENT': ndmsg.NUD_PERMANENT, +} + +IFA_FLAGS = { + 'secondary': ifaddrmsg.IFA_F_SECONDARY, + 'temporary': ifaddrmsg.IFA_F_SECONDARY, + 'nodad': ifaddrmsg.IFA_F_NODAD, + 'optimistic': ifaddrmsg.IFA_F_OPTIMISTIC, + 'dadfailed': ifaddrmsg.IFA_F_DADFAILED, + 'home': ifaddrmsg.IFA_F_HOMEADDRESS, + 'deprecated': ifaddrmsg.IFA_F_DEPRECATED, + 'tentative': ifaddrmsg.IFA_F_TENTATIVE, + 'permanent': ifaddrmsg.IFA_F_PERMANENT, + 'mngtmpaddr': ifaddrmsg.IFA_F_MANAGETEMPADDR, + 'noprefixroute': ifaddrmsg.IFA_F_NOPREFIXROUTE, + 'autojoin': ifaddrmsg.IFA_F_MCAUTOJOIN, + 'stable-privacy': ifaddrmsg.IFA_F_STABLE_PRIVACY, +} + +RT_SCOPE_TO_NAME = { + rtmsg.RT_SCOPE_UNIVERSE: 'global', + rtmsg.RT_SCOPE_SITE: 'site', + rtmsg.RT_SCOPE_LINK: 'link', + rtmsg.RT_SCOPE_HOST: 'host', + rtmsg.RT_SCOPE_NOWHERE: 'nowhere', +} + +FAMILY_TO_NAME = { + 
socket.AF_INET: 'inet', + socket.AF_INET6: 'inet6', + socket.AF_PACKET: 'link', + AF_MPLS: 'mpls', + socket.AF_BRIDGE: 'bridge', +} + +_INFINITY = 4294967295 + + +def _get_iif_name(idx: int) -> str: + """ + Retrieves the interface name associated with a given index. + """ + try: + if_info = IPRoute().link("get", index=idx) + if if_info: + return if_info[0].get_attr('IFLA_IFNAME') + except Exception as e: + pass + + return '' + + +def remember_if_index(idx: int, event_type: int) -> None: + """ + Manages the caching of network interface names based on their index and event type. + + - For RTM_DELLINK event, the interface name is removed from the cache if exists. + - For RTM_NEWLINK event, the interface name is retrieved and updated in the cache. + """ + name = cache.get(idx) + if name: + if event_type == rtnl.RTM_DELLINK: + del cache[idx] + else: + name = _get_iif_name(idx) + if name: + cache[idx] = name + else: + cache[idx] = _get_iif_name(idx) + + +class BaseFormatter: + """ + A base class providing utility methods for formatting network message data. + """ + def _get_if_name_by_index(self, idx: int) -> str: + """ + Retrieves the name of a network interface based on its index. + + Uses a cached lookup for efficiency. If the name is not found in the cache, + it queries the system and updates the cache. + """ + if_name = cache.get(idx) + if not if_name: + if_name = _get_iif_name(idx) + cache[idx] = if_name + + return if_name + + def _format_rttable(self, idx: int) -> str: + """ + Formats a route table identifier into a readable name. + """ + return f'{RT_TABLE_TO_NAME.get(idx, idx)}' + + def _parse_flag(self, data: int, flags: dict) -> list: + """ + Extracts and returns flag names equal the bits set in a numeric value. + """ + result = list() + if data: + for key, val in flags.items(): + if data & val: + result.append(key) + data &= ~val + + if data: + result.append(f"{data:#x}") + + return result + + def af_bit_len(self, af: int) -> int: + """ + Gets the bit length of a given address family. + Supports common address families like IPv4, IPv6, and MPLS. + """ + _map = { + socket.AF_INET6: 128, + socket.AF_INET: 32, + AF_MPLS: 20, + } + + return _map.get(af) + + def _format_simple_field(self, data: str, prefix: str='') -> str: + """ + Formats a simple field with an optional prefix. + + A simple field represents a value that does not require additional + parsing and is used as is. + """ + return self._output(f'{prefix} {data}') if data is not None else '' + + def _output(self, data: str) -> str: + """ + Standardizes the output format. + + Ensures that the output is enclosed with single spaces and has no leading + or trailing whitespace. + """ + return f' {data.strip()} ' if data else '' + + +class BaseMSGFormatter(BaseFormatter): + """ + A base formatter class for network messages. + This class provides common methods for formatting network-related messages, + """ + + def _prepare_start_message(self, event: str) -> str: + """ + Prepares a starting message string based on the event type. + """ + if event in ['RTM_DELROUTE', 'RTM_DELLINK', 'RTM_DELNEIGH', + 'RTM_DELADDR', 'RTM_DELADDRLABEL', 'RTM_DELRULE', + 'RTM_DELNETCONF']: + return 'Deleted ' + if event == 'RTM_GETNEIGH': + return 'Miss ' + return '' + + def _format_flow_field(self, data: int) -> str: + """ + Formats a flow field to represent traffic realms. 
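For illustration (not part of this patch): the _parse_flag() helper above decodes a bitmask into flag names and reports any unnamed remainder in hex. A standalone re-implementation:

    def parse_flag(data: int, flags: dict) -> list:
        result = []
        for key, val in flags.items():
            if data & val:
                result.append(key)
                data &= ~val
        if data:
            result.append(f'{data:#x}')  # bits with no symbolic name
        return result

    print(parse_flag(0x1 | 0x2 | 0x100, {'UP': 0x1, 'BROADCAST': 0x2}))
    # -> ['UP', 'BROADCAST', '0x100']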
+ """ + to = data & 0xFFFF + from_ = data >> 16 + result = f"realm{'s' if from_ else ''} " + if from_: + result += f'{from_}/' + result += f'{to}' + + return self._output(result) + + def format(self, msg: nlmsg) -> str: + """ + Abstract method to format a complete message. + + This method must be implemented by subclasses to provide specific formatting + logic for different types of messages. + """ + raise NotImplementedError(f'{msg.get("event")}: {msg}') + + +class LinkFormatter(BaseMSGFormatter): + """ + A formatter class for handling link-related network messages + `RTM_NEWLINK` and `RTM_DELLINK`. + """ + def _format_iff_flags(self, flags: int) -> str: + """ + Formats interface flags into a human-readable string. + """ + result = list() + if flags: + if flags & IFF_FLAGS['UP'] and not flags & IFF_FLAGS['RUNNING']: + result.append('NO-CARRIER') + + flags &= ~IFF_FLAGS['RUNNING'] + + result.extend(self._parse_flag(flags, IFF_FLAGS)) + + return self._output(f'<{(",").join(result)}>') + + def _format_if_props(self, data: ifinfmsg.ifinfbase.proplist) -> str: + """ + Formats interface alternative name properties. + """ + result = '' + for rec in data.altnames(): + result += f'[altname {rec}] ' + return self._output(result) + + def _format_link(self, msg: ifinfmsg.ifinfmsg) -> str: + """ + Formats the link attribute of a network interface message. + """ + if msg.get_attr("IFLA_LINK") is not None: + iflink = msg.get_attr("IFLA_LINK") + if iflink: + if msg.get_attr("IFLA_LINK_NETNSID"): + return f'if{iflink}' + else: + return self._get_if_name_by_index(iflink) + return 'NONE' + + def _format_link_info(self, msg: ifinfmsg.ifinfmsg) -> str: + """ + Formats detailed information about the link, including type, address, + broadcast address, and permanent address. + """ + result = f'link/{ARPHRD_TO_NAME.get(msg.get("ifi_type"), msg.get("ifi_type"))}' + result += self._format_simple_field(msg.get_attr('IFLA_ADDRESS')) + + if msg.get_attr("IFLA_BROADCAST"): + if msg.get('flags') & ifinfmsg.IFF_POINTOPOINT: + result += f' peer' + else: + result += f' brd' + result += f' {msg.get_attr("IFLA_BROADCAST")}' + + if msg.get_attr("IFLA_PERM_ADDRESS"): + if not msg.get_attr("IFLA_ADDRESS") or \ + msg.get_attr("IFLA_ADDRESS") != msg.get_attr("IFLA_PERM_ADDRESS"): + result += f' permaddr {msg.get_attr("IFLA_PERM_ADDRESS")}' + + return self._output(result) + + def format(self, msg: ifinfmsg.ifinfmsg): + """ + Formats a network link message into a structured output string. 
+ """ + if msg.get("family") not in [socket.AF_UNSPEC, socket.AF_BRIDGE]: + return None + + message = self._prepare_start_message(msg.get('event')) + + link = self._format_link(msg) + + message += f'{msg.get("index")}: {msg.get_attr("IFLA_IFNAME")}' + message += f'@{link}' if link else '' + message += f': {self._format_iff_flags(msg.get("flags"))}' + + message += self._format_simple_field(msg.get_attr('IFLA_MTU'), prefix='mtu') + message += self._format_simple_field(msg.get_attr('IFLA_QDISC'), prefix='qdisc') + message += self._format_simple_field(msg.get_attr('IFLA_OPERSTATE'), prefix='state') + message += self._format_simple_field(msg.get_attr('IFLA_GROUP'), prefix='group') + message += self._format_simple_field(msg.get_attr('IFLA_MASTER'), prefix='master') + + message += self._format_link_info(msg) + + if msg.get_attr('IFLA_PROP_LIST'): + message += self._format_if_props(msg.get_attr('IFLA_PROP_LIST')) + + return self._output(message) + + +class EncapFormatter(BaseFormatter): + """ + A formatter class for handling encapsulation attributes in routing messages. + """ + # TODO: implement other lwtunnel decoder in pyroute2 + # https://github.com/svinota/pyroute2/blob/78cfe838bec8d96324811a3962bda15fb028e0ce/pyroute2/netlink/rtnl/rtmsg.py#L657 + def __init__(self): + """ + Initializes the EncapFormatter with supported encapsulation types. + """ + self.formatters = { + rtmsg.LWTUNNEL_ENCAP_MPLS: self.mpls_format, + rtmsg.LWTUNNEL_ENCAP_SEG6: self.seg6_format, + rtmsg.LWTUNNEL_ENCAP_BPF: self.bpf_format, + rtmsg.LWTUNNEL_ENCAP_SEG6_LOCAL: self.seg6local_format, + } + + def _format_srh(self, data: rtmsg_base.seg6_encap_info.ipv6_sr_hdr): + """ + Formats Segment Routing Header (SRH) attributes. + """ + result = '' + # pyroute2 decode mode only as inline or encap (encap, l2encap, encap.red, l2encap.red") + # https://github.com/svinota/pyroute2/blob/78cfe838bec8d96324811a3962bda15fb028e0ce/pyroute2/netlink/rtnl/rtmsg.py#L220 + for key in ['mode', 'segs']: + + val = data.get(key) + + if val: + if key == 'segs': + result += f'{key} {len(val)} {val} ' + else: + result += f'{key} {val} ' + + return self._output(result) + + def _format_bpf_object(self, data: rtmsg_base.bpf_encap_info, attr_name: str, attr_key: str): + """ + Formats eBPF program attributes. + """ + attr = data.get_attr(attr_name) + if not attr: + return '' + result = '' + if attr.get_attr("LWT_BPF_PROG_NAME"): + result += f'{attr.get_attr("LWT_BPF_PROG_NAME")} ' + if attr.get_attr("LWT_BPF_PROG_FD"): + result += f'{attr.get_attr("LWT_BPF_PROG_FD")} ' + + return self._output(f'{attr_key} {result.strip()}') + + def mpls_format(self, data: rtmsg_base.mpls_encap_info): + """ + Formats MPLS encapsulation attributes. + """ + result = '' + if data.get_attr("MPLS_IPTUNNEL_DST"): + for rec in data.get_attr("MPLS_IPTUNNEL_DST"): + for key, val in rec.items(): + if val: + result += f'{key} {val} ' + + if data.get_attr("MPLS_IPTUNNEL_TTL"): + result += f' ttl {data.get_attr("MPLS_IPTUNNEL_TTL")}' + + return self._output(result) + + def bpf_format(self, data: rtmsg_base.bpf_encap_info): + """ + Formats eBPF encapsulation attributes. 
+ """ + result = '' + result += self._format_bpf_object(data, 'LWT_BPF_IN', 'in') + result += self._format_bpf_object(data, 'LWT_BPF_OUT', 'out') + result += self._format_bpf_object(data, 'LWT_BPF_XMIT', 'xmit') + + if data.get_attr('LWT_BPF_XMIT_HEADROOM'): + result += f'headroom {data.get_attr("LWT_BPF_XMIT_HEADROOM")} ' + + return self._output(result) + + def seg6_format(self, data: rtmsg_base.seg6_encap_info): + """ + Formats Segment Routing (SEG6) encapsulation attributes. + """ + result = '' + if data.get_attr("SEG6_IPTUNNEL_SRH"): + result += self._format_srh(data.get_attr("SEG6_IPTUNNEL_SRH")) + + return self._output(result) + + def seg6local_format(self, data: rtmsg_base.seg6local_encap_info): + """ + Formats SEG6 local encapsulation attributes. + """ + result = '' + formatters = { + 'SEG6_LOCAL_ACTION': lambda val: f' action {next((k for k, v in data.action.actions.items() if v == val), "unknown")}', + 'SEG6_LOCAL_SRH': lambda val: f' {self._format_srh(val)}', + 'SEG6_LOCAL_TABLE': lambda val: f' table {self._format_rttable(val)}', + 'SEG6_LOCAL_NH4': lambda val: f' nh4 {val}', + 'SEG6_LOCAL_NH6': lambda val: f' nh6 {val}', + 'SEG6_LOCAL_IIF': lambda val: f' iif {self._get_if_name_by_index(val)}', + 'SEG6_LOCAL_OIF': lambda val: f' oif {self._get_if_name_by_index(val)}', + 'SEG6_LOCAL_BPF': lambda val: f' endpoint {val.get("LWT_BPF_PROG_NAME")}', + 'SEG6_LOCAL_VRFTABLE': lambda val: f' vrftable {self._format_rttable(val)}', + } + + for rec in data.get('attrs'): + if rec[0] in formatters: + result += formatters[rec[0]](rec[1]) + + return self._output(result) + + def format(self, type: int, data: Union[rtmsg_base.mpls_encap_info, + rtmsg_base.bpf_encap_info, + rtmsg_base.seg6_encap_info, + rtmsg_base.seg6local_encap_info]): + """ + Formats encapsulation attributes based on their type. + """ + result = '' + formatter = self.formatters.get(type) + + result += f'encap {ENCAP_TO_NAME.get(type, "unknown")}' + + if formatter: + result += f' {formatter(data)}' + + return self._output(result) + + +class RouteFormatter(BaseMSGFormatter): + """ + A formatter class for handling network routing messages + `RTM_NEWROUTE` and `RTM_DELROUTE`. + """ + + def _format_rt_flags(self, flags: int) -> str: + """ + Formats route flags into a comma-separated string. + """ + result = list() + result.extend(self._parse_flag(flags, RT_FlAGS)) + + return self._output(",".join(result)) + + def _format_rta_encap(self, type: int, data: Union[rtmsg_base.mpls_encap_info, + rtmsg_base.bpf_encap_info, + rtmsg_base.seg6_encap_info, + rtmsg_base.seg6local_encap_info]) -> str: + """ + Formats encapsulation attributes. + """ + return EncapFormatter().format(type, data) + + def _format_rta_newdest(self, data: str) -> str: + """ + Formats a new destination attribute. + """ + return self._output(f'as to {data}') + + def _format_rta_gateway(self, data: str) -> str: + """ + Formats a gateway attribute. + """ + return self._output(f'via {data}') + + def _format_rta_via(self, data: str) -> str: + """ + Formats a 'via' route attribute. + """ + return self._output(f'{data}') + + def _format_rta_metrics(self, data: rtmsg_base.metrics): + """ + Formats routing metrics. 
+ """ + result = '' + + def __format_metric_time(_val: int) -> str: + """Formats metric time values into seconds or milliseconds.""" + return f"{_val / 1000}s" if _val >= 1000 else f"{_val}ms" + + def __format_reatures(_val: int) -> str: + """Parse and formats routing feature flags.""" + result = self._parse_flag(_val, {'ecn': RTAX_FEATURE_ECN, + 'tcp_usec_ts': RTAX_FEATURE_TCP_USEC_TS}) + return ",".join(result) + + formatters = { + 'RTAX_MTU': lambda val: f' mtu {val}', + 'RTAX_WINDOW': lambda val: f' window {val}', + 'RTAX_RTT': lambda val: f' rtt {__format_metric_time(val / 8)}', + 'RTAX_RTTVAR': lambda val: f' rttvar {__format_metric_time(val / 4)}', + 'RTAX_SSTHRESH': lambda val: f' ssthresh {val}', + 'RTAX_CWND': lambda val: f' cwnd {val}', + 'RTAX_ADVMSS': lambda val: f' advmss {val}', + 'RTAX_REORDERING': lambda val: f' reordering {val}', + 'RTAX_HOPLIMIT': lambda val: f' hoplimit {val}', + 'RTAX_INITCWND': lambda val: f' initcwnd {val}', + 'RTAX_FEATURES': lambda val: f' features {__format_reatures(val)}', + 'RTAX_RTO_MIN': lambda val: f' rto_min {__format_metric_time(val)}', + 'RTAX_INITRWND': lambda val: f' initrwnd {val}', + 'RTAX_QUICKACK': lambda val: f' quickack {val}', + } + + for rec in data.get('attrs'): + if rec[0] in formatters: + result += formatters[rec[0]](rec[1]) + + return self._output(result) + + def _format_rta_pref(self, data: int) -> str: + """ + Formats a pref attribute. + """ + pref = { + ICMPV6_ROUTER_PREF_LOW: "low", + ICMPV6_ROUTER_PREF_MEDIUM: "medium", + ICMPV6_ROUTER_PREF_HIGH: "high", + } + + return self._output(f' pref {pref.get(data, data)}') + + def _format_rta_multipath(self, mcast_cloned: bool, family: int, data: List[nh]) -> str: + """ + Formats multipath route attributes. + """ + result = '' + first = True + for rec in data: + if mcast_cloned: + if first: + result += ' Oifs: ' + first = False + else: + result += ' ' + else: + result += ' nexthop ' + + if rec.get_attr('RTA_ENCAP'): + result += self._format_rta_encap(rec.get_attr('RTA_ENCAP_TYPE'), + rec.get_attr('RTA_ENCAP')) + + if rec.get_attr('RTA_NEWDST'): + result += self._format_rta_newdest(rec.get_attr('RTA_NEWDST')) + + if rec.get_attr('RTA_GATEWAY'): + result += self._format_rta_gateway(rec.get_attr('RTA_GATEWAY')) + + if rec.get_attr('RTA_VIA'): + result += self._format_rta_via(rec.get_attr('RTA_VIA')) + + if rec.get_attr('RTA_FLOW'): + result += self._format_flow_field(rec.get_attr('RTA_FLOW')) + + result += f' dev {self._get_if_name_by_index(rec.get("oif"))}' + if mcast_cloned: + if rec.get("hops") != 1: + result += f' (ttl>{rec.get("hops")})' + else: + if family != AF_MPLS: + result += f' weight {rec.get("hops") + 1}' + + result += self._format_rt_flags(rec.get("flags")) + + return self._output(result) + + def format(self, msg: rtmsg.rtmsg) -> str: + """ + Formats a network route message into a human-readable string representation. 
+ """ + message = self._prepare_start_message(msg.get('event')) + + message += RT_TYPES.get(msg.get('type')) + + if msg.get_attr('RTA_DST'): + host_len = self.af_bit_len(msg.get('family')) + if msg.get('dst_len') != host_len: + message += f' {msg.get_attr("RTA_DST")}/{msg.get("dst_len")}' + else: + message += f' {msg.get_attr("RTA_DST")}' + elif msg.get('dst_len'): + message += f' 0/{msg.get("dst_len")}' + else: + message += ' default' + + if msg.get_attr('RTA_SRC'): + message += f' from {msg.get_attr("RTA_SRC")}' + elif msg.get('src_len'): + message += f' from 0/{msg.get("src_len")}' + + message += self._format_simple_field(msg.get_attr('RTA_NH_ID'), prefix='nhid') + + if msg.get_attr('RTA_NEWDST'): + message += self._format_rta_newdest(msg.get_attr('RTA_NEWDST')) + + if msg.get_attr('RTA_ENCAP'): + message += self._format_rta_encap(msg.get_attr('RTA_ENCAP_TYPE'), + msg.get_attr('RTA_ENCAP')) + + message += self._format_simple_field(msg.get('tos'), prefix='tos') + + if msg.get_attr('RTA_GATEWAY'): + message += self._format_rta_gateway(msg.get_attr('RTA_GATEWAY')) + + if msg.get_attr('RTA_VIA'): + message += self._format_rta_via(msg.get_attr('RTA_VIA')) + + if msg.get_attr('RTA_OIF') is not None: + message += f' dev {self._get_if_name_by_index(msg.get_attr("RTA_OIF"))}' + + if msg.get_attr("RTA_TABLE"): + message += f' table {self._format_rttable(msg.get_attr("RTA_TABLE"))}' + + if not msg.get('flags') & RTM_F_CLONED: + message += f' proto {RT_PROTO.get(msg.get("proto"))}' + + if not msg.get('scope') == rtmsg.RT_SCOPE_UNIVERSE: + message += f' scope {RT_SCOPE_TO_NAME.get(msg.get("scope"))}' + + message += self._format_simple_field(msg.get_attr('RTA_PREFSRC'), prefix='src') + message += self._format_simple_field(msg.get_attr('RTA_PRIORITY'), prefix='metric') + + message += self._format_rt_flags(msg.get("flags")) + + if msg.get_attr('RTA_MARK'): + mark = msg.get_attr("RTA_MARK") + if mark >= 16: + message += f' mark 0x{mark:x}' + else: + message += f' mark {mark}' + + if msg.get_attr('RTA_FLOW'): + message += self._format_flow_field(msg.get_attr('RTA_FLOW')) + + message += self._format_simple_field(msg.get_attr('RTA_UID'), prefix='uid') + + if msg.get_attr('RTA_METRICS'): + message += self._format_rta_metrics(msg.get_attr("RTA_METRICS")) + + if msg.get_attr('RTA_IIF') is not None: + message += f' iif {self._get_if_name_by_index(msg.get_attr("RTA_IIF"))}' + + if msg.get_attr('RTA_PREF') is not None: + message += self._format_rta_pref(msg.get_attr("RTA_PREF")) + + if msg.get_attr('RTA_TTL_PROPAGATE') is not None: + message += f' ttl-propogate {"enabled" if msg.get_attr("RTA_TTL_PROPAGATE") else "disabled"}' + + if msg.get_attr('RTA_MULTIPATH') is not None: + _tmp = self._format_rta_multipath( + mcast_cloned=msg.get('flags') & RTM_F_CLONED and msg.get('type') == RTYPES['RTN_MULTICAST'], + family=msg.get('family'), + data=msg.get_attr("RTA_MULTIPATH")) + message += f' {_tmp}' + + return self._output(message) + + +class AddrFormatter(BaseMSGFormatter): + """ + A formatter class for handling address-related network messages + `RTM_NEWADDR` and `RTM_DELADDR`. + """ + INFINITY_LIFE_TIME = _INFINITY + + def _format_ifa_flags(self, flags: int, family: int) -> str: + """ + Formats address flags into a human-readable string. 
+ """ + result = list() + if flags: + if not flags & IFA_FLAGS['permanent']: + result.append('dynamic') + flags &= ~IFA_FLAGS['permanent'] + + if flags & IFA_FLAGS['temporary'] and family == socket.AF_INET6: + result.append('temporary') + flags &= ~IFA_FLAGS['temporary'] + + result.extend(self._parse_flag(flags, IFA_FLAGS)) + + return self._output(",".join(result)) + + def _format_ifa_addr(self, local: str, addr: str, preflen: int, priority: int) -> str: + """ + Formats address information into a shuman-readable string. + """ + result = '' + local = local or addr + addr = addr or local + + if local: + result += f'{local}' + if addr and addr != local: + result += f' peer {addr}' + result += f'/{preflen}' + + if priority: + result += f' {priority}' + + return self._output(result) + + def _format_ifa_cacheinfo(self, data: ifaddrmsg.ifaddrmsg.cacheinfo) -> str: + """ + Formats cache information for an address. + """ + result = '' + _map = { + 'ifa_valid': 'valid_lft', + 'ifa_preferred': 'preferred_lft', + } + + for key in ['ifa_valid', 'ifa_preferred']: + val = data.get(key) + if val == self.INFINITY_LIFE_TIME: + result += f'{_map.get(key)} forever ' + else: + result += f'{_map.get(key)} {val}sec ' + + return self._output(result) + + def format(self, msg: ifaddrmsg.ifaddrmsg) -> str: + """ + Formats a full network address message. + Combine attributes such as index, family, address, flags, and cache + information into a structured output string. + """ + message = self._prepare_start_message(msg.get('event')) + + message += f'{msg.get("index")}: {self._get_if_name_by_index(msg.get("index"))} ' + message += f'{FAMILY_TO_NAME.get(msg.get("family"), msg.get("family"))} ' + + message += self._format_ifa_addr( + msg.get_attr('IFA_LOCAL'), + msg.get_attr('IFA_ADDRESS'), + msg.get('prefixlen'), + msg.get_attr('IFA_RT_PRIORITY') + ) + message += self._format_simple_field(msg.get_attr('IFA_BROADCAST'), prefix='brd') + message += self._format_simple_field(msg.get_attr('IFA_ANYCAST'), prefix='any') + + if msg.get('scope') is not None: + message += f' scope {RT_SCOPE_TO_NAME.get(msg.get("scope"))}' + + message += self._format_ifa_flags(msg.get_attr("IFA_FLAGS"), msg.get("family")) + message += self._format_simple_field(msg.get_attr('IFA_LABEL'), prefix='label:') + + if msg.get_attr('IFA_CACHEINFO'): + message += self._format_ifa_cacheinfo(msg.get_attr('IFA_CACHEINFO')) + + return self._output(message) + + +class NeighFormatter(BaseMSGFormatter): + """ + A formatter class for handling neighbor-related network messages + `RTM_NEWNEIGH`, `RTM_DELNEIGH` and `RTM_GETNEIGH` + """ + def _format_ntf_flags(self, flags: int) -> str: + """ + Formats neighbor table entry flags into a human-readable string. + """ + result = list() + result.extend(self._parse_flag(flags, NTF_FlAGS)) + + return self._output(",".join(result)) + + def _format_neigh_state(self, data: int) -> str: + """ + Formats the state of a neighbor entry. + """ + result = list() + result.extend(self._parse_flag(data, NEIGH_STATE_FLAGS)) + + return self._output(",".join(result)) + + def format(self, msg: ndmsg.ndmsg) -> str: + """ + Formats a full neighbor-related network message. + Combine attributes such as destination, device, link-layer address, + flags, state, and protocol into a structured output string. 
+ """ + message = self._prepare_start_message(msg.get('event')) + message += self._format_simple_field(msg.get_attr('NDA_DST'), prefix='') + + if msg.get("ifindex") is not None: + message += f' dev {self._get_if_name_by_index(msg.get("ifindex"))}' + + message += self._format_simple_field(msg.get_attr('NDA_LLADDR'), prefix='lladdr') + message += f' {self._format_ntf_flags(msg.get("flags"))}' + message += f' {self._format_neigh_state(msg.get("state"))}' + + if msg.get_attr('NDA_PROTOCOL'): + message += f' proto {RT_PROTO.get(msg.get_attr("NDA_PROTOCOL"), msg.get_attr("NDA_PROTOCOL"))}' + + return self._output(message) + + +class RuleFormatter(BaseMSGFormatter): + """ + A formatter class for handling ruting tule network messages + `RTM_NEWRULE` and `RTM_DELRULE` + """ + def _format_direction(self, data: str, length: int, host_len: int): + """ + Formats the direction of traffic based on source or destination and prefix length. + """ + result = '' + if data: + result += f' {data}' + if length != host_len: + result += f'/{length}' + elif length: + result += f' 0/{length}' + + return self._output(result) + + def _format_fra_interface(self, data: str, flags: int, prefix: str): + """ + Formats interface-related attributes. + """ + result = f'{prefix} {data}' + if flags & FIB_RULE_IIF_DETACHED: + result += '[detached]' + + return self._output(result) + + def _format_fra_range(self, data: [str, dict], prefix: str): + """ + Formats a range of values (e.g., UID, sport, or dport). + """ + result = '' + if data: + if isinstance(data, str): + result += f' {prefix} {data}' + else: + result += f' {prefix} {data.get("start")}:{data.get("end")}' + return self._output(result) + + def _format_fra_table(self, msg: fibmsg): + """ + Formats the lookup table and associated attributes in the message. + """ + def __format_field(data: int, prefix: str): + if data and data not in [-1, _INFINITY]: + return f' {prefix} {data}' + return '' + + result = '' + table = msg.get_attr('FRA_TABLE') or msg.get('table') + if table: + result += f' lookup {self._format_rttable(table)}' + result += __format_field(msg.get_attr('FRA_SUPPRESS_PREFIXLEN'), 'suppress_prefixlength') + result += __format_field(msg.get_attr('FRA_SUPPRESS_IFGROUP'), 'suppress_ifgroup') + + return self._output(result) + + def _format_fra_action(self, msg: fibmsg): + """ + Formats the action associated with the rule. + """ + result = '' + if msg.get('action') == RTYPES.get('RTN_NAT'): + if msg.get_attr('RTA_GATEWAY'): # looks like deprecated but still use in iproute2 + result += f' map-to {msg.get_attr("RTA_GATEWAY")}' + else: + result += ' masquerade' + + elif msg.get('action') == FR_ACT_GOTO: + result += f' goto {msg.get_attr("FRA_GOTO") or "none"}' + if msg.get('flags') & FIB_RULE_UNRESOLVED: + result += ' [unresolved]' + + elif msg.get('action') == FR_ACT_NOP: + result += ' nop' + + elif msg.get('action') != FR_ACT_TO_TBL: + result += f' {RTYPES.get(msg.get("action"))}' + + return self._output(result) + + def format(self, msg: fibmsg): + """ + Formats a complete routing rule message. + Combines information about source, destination, interfaces, actions, + and other attributes into a single formatted string. 
+ """ + message = self._prepare_start_message(msg.get('event')) + host_len = self.af_bit_len(msg.get('family')) + message += self._format_simple_field(msg.get_attr('FRA_PRIORITY'), prefix='') + + if msg.get('flags') & FIB_RULE_INVERT: + message += ' not' + + tmp = self._format_direction(msg.get_attr('FRA_SRC'), msg.get('src_len'), host_len) + message += ' from' + (tmp if tmp else ' all ') + + if msg.get_attr('FRA_DST'): + tmp = self._format_direction(msg.get_attr('FRA_DST'), msg.get('dst_len'), host_len) + message += ' to' + tmp + + if msg.get('tos'): + message += f' tos {hex(msg.get("tos"))}' + + if msg.get_attr('FRA_FWMARK') or msg.get_attr('FRA_FWMASK'): + mark = msg.get_attr('FRA_FWMARK') or 0 + mask = msg.get_attr('FRA_FWMASK') or 0 + if mask != 0xFFFFFFFF: + message += f' fwmark {mark}/{mask}' + else: + message += f' fwmark {mark}' + + if msg.get_attr('FRA_IIFNAME'): + message += self._format_fra_interface( + msg.get_attr('FRA_IIFNAME'), + msg.get('flags'), + 'iif' + ) + + if msg.get_attr('FRA_OIFNAME'): + message += self._format_fra_interface( + msg.get_attr('FRA_OIFNAME'), + msg.get('flags'), + 'oif' + ) + + if msg.get_attr('FRA_L3MDEV'): + message += f' lookup [l3mdev-table]' + + if msg.get_attr('FRA_UID_RANGE'): + message += self._format_fra_range(msg.get_attr('FRA_UID_RANGE'), 'uidrange') + + message += self._format_simple_field(msg.get_attr('FRA_IP_PROTO'), prefix='ipproto') + + if msg.get_attr('FRA_SPORT_RANGE'): + message += self._format_fra_range(msg.get_attr('FRA_SPORT_RANGE'), 'sport') + + if msg.get_attr('FRA_DPORT_RANGE'): + message += self._format_fra_range(msg.get_attr('FRA_DPORT_RANGE'), 'dport') + + message += self._format_simple_field(msg.get_attr('FRA_TUN_ID'), prefix='tun_id') + + message += self._format_fra_table(msg) + + if msg.get_attr('FRA_FLOW'): + message += self._format_flow_field(msg.get_attr('FRA_FLOW')) + + message += self._format_fra_action(msg) + + if msg.get_attr('FRA_PROTOCOL'): + message += f' proto {RT_PROTO.get(msg.get_attr("FRA_PROTOCOL"), msg.get_attr("FRA_PROTOCOL"))}' + + return self._output(message) + + +class AddrlabelFormatter(BaseMSGFormatter): + # Not implemented decoder on pytroute2 but ip monitor use it message + pass + + +class PrefixFormatter(BaseMSGFormatter): + # Not implemented decoder on pytroute2 but ip monitor use it message + pass + + +class NetconfFormatter(BaseMSGFormatter): + # Not implemented decoder on pytroute2 but ip monitor use it message + pass + + +EVENT_MAP = { + rtnl.RTM_NEWROUTE: {'parser': RouteFormatter, 'event': 'route'}, + rtnl.RTM_DELROUTE: {'parser': RouteFormatter, 'event': 'route'}, + rtnl.RTM_NEWLINK: {'parser': LinkFormatter, 'event': 'link'}, + rtnl.RTM_DELLINK: {'parser': LinkFormatter, 'event': 'link'}, + rtnl.RTM_NEWADDR: {'parser': AddrFormatter, 'event': 'addr'}, + rtnl.RTM_DELADDR: {'parser': AddrFormatter, 'event': 'addr'}, + # rtnl.RTM_NEWADDRLABEL: {'parser': AddrlabelFormatter, 'event': 'addrlabel'}, + # rtnl.RTM_DELADDRLABEL: {'parser': AddrlabelFormatter, 'event': 'addrlabel'}, + rtnl.RTM_NEWNEIGH: {'parser': NeighFormatter, 'event': 'neigh'}, + rtnl.RTM_DELNEIGH: {'parser': NeighFormatter, 'event': 'neigh'}, + rtnl.RTM_GETNEIGH: {'parser': NeighFormatter, 'event': 'neigh'}, + # rtnl.RTM_NEWPREFIX: {'parser': PrefixFormatter, 'event': 'prefix'}, + rtnl.RTM_NEWRULE: {'parser': RuleFormatter, 'event': 'rule'}, + rtnl.RTM_DELRULE: {'parser': RuleFormatter, 'event': 'rule'}, + # rtnl.RTM_NEWNETCONF: {'parser': NetconfFormatter, 'event': 'netconf'}, + # rtnl.RTM_DELNETCONF: {'parser': 
NetconfFormatter, 'event': 'netconf'}, +} + + +def sig_handler(signum, frame): + process_name = multiprocessing.current_process().name + logger.debug( + f'[{process_name}]: {"Shutdown" if signum == signal.SIGTERM else "Reload"} signal received...' + ) + shutdown_event.set() + + +def parse_event_type(header: Dict) -> tuple: + """ + Extract event type and parser. + """ + event_type = EVENT_MAP.get(header['type'], {}).get('event', 'unknown') + _parser = EVENT_MAP.get(header['type'], {}).get('parser') + + if _parser is None: + raise UnsupportedMessageType(f'Unsupported message type: {header["type"]}') + + return event_type, _parser + + +def is_need_to_log(event_type: AnyStr, conf_event: Dict): + """ + Filter message by event type and protocols + """ + conf = conf_event.get(event_type) + if conf == {}: + return True + return False + + +def parse_event(msg: nfct_msg, conf_event: Dict) -> str: + """ + Convert nfct_msg to internal data dict. + """ + data = '' + event_type, parser = parse_event_type(msg['header']) + if event_type == 'link': + remember_if_index(idx=msg.get('index'), event_type=msg['header'].get('type')) + + if not is_need_to_log(event_type, conf_event): + return data + + message = parser().format(msg) + if message: + data = f'{f"[{event_type}]".upper():<{7}} {message}' + + return data + + +def worker(ct: IPRoute, shutdown_event: multiprocessing.Event, conf_event: Dict) -> None: + """ + Main function of parser worker process + """ + process_name = multiprocessing.current_process().name + logger.debug(f'[{process_name}] started') + timeout = 0.1 + while not shutdown_event.is_set(): + if not ct.buffer_queue.empty(): + msg = None + try: + for msg in ct.get(): + message = parse_event(msg, conf_event) + if message: + if logger.level == logging.DEBUG: + logger.debug(f'[{process_name}]: {message} raw: {msg}') + else: + logger.info(message) + except queue.Full: + logger.error('IPRoute message queue if full.') + except UnsupportedMessageType as e: + logger.debug(f'{e} =====> raw msg: {msg}') + except Exception as e: + logger.error(f'Unexpected error: {e.__class__} {e} [{msg}]') + else: + sleep(timeout) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '-c', + '--config', + action='store', + help='Path to vyos-network-event-logger configuration', + required=True, + type=Path, + ) + + args = parser.parse_args() + try: + config = read_json(args.config) + except Exception as err: + logger.error(f'Configuration file "{args.config}" does not exist or malformed: {err}') + exit(1) + + set_log_level(config.get('log_level', 'info')) + + signal.signal(signal.SIGHUP, sig_handler) + signal.signal(signal.SIGTERM, sig_handler) + + if 'event' in config: + event_groups = list(config.get('event').keys()) + else: + logger.error(f'Configuration is wrong. 
Event filter is empty.') + exit(1) + + conf_event = config['event'] + qsize = config.get('queue_size') + ct = IPRoute(async_qsize=int(qsize) if qsize else None) + ct.buffer_queue = multiprocessing.Queue(ct.async_qsize) + ct.bind(async_cache=True) + + processes = list() + try: + for _ in range(multiprocessing.cpu_count()): + p = multiprocessing.Process(target=worker, args=(ct, shutdown_event, conf_event)) + processes.append(p) + p.start() + logger.info('IPRoute socket bound and listening for messages.') + + while not shutdown_event.is_set(): + if not ct.pthread.is_alive(): + if ct.buffer_queue.qsize() / ct.async_qsize < 0.9: + if not shutdown_event.is_set(): + logger.debug('Restart listener thread') + # restart listener thread after queue overloaded when queue size low than 90% + ct.pthread = threading.Thread(name='Netlink async cache', target=ct.async_recv) + ct.pthread.daemon = True + ct.pthread.start() + else: + sleep(0.1) + finally: + for p in processes: + p.join() + if not p.is_alive(): + logger.debug(f'[{p.name}]: finished') + ct.close() + logging.info('IPRoute socket closed.') + exit() diff --git a/src/shim/vyshim.c b/src/shim/vyshim.c index a78f62a7b..1eb653cbf 100644 --- a/src/shim/vyshim.c +++ b/src/shim/vyshim.c @@ -67,6 +67,8 @@ void timer_handler(int); double get_posix_clock_time(void); +static char * s_recv_string (void *, int); + int main(int argc, char* argv[]) { // string for node data: conf_mode script and tagnode, if applicable @@ -117,33 +119,42 @@ int main(int argc, char* argv[]) zmq_send(requester, string_node_data_msg, strlen(string_node_data_msg), 0); zmq_recv(requester, error_code, 1, 0); - debug_print("Received node data receipt\n"); + debug_print("Received node data receipt with error_code\n"); - int err = (int)error_code[0]; + char msg_size_str[7]; + zmq_recv(requester, msg_size_str, 6, 0); + msg_size_str[6] = '\0'; + int msg_size = (int)strtol(msg_size_str, NULL, 16); + debug_print("msg_size: %d\n", msg_size); + + char *msg = s_recv_string(requester, msg_size); + printf("%s", msg); + free(msg); free(string_node_data_msg); - zmq_close(requester); - zmq_ctx_destroy(context); + int err = (int)error_code[0]; + int ret = 0; if (err & PASS) { debug_print("Received PASS\n"); - int ret = pass_through(argv, ex_index); - return ret; + ret = pass_through(argv, ex_index); } if (err & ERROR_DAEMON) { debug_print("Received ERROR_DAEMON\n"); - int ret = pass_through(argv, ex_index); - return ret; + ret = pass_through(argv, ex_index); } if (err & ERROR_COMMIT) { debug_print("Received ERROR_COMMIT\n"); - return -1; + ret = -1; } - return 0; + zmq_close(requester); + zmq_ctx_destroy(context); + + return ret; } int initialization(void* Requester) @@ -342,3 +353,15 @@ double get_posix_clock_time(void) double get_posix_clock_time(void) {return (double)0;} #endif + +// Receive string from socket and convert into C string +static char * s_recv_string (void *socket, int bufsize) { + char * buffer = (char *)malloc(bufsize+1); + int size = zmq_recv(socket, buffer, bufsize, 0); + if (size == -1) + return NULL; + if (size > bufsize) + size = bufsize; + buffer[size] = '\0'; + return buffer; +} diff --git a/src/system/sync-dhcp-lease-to-hosts.py b/src/system/sync-dhcp-lease-to-hosts.py new file mode 100755 index 000000000..5c8b18faf --- /dev/null +++ b/src/system/sync-dhcp-lease-to-hosts.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2025 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the 
GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import argparse +import logging + +import vyos.opmode +import vyos.hostsd_client + +from vyos.configquery import ConfigTreeQuery + +from vyos.kea import kea_get_active_config +from vyos.kea import kea_get_dhcp_pools +from vyos.kea import kea_get_server_leases + +# Configure logging +logger = logging.getLogger(__name__) +# set stream as output +logs_handler = logging.StreamHandler() +logger.addHandler(logs_handler) + + +def _get_all_server_leases(inet_suffix='4') -> list: + mappings = [] + try: + active_config = kea_get_active_config(inet_suffix) + except Exception: + raise vyos.opmode.DataUnavailable('Cannot fetch DHCP server configuration') + + try: + pools = kea_get_dhcp_pools(active_config, inet_suffix) + mappings = kea_get_server_leases( + active_config, inet_suffix, pools, state=[], origin=None + ) + except Exception: + raise vyos.opmode.DataUnavailable('Cannot fetch DHCP server leases') + + return mappings + + +if __name__ == '__main__': + # Parse command arguments + parser = argparse.ArgumentParser() + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--inet', action='store_true', help='Use IPv4 DHCP leases') + group.add_argument('--inet6', action='store_true', help='Use IPv6 DHCP leases') + args = parser.parse_args() + + inet_suffix = '4' if args.inet else '6' + service_suffix = '' if args.inet else 'v6' + + if inet_suffix == '6': + raise vyos.opmode.UnsupportedOperation( + 'Syncing IPv6 DHCP leases are not supported yet' + ) + + # Load configuration + config = ConfigTreeQuery() + + # Check if DHCP server is configured + # Using warning instead of error since this check may fail during first-time + # DHCP server setup when the service is not yet configured in the config tree. + # This happens when called from systemd's ExecStartPost the first time. 
+ if not config.exists(f'service dhcp{service_suffix}-server'): + logger.warning(f'DHCP{service_suffix} server is not configured') + + # Check if hostfile-update is enabled + if not config.exists(f'service dhcp{service_suffix}-server hostfile-update'): + logger.debug( + f'Hostfile update is disabled for DHCP{service_suffix} server, skipping hosts update' + ) + exit(0) + + lease_data = _get_all_server_leases(inet_suffix) + + try: + hc = vyos.hostsd_client.Client() + + for mapping in lease_data: + ip_addr = mapping.get('ip') + mac_addr = mapping.get('mac') + name = mapping.get('hostname') + name = name if name else f'host-{mac_addr.replace(":", "-")}' + domain = mapping.get('domain') + fqdn = f'{name}.{domain}' if domain else name + hc.add_hosts( + { + f'dhcp-server-{ip_addr}': { + fqdn: {'address': [ip_addr], 'aliases': []} + } + } + ) + + hc.apply() + + logger.debug('Hosts store updated successfully') + + except vyos.hostsd_client.VyOSHostsdError as e: + raise vyos.opmode.InternalError(str(e)) diff --git a/src/systemd/netplug.service b/src/systemd/netplug.service new file mode 100644 index 000000000..928c553e8 --- /dev/null +++ b/src/systemd/netplug.service @@ -0,0 +1,9 @@ +[Unit] +Description=Network cable hotplug management daemon +Documentation=man:netplugd(8) +After=vyos-router.service + +[Service] +Type=forking +PIDFile=/run/netplugd.pid +ExecStart=/sbin/netplugd -c /etc/netplug/netplugd.conf -p /run/netplugd.pid diff --git a/src/systemd/vyconfd.service b/src/systemd/vyconfd.service new file mode 100644 index 000000000..ab2280263 --- /dev/null +++ b/src/systemd/vyconfd.service @@ -0,0 +1,21 @@ +[Unit] +Description=VyOS vyconf daemon + +# Without this option, lots of default dependencies are added, +# among them network.target, which creates a dependency cycle +DefaultDependencies=no + +After=systemd-remount-fs.service + +[Service] +ExecStart=/usr/libexec/vyos/vyconf/vyconfd --log-file /var/run/log/vyconfd.log +Type=exec +SyslogIdentifier=vyconfd +SyslogFacility=daemon +Restart=on-failure + +User=root +Group=vyattacfg + +[Install] +WantedBy=vyos.target diff --git a/src/systemd/vyos-commitd.service b/src/systemd/vyos-commitd.service new file mode 100644 index 000000000..5b083f500 --- /dev/null +++ b/src/systemd/vyos-commitd.service @@ -0,0 +1,27 @@ +[Unit] +Description=VyOS commit daemon + +# Without this option, lots of default dependencies are added, +# among them network.target, which creates a dependency cycle +DefaultDependencies=no + +# Seemingly sensible way to say "as early as the system is ready" +# All vyos-configd needs is read/write mounted root +After=systemd-remount-fs.service +Before=vyos-router.service + +[Service] +ExecStart=/usr/bin/python3 -u /usr/libexec/vyos/services/vyos-commitd +Type=idle + +SyslogIdentifier=vyos-commitd +SyslogFacility=daemon + +Restart=on-failure + +# Does't work in Jessie but leave it here +User=root +Group=vyattacfg + +[Install] +WantedBy=vyos.target diff --git a/src/systemd/vyos-domain-resolver.service b/src/systemd/vyos-domain-resolver.service index c56b51f0c..87a4748f4 100644 --- a/src/systemd/vyos-domain-resolver.service +++ b/src/systemd/vyos-domain-resolver.service @@ -1,11 +1,14 @@ [Unit] Description=VyOS firewall domain resolver After=vyos-router.service +ConditionPathExistsGlob=/run/use-vyos-domain-resolver* [Service] Type=simple Restart=always -ExecStart=/usr/bin/python3 -u /usr/libexec/vyos/vyos-domain-resolver.py +ExecStart=/usr/bin/python3 -u /usr/libexec/vyos/services/vyos-domain-resolver +SyslogIdentifier=vyos-domain-resolver 
+SyslogFacility=daemon StandardError=journal StandardOutput=journal diff --git a/src/systemd/vyos-network-event-logger.service b/src/systemd/vyos-network-event-logger.service new file mode 100644 index 000000000..990dc43ba --- /dev/null +++ b/src/systemd/vyos-network-event-logger.service @@ -0,0 +1,21 @@ +[Unit] +Description=VyOS network-event logger daemon + +# Seemingly sensible way to say "as early as the system is ready" +# All vyos-configd needs is read/write mounted root +After=vyos.target + +[Service] +ExecStart=/usr/bin/python3 -u /usr/libexec/vyos/services/vyos-network-event-logger -c /run/vyos-network-event-logger.conf +Type=idle + +SyslogIdentifier=vyos-network-event-logger +SyslogFacility=daemon + +Restart=on-failure + +User=root +Group=vyattacfg + +[Install] +WantedBy=multi-user.target diff --git a/src/systemd/vyos-wan-load-balance.service b/src/systemd/vyos-wan-load-balance.service index 7d62a2ff6..a59f2c3ae 100644 --- a/src/systemd/vyos-wan-load-balance.service +++ b/src/systemd/vyos-wan-load-balance.service @@ -1,15 +1,11 @@ [Unit] -Description=VyOS WAN load-balancing service +Description=VyOS WAN Load Balancer After=vyos-router.service [Service] -ExecStart=/opt/vyatta/sbin/wan_lb -f /run/load-balance/wlb.conf -d -i /var/run/vyatta/wlb.pid -ExecReload=/bin/kill -s SIGTERM $MAINPID && sleep 5 && /opt/vyatta/sbin/wan_lb -f /run/load-balance/wlb.conf -d -i /var/run/vyatta/wlb.pid -ExecStop=/bin/kill -s SIGTERM $MAINPID -PIDFile=/var/run/vyatta/wlb.pid -KillMode=process -Restart=on-failure -RestartSec=5s +Type=simple +Restart=always +ExecStart=/usr/bin/python3 /usr/libexec/vyos/vyos-load-balancer.py [Install] WantedBy=multi-user.target diff --git a/src/systemd/vyos.target b/src/systemd/vyos.target index 47c91c1cc..ea1593fe9 100644 --- a/src/systemd/vyos.target +++ b/src/systemd/vyos.target @@ -1,3 +1,3 @@ [Unit] Description=VyOS target -After=multi-user.target +After=multi-user.target vyos-grub-update.service systemd-sysctl.service diff --git a/src/tests/test_config_diff.py b/src/tests/test_config_diff.py index 39e17613a..4017fff4d 100644 --- a/src/tests/test_config_diff.py +++ b/src/tests/test_config_diff.py @@ -31,11 +31,11 @@ class TestConfigDiff(TestCase): def test_unit(self): diff = vyos.configtree.DiffTree(self.config_left, self.config_null) sub = diff.sub - self.assertEqual(sub.to_string(), self.config_left.to_string()) + self.assertEqual(sub, self.config_left) diff = vyos.configtree.DiffTree(self.config_null, self.config_left) add = diff.add - self.assertEqual(add.to_string(), self.config_left.to_string()) + self.assertEqual(add, self.config_left) def test_symmetry(self): lr_diff = vyos.configtree.DiffTree(self.config_left, @@ -45,10 +45,10 @@ class TestConfigDiff(TestCase): sub = lr_diff.sub add = rl_diff.add - self.assertEqual(sub.to_string(), add.to_string()) + self.assertEqual(sub, add) add = lr_diff.add sub = rl_diff.sub - self.assertEqual(add.to_string(), sub.to_string()) + self.assertEqual(add, sub) def test_identity(self): lr_diff = vyos.configtree.DiffTree(self.config_left, @@ -61,6 +61,9 @@ class TestConfigDiff(TestCase): r_union = vyos.configtree.union(add, inter) l_union = vyos.configtree.union(sub, inter) + # here we must compare string representations instead of using + # dunder equal, as we assert equivalence of the values list, which + # is optionally ordered at render self.assertEqual(r_union.to_string(), self.config_right.to_string(ordered_values=True)) self.assertEqual(l_union.to_string(), diff --git a/src/tests/test_config_parser.py 
b/src/tests/test_config_parser.py index 9a4f02859..1b4a57311 100644 --- a/src/tests/test_config_parser.py +++ b/src/tests/test_config_parser.py @@ -51,3 +51,7 @@ class TestConfigParser(TestCase): def test_rename_duplicate(self): with self.assertRaises(vyos.configtree.ConfigTreeError): self.config.rename(["top-level-tag-node", "foo"], "bar") + + def test_leading_slashes(self): + self.assertTrue(self.config.exists(["normal-node", "value-with-leading-slashes"])) + self.assertEqual(self.config.return_value(["normal-node", "value-with-leading-slashes"]), "//other-value") diff --git a/src/tests/test_configd_inspect.py b/src/tests/test_configd_inspect.py index ccd631893..a0470221d 100644 --- a/src/tests/test_configd_inspect.py +++ b/src/tests/test_configd_inspect.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020-2024 VyOS maintainers and contributors +# Copyright (C) 2020-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -12,93 +12,151 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -import os -import re +import ast import json -import warnings -import importlib.util -from inspect import signature -from inspect import getsource -from functools import wraps from unittest import TestCase INC_FILE = 'data/configd-include.json' CONF_DIR = 'src/conf_mode' -f_list = ['get_config', 'verify', 'generate', 'apply'] - -def import_script(s): - path = os.path.join(CONF_DIR, s) - name = os.path.splitext(s)[0].replace('-', '_') - spec = importlib.util.spec_from_file_location(name, path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module - -# importing conf_mode scripts imports jinja2 with deprecation warning -def ignore_deprecation_warning(f): - @wraps(f) - def decorated_function(*args, **kwargs): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - f(*args, **kwargs) - return decorated_function +funcs = ['get_config', 'verify', 'generate', 'apply'] + + +class FunctionSig(ast.NodeVisitor): + def __init__(self): + self.func_sig_len = dict.fromkeys(funcs, None) + self.get_config_default_values = [] + + def visit_FunctionDef(self, node): + func_name = node.name + if func_name in funcs: + self.func_sig_len[func_name] = len(node.args.args) + + if func_name == 'get_config': + for default in node.args.defaults: + if isinstance(default, ast.Constant): + self.get_config_default_values.append(default.value) + + self.generic_visit(node) + + def get_sig_lengths(self): + return self.func_sig_len + + def get_config_default(self): + return self.get_config_default_values[0] + + +class LegacyCall(ast.NodeVisitor): + def __init__(self): + self.legacy_func_count = 0 + + def visit_Constant(self, node): + value = node.value + if isinstance(value, str): + if 'my_set' in value or 'my_delete' in value: + self.legacy_func_count += 1 + + self.generic_visit(node) + + def get_legacy_func_count(self): + return self.legacy_func_count + + +class ConfigInstance(ast.NodeVisitor): + def __init__(self): + self.count = 0 + + def visit_Call(self, node): + if isinstance(node.func, ast.Name): + name = node.func.id + if name == 'Config': + self.count += 1 + self.generic_visit(node) + + def get_count(self): + return self.count + + +class FunctionConfigInstance(ast.NodeVisitor): + def __init__(self): + self.func_config_instance = dict.fromkeys(funcs, 0) + + def 
visit_FunctionDef(self, node): + func_name = node.name + if func_name in funcs: + config_instance = ConfigInstance() + config_instance.visit(node) + self.func_config_instance[func_name] = config_instance.get_count() + self.generic_visit(node) + + def get_func_config_instance(self): + return self.func_config_instance + class TestConfigdInspect(TestCase): def setUp(self): + self.ast_list = [] + with open(INC_FILE) as f: self.inc_list = json.load(f) - @ignore_deprecation_warning - def test_signatures(self): for s in self.inc_list: - m = import_script(s) - for i in f_list: - f = getattr(m, i, None) - self.assertIsNotNone(f, f"'{s}': missing function '{i}'") - sig = signature(f) - par = sig.parameters - l = len(par) - self.assertEqual(l, 1, - f"'{s}': '{i}' incorrect signature") - if i == 'get_config': - for p in par.values(): - self.assertTrue(p.default is None, - f"'{s}': '{i}' incorrect signature") - - @ignore_deprecation_warning - def test_function_instance(self): - for s in self.inc_list: - m = import_script(s) - for i in f_list: - f = getattr(m, i, None) - if not f: - continue - str_f = getsource(f) - # Regex not XXXConfig() T3108 - n = len(re.findall(r'[^a-zA-Z]Config\(\)', str_f)) - if i == 'get_config': - self.assertEqual(n, 1, - f"'{s}': '{i}' no instance of Config") - if i != 'get_config': - self.assertEqual(n, 0, - f"'{s}': '{i}' instance of Config") - - @ignore_deprecation_warning - def test_file_instance(self): - for s in self.inc_list: - m = import_script(s) - str_m = getsource(m) - # Regex not XXXConfig T3108 - n = len(re.findall(r'[^a-zA-Z]Config\(\)', str_m)) - self.assertEqual(n, 1, - f"'{s}' more than one instance of Config") - - @ignore_deprecation_warning + s_path = f'{CONF_DIR}/{s}' + with open(s_path) as f: + s_str = f.read() + s_tree = ast.parse(s_str) + self.ast_list.append((s, s_tree)) + + def test_signatures(self): + for s, t in self.ast_list: + visitor = FunctionSig() + visitor.visit(t) + sig_lens = visitor.get_sig_lengths() + + for f in funcs: + self.assertIsNotNone(sig_lens[f], f"'{s}': '{f}' missing") + self.assertEqual(sig_lens[f], 1, f"'{s}': '{f}' incorrect signature") + + self.assertEqual( + visitor.get_config_default(), + None, + f"'{s}': 'get_config' incorrect signature", + ) + + def test_file_config_instance(self): + for s, t in self.ast_list: + visitor = ConfigInstance() + visitor.visit(t) + count = visitor.get_count() + + self.assertEqual(count, 1, f"'{s}' more than one instance of Config") + + def test_function_config_instance(self): + for s, t in self.ast_list: + visitor = FunctionConfigInstance() + visitor.visit(t) + func_config_instance = visitor.get_func_config_instance() + + for f in funcs: + if f == 'get_config': + self.assertTrue( + func_config_instance[f] > 0, + f"'{s}': '{f}' no instance of Config", + ) + self.assertTrue( + func_config_instance[f] < 2, + f"'{s}': '{f}' more than one instance of Config", + ) + else: + self.assertEqual( + func_config_instance[f], 0, f"'{s}': '{f}' instance of Config" + ) + def test_config_modification(self): - for s in self.inc_list: - m = import_script(s) - str_m = getsource(m) - n = str_m.count('my_set') - self.assertEqual(n, 0, f"'{s}' modifies config") + for s, t in self.ast_list: + visitor = LegacyCall() + visitor.visit(t) + legacy_func_count = visitor.get_legacy_func_count() + + self.assertEqual(legacy_func_count, 0, f"'{s}' modifies config") diff --git a/src/tests/test_initial_setup.py b/src/tests/test_initial_setup.py index 4cd5fb169..7737f9df5 100644 --- a/src/tests/test_initial_setup.py +++ 
b/src/tests/test_initial_setup.py @@ -92,8 +92,8 @@ class TestInitialSetup(TestCase): vis.set_default_gateway(self.config, '192.0.2.1') self.assertTrue(self.config.exists(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop', '192.0.2.1'])) - self.assertTrue(self.xml.is_tag(['protocols', 'static', 'multicast', 'route', '0.0.0.0/0', 'next-hop'])) - self.assertTrue(self.xml.is_tag(['protocols', 'static', 'multicast', 'route'])) + self.assertTrue(self.xml.is_tag(['protocols', 'static', 'mroute', '0.0.0.0/0', 'next-hop'])) + self.assertTrue(self.xml.is_tag(['protocols', 'static', 'mroute'])) if __name__ == "__main__": unittest.main() diff --git a/src/tests/test_template.py b/src/tests/test_template.py index 6377f6da5..7cae867a0 100644 --- a/src/tests/test_template.py +++ b/src/tests/test_template.py @@ -190,3 +190,12 @@ class TestVyOSTemplate(TestCase): for group_name, group_config in data['ike_group'].items(): ciphers = vyos.template.get_esp_ike_cipher(group_config) self.assertIn(IKEv2_DEFAULT, ','.join(ciphers)) + + def test_get_default_port(self): + from vyos.defaults import internal_ports + + with self.assertRaises(RuntimeError): + vyos.template.get_default_port('UNKNOWN') + + self.assertEqual(vyos.template.get_default_port('certbot_haproxy'), + internal_ports['certbot_haproxy']) diff --git a/src/tests/test_utils_network.py b/src/tests/test_utils_network.py index d68dec16f..92fde447d 100644 --- a/src/tests/test_utils_network.py +++ b/src/tests/test_utils_network.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020-2024 VyOS maintainers and contributors +# Copyright (C) 2020-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -43,3 +43,12 @@ class TestVyOSUtilsNetwork(TestCase): self.assertFalse(vyos.utils.network.is_loopback_addr('::2')) self.assertFalse(vyos.utils.network.is_loopback_addr('192.0.2.1')) + + def test_check_port_availability(self): + self.assertTrue(vyos.utils.network.check_port_availability('::1', 8080)) + self.assertTrue(vyos.utils.network.check_port_availability('127.0.0.1', 8080)) + self.assertTrue(vyos.utils.network.check_port_availability(None, 8080, protocol='udp')) + # We do not have 192.0.2.1 configured on this system + self.assertFalse(vyos.utils.network.check_port_availability('192.0.2.1', 443)) + # We do not have 2001:db8::1 configured on this system + self.assertFalse(vyos.utils.network.check_port_availability('2001:db8::1', 80, protocol='udp')) diff --git a/src/utils/vyos-commands-to-config b/src/utils/vyos-commands-to-config new file mode 100755 index 000000000..927d9bd70 --- /dev/null +++ b/src/utils/vyos-commands-to-config @@ -0,0 +1,53 @@ +#! /usr/bin/python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# + +import sys +import json + +from vyos.configtree import ConfigTree +from vyos.utils.config import parse_commands +from vyos.utils.config import set_tags + +def commands_to_config(cmds): + ct = ConfigTree('') + cmds = parse_commands(cmds) + + for c in cmds: + if c["op"] == "set": + if c["is_leaf"]: + replace = False if c["is_multi"] else True + ct.set(c["path"], value=c["value"], replace=replace) + set_tags(ct, c["path"]) + else: + ct.create_node(c["path"]) + set_tags(ct, c["path"]) + else: + raise ValueError( + f"\"{c['op']}\" is not a supported config operation") + + return ct + + +if __name__ == '__main__': + try: + cmds = sys.stdin.read() + ct = commands_to_config(cmds) + out = ConfigTree(ct.to_string()) + print(str(out)) + except Exception as e: + print(e) + sys.exit(1) diff --git a/src/utils/vyos-show-config b/src/utils/vyos-show-config new file mode 100755 index 000000000..152322fc1 --- /dev/null +++ b/src/utils/vyos-show-config @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import os +import sys +import argparse + +from signal import signal, SIGPIPE, SIG_DFL + +def get_config(path): + from vyos.utils.process import rc_cmd + res, out = rc_cmd(f"cli-shell-api showCfg {path}") + if res > 0: + print("Error: failed to retrieve the config", file=sys.stderr) + sys.exit(1) + else: + return out + +def strip_config(config): + from vyos.utils.strip_config import strip_config_source + return strip_config_source(config) + +if __name__ == '__main__': + signal(SIGPIPE,SIG_DFL) + + parser = argparse.ArgumentParser() + parser.add_argument("--strip-private", + help="Strip private information from the config", + action="store_true") + + args, path_args = parser.parse_known_args() + + config = get_config(" ".join(path_args)) + + if args.strip_private: + edit_level = os.getenv("VYATTA_EDIT_LEVEL") + if (edit_level != "/") or (len(path_args) > 0): + print("Error: show --strip-private only works at the top level", + file=sys.stderr) + sys.exit(1) + else: + print(strip_config(config)) + else: + print(config) diff --git a/src/validators/base64 b/src/validators/base64 index e2b1e730d..a54168ef7 100755 --- a/src/validators/base64 +++ b/src/validators/base64 @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021 VyOS maintainers and contributors +# Copyright (C) 2021-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -15,13 +15,17 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import base64 -from sys import argv +import argparse -if __name__ == '__main__': - if len(argv) != 2: - exit(1) - try: - base64.b64decode(argv[1]) - except: +parser = argparse.ArgumentParser(description="Validate base64 input.") +parser.add_argument("base64", help="Base64 encoded string to validate") +parser.add_argument("--decoded-len", type=int, help="Optional list of valid lengths for the decoded input") +args = parser.parse_args() + +try: + decoded = base64.b64decode(args.base64) + if args.decoded_len and len(decoded) != args.decoded_len: exit(1) - exit(0) +except: + exit(1) +exit(0) diff --git a/src/validators/cpu b/src/validators/cpu new file mode 100755 index 000000000..959a49248 --- /dev/null +++ b/src/validators/cpu @@ -0,0 +1,43 @@ +#!/usr/bin/python3 + +import re +import sys + +MAX_CPU = 511 + + +def validate_isolcpus(value): + pattern = re.compile(r'^(\d{1,3}(-\d{1,3})?)(,(\d{1,3}(-\d{1,3})?))*$') + if not pattern.fullmatch(value): + return False + + flat_list = [] + for part in value.split(','): + if '-' in part: + start, end = map(int, part.split('-')) + if start > end or start < 0 or end > MAX_CPU: + return False + flat_list.extend(range(start, end + 1)) + else: + num = int(part) + if num < 0 or num > MAX_CPU: + return False + flat_list.append(num) + + for i in range(1, len(flat_list)): + if flat_list[i] <= flat_list[i - 1]: + return False + + return True + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python3 cpu.py <cpu_list>") + sys.exit(1) + + input_value = sys.argv[1] + if validate_isolcpus(input_value): + sys.exit(0) + else: + sys.exit(1) diff --git a/src/validators/ether-type b/src/validators/ether-type new file mode 100644 index 000000000..926db26d3 --- /dev/null +++ b/src/validators/ether-type @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import re +from sys import argv,exit + +if __name__ == '__main__': + if len(argv) != 2: + exit(1) + + input = argv[1] + try: + # ethertype can be in the range 1 - 65535 + if int(input) in range(1, 65536): + exit(0) + except ValueError: + pass + + pattern = "!?\\b(all|ip|ipv6|ipx|802.1Q|802_2|802_3|aarp|aoe|arp|atalk|dec|lat|localtalk|rarp|snap|x25)\\b" + if re.match(pattern, input): + exit(0) + + print(f'Error: {input} is not a valid ether type or protocol.') + exit(1) diff --git a/src/validators/ethernet-interface b/src/validators/ethernet-interface new file mode 100644 index 000000000..2bf92812e --- /dev/null +++ b/src/validators/ethernet-interface @@ -0,0 +1,13 @@ +#!/bin/sh + +if ! [[ "$1" =~ ^(lan|eth|eno|ens|enp|enx)[0-9]+$ ]]; then + echo "Error: $1 is not an ethernet interface" + exit 1 +fi + +if ! 
[ -d "/sys/class/net/$1" ]; then + echo "Error: $1 interface does not exist in the system" + exit 1 +fi + +exit 0 diff --git a/src/validators/interface-address b/src/validators/interface-address index 4c203956b..2a2583fc3 100755 --- a/src/validators/interface-address +++ b/src/validators/interface-address @@ -1,3 +1,3 @@ #!/bin/sh -ipaddrcheck --is-ipv4-host $1 || ipaddrcheck --is-ipv6-host $1 +ipaddrcheck --is-any-host "$1" diff --git a/src/validators/ip-address b/src/validators/ip-address index 11d6df09e..351f728a6 100755 --- a/src/validators/ip-address +++ b/src/validators/ip-address @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-any-single $1 +ipaddrcheck --is-any-single "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IP address" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ip-cidr b/src/validators/ip-cidr index 60d2ac295..8a01e7ad9 100755 --- a/src/validators/ip-cidr +++ b/src/validators/ip-cidr @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-any-cidr $1 +ipaddrcheck --is-any-cidr "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IP CIDR" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ip-host b/src/validators/ip-host index 77c578fa2..7c5ad2612 100755 --- a/src/validators/ip-host +++ b/src/validators/ip-host @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-any-host $1 +ipaddrcheck --is-any-host "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IP host" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ip-prefix b/src/validators/ip-prefix index e5a64fea8..25204ace5 100755 --- a/src/validators/ip-prefix +++ b/src/validators/ip-prefix @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-any-net $1 +ipaddrcheck --is-any-net "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IP prefix" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv4 b/src/validators/ipv4 index 8676d5800..11f854cf1 100755 --- a/src/validators/ipv4 +++ b/src/validators/ipv4 @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv4 $1 +ipaddrcheck --is-ipv4 "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not IPv4" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv4-address b/src/validators/ipv4-address index 058db088b..1cfd961ba 100755 --- a/src/validators/ipv4-address +++ b/src/validators/ipv4-address @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv4-single $1 +ipaddrcheck --is-ipv4-single "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IPv4 address" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv4-host b/src/validators/ipv4-host index 74b8c36a7..eb8faaa2a 100755 --- a/src/validators/ipv4-host +++ b/src/validators/ipv4-host @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv4-host $1 +ipaddrcheck --is-ipv4-host "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IPv4 host" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv4-multicast b/src/validators/ipv4-multicast index 3f28c51db..cf871bd59 100755 --- a/src/validators/ipv4-multicast +++ b/src/validators/ipv4-multicast @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv4-multicast $1 && ipaddrcheck --is-ipv4-single $1 +ipaddrcheck --is-ipv4-multicast "$1" && ipaddrcheck --is-ipv4-single "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IPv4 multicast address" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv4-prefix b/src/validators/ipv4-prefix index 7e1e0e8dd..f8d46c69c 100755 --- a/src/validators/ipv4-prefix +++ b/src/validators/ipv4-prefix @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv4-net $1 +ipaddrcheck --is-ipv4-net "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IPv4 prefix" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv4-range b/src/validators/ipv4-range index 6492bfc52..7bf271bbb 100755 --- a/src/validators/ipv4-range +++ b/src/validators/ipv4-range @@ -1,40 +1,10 @@ -#!/bin/bash +#!/bin/sh -# snippet from https://stackoverflow.com/questions/10768160/ip-address-converter -ip2dec () { - local a b c d ip=$@ - IFS=. read -r a b c d <<< "$ip" - printf '%d\n' "$((a * 256 ** 3 + b * 256 ** 2 + c * 256 + d))" -} +ipaddrcheck --verbose --is-ipv4-range "$1" -error_exit() { - echo "Error: $1 is not a valid IPv4 address range" - exit 1 -} - -# Only run this if there is a hypen present in $1 -if [[ "$1" =~ "-" ]]; then - # This only works with real bash (<<<) - split IP addresses into array with - # hyphen as delimiter - readarray -d - -t strarr <<< $1 - - ipaddrcheck --is-ipv4-single ${strarr[0]} - if [ $? -gt 0 ]; then - error_exit $1 - fi - - ipaddrcheck --is-ipv4-single ${strarr[1]} - if [ $? -gt 0 ]; then - error_exit $1 - fi - - start=$(ip2dec ${strarr[0]}) - stop=$(ip2dec ${strarr[1]}) - if [ $start -ge $stop ]; then - error_exit $1 - fi - - exit 0 +if [ $? -gt 0 ]; then + echo "Error: $1 is not a valid IPv4 address range" + exit 1 fi -error_exit $1 +exit 0 diff --git a/src/validators/ipv6 b/src/validators/ipv6 index 4ae130eb5..57696add7 100755 --- a/src/validators/ipv6 +++ b/src/validators/ipv6 @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv6 $1 +ipaddrcheck --is-ipv6 "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not IPv6" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv6-address b/src/validators/ipv6-address index 1fca77668..460639090 100755 --- a/src/validators/ipv6-address +++ b/src/validators/ipv6-address @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv6-single $1 +ipaddrcheck --is-ipv6-single "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IPv6 address" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv6-host b/src/validators/ipv6-host index 7085809a9..1eb4d8e35 100755 --- a/src/validators/ipv6-host +++ b/src/validators/ipv6-host @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv6-host $1 +ipaddrcheck --is-ipv6-host "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IPv6 host" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv6-multicast b/src/validators/ipv6-multicast index 5aa7d734a..746ff7edf 100755 --- a/src/validators/ipv6-multicast +++ b/src/validators/ipv6-multicast @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv6-multicast $1 && ipaddrcheck --is-ipv6-single $1 +ipaddrcheck --is-ipv6-multicast "$1" && ipaddrcheck --is-ipv6-single "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IPv6 multicast address" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv6-prefix b/src/validators/ipv6-prefix index 890dda723..1bb9b42fe 100755 --- a/src/validators/ipv6-prefix +++ b/src/validators/ipv6-prefix @@ -1,10 +1,10 @@ #!/bin/sh -ipaddrcheck --is-ipv6-net $1 +ipaddrcheck --is-ipv6-net "$1" if [ $? -gt 0 ]; then echo "Error: $1 is not a valid IPv6 prefix" exit 1 fi -exit 0
\ No newline at end of file +exit 0 diff --git a/src/validators/ipv6-range b/src/validators/ipv6-range index 7080860c4..0d2eb6384 100755 --- a/src/validators/ipv6-range +++ b/src/validators/ipv6-range @@ -1,20 +1,10 @@ -#!/usr/bin/env python3 +#!/bin/sh -from ipaddress import IPv6Address -from sys import argv, exit +ipaddrcheck --verbose --is-ipv6-range "$1" -if __name__ == '__main__': - if len(argv) > 1: - # try to pass validation and raise an error if failed - try: - ipv6_range = argv[1] - range_left = ipv6_range.split('-')[0] - range_right = ipv6_range.split('-')[1] - if not IPv6Address(range_left) < IPv6Address(range_right): - raise ValueError(f'left element {range_left} must be less than right element {range_right}') - except Exception as err: - print(f'Error: {ipv6_range} is not a valid IPv6 range: {err}') - exit(1) - else: - print('Error: an IPv6 range argument must be provided') - exit(1) +if [ $? -gt 0 ]; then + echo "Error: $1 is not a valid IPv6 address range" + exit 1 +fi + +exit 0 |
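
Most of the validator diffs above simply quote the "$1" argument handed to ipaddrcheck. Below is a minimal sketch of the word-splitting behaviour that quoting guards against; check() is a hypothetical stand-in for ipaddrcheck and only reports how many arguments it actually received.

#!/bin/sh
# Sketch: why the validators now pass "$1" instead of $1.
# check() stands in for ipaddrcheck here; it only counts its arguments.
check() {
    echo "received $# argument(s)"
}

set -- ""        # simulate a validator invoked with an empty value
check $1         # unquoted: the empty argument collapses -> "received 0 argument(s)"
check "$1"       # quoted: an explicit empty string is passed -> "received 1 argument(s)"

With the quoted form the validator still invokes ipaddrcheck against the (possibly empty or whitespace) value and fails with its usual error message, rather than calling it with no operand at all.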