Diffstat (limited to 'src')
-rwxr-xr-x | src/conf_mode/flow_accounting_conf.py        |  34
-rwxr-xr-x | src/conf_mode/http-api.py                    |   6
-rwxr-xr-x | src/conf_mode/interfaces-bonding.py          |  38
-rwxr-xr-x | src/conf_mode/interfaces-ethernet.py         | 237
-rwxr-xr-x | src/conf_mode/interfaces-pppoe.py            |   5
-rwxr-xr-x | src/conf_mode/policy-local-route.py          |  79
-rwxr-xr-x | src/conf_mode/protocols_isis.py              |  37
-rw-r--r-- | src/etc/sysctl.d/30-vyos-router.conf         |   3
-rwxr-xr-x | src/helpers/config_dependency.py             |  79
-rwxr-xr-x | src/migration-scripts/interfaces/30-to-31    |  71
-rwxr-xr-x | src/migration-scripts/openvpn/0-to-1         |  49
-rwxr-xr-x | src/op_mode/generate_tech-support_archive.py | 148
-rwxr-xr-x | src/op_mode/interfaces_wireless.py           | 186
-rw-r--r-- | src/op_mode/show-ssh-fingerprints.py         |  49
-rwxr-xr-x | src/op_mode/show_wireless.py                 | 149
-rwxr-xr-x | src/services/vyos-http-api-server            | 173
-rwxr-xr-x | src/system/uacctd_stop.py                    |  68
17 files changed, 1159 insertions, 252 deletions
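The flow-accounting hunk below adds a dummy nftables rule as a packet trigger to unlock pmacct's main loop. A minimal standalone sketch of the same idea, using plain `subprocess` instead of the `vyos.utils.process` helpers (the table, chain and rule are taken verbatim from the hunk; the wrapper function and `__main__` usage are illustrative assumptions):

```python
# Hedged sketch: set up / tear down the pmacct packet trigger outside VyOS.
# The nft commands mirror _nftables_trigger_setup() in the diff below.
import subprocess

def _run(command: str) -> int:
    """Run a shell command and return its exit code."""
    return subprocess.run(command, shell=True).returncode

def nftables_trigger_setup(operation: str) -> None:
    """Add or delete the dummy rule that wakes up the pmacct main loop."""
    # exit code 0 means the pmacct table already exists
    table_exists = _run('nft -snj list table ip pmacct') == 0

    if operation == 'delete' and table_exists:
        _run('nft delete table ip pmacct')
    if operation == 'add' and not table_exists:
        for nft_cmd in [
            'nft add table ip pmacct',
            'nft add chain ip pmacct pmacct_out '
            '{ type filter hook output priority raw - 50 \\; policy accept \\; }',
            'nft add rule ip pmacct pmacct_out oif lo ip daddr 127.0.254.0 '
            'counter log group 2 snaplen 1 queue-threshold 0 comment NFLOG_TRIGGER',
        ]:
            _run(nft_cmd)

if __name__ == '__main__':
    nftables_trigger_setup('add')     # install the trigger
    nftables_trigger_setup('delete')  # remove it again
```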
diff --git a/src/conf_mode/flow_accounting_conf.py b/src/conf_mode/flow_accounting_conf.py index 81ee39df1..206f513c8 100755 --- a/src/conf_mode/flow_accounting_conf.py +++ b/src/conf_mode/flow_accounting_conf.py @@ -28,6 +28,7 @@ from vyos.ifconfig import Section from vyos.template import render from vyos.utils.process import call from vyos.utils.process import cmd +from vyos.utils.process import run from vyos.utils.network import is_addr_assigned from vyos import ConfigError from vyos import airbag @@ -116,6 +117,30 @@ def _nftables_config(configured_ifaces, direction, length=None): cmd(command, raising=ConfigError) +def _nftables_trigger_setup(operation: str) -> None: + """Add a dummy rule to unlock the main pmacct loop with a packet-trigger + + Args: + operation (str): 'add' or 'delete' a trigger + """ + # check if a chain exists + table_exists = False + if run('nft -snj list table ip pmacct') == 0: + table_exists = True + + if operation == 'delete' and table_exists: + nft_cmd: str = 'nft delete table ip pmacct' + cmd(nft_cmd, raising=ConfigError) + if operation == 'add' and not table_exists: + nft_cmds: list[str] = [ + 'nft add table ip pmacct', + 'nft add chain ip pmacct pmacct_out { type filter hook output priority raw - 50 \\; policy accept \\; }', + 'nft add rule ip pmacct pmacct_out oif lo ip daddr 127.0.254.0 counter log group 2 snaplen 1 queue-threshold 0 comment NFLOG_TRIGGER' + ] + for nft_cmd in nft_cmds: + cmd(nft_cmd, raising=ConfigError) + + def get_config(config=None): if config: conf = config @@ -252,7 +277,6 @@ def generate(flow_config): call('systemctl daemon-reload') def apply(flow_config): - action = 'restart' # Check if flow-accounting was removed and define command if not flow_config: _nftables_config([], 'ingress') @@ -262,6 +286,10 @@ def apply(flow_config): call(f'systemctl stop {systemd_service}') if os.path.exists(uacctd_conf_path): os.unlink(uacctd_conf_path) + + # must be done after systemctl + _nftables_trigger_setup('delete') + return # Start/reload flow-accounting daemon @@ -277,6 +305,10 @@ def apply(flow_config): else: _nftables_config([], 'egress') + # add a trigger for signal processing + _nftables_trigger_setup('add') + + if __name__ == '__main__': try: config = get_config() diff --git a/src/conf_mode/http-api.py b/src/conf_mode/http-api.py index 793a90d88..d8fe3b736 100755 --- a/src/conf_mode/http-api.py +++ b/src/conf_mode/http-api.py @@ -27,6 +27,7 @@ from vyos.config import Config from vyos.configdep import set_dependents, call_dependents from vyos.template import render from vyos.utils.process import call +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError from vyos import airbag airbag.enable() @@ -130,7 +131,10 @@ def apply(http_api): service_name = 'vyos-http-api.service' if http_api is not None: - call(f'systemctl restart {service_name}') + if is_systemd_service_running(f'{service_name}'): + call(f'systemctl reload {service_name}') + else: + call(f'systemctl restart {service_name}') else: call(f'systemctl stop {service_name}') diff --git a/src/conf_mode/interfaces-bonding.py b/src/conf_mode/interfaces-bonding.py index 0bd306ed0..1179e3e4f 100755 --- a/src/conf_mode/interfaces-bonding.py +++ b/src/conf_mode/interfaces-bonding.py @@ -18,7 +18,6 @@ import os from sys import exit from netifaces import interfaces - from vyos.config import Config from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed @@ -34,10 +33,13 @@ from vyos.configverify import verify_source_interface 
from vyos.configverify import verify_vlan_config from vyos.configverify import verify_vrf from vyos.ifconfig import BondIf +from vyos.ifconfig.ethernet import EthernetIf from vyos.ifconfig import Section from vyos.utils.dict import dict_search +from vyos.utils.dict import dict_to_paths_values from vyos.configdict import has_address_configured from vyos.configdict import has_vrf_configured +from vyos.configdep import set_dependents, call_dependents from vyos import ConfigError from vyos import airbag airbag.enable() @@ -90,7 +92,6 @@ def get_config(config=None): # determine which members have been removed interfaces_removed = leaf_node_changed(conf, base + [ifname, 'member', 'interface']) - # Reset config level to interfaces old_level = conf.get_level() conf.set_level(['interfaces']) @@ -102,6 +103,10 @@ def get_config(config=None): tmp = {} for interface in interfaces_removed: + # if member is deleted from bond, add dependencies to call + # ethernet commit again in apply function + # to apply options under ethernet section + set_dependents('ethernet', conf, interface) section = Section.section(interface) # this will be 'ethernet' for 'eth0' if conf.exists([section, interface, 'disable']): tmp[interface] = {'disable': ''} @@ -116,9 +121,21 @@ def get_config(config=None): if dict_search('member.interface', bond): for interface, interface_config in bond['member']['interface'].items(): + + interface_ethernet_config = conf.get_config_dict( + ['interfaces', 'ethernet', interface], + key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_defaults=False, + with_recursive_defaults=False) + + interface_config['config_paths'] = dict_to_paths_values(interface_ethernet_config) + # Check if member interface is a new member if not conf.exists_effective(base + [ifname, 'member', 'interface', interface]): bond['shutdown_required'] = {} + interface_config['new_added'] = {} # Check if member interface is disabled conf.set_level(['interfaces']) @@ -151,7 +168,6 @@ def get_config(config=None): # bond members must not have a VRF attached tmp = has_vrf_configured(conf, interface) if tmp: interface_config['has_vrf'] = {} - return bond @@ -212,6 +228,14 @@ def verify(bond): if 'has_vrf' in interface_config: raise ConfigError(error_msg + 'it has a VRF assigned!') + if 'new_added' in interface_config and 'config_paths' in interface_config: + for option_path, option_value in interface_config['config_paths'].items(): + if option_path in EthernetIf.get_bond_member_allowed_options() : + continue + if option_path in BondIf.get_inherit_bond_options(): + continue + raise ConfigError(error_msg + f'it has a "{option_path.replace(".", " ")}" assigned!') + if 'primary' in bond: if bond['primary'] not in bond['member']['interface']: raise ConfigError(f'Primary interface of bond "{bond_name}" must be a member interface') @@ -227,13 +251,17 @@ def generate(bond): def apply(bond): b = BondIf(bond['ifname']) - if 'deleted' in bond: # delete interface b.remove() else: b.update(bond) - + if dict_search('member.interface_remove', bond): + try: + call_dependents() + except ConfigError: + raise ConfigError('Error in updating ethernet interface ' + 'after deleting it from bond') return None if __name__ == '__main__': diff --git a/src/conf_mode/interfaces-ethernet.py b/src/conf_mode/interfaces-ethernet.py index f3e65ad5e..7374a29f7 100755 --- a/src/conf_mode/interfaces-ethernet.py +++ b/src/conf_mode/interfaces-ethernet.py @@ -15,6 +15,7 @@ # along with this program. 
If not, see <http://www.gnu.org/licenses/>. import os +import pprint from glob import glob from sys import exit @@ -35,6 +36,7 @@ from vyos.configverify import verify_vrf from vyos.configverify import verify_bond_bridge_member from vyos.ethtool import Ethtool from vyos.ifconfig import EthernetIf +from vyos.ifconfig import BondIf from vyos.pki import find_chain from vyos.pki import encode_certificate from vyos.pki import load_certificate @@ -42,6 +44,9 @@ from vyos.pki import wrap_private_key from vyos.template import render from vyos.utils.process import call from vyos.utils.dict import dict_search +from vyos.utils.dict import dict_to_paths_values +from vyos.utils.dict import dict_set +from vyos.utils.dict import dict_delete from vyos.utils.file import write_file from vyos import ConfigError from vyos import airbag @@ -51,6 +56,90 @@ airbag.enable() cfg_dir = '/run/wpa_supplicant' wpa_suppl_conf = '/run/wpa_supplicant/{ifname}.conf' +def update_bond_options(conf: Config, eth_conf: dict) -> list: + """ + Return list of blocked options if interface is a bond member + :param conf: Config object + :type conf: Config + :param eth_conf: Ethernet config dictionary + :type eth_conf: dict + :return: List of blocked options + :rtype: list + """ + blocked_list = [] + bond_name = list(eth_conf['is_bond_member'].keys())[0] + config_without_defaults = conf.get_config_dict( + ['interfaces', 'ethernet', eth_conf['ifname']], + key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_defaults=False, + with_recursive_defaults=False) + config_with_defaults = conf.get_config_dict( + ['interfaces', 'ethernet', eth_conf['ifname']], + key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_defaults=True, + with_recursive_defaults=True) + bond_config_with_defaults = conf.get_config_dict( + ['interfaces', 'bonding', bond_name], + key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_defaults=True, + with_recursive_defaults=True) + eth_dict_paths = dict_to_paths_values(config_without_defaults) + eth_path_base = ['interfaces', 'ethernet', eth_conf['ifname']] + + #if option is configured under ethernet section + for option_path, option_value in eth_dict_paths.items(): + bond_option_value = dict_search(option_path, bond_config_with_defaults) + + #If option is allowed for changing then continue + if option_path in EthernetIf.get_bond_member_allowed_options(): + continue + # if option is inherited from bond then set valued from bond interface + if option_path in BondIf.get_inherit_bond_options(): + # If option equals to bond option then do nothing + if option_value == bond_option_value: + continue + else: + # if ethernet has option and bond interface has + # then copy it from bond + if bond_option_value is not None: + if is_node_changed(conf, eth_path_base + option_path.split('.')): + Warning( + f'Cannot apply "{option_path.replace(".", " ")}" to "{option_value}".' \ + f' Interface "{eth_conf["ifname"]}" is a bond member.' \ + f' Option is inherited from bond "{bond_name}"') + dict_set(option_path, bond_option_value, eth_conf) + continue + # if ethernet has option and bond interface does not have + # then delete it form dict and do not apply it + else: + if is_node_changed(conf, eth_path_base + option_path.split('.')): + Warning( + f'Cannot apply "{option_path.replace(".", " ")}".' \ + f' Interface "{eth_conf["ifname"]}" is a bond member.' 
\ + f' Option is inherited from bond "{bond_name}"') + dict_delete(option_path, eth_conf) + blocked_list.append(option_path) + + # if inherited option is not configured under ethernet section but configured under bond section + for option_path in BondIf.get_inherit_bond_options(): + bond_option_value = dict_search(option_path, bond_config_with_defaults) + if bond_option_value is not None: + if option_path not in eth_dict_paths: + if is_node_changed(conf, eth_path_base + option_path.split('.')): + Warning( + f'Cannot apply "{option_path.replace(".", " ")}" to "{dict_search(option_path, config_with_defaults)}".' \ + f' Interface "{eth_conf["ifname"]}" is a bond member. ' \ + f'Option is inherited from bond "{bond_name}"') + dict_set(option_path, bond_option_value, eth_conf) + eth_conf['bond_blocked_changes'] = blocked_list + return None + def get_config(config=None): """ Retrive CLI config as dictionary. Dictionary can never be empty, as at least the @@ -68,6 +157,8 @@ def get_config(config=None): base = ['interfaces', 'ethernet'] ifname, ethernet = get_interface_dict(conf, base) + if 'is_bond_member' in ethernet: + update_bond_options(conf, ethernet) if 'deleted' not in ethernet: if pki: ethernet['pki'] = pki @@ -80,26 +171,20 @@ def get_config(config=None): return ethernet -def verify(ethernet): - if 'deleted' in ethernet: - return None - ifname = ethernet['ifname'] - verify_interface_exists(ifname) - verify_mtu(ethernet) - verify_mtu_ipv6(ethernet) - verify_dhcpv6(ethernet) - verify_address(ethernet) - verify_vrf(ethernet) - verify_bond_bridge_member(ethernet) - verify_eapol(ethernet) - verify_mirror_redirect(ethernet) - ethtool = Ethtool(ifname) - # No need to check speed and duplex keys as both have default values. +def verify_speed_duplex(ethernet: dict, ethtool: Ethtool): + """ + Verify speed and duplex + :param ethernet: dictionary which is received from get_interface_dict + :type ethernet: dict + :param ethtool: Ethernet object + :type ethtool: Ethtool + """ if ((ethernet['speed'] == 'auto' and ethernet['duplex'] != 'auto') or - (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')): - raise ConfigError('Speed/Duplex missmatch. Must be both auto or manually configured') + (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')): + raise ConfigError( + 'Speed/Duplex missmatch. 
Must be both auto or manually configured') if ethernet['speed'] != 'auto' and ethernet['duplex'] != 'auto': # We need to verify if the requested speed and duplex setting is @@ -107,37 +192,66 @@ def verify(ethernet): speed = ethernet['speed'] duplex = ethernet['duplex'] if not ethtool.check_speed_duplex(speed, duplex): - raise ConfigError(f'Adapter does not support changing speed and duplex '\ - f'settings to: {speed}/{duplex}!') + raise ConfigError( + f'Adapter does not support changing speed ' \ + f'and duplex settings to: {speed}/{duplex}!') + +def verify_flow_control(ethernet: dict, ethtool: Ethtool): + """ + Verify flow control + :param ethernet: dictionary which is received from get_interface_dict + :type ethernet: dict + :param ethtool: Ethernet object + :type ethtool: Ethtool + """ if 'disable_flow_control' in ethernet: if not ethtool.check_flow_control(): - raise ConfigError('Adapter does not support changing flow-control settings!') + raise ConfigError( + 'Adapter does not support changing flow-control settings!') + +def verify_ring_buffer(ethernet: dict, ethtool: Ethtool): + """ + Verify ring buffer + :param ethernet: dictionary which is received from get_interface_dict + :type ethernet: dict + :param ethtool: Ethernet object + :type ethtool: Ethtool + """ if 'ring_buffer' in ethernet: max_rx = ethtool.get_ring_buffer_max('rx') if not max_rx: - raise ConfigError('Driver does not support RX ring-buffer configuration!') + raise ConfigError( + 'Driver does not support RX ring-buffer configuration!') max_tx = ethtool.get_ring_buffer_max('tx') if not max_tx: - raise ConfigError('Driver does not support TX ring-buffer configuration!') + raise ConfigError( + 'Driver does not support TX ring-buffer configuration!') rx = dict_search('ring_buffer.rx', ethernet) if rx and int(rx) > int(max_rx): - raise ConfigError(f'Driver only supports a maximum RX ring-buffer '\ + raise ConfigError(f'Driver only supports a maximum RX ring-buffer ' \ f'size of "{max_rx}" bytes!') tx = dict_search('ring_buffer.tx', ethernet) if tx and int(tx) > int(max_tx): - raise ConfigError(f'Driver only supports a maximum TX ring-buffer '\ + raise ConfigError(f'Driver only supports a maximum TX ring-buffer ' \ f'size of "{max_tx}" bytes!') - # verify offloading capabilities + +def verify_offload(ethernet: dict, ethtool: Ethtool): + """ + Verify offloading capabilities + :param ethernet: dictionary which is received from get_interface_dict + :type ethernet: dict + :param ethtool: Ethernet object + :type ethtool: Ethtool + """ if dict_search('offload.rps', ethernet) != None: - if not os.path.exists(f'/sys/class/net/{ifname}/queues/rx-0/rps_cpus'): + if not os.path.exists(f'/sys/class/net/{ethernet["ifname"]}/queues/rx-0/rps_cpus'): raise ConfigError('Interface does not suport RPS!') - driver = ethtool.get_driver_name() # T3342 - Xen driver requires special treatment if driver == 'vif': @@ -145,14 +259,73 @@ def verify(ethernet): raise ConfigError('Xen netback drivers requires scatter-gatter offloading '\ 'for MTU size larger then 1500 bytes') - if {'is_bond_member', 'mac'} <= set(ethernet): - Warning(f'changing mac address "{mac}" will be ignored as "{ifname}" ' \ - f'is a member of bond "{is_bond_member}"'.format(**ethernet)) +def verify_allowedbond_changes(ethernet: dict): + """ + Verify changed options if interface is in bonding + :param ethernet: dictionary which is received from get_interface_dict + :type ethernet: dict + """ + if 'bond_blocked_changes' in ethernet: + for option in ethernet['bond_blocked_changes']: 
+ raise ConfigError(f'Cannot configure "{option.replace(".", " ")}"' \ + f' on interface "{ethernet["ifname"]}".' \ + f' Interface is a bond member') + + +def verify(ethernet): + if 'deleted' in ethernet: + return None + if 'is_bond_member' in ethernet: + verify_bond_member(ethernet) + else: + verify_ethernet(ethernet) + + +def verify_bond_member(ethernet): + """ + Verification function for ethernet interface which is in bonding + :param ethernet: dictionary which is received from get_interface_dict + :type ethernet: dict + """ + ifname = ethernet['ifname'] + verify_interface_exists(ifname) + verify_eapol(ethernet) + verify_mirror_redirect(ethernet) + ethtool = Ethtool(ifname) + verify_speed_duplex(ethernet, ethtool) + verify_flow_control(ethernet, ethtool) + verify_ring_buffer(ethernet, ethtool) + verify_offload(ethernet, ethtool) + verify_allowedbond_changes(ethernet) + +def verify_ethernet(ethernet): + """ + Verification function for simple ethernet interface + :param ethernet: dictionary which is received from get_interface_dict + :type ethernet: dict + """ + ifname = ethernet['ifname'] + verify_interface_exists(ifname) + verify_mtu(ethernet) + verify_mtu_ipv6(ethernet) + verify_dhcpv6(ethernet) + verify_address(ethernet) + verify_vrf(ethernet) + verify_bond_bridge_member(ethernet) + verify_eapol(ethernet) + verify_mirror_redirect(ethernet) + ethtool = Ethtool(ifname) + # No need to check speed and duplex keys as both have default values. + verify_speed_duplex(ethernet, ethtool) + verify_flow_control(ethernet, ethtool) + verify_ring_buffer(ethernet, ethtool) + verify_offload(ethernet, ethtool) # use common function to verify VLAN configuration verify_vlan_config(ethernet) return None + def generate(ethernet): # render real configuration file once wpa_supplicant_conf = wpa_suppl_conf.format(**ethernet) @@ -192,7 +365,8 @@ def generate(ethernet): pki_ca_cert = ethernet['pki']['ca'][ca_cert_name] loaded_ca_cert = load_certificate(pki_ca_cert['certificate']) ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs) - ca_chains.append('\n'.join(encode_certificate(c) for c in ca_full_chain)) + ca_chains.append( + '\n'.join(encode_certificate(c) for c in ca_full_chain)) write_file(ca_cert_file_path, '\n'.join(ca_chains)) @@ -219,6 +393,7 @@ if __name__ == '__main__': c = get_config() verify(c) generate(c) + apply(c) except ConfigError as e: print(e) diff --git a/src/conf_mode/interfaces-pppoe.py b/src/conf_mode/interfaces-pppoe.py index fca91253c..0a03a172c 100755 --- a/src/conf_mode/interfaces-pppoe.py +++ b/src/conf_mode/interfaces-pppoe.py @@ -77,6 +77,11 @@ def verify(pppoe): if {'connect_on_demand', 'vrf'} <= set(pppoe): raise ConfigError('On-demand dialing and VRF can not be used at the same time') + # both MTU and MRU have default values, thus we do not need to check + # if the key exists + if int(pppoe['mru']) > int(pppoe['mtu']): + raise ConfigError('PPPoE MRU needs to be lower then MTU!') + return None def generate(pppoe): diff --git a/src/conf_mode/policy-local-route.py b/src/conf_mode/policy-local-route.py index 2e8aabb80..91e4fce2c 100755 --- a/src/conf_mode/policy-local-route.py +++ b/src/conf_mode/policy-local-route.py @@ -52,19 +52,28 @@ def get_config(config=None): if tmp: for rule in (tmp or []): src = leaf_node_changed(conf, base_rule + [rule, 'source', 'address']) + src_port = leaf_node_changed(conf, base_rule + [rule, 'source', 'port']) fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark']) iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface']) dst 
= leaf_node_changed(conf, base_rule + [rule, 'destination', 'address']) + dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port']) + table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table']) proto = leaf_node_changed(conf, base_rule + [rule, 'protocol']) rule_def = {} if src: rule_def = dict_merge({'source': {'address': src}}, rule_def) + if src_port: + rule_def = dict_merge({'source': {'port': src_port}}, rule_def) if fwmk: rule_def = dict_merge({'fwmark' : fwmk}, rule_def) if iif: rule_def = dict_merge({'inbound_interface' : iif}, rule_def) if dst: rule_def = dict_merge({'destination': {'address': dst}}, rule_def) + if dst_port: + rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def) + if table: + rule_def = dict_merge({'table' : table}, rule_def) if proto: rule_def = dict_merge({'protocol' : proto}, rule_def) dict = dict_merge({dict_id : {rule : rule_def}}, dict) @@ -79,9 +88,12 @@ def get_config(config=None): if 'rule' in pbr[route]: for rule, rule_config in pbr[route]['rule'].items(): src = leaf_node_changed(conf, base_rule + [rule, 'source', 'address']) + src_port = leaf_node_changed(conf, base_rule + [rule, 'source', 'port']) fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark']) iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface']) dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address']) + dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port']) + table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table']) proto = leaf_node_changed(conf, base_rule + [rule, 'protocol']) # keep track of changes in configuration # otherwise we might remove an existing node although nothing else has changed @@ -105,14 +117,32 @@ def get_config(config=None): if len(src) > 0: rule_def = dict_merge({'source': {'address': src}}, rule_def) + # source port + if src_port is None: + if 'source' in rule_config: + if 'port' in rule_config['source']: + tmp = rule_config['source']['port'] + if isinstance(tmp, str): + tmp = [tmp] + rule_def = dict_merge({'source': {'port': tmp}}, rule_def) + else: + changed = True + if len(src_port) > 0: + rule_def = dict_merge({'source': {'port': src_port}}, rule_def) + + # fwmark if fwmk is None: if 'fwmark' in rule_config: - rule_def = dict_merge({'fwmark': rule_config['fwmark']}, rule_def) + tmp = rule_config['fwmark'] + if isinstance(tmp, str): + tmp = [tmp] + rule_def = dict_merge({'fwmark': tmp}, rule_def) else: changed = True if len(fwmk) > 0: rule_def = dict_merge({'fwmark' : fwmk}, rule_def) + # inbound-interface if iif is None: if 'inbound_interface' in rule_config: rule_def = dict_merge({'inbound_interface': rule_config['inbound_interface']}, rule_def) @@ -121,6 +151,7 @@ def get_config(config=None): if len(iif) > 0: rule_def = dict_merge({'inbound_interface' : iif}, rule_def) + # destination address if dst is None: if 'destination' in rule_config: if 'address' in rule_config['destination']: @@ -130,9 +161,35 @@ def get_config(config=None): if len(dst) > 0: rule_def = dict_merge({'destination': {'address': dst}}, rule_def) + # destination port + if dst_port is None: + if 'destination' in rule_config: + if 'port' in rule_config['destination']: + tmp = rule_config['destination']['port'] + if isinstance(tmp, str): + tmp = [tmp] + rule_def = dict_merge({'destination': {'port': tmp}}, rule_def) + else: + changed = True + if len(dst_port) > 0: + rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def) + + # table + if table is None: + if 'set' in 
rule_config and 'table' in rule_config['set']: + rule_def = dict_merge({'table': [rule_config['set']['table']]}, rule_def) + else: + changed = True + if len(table) > 0: + rule_def = dict_merge({'table' : table}, rule_def) + + # protocol if proto is None: if 'protocol' in rule_config: - rule_def = dict_merge({'protocol': rule_config['protocol']}, rule_def) + tmp = rule_config['protocol'] + if isinstance(tmp, str): + tmp = [tmp] + rule_def = dict_merge({'protocol': tmp}, rule_def) else: changed = True if len(proto) > 0: @@ -192,19 +249,27 @@ def apply(pbr): for rule, rule_config in pbr[rule_rm].items(): source = rule_config.get('source', {}).get('address', ['']) + source_port = rule_config.get('source', {}).get('port', ['']) destination = rule_config.get('destination', {}).get('address', ['']) + destination_port = rule_config.get('destination', {}).get('port', ['']) fwmark = rule_config.get('fwmark', ['']) inbound_interface = rule_config.get('inbound_interface', ['']) protocol = rule_config.get('protocol', ['']) + table = rule_config.get('table', ['']) - for src, dst, fwmk, iif, proto in product(source, destination, fwmark, inbound_interface, protocol): + for src, dst, src_port, dst_port, fwmk, iif, proto, table in product( + source, destination, source_port, destination_port, + fwmark, inbound_interface, protocol, table): f_src = '' if src == '' else f' from {src} ' + f_src_port = '' if src_port == '' else f' sport {src_port} ' f_dst = '' if dst == '' else f' to {dst} ' + f_dst_port = '' if dst_port == '' else f' dport {dst_port} ' f_fwmk = '' if fwmk == '' else f' fwmark {fwmk} ' f_iif = '' if iif == '' else f' iif {iif} ' f_proto = '' if proto == '' else f' ipproto {proto} ' + f_table = '' if table == '' else f' lookup {table} ' - call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_fwmk}{f_iif}') + call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif}{f_table}') # Generate new config for route in ['local_route', 'local_route6']: @@ -218,7 +283,9 @@ def apply(pbr): for rule, rule_config in pbr_route['rule'].items(): table = rule_config['set'].get('table', '') source = rule_config.get('source', {}).get('address', ['all']) + source_port = rule_config.get('source', {}).get('port', '') destination = rule_config.get('destination', {}).get('address', ['all']) + destination_port = rule_config.get('destination', {}).get('port', '') fwmark = rule_config.get('fwmark', '') inbound_interface = rule_config.get('inbound_interface', '') protocol = rule_config.get('protocol', '') @@ -227,11 +294,13 @@ def apply(pbr): f_src = f' from {src} ' if src else '' for dst in destination: f_dst = f' to {dst} ' if dst else '' + f_src_port = f' sport {source_port} ' if source_port else '' + f_dst_port = f' dport {destination_port} ' if destination_port else '' f_fwmk = f' fwmark {fwmark} ' if fwmark else '' f_iif = f' iif {inbound_interface} ' if inbound_interface else '' f_proto = f' ipproto {protocol} ' if protocol else '' - call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_fwmk}{f_iif} lookup {table}') + call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif} lookup {table}') return None diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py index e00c58ee4..ce67ccff7 100755 --- a/src/conf_mode/protocols_isis.py +++ b/src/conf_mode/protocols_isis.py @@ -48,7 +48,8 @@ def get_config(config=None): # eqivalent of the C foo ? 
'a' : 'b' statement base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path isis = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True) + get_first_key=True, + no_tag_node_value_mangle=True) # Assign the name of our VRF context. This MUST be done before the return # statement below, else on deletion we will delete the default instance @@ -219,6 +220,38 @@ def verify(isis): if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') + + # Check for LFA tiebreaker index duplication + if dict_search('fast_reroute.lfa.local.tiebreaker', isis): + comparison_dictionary = {} + for item, item_options in isis['fast_reroute']['lfa']['local']['tiebreaker'].items(): + for index, index_options in item_options.items(): + for index_value, index_value_options in index_options.items(): + if index_value not in comparison_dictionary.keys(): + comparison_dictionary[index_value] = [item] + else: + comparison_dictionary[index_value].append(item) + for index, index_length in comparison_dictionary.items(): + if int(len(index_length)) > 1: + raise ConfigError(f'LFA index {index} cannot have more than one tiebreaker configured.') + + # Check for LFA priority-limit configured multiple times per level + if dict_search('fast_reroute.lfa.local.priority_limit', isis): + comparison_dictionary = {} + for priority, priority_options in isis['fast_reroute']['lfa']['local']['priority_limit'].items(): + for level, level_options in priority_options.items(): + if level not in comparison_dictionary.keys(): + comparison_dictionary[level] = [priority] + else: + comparison_dictionary[level].append(priority) + for level, level_length in comparison_dictionary.items(): + if int(len(level_length)) > 1: + raise ConfigError(f'LFA priority-limit on {level.replace("_", "-")} cannot have more than one priority configured.') + + # Check for LFA remote prefix list configured with more than one list + if dict_search('fast_reroute.lfa.remote.prefix_list', isis): + if int(len(isis['fast_reroute']['lfa']['remote']['prefix_list'].items())) > 1: + raise ConfigError(f'LFA remote prefix-list has more than one configured. Cannot have more than one configured.') return None @@ -265,4 +298,4 @@ if __name__ == '__main__': apply(c) except ConfigError as e: print(e) - exit(1) + exit(1)
\ No newline at end of file diff --git a/src/etc/sysctl.d/30-vyos-router.conf b/src/etc/sysctl.d/30-vyos-router.conf index fcdc1b21d..1c9b8999f 100644 --- a/src/etc/sysctl.d/30-vyos-router.conf +++ b/src/etc/sysctl.d/30-vyos-router.conf @@ -21,7 +21,6 @@ net.ipv4.conf.all.arp_filter=0 # https://vyos.dev/T300 net.ipv4.conf.all.arp_ignore=0 - net.ipv4.conf.all.arp_announce=2 # Enable packet forwarding for IPv4 @@ -103,6 +102,6 @@ net.ipv4.igmp_max_memberships = 512 net.core.rps_sock_flow_entries = 32768 # Congestion control -net.core.default_qdisc=fq +net.core.default_qdisc=fq_codel net.ipv4.tcp_congestion_control=bbr diff --git a/src/helpers/config_dependency.py b/src/helpers/config_dependency.py index 50c72956e..817bcc65a 100755 --- a/src/helpers/config_dependency.py +++ b/src/helpers/config_dependency.py @@ -18,22 +18,75 @@ import os import sys +import json from argparse import ArgumentParser from argparse import ArgumentTypeError - -try: - from vyos.configdep import check_dependency_graph - from vyos.defaults import directories -except ImportError: - # allow running during addon package build - _here = os.path.dirname(__file__) - sys.path.append(os.path.join(_here, '../../python/vyos')) - from configdep import check_dependency_graph - from defaults import directories +from graphlib import TopologicalSorter, CycleError # addon packages will need to specify the dependency directory -dependency_dir = os.path.join(directories['data'], - 'config-mode-dependencies') +data_dir = '/usr/share/vyos/' +dependency_dir = os.path.join(data_dir, 'config-mode-dependencies') + +def dict_merge(source, destination): + from copy import deepcopy + tmp = deepcopy(destination) + + for key, value in source.items(): + if key not in tmp: + tmp[key] = value + elif isinstance(source[key], dict): + tmp[key] = dict_merge(source[key], tmp[key]) + + return tmp + +def read_dependency_dict(dependency_dir: str = dependency_dir) -> dict: + res = {} + for dep_file in os.listdir(dependency_dir): + if not dep_file.endswith('.json'): + continue + path = os.path.join(dependency_dir, dep_file) + with open(path) as f: + d = json.load(f) + if dep_file == 'vyos-1x.json': + res = dict_merge(res, d) + else: + res = dict_merge(d, res) + + return res + +def graph_from_dependency_dict(d: dict) -> dict: + g = {} + for k in list(d): + g[k] = set() + # add the dependencies for every sub-case; should there be cases + # that are mutally exclusive in the future, the graphs will be + # distinguished + for el in list(d[k]): + g[k] |= set(d[k][el]) + + return g + +def is_acyclic(d: dict) -> bool: + g = graph_from_dependency_dict(d) + ts = TopologicalSorter(g) + try: + # get node iterator + order = ts.static_order() + # try iteration + _ = [*order] + except CycleError: + return False + + return True + +def check_dependency_graph(dependency_dir: str = dependency_dir, + supplement: str = None) -> bool: + d = read_dependency_dict(dependency_dir=dependency_dir) + if supplement is not None: + with open(supplement) as f: + d = dict_merge(json.load(f), d) + + return is_acyclic(d) def path_exists(s): if not os.path.exists(s): @@ -50,8 +103,10 @@ def main(): args = vars(parser.parse_args()) if not check_dependency_graph(**args): + print("dependency error: cycle exists") sys.exit(1) + print("dependency graph acyclic") sys.exit(0) if __name__ == '__main__': diff --git a/src/migration-scripts/interfaces/30-to-31 b/src/migration-scripts/interfaces/30-to-31 new file mode 100755 index 000000000..894106ef4 --- /dev/null +++ 
b/src/migration-scripts/interfaces/30-to-31 @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2021-2023 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +# Deletes Wireguard peers if they have the same public key as the router has. + +import json +from sys import argv +from sys import exit +from vyos.configtree import ConfigTree +from vyos.ifconfig import EthernetIf +from vyos.ifconfig import BondIf +from vyos.utils.dict import dict_to_paths_values + +if len(argv) < 2: + print("Must specify file name!") + exit(1) + +file_name = argv[1] +with open(file_name, 'r') as f: + config_file = f.read() + base = ['interfaces', 'bonding'] + +config = ConfigTree(config_file) +if not config.exists(base): + # Nothing to do + exit(0) +for bond in config.list_nodes(base): + member_base = base + [bond, 'member', 'interface'] + if config.exists(member_base): + for interface in config.return_values(member_base): + if_base = ['interfaces', 'ethernet', interface] + if config.exists(if_base): + config_ethernet = json.loads(config.get_subtree(if_base).to_json()) + eth_dict_paths = dict_to_paths_values(config_ethernet) + for option_path, option_value in eth_dict_paths.items(): + # If option is allowed for changing then continue + converted_path = option_path.replace('-','_') + if converted_path in EthernetIf.get_bond_member_allowed_options(): + continue + # if option is inherited from bond then continue + if converted_path in BondIf.get_inherit_bond_options(): + continue + option_path_list = option_path.split('.') + config.delete(if_base + option_path_list) + del option_path_list[-1] + # delete empty node from config + while len(option_path_list) > 0: + if config.list_nodes(if_base + option_path_list): + break + config.delete(if_base + option_path_list) + del option_path_list[-1] + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print(f'Failed to save the modified config: {e}') + exit(1) diff --git a/src/migration-scripts/openvpn/0-to-1 b/src/migration-scripts/openvpn/0-to-1 new file mode 100755 index 000000000..24bb38d3c --- /dev/null +++ b/src/migration-scripts/openvpn/0-to-1 @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 + +# Removes outdated ciphers (DES and Blowfish) from OpenVPN configs + +import sys + +from vyos.configtree import ConfigTree + +if len(sys.argv) < 2: + print("Must specify file name!") + sys.exit(1) + +file_name = sys.argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +config = ConfigTree(config_file) + +if not config.exists(['interfaces', 'openvpn']): + # Nothing to do + sys.exit(0) +else: + ovpn_intfs = config.list_nodes(['interfaces', 'openvpn']) + for i in ovpn_intfs: + # Remove DES and Blowfish from 'encryption cipher' + cipher_path = ['interfaces', 'openvpn', i, 'encryption', 'cipher'] + if config.exists(cipher_path): + cipher = config.return_value(cipher_path) + if cipher in ['des', 'bf128', 'bf256']: + config.delete(cipher_path) + + ncp_cipher_path 
= ['interfaces', 'openvpn', i, 'encryption', 'ncp-ciphers'] + if config.exists(ncp_cipher_path): + ncp_ciphers = config.return_values(['interfaces', 'openvpn', i, 'encryption', 'ncp-ciphers']) + if 'des' in ncp_ciphers: + config.delete_value(['interfaces', 'openvpn', i, 'encryption', 'ncp-ciphers'], 'des') + + # Clean up the encryption subtree if the migration procedure left it empty + if config.exists(['interfaces', 'openvpn', i, 'encryption']) and \ + (config.list_nodes(['interfaces', 'openvpn', i, 'encryption']) == []): + config.delete(['interfaces', 'openvpn', i, 'encryption']) + + try: + with open(file_name, 'w') as f: + f.write(config.to_string()) + except OSError as e: + print("Failed to save the modified config: {}".format(e)) + sys.exit(1) diff --git a/src/op_mode/generate_tech-support_archive.py b/src/op_mode/generate_tech-support_archive.py new file mode 100755 index 000000000..23d81f986 --- /dev/null +++ b/src/op_mode/generate_tech-support_archive.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2023 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +import os +import argparse +import glob +from datetime import datetime +from pathlib import Path +from shutil import rmtree + +from socket import gethostname +from sys import exit +from tarfile import open as tar_open +from vyos.utils.process import rc_cmd +from vyos.remote import upload + +def op(cmd: str) -> str: + """Returns a command with the VyOS operational mode wrapper.""" + return f'/opt/vyatta/bin/vyatta-op-cmd-wrapper {cmd}' + +def save_stdout(command: str, file: Path) -> None: + rc, stdout = rc_cmd(command) + body: str = f'''### {command} ### +Command: {command} +Exit code: {rc} +Stdout: +{stdout} + +''' + with file.open(mode='a') as f: + f.write(body) +def __rotate_logs(path: str, log_pattern:str): + files_list = glob.glob(f'{path}/{log_pattern}') + if len(files_list) > 5: + oldest_file = min(files_list, key=os.path.getctime) + os.remove(oldest_file) + + +def __generate_archived_files(location_path: str) -> None: + """ + Generate arhives of main directories + :param location_path: path to temporary directory + :type location_path: str + """ + # Dictionary arhive_name:directory_to_arhive + archive_dict = { + 'etc': '/etc', + 'home': '/home', + 'var-log': '/var/log', + 'root': '/root', + 'tmp': '/tmp', + 'core-dump': '/var/core', + 'config': '/opt/vyatta/etc/config' + } + # Dictionary arhive_name:excluding pattern + archive_excludes = { + # Old location of archives + 'config': 'tech-support-archive', + # New locations of arhives + 'tmp': 'tech-support-archive' + } + for archive_name, path in archive_dict.items(): + archive_file: str = f'{location_path}/{archive_name}.tar.gz' + with tar_open(name=archive_file, mode='x:gz') as tar_file: + if archive_name in archive_excludes: + tar_file.add(path, filter=lambda x: None if str(archive_excludes[archive_name]) in str(x.name) else x) + else: + tar_file.add(path) + + +def 
__generate_main_archive_file(archive_file: str, tmp_dir_path: str) -> None: + """ + Generate main arhive file + :param archive_file: name of arhive file + :type archive_file: str + :param tmp_dir_path: path to arhive memeber + :type tmp_dir_path: str + """ + with tar_open(name=archive_file, mode='x:gz') as tar_file: + tar_file.add(tmp_dir_path, arcname=os.path.basename(tmp_dir_path)) + + +if __name__ == '__main__': + defualt_tmp_dir = '/tmp' + parser = argparse.ArgumentParser() + parser.add_argument("path", nargs='?', default=defualt_tmp_dir) + args = parser.parse_args() + location_path = args.path[:-1] if args.path[-1] == '/' else args.path + + hostname: str = gethostname() + time_now: str = datetime.now().isoformat(timespec='seconds') + + remote = False + tmp_path = '' + tmp_dir_path = '' + if 'ftp://' in args.path or 'scp://' in args.path: + remote = True + tmp_path = defualt_tmp_dir + else: + tmp_path = location_path + archive_pattern = f'_tech-support-archive_' + archive_file_name = f'{hostname}{archive_pattern}{time_now}.tar.gz' + + # Log rotation in tmp directory + if tmp_path == defualt_tmp_dir: + __rotate_logs(tmp_path, f'*{archive_pattern}*') + + # Temporary directory creation + tmp_dir_path = f'{tmp_path}/drops-debug_{time_now}' + tmp_dir: Path = Path(tmp_dir_path) + tmp_dir.mkdir() + + report_file: Path = Path(f'{tmp_dir_path}/show_tech-support_report.txt') + report_file.touch() + try: + + save_stdout(op('show tech-support report'), report_file) + # Generate included archives + __generate_archived_files(tmp_dir_path) + + # Generate main archive + __generate_main_archive_file(f'{tmp_path}/{archive_file_name}', tmp_dir_path) + # Delete temporary directory + rmtree(tmp_dir) + # Upload to remote site if it is scpecified + if remote: + upload(f'{tmp_path}/{archive_file_name}', args.path) + print(f'Debug file is generated and located in {location_path}/{archive_file_name}') + except Exception as err: + print(f'Error during generating a debug file: {err}') + # cleanup + if tmp_dir.exists(): + rmtree(tmp_dir) + finally: + # cleanup + exit()
\ No newline at end of file diff --git a/src/op_mode/interfaces_wireless.py b/src/op_mode/interfaces_wireless.py new file mode 100755 index 000000000..dfe50e2cb --- /dev/null +++ b/src/op_mode/interfaces_wireless.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2023 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import re +import sys +import typing +import vyos.opmode + +from copy import deepcopy +from tabulate import tabulate +from vyos.utils.process import popen +from vyos.configquery import ConfigTreeQuery + +def _verify(func): + """Decorator checks if Wireless LAN config exists""" + from functools import wraps + + @wraps(func) + def _wrapper(*args, **kwargs): + config = ConfigTreeQuery() + if not config.exists(['interfaces', 'wireless']): + raise vyos.opmode.UnconfiguredSubsystem(unconf_message) + return func(*args, **kwargs) + return _wrapper + +def _get_raw_info_data(): + output_data = [] + + config = ConfigTreeQuery() + raw = config.get_config_dict(['interfaces', 'wireless'], effective=True, + get_first_key=True, key_mangling=('-', '_')) + for interface, interface_config in raw.items(): + tmp = {'name' : interface} + + if 'type' in interface_config: + tmp.update({'type' : interface_config['type']}) + else: + tmp.update({'type' : '-'}) + + if 'ssid' in interface_config: + tmp.update({'ssid' : interface_config['ssid']}) + else: + tmp.update({'ssid' : '-'}) + + if 'channel' in interface_config: + tmp.update({'channel' : interface_config['channel']}) + else: + tmp.update({'channel' : '-'}) + + output_data.append(tmp) + + return output_data + +def _get_formatted_info_output(raw_data): + output=[] + for ssid in raw_data: + output.append([ssid['name'], ssid['type'], ssid['ssid'], ssid['channel']]) + + headers = ["Interface", "Type", "SSID", "Channel"] + print(tabulate(output, headers, numalign="left")) + +def _get_raw_scan_data(intf_name): + # XXX: This ignores errors + tmp, _ = popen(f'iw dev {intf_name} scan ap-force') + networks = [] + data = { + 'ssid': '', + 'mac': '', + 'channel': '', + 'signal': '' + } + re_mac = re.compile(r'([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})') + for line in tmp.splitlines(): + if line.startswith('BSS '): + ssid = deepcopy(data) + ssid['mac'] = re.search(re_mac, line).group() + + elif line.lstrip().startswith('SSID: '): + # SSID can be " SSID: WLAN-57 6405", thus strip all leading whitespaces + ssid['ssid'] = line.lstrip().split(':')[-1].lstrip() + + elif line.lstrip().startswith('signal: '): + # Siganl can be " signal: -67.00 dBm", thus strip all leading whitespaces + ssid['signal'] = line.lstrip().split(':')[-1].split()[0] + + elif line.lstrip().startswith('DS Parameter set: channel'): + # Channel can be " DS Parameter set: channel 6" , thus + # strip all leading whitespaces + ssid['channel'] = line.lstrip().split(':')[-1].split()[-1] + networks.append(ssid) + continue + + return networks + +def _format_scan_data(raw_data): + output=[] + for ssid in raw_data: + 
output.append([ssid['mac'], ssid['ssid'], ssid['channel'], ssid['signal']]) + headers = ["Address", "SSID", "Channel", "Signal (dbm)"] + return tabulate(output, headers, numalign="left") + +def _get_raw_station_data(intf_name): + # XXX: This ignores errors + tmp, _ = popen(f'iw dev {intf_name} station dump') + clients = [] + data = { + 'mac': '', + 'signal': '', + 'rx_bytes': '', + 'rx_packets': '', + 'tx_bytes': '', + 'tx_packets': '' + } + re_mac = re.compile(r'([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})') + for line in tmp.splitlines(): + if line.startswith('Station'): + client = deepcopy(data) + client['mac'] = re.search(re_mac, line).group() + + elif line.lstrip().startswith('signal avg:'): + client['signal'] = line.lstrip().split(':')[-1].lstrip().split()[0] + + elif line.lstrip().startswith('rx bytes:'): + client['rx_bytes'] = line.lstrip().split(':')[-1].lstrip() + + elif line.lstrip().startswith('rx packets:'): + client['rx_packets'] = line.lstrip().split(':')[-1].lstrip() + + elif line.lstrip().startswith('tx bytes:'): + client['tx_bytes'] = line.lstrip().split(':')[-1].lstrip() + + elif line.lstrip().startswith('tx packets:'): + client['tx_packets'] = line.lstrip().split(':')[-1].lstrip() + clients.append(client) + continue + + return clients + +def _format_station_data(raw_data): + output=[] + for ssid in raw_data: + output.append([ssid['mac'], ssid['signal'], ssid['rx_bytes'], ssid['rx_packets'], ssid['tx_bytes'], ssid['tx_packets']]) + headers = ["Station", "Signal", "RX bytes", "RX packets", "TX bytes", "TX packets"] + return tabulate(output, headers, numalign="left") + +@_verify +def show_info(raw: bool): + info_data = _get_raw_info_data() + if raw: + return info_data + return _get_formatted_info_output(info_data) + +def show_scan(raw: bool, intf_name: str): + data = _get_raw_scan_data(intf_name) + if raw: + return data + return _format_scan_data(data) + +@_verify +def show_stations(raw: bool, intf_name: str): + data = _get_raw_station_data(intf_name) + if raw: + return data + return _format_station_data(data) + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/op_mode/show-ssh-fingerprints.py b/src/op_mode/show-ssh-fingerprints.py new file mode 100644 index 000000000..913baae46 --- /dev/null +++ b/src/op_mode/show-ssh-fingerprints.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# +# Copyright 2017-2023 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library. If not, see <http://www.gnu.org/licenses/>. 
+ +import sys +import glob +import argparse +from vyos.utils.process import cmd + +# Parse command line +parser = argparse.ArgumentParser() +parser.add_argument("--ascii", help="Show visual ASCII art representation of the public key", action="store_true") +args = parser.parse_args() + +# Get list of server public keys +publickeys = glob.glob("/etc/ssh/*.pub") + +if publickeys: + print("SSH server public key fingerprints:\n", flush=True) + for keyfile in publickeys: + if args.ascii: + try: + print(cmd("ssh-keygen -l -v -E sha256 -f " + keyfile) + "\n", flush=True) + # Ignore invalid public keys + except: + pass + else: + try: + print(cmd("ssh-keygen -l -E sha256 -f " + keyfile) + "\n", flush=True) + # Ignore invalid public keys + except: + pass +else: + print("No SSH server public keys are found.", flush=True) + +sys.exit(0) diff --git a/src/op_mode/show_wireless.py b/src/op_mode/show_wireless.py deleted file mode 100755 index 340163057..000000000 --- a/src/op_mode/show_wireless.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2019-2023 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -import argparse -import re - -from sys import exit -from copy import deepcopy - -from vyos.config import Config -from vyos.utils.process import popen - -parser = argparse.ArgumentParser() -parser.add_argument("-s", "--scan", help="Scan for Wireless APs on given interface, e.g. 'wlan0'") -parser.add_argument("-b", "--brief", action="store_true", help="Show wireless configuration") -parser.add_argument("-c", "--stations", help="Show wireless clients connected on interface, e.g. 
'wlan0'") - -def show_brief(): - config = Config() - if len(config.list_effective_nodes('interfaces wireless')) == 0: - print("No Wireless interfaces configured") - exit(0) - - interfaces = [] - for intf in config.list_effective_nodes('interfaces wireless'): - config.set_level(f'interfaces wireless {intf}') - data = { 'name': intf } - data['type'] = config.return_effective_value('type') or '-' - data['ssid'] = config.return_effective_value('ssid') or '-' - data['channel'] = config.return_effective_value('channel') or '-' - interfaces.append(data) - - return interfaces - -def ssid_scan(intf): - # XXX: This ignores errors - tmp, _ = popen(f'/sbin/iw dev {intf} scan ap-force') - networks = [] - data = { - 'ssid': '', - 'mac': '', - 'channel': '', - 'signal': '' - } - re_mac = re.compile(r'([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})') - for line in tmp.splitlines(): - if line.startswith('BSS '): - ssid = deepcopy(data) - ssid['mac'] = re.search(re_mac, line).group() - - elif line.lstrip().startswith('SSID: '): - # SSID can be " SSID: WLAN-57 6405", thus strip all leading whitespaces - ssid['ssid'] = line.lstrip().split(':')[-1].lstrip() - - elif line.lstrip().startswith('signal: '): - # Siganl can be " signal: -67.00 dBm", thus strip all leading whitespaces - ssid['signal'] = line.lstrip().split(':')[-1].split()[0] - - elif line.lstrip().startswith('DS Parameter set: channel'): - # Channel can be " DS Parameter set: channel 6" , thus - # strip all leading whitespaces - ssid['channel'] = line.lstrip().split(':')[-1].split()[-1] - networks.append(ssid) - continue - - return networks - -def show_clients(intf): - # XXX: This ignores errors - tmp, _ = popen(f'/sbin/iw dev {intf} station dump') - clients = [] - data = { - 'mac': '', - 'signal': '', - 'rx_bytes': '', - 'rx_packets': '', - 'tx_bytes': '', - 'tx_packets': '' - } - re_mac = re.compile(r'([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})') - for line in tmp.splitlines(): - if line.startswith('Station'): - client = deepcopy(data) - client['mac'] = re.search(re_mac, line).group() - - elif line.lstrip().startswith('signal avg:'): - client['signal'] = line.lstrip().split(':')[-1].lstrip().split()[0] - - elif line.lstrip().startswith('rx bytes:'): - client['rx_bytes'] = line.lstrip().split(':')[-1].lstrip() - - elif line.lstrip().startswith('rx packets:'): - client['rx_packets'] = line.lstrip().split(':')[-1].lstrip() - - elif line.lstrip().startswith('tx bytes:'): - client['tx_bytes'] = line.lstrip().split(':')[-1].lstrip() - - elif line.lstrip().startswith('tx packets:'): - client['tx_packets'] = line.lstrip().split(':')[-1].lstrip() - clients.append(client) - continue - - return clients - -if __name__ == '__main__': - args = parser.parse_args() - - if args.scan: - print("Address SSID Channel Signal (dbm)") - for network in ssid_scan(args.scan): - print("{:<17} {:<32} {:>3} {}".format(network['mac'], - network['ssid'], - network['channel'], - network['signal'])) - exit(0) - - elif args.brief: - print("Interface Type SSID Channel") - for intf in show_brief(): - print("{:<9} {:<12} {:<32} {:>3}".format(intf['name'], - intf['type'], - intf['ssid'], - intf['channel'])) - exit(0) - - elif args.stations: - print("Station Signal RX: bytes packets TX: bytes packets") - for client in show_clients(args.stations): - print("{:<17} {:>3} {:>15} {:>9} {:>15} {:>10} ".format(client['mac'], - client['signal'], client['rx_bytes'], client['rx_packets'], client['tx_bytes'], client['tx_packets'])) - - exit(0) - - else: - parser.print_help() - exit(1) diff --git 
diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server
index 66e80ced5..3a9efb73e 100755
--- a/src/services/vyos-http-api-server
+++ b/src/services/vyos-http-api-server
@@ -22,12 +22,14 @@ import grp
 import copy
 import json
 import logging
+import signal
 import traceback
 import threading
+from time import sleep
 from typing import List, Union, Callable, Dict

-import uvicorn
 from fastapi import FastAPI, Depends, Request, Response, HTTPException
+from fastapi import BackgroundTasks
 from fastapi.responses import HTMLResponse
 from fastapi.exceptions import RequestValidationError
 from fastapi.routing import APIRoute
@@ -36,10 +38,14 @@ from starlette.middleware.cors import CORSMiddleware
 from starlette.datastructures import FormData
 from starlette.formparsers import FormParser, MultiPartParser
 from multipart.multipart import parse_options_header
+from uvicorn import Config as UvicornConfig
+from uvicorn import Server as UvicornServer

 from ariadne.asgi import GraphQL

-import vyos.config
+from vyos.config import Config
+from vyos.configtree import ConfigTree
+from vyos.configdiff import get_config_diff

 from vyos.configsession import ConfigSession, ConfigSessionError
 import api.graphql.state
@@ -410,12 +416,24 @@ app.router.route_class = MultipartRoute
 async def validation_exception_handler(request, exc):
     return error(400, str(exc.errors()[0]))

+self_ref_msg = "Requested HTTP API server configuration change; commit will be called in the background"
+
+def call_commit(s: ConfigSession):
+    try:
+        s.commit()
+    except ConfigSessionError as e:
+        s.discard()
+        if app.state.vyos_debug:
+            logger.warning(f"ConfigSessionError:\n {traceback.format_exc()}")
+        else:
+            logger.warning(f"ConfigSessionError: {e}")
+
 def _configure_op(data: Union[ConfigureModel, ConfigureListModel,
                               ConfigSectionModel, ConfigSectionListModel],
-                  request: Request):
+                  request: Request, background_tasks: BackgroundTasks):
     session = app.state.vyos_session
     env = session.get_session_env()
-    config = vyos.config.Config(session_env=env)
+    config = Config(session_env=env)

     endpoint = request.url.path

@@ -470,7 +488,15 @@ def _configure_op(data: Union[ConfigureModel, ConfigureListModel,
             else:
                 raise ConfigSessionError(f"'{op}' is not a valid operation")
         # end for
-        session.commit()
+        config = Config(session_env=env)
+        d = get_config_diff(config)
+
+        if d.is_node_changed(['service', 'https']):
+            background_tasks.add_task(call_commit, session)
+            msg = self_ref_msg
+        else:
+            session.commit()
+
         logger.info(f"Configuration modified via HTTP API using key '{app.state.vyos_id}'")
     except ConfigSessionError as e:
         session.discard()
@@ -495,21 +521,21 @@ def _configure_op(data: Union[ConfigureModel, ConfigureListModel,

 @app.post('/configure')
 def configure_op(data: Union[ConfigureModel,
-                             ConfigureListModel],
-                 request: Request):
-    return _configure_op(data, request)
+                             ConfigureListModel],
+                 request: Request, background_tasks: BackgroundTasks):
+    return _configure_op(data, request, background_tasks)

 @app.post('/configure-section')
 def configure_section_op(data: Union[ConfigSectionModel,
-                                     ConfigSectionListModel],
-                         request: Request):
-    return _configure_op(data, request)
+                                     ConfigSectionListModel],
+                         request: Request, background_tasks: BackgroundTasks):
+    return _configure_op(data, request, background_tasks)

 @app.post("/retrieve")
 async def retrieve_op(data: RetrieveModel):
     session = app.state.vyos_session
     env = session.get_session_env()
-    config = vyos.config.Config(session_env=env)
+    config = Config(session_env=env)

     op = data.op
     path = " ".join(data.path)
@@ -528,10 +554,10 @@ async def retrieve_op(data: RetrieveModel):
             res = session.show_config(path=data.path)
             if config_format == 'json':
-                config_tree = vyos.configtree.ConfigTree(res)
+                config_tree = ConfigTree(res)
                 res = json.loads(config_tree.to_json())
             elif config_format == 'json_ast':
-                config_tree = vyos.configtree.ConfigTree(res)
+                config_tree = ConfigTree(res)
                 res = json.loads(config_tree.to_json_ast())
             elif config_format == 'raw':
                 pass
@@ -548,10 +574,11 @@ async def retrieve_op(data: RetrieveModel):
     return success(res)

 @app.post('/config-file')
-def config_file_op(data: ConfigFileModel):
+def config_file_op(data: ConfigFileModel, background_tasks: BackgroundTasks):
     session = app.state.vyos_session
-
+    env = session.get_session_env()
     op = data.op
+    msg = None

     try:
         if op == 'save':
@@ -559,14 +586,23 @@
             if data.file:
                 path = data.file
             else:
                 path = '/config/config.boot'
-            res = session.save_config(path)
+            msg = session.save_config(path)
         elif op == 'load':
             if data.file:
                 path = data.file
             else:
                 return error(400, "Missing required field \"file\"")
-            res = session.migrate_and_load_config(path)
-            res = session.commit()
+
+            session.migrate_and_load_config(path)
+
+            config = Config(session_env=env)
+            d = get_config_diff(config)
+
+            if d.is_node_changed(['service', 'https']):
+                background_tasks.add_task(call_commit, session)
+                msg = self_ref_msg
+            else:
+                session.commit()
         else:
             return error(400, f"'{op}' is not a valid operation")
     except ConfigSessionError as e:
@@ -575,7 +611,7 @@
         logger.critical(traceback.format_exc())
         return error(500, "An internal error occured. Check the logs for details.")

-    return success(res)
+    return success(msg)

 @app.post('/image')
 def image_op(data: ImageModel):
@@ -607,7 +643,7 @@ def image_op(data: ImageModel):
     return success(res)

 @app.post('/container-image')
-def image_op(data: ContainerImageModel):
+def container_image_op(data: ContainerImageModel):
     session = app.state.vyos_session

     op = data.op
@@ -702,7 +738,7 @@ def reset_op(data: ResetModel):
 # GraphQL integration
 ###

-def graphql_init(fast_api_app):
+def graphql_init(app: FastAPI = app):
     from api.graphql.libs.token_auth import get_user_context

     api.graphql.state.init()
     api.graphql.state.settings['app'] = app
@@ -728,26 +764,45 @@ def graphql_init(fast_api_app):
                               debug=True,
                               introspection=in_spec))
 ###
+# Modify uvicorn to allow reloading server within the configsession
+###

-if __name__ == '__main__':
-    # systemd's user and group options don't work, do it by hand here,
-    # else no one else will be able to commit
-    cfg_group = grp.getgrnam(CFG_GROUP)
-    os.setgid(cfg_group.gr_gid)
+server = None
+shutdown = False

-    # Need to set file permissions to 775 too so that every vyattacfg group member
-    # has write access to the running config
-    os.umask(0o002)
+class ApiServerConfig(UvicornConfig):
+    pass
+
+class ApiServer(UvicornServer):
+    def install_signal_handlers(self):
+        pass
+
+def reload_handler(signum, frame):
+    global server
+    logger.debug('Reload signal received...')
+    if server is not None:
+        server.handle_exit(signum, frame)
+        server = None
+        logger.info('Server stopping for reload...')
+    else:
+        logger.warning('Reload called for non-running server...')
+def shutdown_handler(signum, frame):
+    global shutdown
+    logger.debug('Shutdown signal received...')
+    server.handle_exit(signum, frame)
+    logger.info('Server shutdown...')
+    shutdown = True
+
+def initialization(session: ConfigSession, app: FastAPI = app):
+    global server
     try:
         server_config = load_server_config()
-    except Exception as err:
-        logger.critical(f"Failed to load the HTTP API server config: {err}")
+    except Exception as e:
+        logger.critical(f'Failed to load the HTTP API server config: {e}')
         sys.exit(1)

-    config_session = ConfigSession(os.getpid())
-
-    app.state.vyos_session = config_session
+    app.state.vyos_session = session
     app.state.vyos_keys = server_config['api_keys']

     app.state.vyos_debug = server_config['debug']
@@ -770,14 +825,44 @@ if __name__ == '__main__':
     if app.state.vyos_graphql:
         graphql_init(app)

+    if not server_config['socket']:
+        config = ApiServerConfig(app,
+                                 host=server_config["listen_address"],
+                                 port=int(server_config["port"]),
+                                 proxy_headers=True)
+    else:
+        config = ApiServerConfig(app,
+                                 uds="/run/api.sock",
+                                 proxy_headers=True)
+    server = ApiServer(config)
+
+def run_server():
     try:
-        if not server_config['socket']:
-            uvicorn.run(app, host=server_config["listen_address"],
-                        port=int(server_config["port"]),
-                        proxy_headers=True)
-        else:
-            uvicorn.run(app, uds="/run/api.sock",
-                        proxy_headers=True)
-    except OSError as err:
-        logger.critical(f"OSError {err}")
+        server.run()
+    except OSError as e:
+        logger.critical(e)
         sys.exit(1)
+
+if __name__ == '__main__':
+    # systemd's user and group options don't work, do it by hand here,
+    # else no one else will be able to commit
+    cfg_group = grp.getgrnam(CFG_GROUP)
+    os.setgid(cfg_group.gr_gid)
+
+    # Need to set file permissions to 775 too so that every vyattacfg group member
+    # has write access to the running config
+    os.umask(0o002)
+
+    signal.signal(signal.SIGHUP, reload_handler)
+    signal.signal(signal.SIGTERM, shutdown_handler)
+
+    config_session = ConfigSession(os.getpid())
+
+    while True:
+        logger.debug('Enter main loop...')
+        if shutdown:
+            break
+        if server is None:
+            initialization(config_session)
+        server.run()
+        sleep(1)
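The reworked service wraps uvicorn so the API can restart itself after a change to its own configuration: uvicorn's built-in signal handling is disabled, SIGHUP/SIGTERM are handled by the wrapper, and a self-referential commit (one touching `service https`) is handed to a FastAPI background task so the HTTP response is returned before the server goes down. A minimal sketch of the reload loop, with an assumed host/port and without the VyOS session handling, might look like this:

# Minimal sketch of the reload pattern used above; the host, port and bare
# FastAPI app are assumptions, the real service builds these from its config.
import signal
from time import sleep

from fastapi import FastAPI
from uvicorn import Config as UvicornConfig
from uvicorn import Server as UvicornServer

app = FastAPI()

server = None
shutdown = False

class ApiServer(UvicornServer):
    def install_signal_handlers(self):
        # leave SIGHUP/SIGTERM to the wrapping process instead of uvicorn
        pass

def reload_handler(signum, frame):
    global server
    if server is not None:
        server.handle_exit(signum, frame)  # makes server.run() return
        server = None                      # main loop builds a fresh instance

def shutdown_handler(signum, frame):
    global shutdown
    shutdown = True
    if server is not None:
        server.handle_exit(signum, frame)

if __name__ == '__main__':
    signal.signal(signal.SIGHUP, reload_handler)
    signal.signal(signal.SIGTERM, shutdown_handler)

    while not shutdown:
        if server is None:
            server = ApiServer(UvicornConfig(app, host='127.0.0.1', port=8080))
        server.run()   # blocks until handle_exit() fires
        sleep(1)

The deferred commit matters for the same reason: committing a `service https` change reloads or restarts this very process, so returning the response first and letting the BackgroundTasks job call commit() avoids cutting off the client mid-request.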
diff --git a/src/system/uacctd_stop.py b/src/system/uacctd_stop.py
new file mode 100755
index 000000000..a1b57335b
--- /dev/null
+++ b/src/system/uacctd_stop.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Control pmacct daemons in a tricky way.
+# Pmacct has signal processing in a main loop, together with packet
+# processing. Because of this, while it is waiting for packets, it cannot
+# handle the control signal. We need to start the systemctl command and then
+# send some packets to pmacct to wake it up.
+
+from argparse import ArgumentParser
+from socket import socket, AF_INET, SOCK_DGRAM
+from sys import exit
+from time import sleep
+
+from psutil import Process
+
+
+def stop_process(pid: int, timeout: int) -> None:
+    """Send a signal to uacctd
+    and then send packets to the special address predefined in the firewall
+    to unlock the main loop in uacctd and finish the process properly
+
+    Args:
+        pid (int): uacctd PID
+        timeout (int): seconds to wait for a process end
+    """
+    # find a process
+    uacctd = Process(pid)
+    uacctd.terminate()
+
+    # create a socket
+    trigger = socket(AF_INET, SOCK_DGRAM)
+
+    first_cycle: bool = True
+    while uacctd.is_running() and timeout:
+        print('sending a packet to uacctd...')
+        trigger.sendto(b'WAKEUP', ('127.0.254.0', 1))
+        # do not sleep during first attempt
+        if not first_cycle:
+            sleep(1)
+            timeout -= 1
+        first_cycle = False
+
+
+if __name__ == '__main__':
+    parser = ArgumentParser()
+    parser.add_argument('process_id',
+                        type=int,
+                        help='PID of the uacctd core process')
+    parser.add_argument('timeout',
+                        type=int,
+                        help='seconds to wait for the process to end')
+    args = parser.parse_args()
+    stop_process(args.process_id, args.timeout)
+    exit()
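The helper takes the PID of the uacctd core process and a timeout in seconds; the nudge packets to 127.0.254.0 give pmacct's packet loop something to process so it notices the pending SIGTERM. How the script is wired into the service is outside this hunk, but a hypothetical direct use of stop_process() could look like the sketch below (the module import and the 10-second timeout are assumptions, not part of this diff).

# Hypothetical usage sketch; uacctd_stop is assumed to be importable as a module
# and exactly one uacctd core process is assumed to be running.
from psutil import process_iter

from uacctd_stop import stop_process

uacctd_pid = next(p.pid for p in process_iter(['name'])
                  if p.info['name'] == 'uacctd')
stop_process(uacctd_pid, 10)   # wait up to ~10 seconds for uacctd to exit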