Diffstat (limited to 'src'): 41 files changed, 1560 insertions, 1320 deletions
diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py index 8efeaed54..7567444db 100755 --- a/src/conf_mode/container.py +++ b/src/conf_mode/container.py @@ -73,9 +73,19 @@ def get_config(config=None): # Merge per-container default values if 'name' in container: default_values = defaults(base + ['name']) + if 'port' in default_values: + del default_values['port'] for name in container['name']: container['name'][name] = dict_merge(default_values, container['name'][name]) + # XXX: T2665: we can not safely rely on the defaults() when there are + # tagNodes in place, it is better to blend in the defaults manually. + if 'port' in container['name'][name]: + for port in container['name'][name]['port']: + default_values = defaults(base + ['name', 'port']) + container['name'][name]['port'][port] = dict_merge( + default_values, container['name'][name]['port'][port]) + # Delete container network, delete containers tmp = node_changed(conf, base + ['network']) if tmp: container.update({'network_remove' : tmp}) @@ -168,6 +178,11 @@ def verify(container): if not os.path.exists(source): raise ConfigError(f'Volume "{volume}" source path "{source}" does not exist!') + if 'port' in container_config: + for tmp in container_config['port']: + if not {'source', 'destination'} <= set(container_config['port'][tmp]): + raise ConfigError(f'Both "source" and "destination" must be specified for a port mapping!') + # If 'allow-host-networks' or 'network' not set. if 'allow_host_networks' not in container_config and 'network' not in container_config: raise ConfigError(f'Must either set "network" or "allow-host-networks" for container "{name}"!') @@ -237,14 +252,10 @@ def generate_run_arguments(name, container_config): if 'port' in container_config: protocol = '' for portmap in container_config['port']: - if 'protocol' in container_config['port'][portmap]: - protocol = container_config['port'][portmap]['protocol'] - protocol = f'/{protocol}' - else: - protocol = '/tcp' + protocol = container_config['port'][portmap]['protocol'] sport = container_config['port'][portmap]['source'] dport = container_config['port'][portmap]['destination'] - port += f' -p {sport}:{dport}{protocol}' + port += f' -p {sport}:{dport}/{protocol}' # Bind volume volume = '' diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py index f68acfe02..20cf1ead1 100755 --- a/src/conf_mode/firewall.py +++ b/src/conf_mode/firewall.py @@ -65,7 +65,8 @@ valid_groups = [ 'address_group', 'domain_group', 'network_group', - 'port_group' + 'port_group', + 'interface_group' ] nested_group_types = [ diff --git a/src/conf_mode/high-availability.py b/src/conf_mode/high-availability.py index 8a959dc79..4ed16d0d7 100755 --- a/src/conf_mode/high-availability.py +++ b/src/conf_mode/high-availability.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2018-2022 VyOS maintainers and contributors +# Copyright (C) 2018-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -144,8 +144,10 @@ def verify(ha): # Virtual-server if 'virtual_server' in ha: for vs, vs_config in ha['virtual_server'].items(): - if 'port' not in vs_config: - raise ConfigError(f'Port is required but not set for virtual-server "{vs}"') + if 'port' not in vs_config and 'fwmark' not in vs_config: + raise ConfigError(f'Port or fwmark is required but not set for virtual-server "{vs}"') + if 'port' in vs_config and 'fwmark' in vs_config: + 
raise ConfigError(f'Cannot set both port and fwmark for virtual-server "{vs}"') if 'real_server' not in vs_config: raise ConfigError(f'Real-server ip is required but not set for virtual-server "{vs}"') # Real-server diff --git a/src/conf_mode/interfaces-bonding.py b/src/conf_mode/interfaces-bonding.py index b883ebef2..9936620c8 100755 --- a/src/conf_mode/interfaces-bonding.py +++ b/src/conf_mode/interfaces-bonding.py @@ -21,6 +21,7 @@ from netifaces import interfaces from vyos.config import Config from vyos.configdict import get_interface_dict +from vyos.configdict import is_node_changed from vyos.configdict import leaf_node_changed from vyos.configdict import is_member from vyos.configdict import is_source_interface @@ -81,10 +82,10 @@ def get_config(config=None): if 'mode' in bond: bond['mode'] = get_bond_mode(bond['mode']) - tmp = leaf_node_changed(conf, base + [ifname, 'mode']) + tmp = is_node_changed(conf, base + [ifname, 'mode']) if tmp: bond['shutdown_required'] = {} - tmp = leaf_node_changed(conf, base + [ifname, 'lacp-rate']) + tmp = is_node_changed(conf, base + [ifname, 'lacp-rate']) if tmp: bond['shutdown_required'] = {} # determine which members have been removed diff --git a/src/conf_mode/interfaces-geneve.py b/src/conf_mode/interfaces-geneve.py index 08cc3a48d..f6694ddde 100755 --- a/src/conf_mode/interfaces-geneve.py +++ b/src/conf_mode/interfaces-geneve.py @@ -14,14 +14,11 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -import os - from sys import exit from netifaces import interfaces from vyos.config import Config from vyos.configdict import get_interface_dict -from vyos.configdict import leaf_node_changed from vyos.configdict import is_node_changed from vyos.configverify import verify_address from vyos.configverify import verify_mtu_ipv6 @@ -49,13 +46,10 @@ def get_config(config=None): # GENEVE interfaces are picky and require recreation if certain parameters # change. But a GENEVE interface should - of course - not be re-created if # it's description or IP address is adjusted. Feels somehow logic doesn't it? 
- for cli_option in ['remote', 'vni']: - if leaf_node_changed(conf, base + [ifname, cli_option]): + for cli_option in ['remote', 'vni', 'parameters']: + if is_node_changed(conf, base + [ifname, cli_option]): geneve.update({'rebuild_required': {}}) - if is_node_changed(conf, base + [ifname, 'parameters']): - geneve.update({'rebuild_required': {}}) - return geneve def verify(geneve): diff --git a/src/conf_mode/interfaces-pseudo-ethernet.py b/src/conf_mode/interfaces-pseudo-ethernet.py index 4c65bc0b6..dce5c2358 100755 --- a/src/conf_mode/interfaces-pseudo-ethernet.py +++ b/src/conf_mode/interfaces-pseudo-ethernet.py @@ -21,7 +21,7 @@ from vyos.config import Config from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed from vyos.configdict import is_source_interface -from vyos.configdict import leaf_node_changed +from vyos.configdict import is_node_changed from vyos.configverify import verify_vrf from vyos.configverify import verify_address from vyos.configverify import verify_bridge_delete @@ -51,7 +51,7 @@ def get_config(config=None): mode = is_node_changed(conf, ['mode']) if mode: peth.update({'shutdown_required' : {}}) - if leaf_node_changed(conf, base + [ifname, 'mode']): + if is_node_changed(conf, base + [ifname, 'mode']): peth.update({'rebuild_required': {}}) if 'source_interface' in peth: diff --git a/src/conf_mode/interfaces-sstpc.py b/src/conf_mode/interfaces-sstpc.py index 6b8094c51..b5cc4cf4e 100755 --- a/src/conf_mode/interfaces-sstpc.py +++ b/src/conf_mode/interfaces-sstpc.py @@ -70,7 +70,10 @@ def verify(sstpc): verify_authentication(sstpc) verify_vrf(sstpc) - if dict_search('ssl.ca_certificate', sstpc) == None: + if not dict_search('server', sstpc): + raise ConfigError('Remote SSTP server must be specified!') + + if not dict_search('ssl.ca_certificate', sstpc): raise ConfigError('Missing mandatory CA certificate!') return None diff --git a/src/conf_mode/interfaces-tunnel.py b/src/conf_mode/interfaces-tunnel.py index acef1fda7..e2701d9d3 100755 --- a/src/conf_mode/interfaces-tunnel.py +++ b/src/conf_mode/interfaces-tunnel.py @@ -21,7 +21,7 @@ from netifaces import interfaces from vyos.config import Config from vyos.configdict import get_interface_dict -from vyos.configdict import leaf_node_changed +from vyos.configdict import is_node_changed from vyos.configverify import verify_address from vyos.configverify import verify_bridge_delete from vyos.configverify import verify_interface_exists @@ -52,7 +52,7 @@ def get_config(config=None): ifname, tunnel = get_interface_dict(conf, base) if 'deleted' not in tunnel: - tmp = leaf_node_changed(conf, base + [ifname, 'encapsulation']) + tmp = is_node_changed(conf, base + [ifname, 'encapsulation']) if tmp: tunnel.update({'encapsulation_changed': {}}) # We also need to inspect other configured tunnels as there are Kernel diff --git a/src/conf_mode/interfaces-vxlan.py b/src/conf_mode/interfaces-vxlan.py index af2d0588d..b1536148c 100755 --- a/src/conf_mode/interfaces-vxlan.py +++ b/src/conf_mode/interfaces-vxlan.py @@ -52,13 +52,11 @@ def get_config(config=None): # VXLAN interfaces are picky and require recreation if certain parameters # change. But a VXLAN interface should - of course - not be re-created if # it's description or IP address is adjusted. Feels somehow logic doesn't it? 
- for cli_option in ['external', 'gpe', 'group', 'port', 'remote', + for cli_option in ['parameters', 'external', 'gpe', 'group', 'port', 'remote', 'source-address', 'source-interface', 'vni']: - if leaf_node_changed(conf, base + [ifname, cli_option]): + if is_node_changed(conf, base + [ifname, cli_option]): vxlan.update({'rebuild_required': {}}) - - if is_node_changed(conf, base + [ifname, 'parameters']): - vxlan.update({'rebuild_required': {}}) + break # We need to verify that no other VXLAN tunnel is configured when external # mode is in use - Linux Kernel limitation diff --git a/src/conf_mode/pki.py b/src/conf_mode/pki.py index e8f3cc87a..54de467ca 100755 --- a/src/conf_mode/pki.py +++ b/src/conf_mode/pki.py @@ -51,6 +51,11 @@ sync_search = [ 'script': '/usr/libexec/vyos/conf_mode/interfaces-openvpn.py' }, { + 'keys': ['ca_certificate'], + 'path': ['interfaces', 'sstpc'], + 'script': '/usr/libexec/vyos/conf_mode/interfaces-sstpc.py' + }, + { 'keys': ['certificate', 'ca_certificate', 'local_key', 'remote_key'], 'path': ['vpn', 'ipsec'], 'script': '/usr/libexec/vyos/conf_mode/vpn_ipsec.py' diff --git a/src/conf_mode/policy-route.py b/src/conf_mode/policy-route.py index 1d016695e..40a32efb3 100755 --- a/src/conf_mode/policy-route.py +++ b/src/conf_mode/policy-route.py @@ -36,7 +36,8 @@ valid_groups = [ 'address_group', 'domain_group', 'network_group', - 'port_group' + 'port_group', + 'interface_group' ] def get_config(config=None): diff --git a/src/conf_mode/protocols_failover.py b/src/conf_mode/protocols_failover.py new file mode 100755 index 000000000..048ba7a89 --- /dev/null +++ b/src/conf_mode/protocols_failover.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import json + +from pathlib import Path + +from vyos.config import Config +from vyos.configdict import dict_merge +from vyos.template import render +from vyos.util import call +from vyos.xml import defaults +from vyos import ConfigError +from vyos import airbag + +airbag.enable() + + +service_name = 'vyos-failover' +service_conf = Path(f'/run/{service_name}.conf') +systemd_service = '/etc/systemd/system/vyos-failover.service' +rt_proto_failover = '/etc/iproute2/rt_protos.d/failover.conf' + + +def get_config(config=None): + if config: + conf = config + else: + conf = Config() + + base = ['protocols', 'failover'] + failover = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + + # Set default values only if we set config + if failover.get('route'): + for route, route_config in failover.get('route').items(): + for next_hop, next_hop_config in route_config.get('next_hop').items(): + default_values = defaults(base + ['route']) + failover['route'][route]['next_hop'][next_hop] = dict_merge( + default_values['next_hop'], failover['route'][route]['next_hop'][next_hop]) + + return failover + +def verify(failover): + # bail out early - looks like removal from running config + if not failover: + return None + + if 'route' not in failover: + raise ConfigError(f'Failover "route" is mandatory!') + + for route, route_config in failover['route'].items(): + if not route_config.get('next_hop'): + raise ConfigError(f'Next-hop for "{route}" is mandatory!') + + for next_hop, next_hop_config in route_config.get('next_hop').items(): + if 'interface' not in next_hop_config: + raise ConfigError(f'Interface for route "{route}" next-hop "{next_hop}" is mandatory!') + + if not next_hop_config.get('check'): + raise ConfigError(f'Check target for next-hop "{next_hop}" is mandatory!') + + if 'target' not in next_hop_config['check']: + raise ConfigError(f'Check target for next-hop "{next_hop}" is mandatory!') + + check_type = next_hop_config['check']['type'] + if check_type == 'tcp' and 'port' not in next_hop_config['check']: + raise ConfigError(f'Check port for next-hop "{next_hop}" and type TCP is mandatory!') + + return None + +def generate(failover): + if not failover: + service_conf.unlink(missing_ok=True) + return None + + # Add own rt_proto 'failover' + # Helps to detect all own routes 'proto failover' + with open(rt_proto_failover, 'w') as f: + f.write('111 failover\n') + + # Write configuration file + conf_json = json.dumps(failover, indent=4) + service_conf.write_text(conf_json) + render(systemd_service, 'protocols/systemd_vyos_failover_service.j2', failover) + + return None + +def apply(failover): + if not failover: + call(f'systemctl stop {service_name}.service') + call('ip route flush protocol failover') + else: + call('systemctl daemon-reload') + call(f'systemctl restart {service_name}.service') + call(f'ip route flush protocol failover') + + return None + +if __name__ == '__main__': + try: + c = get_config() + verify(c) + generate(c) + apply(c) + except ConfigError as e: + print(e) + exit(1) diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py index dbe3be225..1fe3b6aa9 100755 --- a/src/conf_mode/qos.py +++ b/src/conf_mode/qos.py @@ -15,14 +15,59 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from sys import exit +from netifaces import interfaces from vyos.config import Config from vyos.configdict import dict_merge +from vyos.configverify import verify_interface_exists +from vyos.qos import CAKE +from vyos.qos import DropTail +from vyos.qos import FairQueue +from vyos.qos import FQCodel +from vyos.qos import Limiter +from vyos.qos import NetEm +from vyos.qos import Priority +from vyos.qos import RandomDetect +from vyos.qos import RateLimiter +from vyos.qos import RoundRobin +from vyos.qos import TrafficShaper +from vyos.qos import TrafficShaperHFSC +from vyos.util import call +from vyos.util import dict_search_recursive from vyos.xml import defaults from vyos import ConfigError from vyos import airbag airbag.enable() +map_vyops_tc = { + 'cake' : CAKE, + 'drop_tail' : DropTail, + 'fair_queue' : FairQueue, + 'fq_codel' : FQCodel, + 'limiter' : Limiter, + 'network_emulator' : NetEm, + 'priority_queue' : Priority, + 'random_detect' : RandomDetect, + 'rate_control' : RateLimiter, + 'round_robin' : RoundRobin, + 'shaper' : TrafficShaper, + 'shaper_hfsc' : TrafficShaperHFSC, +} + +def get_shaper(qos, interface_config, direction): + policy_name = interface_config[direction] + # An interface might have a QoS configuration, search the used + # configuration referenced by this. Path will hold the dict element + # referenced by the config, as this will be of sort: + # + # ['policy', 'drop_tail', 'foo-dtail'] <- we are only interested in + # drop_tail as the policy/shaper type + _, path = next(dict_search_recursive(qos, policy_name)) + shaper_type = path[1] + shaper_config = qos['policy'][shaper_type][policy_name] + + return (map_vyops_tc[shaper_type], shaper_config) + def get_config(config=None): if config: conf = config @@ -32,48 +77,167 @@ def get_config(config=None): if not conf.exists(base): return None - qos = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + qos = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) if 'policy' in qos: for policy in qos['policy']: - # CLI mangles - to _ for better Jinja2 compatibility - do we need - # Jinja2 here? - policy = policy.replace('-','_') + # when calling defaults() we need to use the real CLI node, thus we + # need a hyphen + policy_hyphen = policy.replace('_', '-') + + if policy in ['random_detect']: + for rd_name, rd_config in qos['policy'][policy].items(): + # There are eight precedence levels - ensure all are present + # to be filled later down with the appropriate default values + default_precedence = {'precedence' : { '0' : {}, '1' : {}, '2' : {}, '3' : {}, + '4' : {}, '5' : {}, '6' : {}, '7' : {} }} + qos['policy']['random_detect'][rd_name] = dict_merge( + default_precedence, qos['policy']['random_detect'][rd_name]) - default_values = defaults(base + ['policy', policy]) + for p_name, p_config in qos['policy'][policy].items(): + default_values = defaults(base + ['policy', policy_hyphen]) - # class is another tag node which requires individual handling - class_default_values = defaults(base + ['policy', policy, 'class']) - if 'class' in default_values: - del default_values['class'] + if policy in ['priority_queue']: + if 'default' not in p_config: + raise ConfigError(f'QoS policy {p_name} misses "default" class!') + + # XXX: T2665: we can not safely rely on the defaults() when there are + # tagNodes in place, it is better to blend in the defaults manually. 
+ if 'class' in default_values: + del default_values['class'] + if 'precedence' in default_values: + del default_values['precedence'] - for p_name, p_config in qos['policy'][policy].items(): qos['policy'][policy][p_name] = dict_merge( default_values, qos['policy'][policy][p_name]) + # class is another tag node which requires individual handling if 'class' in p_config: + default_values = defaults(base + ['policy', policy_hyphen, 'class']) for p_class in p_config['class']: qos['policy'][policy][p_name]['class'][p_class] = dict_merge( - class_default_values, qos['policy'][policy][p_name]['class'][p_class]) + default_values, qos['policy'][policy][p_name]['class'][p_class]) + + if 'precedence' in p_config: + default_values = defaults(base + ['policy', policy_hyphen, 'precedence']) + # precedence values are a bit more complex as they are calculated + # under specific circumstances - thus we need to iterate two times. + # first blend in the defaults from XML / CLI + for precedence in p_config['precedence']: + qos['policy'][policy][p_name]['precedence'][precedence] = dict_merge( + default_values, qos['policy'][policy][p_name]['precedence'][precedence]) + # second calculate defaults based on actual dictionary + for precedence in p_config['precedence']: + max_thr = int(qos['policy'][policy][p_name]['precedence'][precedence]['maximum_threshold']) + if 'minimum_threshold' not in qos['policy'][policy][p_name]['precedence'][precedence]: + qos['policy'][policy][p_name]['precedence'][precedence]['minimum_threshold'] = str( + int((9 + int(precedence)) * max_thr) // 18); + + if 'queue_limit' not in qos['policy'][policy][p_name]['precedence'][precedence]: + qos['policy'][policy][p_name]['precedence'][precedence]['queue_limit'] = \ + str(int(4 * max_thr)) - import pprint - pprint.pprint(qos) return qos def verify(qos): - if not qos: + if not qos or 'interface' not in qos: return None # network policy emulator # reorder rerquires delay to be set + if 'policy' in qos: + for policy_type in qos['policy']: + for policy, policy_config in qos['policy'][policy_type].items(): + # a policy with it's given name is only allowed to exist once + # on the system. This is because an interface selects a policy + # for ingress/egress traffic, and thus there can only be one + # policy with a given name. 
+ # + # We check if the policy name occurs more then once - error out + # if this is true + counter = 0 + for _, path in dict_search_recursive(qos['policy'], policy): + counter += 1 + if counter > 1: + raise ConfigError(f'Conflicting policy name "{policy}", already in use!') + + if 'class' in policy_config: + for cls, cls_config in policy_config['class'].items(): + # bandwidth is not mandatory for priority-queue - that is why this is on the exception list + if 'bandwidth' not in cls_config and policy_type not in ['priority_queue', 'round_robin']: + raise ConfigError(f'Bandwidth must be defined for policy "{policy}" class "{cls}"!') + if 'match' in cls_config: + for match, match_config in cls_config['match'].items(): + if {'ip', 'ipv6'} <= set(match_config): + raise ConfigError(f'Can not use both IPv6 and IPv4 in one match ({match})!') + + if policy_type in ['random_detect']: + if 'precedence' in policy_config: + for precedence, precedence_config in policy_config['precedence'].items(): + max_tr = int(precedence_config['maximum_threshold']) + if {'maximum_threshold', 'minimum_threshold'} <= set(precedence_config): + min_tr = int(precedence_config['minimum_threshold']) + if min_tr >= max_tr: + raise ConfigError(f'Policy "{policy}" uses min-threshold "{min_tr}" >= max-threshold "{max_tr}"!') + + if {'maximum_threshold', 'queue_limit'} <= set(precedence_config): + queue_lim = int(precedence_config['queue_limit']) + if queue_lim < max_tr: + raise ConfigError(f'Policy "{policy}" uses queue-limit "{queue_lim}" < max-threshold "{max_tr}"!') + + if 'default' in policy_config: + if 'bandwidth' not in policy_config['default']: + raise ConfigError('Bandwidth not defined for default traffic!') + + # we should check interface ingress/egress configuration after verifying that + # the policy name is used only once - this makes the logic easier! 
+ for interface, interface_config in qos['interface'].items(): + verify_interface_exists(interface) + + for direction in ['egress', 'ingress']: + # bail out early if shaper for given direction is not used at all + if direction not in interface_config: + continue + + policy_name = interface_config[direction] + if 'policy' not in qos or list(dict_search_recursive(qos['policy'], policy_name)) == []: + raise ConfigError(f'Selected QoS policy "{policy_name}" does not exist!') + + shaper_type, shaper_config = get_shaper(qos, interface_config, direction) + tmp = shaper_type(interface).get_direction() + if direction not in tmp: + raise ConfigError(f'Selected QoS policy on interface "{interface}" only supports "{tmp}"!') - raise ConfigError('123') return None def generate(qos): + if not qos or 'interface' not in qos: + return None + return None def apply(qos): + # Always delete "old" shapers first + for interface in interfaces(): + # Ignore errors (may have no qdisc) + call(f'tc qdisc del dev {interface} parent ffff:') + call(f'tc qdisc del dev {interface} root') + + if not qos or 'interface' not in qos: + return None + + for interface, interface_config in qos['interface'].items(): + for direction in ['egress', 'ingress']: + # bail out early if shaper for given direction is not used at all + if direction not in interface_config: + continue + + shaper_type, shaper_config = get_shaper(qos, interface_config, direction) + tmp = shaper_type(interface) + tmp.update(shaper_config, direction) + return None if __name__ == '__main__': diff --git a/src/conf_mode/service_webproxy.py b/src/conf_mode/service_webproxy.py index 32af31bde..41a1deaa3 100755 --- a/src/conf_mode/service_webproxy.py +++ b/src/conf_mode/service_webproxy.py @@ -28,8 +28,10 @@ from vyos.util import dict_search from vyos.util import write_file from vyos.validate import is_addr_assigned from vyos.xml import defaults +from vyos.base import Warning from vyos import ConfigError from vyos import airbag + airbag.enable() squid_config_file = '/etc/squid/squid.conf' @@ -37,24 +39,57 @@ squidguard_config_file = '/etc/squidguard/squidGuard.conf' squidguard_db_dir = '/opt/vyatta/etc/config/url-filtering/squidguard/db' user_group = 'proxy' -def generate_sg_localdb(category, list_type, role, proxy): + +def check_blacklist_categorydb(config_section): + if 'block_category' in config_section: + for category in config_section['block_category']: + check_categorydb(category) + if 'allow_category' in config_section: + for category in config_section['allow_category']: + check_categorydb(category) + + +def check_categorydb(category: str): + """ + Check if category's db exist + :param category: + :type str: + """ + path_to_cat: str = f'{squidguard_db_dir}/{category}' + if not os.path.exists(f'{path_to_cat}/domains.db') \ + and not os.path.exists(f'{path_to_cat}/urls.db') \ + and not os.path.exists(f'{path_to_cat}/expressions.db'): + Warning(f'DB of category {category} does not exist.\n ' + f'Use [update webproxy blacklists] ' + f'or delete undefined category!') + + +def generate_sg_rule_localdb(category, list_type, role, proxy): + if not category or not list_type or not role: + return None + cat_ = category.replace('-', '_') - if isinstance(dict_search(f'url_filtering.squidguard.{cat_}', proxy), - list): + if role == 'default': + path_to_cat = f'{cat_}' + else: + path_to_cat = f'rule.{role}.{cat_}' + if isinstance( + dict_search(f'url_filtering.squidguard.{path_to_cat}', proxy), + list): # local block databases must be generated "on-the-fly" tmp = { - 
'squidguard_db_dir' : squidguard_db_dir, - 'category' : f'{category}-default', - 'list_type' : list_type, - 'rule' : role + 'squidguard_db_dir': squidguard_db_dir, + 'category': f'{category}-{role}', + 'list_type': list_type, + 'rule': role } sg_tmp_file = '/tmp/sg.conf' - db_file = f'{category}-default/{list_type}' - domains = '\n'.join(dict_search(f'url_filtering.squidguard.{cat_}', proxy)) - + db_file = f'{category}-{role}/{list_type}' + domains = '\n'.join( + dict_search(f'url_filtering.squidguard.{path_to_cat}', proxy)) # local file - write_file(f'{squidguard_db_dir}/{category}-default/local', '', + write_file(f'{squidguard_db_dir}/{category}-{role}/local', '', user=user_group, group=user_group) # database input file write_file(f'{squidguard_db_dir}/{db_file}', domains, @@ -64,17 +99,18 @@ def generate_sg_localdb(category, list_type, role, proxy): render(sg_tmp_file, 'squid/sg_acl.conf.j2', tmp, user=user_group, group=user_group) - call(f'su - {user_group} -c "squidGuard -d -c {sg_tmp_file} -C {db_file}"') + call( + f'su - {user_group} -c "squidGuard -d -c {sg_tmp_file} -C {db_file}"') if os.path.exists(sg_tmp_file): os.unlink(sg_tmp_file) - else: # if category is not part of our configuration, clean out the # squidguard lists - tmp = f'{squidguard_db_dir}/{category}-default' + tmp = f'{squidguard_db_dir}/{category}-{role}' if os.path.exists(tmp): - rmtree(f'{squidguard_db_dir}/{category}-default') + rmtree(f'{squidguard_db_dir}/{category}-{role}') + def get_config(config=None): if config: @@ -85,7 +121,8 @@ def get_config(config=None): if not conf.exists(base): return None - proxy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) + proxy = conf.get_config_dict(base, key_mangling=('-', '_'), + get_first_key=True) # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. 
default_values = defaults(base) @@ -110,10 +147,11 @@ def get_config(config=None): default_values = defaults(base + ['cache-peer']) for peer in proxy['cache_peer']: proxy['cache_peer'][peer] = dict_merge(default_values, - proxy['cache_peer'][peer]) + proxy['cache_peer'][peer]) return proxy + def verify(proxy): if not proxy: return None @@ -170,17 +208,30 @@ def generate(proxy): render(squidguard_config_file, 'squid/squidGuard.conf.j2', proxy) cat_dict = { - 'local-block' : 'domains', - 'local-block-keyword' : 'expressions', - 'local-block-url' : 'urls', - 'local-ok' : 'domains', - 'local-ok-url' : 'urls' + 'local-block': 'domains', + 'local-block-keyword': 'expressions', + 'local-block-url': 'urls', + 'local-ok': 'domains', + 'local-ok-url': 'urls' } - for category, list_type in cat_dict.items(): - generate_sg_localdb(category, list_type, 'default', proxy) + if dict_search(f'url_filtering.squidguard', proxy) is not None: + squidgard_config_section = proxy['url_filtering']['squidguard'] + + for category, list_type in cat_dict.items(): + generate_sg_rule_localdb(category, list_type, 'default', proxy) + check_blacklist_categorydb(squidgard_config_section) + + if 'rule' in squidgard_config_section: + for rule in squidgard_config_section['rule']: + rule_config_section = squidgard_config_section['rule'][ + rule] + for category, list_type in cat_dict.items(): + generate_sg_rule_localdb(category, list_type, rule, proxy) + check_blacklist_categorydb(rule_config_section) return None + def apply(proxy): if not proxy: # proxy is removed in the commit @@ -198,6 +249,7 @@ def apply(proxy): call('systemctl restart squid.service') return None + if __name__ == '__main__': try: c = get_config() diff --git a/src/conf_mode/system-option.py b/src/conf_mode/system-option.py index 36dbf155b..e6c7a0ed2 100755 --- a/src/conf_mode/system-option.py +++ b/src/conf_mode/system-option.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019-2020 VyOS maintainers and contributors +# Copyright (C) 2019-2022 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -22,17 +22,19 @@ from time import sleep from vyos.config import Config from vyos.configdict import dict_merge +from vyos.configverify import verify_source_interface from vyos.template import render from vyos.util import cmd from vyos.util import is_systemd_service_running from vyos.validate import is_addr_assigned +from vyos.validate import is_intf_addr_assigned from vyos.xml import defaults from vyos import ConfigError from vyos import airbag airbag.enable() curlrc_config = r'/etc/curlrc' -ssh_config = r'/etc/ssh/ssh_config' +ssh_config = r'/etc/ssh/ssh_config.d/91-vyos-ssh-client-options.conf' systemd_action_file = '/lib/systemd/system/ctrl-alt-del.target' def get_config(config=None): @@ -68,8 +70,17 @@ def verify(options): if 'ssh_client' in options: config = options['ssh_client'] if 'source_address' in config: + address = config['source_address'] if not is_addr_assigned(config['source_address']): - raise ConfigError('No interface with give address specified!') + raise ConfigError('No interface with address "{address}" configured!') + + if 'source_interface' in config: + verify_source_interface(config) + if 'source_address' in config: + address = config['source_address'] + interface = config['source_interface'] + if not is_intf_addr_assigned(interface, address): + raise ConfigError(f'Address "{address}" not assigned on 
interface "{interface}"!') return None diff --git a/src/conf_mode/vpn_ipsec.py b/src/conf_mode/vpn_ipsec.py index b79e9847a..04e2f2939 100755 --- a/src/conf_mode/vpn_ipsec.py +++ b/src/conf_mode/vpn_ipsec.py @@ -622,7 +622,7 @@ def wait_for_vici_socket(timeout=5, sleep_interval=0.1): sleep(sleep_interval) def apply(ipsec): - systemd_service = 'strongswan-starter.service' + systemd_service = 'strongswan.service' if not ipsec: call(f'systemctl stop {systemd_service}') else: diff --git a/src/conf_mode/vpn_l2tp.py b/src/conf_mode/vpn_l2tp.py index 27e78db99..65623c2b1 100755 --- a/src/conf_mode/vpn_l2tp.py +++ b/src/conf_mode/vpn_l2tp.py @@ -58,6 +58,9 @@ default_config_data = { 'ppp_echo_failure' : '3', 'ppp_echo_interval' : '30', 'ppp_echo_timeout': '0', + 'ppp_ipv6_accept_peer_intf_id': False, + 'ppp_ipv6_intf_id': None, + 'ppp_ipv6_peer_intf_id': None, 'radius_server': [], 'radius_acct_inter_jitter': '', 'radius_acct_tmo': '3', @@ -314,6 +317,15 @@ def get_config(config=None): if conf.exists(['ppp-options', 'ipv6']): l2tp['ppp_ipv6'] = conf.return_value(['ppp-options', 'ipv6']) + if conf.exists(['ppp-options', 'ipv6-accept-peer-intf-id']): + l2tp['ppp_ipv6_accept_peer_intf_id'] = True + + if conf.exists(['ppp-options', 'ipv6-intf-id']): + l2tp['ppp_ipv6_intf_id'] = conf.return_value(['ppp-options', 'ipv6-intf-id']) + + if conf.exists(['ppp-options', 'ipv6-peer-intf-id']): + l2tp['ppp_ipv6_peer_intf_id'] = conf.return_value(['ppp-options', 'ipv6-peer-intf-id']) + return l2tp diff --git a/src/etc/dhcp/dhclient-exit-hooks.d/ipsec-dhclient-hook b/src/etc/dhcp/dhclient-exit-hooks.d/ipsec-dhclient-hook index 61a89e62a..1f1926e17 100755 --- a/src/etc/dhcp/dhclient-exit-hooks.d/ipsec-dhclient-hook +++ b/src/etc/dhcp/dhclient-exit-hooks.d/ipsec-dhclient-hook @@ -23,7 +23,7 @@ DHCP_HOOK_IFLIST="/tmp/ipsec_dhcp_waiting" if [ -f $DHCP_HOOK_IFLIST ] && [ "$reason" == "BOUND" ]; then if grep -qw $interface $DHCP_HOOK_IFLIST; then sudo rm $DHCP_HOOK_IFLIST - sudo python3 /usr/libexec/vyos/conf_mode/vpn_ipsec.py + sudo /usr/libexec/vyos/conf_mode/vpn_ipsec.py exit 0 fi fi diff --git a/src/helpers/vyos-failover.py b/src/helpers/vyos-failover.py new file mode 100755 index 000000000..1ac193423 --- /dev/null +++ b/src/helpers/vyos-failover.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import argparse +import json +import subprocess +import socket +import time + +from vyos.util import rc_cmd +from pathlib import Path +from systemd import journal + + +my_name = Path(__file__).stem + + +def get_best_route_options(route, debug=False): + """ + Return current best route ('gateway, interface, metric) + + % get_best_route_options('203.0.113.1') + ('192.168.0.1', 'eth1', 1) + + % get_best_route_options('203.0.113.254') + (None, None, None) + """ + rc, data = rc_cmd(f'ip --detail --json route show protocol failover {route}') + if rc == 0: + data = json.loads(data) + if len(data) == 0: + print(f'\nRoute {route} for protocol failover was not found') + return None, None, None + # Fake metric 999 by default + # Search route with the lowest metric + best_metric = 999 + for entry in data: + if debug: print('\n', entry) + metric = entry.get('metric') + gateway = entry.get('gateway') + iface = entry.get('dev') + if metric < best_metric: + best_metric = metric + best_gateway = gateway + best_interface = iface + if debug: + print(f'### Best_route exists: {route}, best_gateway: {best_gateway}, ' + f'best_metric: {best_metric}, best_iface: {best_interface}') + return best_gateway, best_interface, best_metric + +def is_port_open(ip, port): + """ + Check connection to remote host and port + Return True if host alive + + % is_port_open('example.com', 8080) + True + """ + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) + s.settimeout(2) + try: + s.connect((ip, int(port))) + s.shutdown(socket.SHUT_RDWR) + return True + except: + return False + finally: + s.close() + +def is_target_alive(target=None, iface='', proto='icmp', port=None, debug=False): + """ + Host availability check by ICMP, ARP, TCP + Return True if target checks is successful + + % is_target_alive('192.0.2.1', 'eth1', proto='arp') + True + """ + if iface != '': + iface = f'-I {iface}' + if proto == 'icmp': + command = f'/usr/bin/ping -q {target} {iface} -n -c 2 -W 1' + rc, response = rc_cmd(command) + if debug: print(f' [ CHECK-TARGET ]: [{command}] -- return-code [RC: {rc}]') + if rc == 0: + return True + elif proto == 'arp': + command = f'/usr/bin/arping -b -c 2 -f -w 1 -i 1 {iface} {target}' + rc, response = rc_cmd(command) + if debug: print(f' [ CHECK-TARGET ]: [{command}] -- return-code [RC: {rc}]') + if rc == 0: + return True + elif proto == 'tcp' and port is not None: + return True if is_port_open(target, port) else False + else: + return False + + +if __name__ == '__main__': + # Parse command arguments and get config + parser = argparse.ArgumentParser() + parser.add_argument('-c', + '--config', + action='store', + help='Path to protocols failover configuration', + required=True, + type=Path) + + args = parser.parse_args() + try: + config_path = Path(args.config) + config = json.loads(config_path.read_text()) + except Exception as err: + print( + f'Configuration file "{config_path}" does not exist or malformed: {err}' + ) + exit(1) + + # Useful debug info to console, use debug = True + # sudo systemctl stop vyos-failover.service + # sudo /usr/libexec/vyos/vyos-failover.py --config /run/vyos-failover.conf + debug = False + + while(True): + + for route, route_config in config.get('route').items(): + + exists_route = exists_gateway, exists_iface, exists_metric = get_best_route_options(route, debug=debug) + + for next_hop, nexthop_config in route_config.get('next_hop').items(): + conf_iface = nexthop_config.get('interface') + conf_metric = int(nexthop_config.get('metric')) + port = 
nexthop_config.get('check').get('port') + port_opt = f'port {port}' if port else '' + proto = nexthop_config.get('check').get('type') + target = nexthop_config.get('check').get('target') + timeout = nexthop_config.get('check').get('timeout') + + # Best route not fonund in the current routing table + if exists_route == (None, None, None): + if debug: print(f" [NEW_ROUTE_DETECTED] route: [{route}]") + # Add route if check-target alive + if is_target_alive(target, conf_iface, proto, port, debug=debug): + if debug: print(f' [ ADD ] -- ip route add {route} via {next_hop} dev {conf_iface} ' + f'metric {conf_metric} proto failover\n###') + rc, command = rc_cmd(f'ip route add {route} via {next_hop} dev {conf_iface} ' + f'metric {conf_metric} proto failover') + # If something is wrong and gateway not added + # Example: Error: Next-hop has invalid gateway. + if rc !=0: + if debug: print(f'{command} -- return-code [RC: {rc}] {next_hop} dev {conf_iface}') + else: + journal.send(f'ip route add {route} via {next_hop} dev {conf_iface} ' + f'metric {conf_metric} proto failover', SYSLOG_IDENTIFIER=my_name) + else: + if debug: print(f' [ TARGET_FAIL ] target checks fails for [{target}], do nothing') + journal.send(f'Check fail for route {route} target {target} proto {proto} ' + f'{port_opt}', SYSLOG_IDENTIFIER=my_name) + + # Route was added, check if the target is alive + # We should delete route if check fails only if route exists in the routing table + if not is_target_alive(target, conf_iface, proto, port, debug=debug) and \ + exists_route != (None, None, None): + if debug: + print(f'Nexh_hop {next_hop} fail, target not response') + print(f' [ DEL ] -- ip route del {route} via {next_hop} dev {conf_iface} ' + f'metric {conf_metric} proto failover [DELETE]') + rc_cmd(f'ip route del {route} via {next_hop} dev {conf_iface} metric {conf_metric} proto failover') + journal.send(f'ip route del {route} via {next_hop} dev {conf_iface} ' + f'metric {conf_metric} proto failover', SYSLOG_IDENTIFIER=my_name) + + time.sleep(int(timeout)) diff --git a/src/migration-scripts/container/0-to-1 b/src/migration-scripts/container/0-to-1 new file mode 100755 index 000000000..d0461389b --- /dev/null +++ b/src/migration-scripts/container/0-to-1 @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +# T4870: change underlaying container filesystem from vfs to overlay + +import os +import shutil +import sys + +from vyos.configtree import ConfigTree +from vyos.util import call + +if (len(sys.argv) < 1): + print("Must specify file name!") + sys.exit(1) + +file_name = sys.argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['container', 'name'] +config = ConfigTree(config_file) + +# Check if containers exist and we need to perform image manipulation +if config.exists(base): + for container in config.list_nodes(base): + # Stop any given container first + call(f'systemctl stop vyos-container-{container}.service') + # Export container image for later re-import to new filesystem. We store + # the backup on a real disk as a tmpfs (like /tmp) could probably lack + # memory if a host has too many containers stored. + image_name = config.return_value(base + [container, 'image']) + call(f'podman image save --quiet --output /root/{container}.tar --format oci-archive {image_name}') + +# No need to adjust the strage driver online (this is only used for testing and +# debugging on a live system) - it is already overlay2 when the migration script +# is run during system update. But the specified driver in the image is actually +# overwritten by the still present VFS filesystem on disk. Thus podman still +# thinks it uses VFS until we delete the libpod directory under: +# /usr/lib/live/mount/persistence/container/storage +#call('sed -i "s/vfs/overlay2/g" /etc/containers/storage.conf /usr/share/vyos/templates/container/storage.conf.j2') + +base_path = '/usr/lib/live/mount/persistence/container/storage' +for dir in ['libpod', 'vfs', 'vfs-containers', 'vfs-images', 'vfs-layers']: + if os.path.exists(f'{base_path}/{dir}'): + shutil.rmtree(f'{base_path}/{dir}') + +# Now all remaining information about VFS is gone and we operate in overlayfs2 +# filesystem mode. Time to re-import the images. +if config.exists(base): + for container in config.list_nodes(base): + # Export container image for later re-import to new filesystem + image_name = config.return_value(base + [container, 'image']) + image_path = f'/root/{container}.tar' + call(f'podman image load --quiet --input {image_path}') + + # Start any given container first + call(f'systemctl start vyos-container-{container}.service') + + # Delete temporary container image + if os.path.exists(image_path): + os.unlink(image_path) + diff --git a/src/migration-scripts/firewall/8-to-9 b/src/migration-scripts/firewall/8-to-9 new file mode 100755 index 000000000..f7c1bb90d --- /dev/null +++ b/src/migration-scripts/firewall/8-to-9 @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +# T4780: Add firewall interface group +# cli changes from: +# set firewall [name | ipv6-name] <name> rule <number> [inbound-interface | outbound-interface] <interface_name> +# To +# set firewall [name | ipv6-name] <name> rule <number> [inbound-interface | outbound-interface] [interface-name | interface-group] <interface_name | interface_group> + +import re + +from sys import argv +from sys import exit + +from vyos.configtree import ConfigTree +from vyos.ifconfig import Section + +if (len(argv) < 1): + print("Must specify file name!") + exit(1) + +file_name = argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['firewall'] +config = ConfigTree(config_file) + +if not config.exists(base): + # Nothing to do + exit(0) + +if config.exists(base + ['name']): + for name in config.list_nodes(base + ['name']): + if not config.exists(base + ['name', name, 'rule']): + continue + + for rule in config.list_nodes(base + ['name', name, 'rule']): + rule_iiface = base + ['name', name, 'rule', rule, 'inbound-interface'] + rule_oiface = base + ['name', name, 'rule', rule, 'outbound-interface'] + + if config.exists(rule_iiface): + tmp = config.return_value(rule_iiface) + config.delete(rule_iiface) + config.set(rule_iiface + ['interface-name'], value=tmp) + + if config.exists(rule_oiface): + tmp = config.return_value(rule_oiface) + config.delete(rule_oiface) + config.set(rule_oiface + ['interface-name'], value=tmp) + + +if config.exists(base + ['ipv6-name']): + for name in config.list_nodes(base + ['ipv6-name']): + if not config.exists(base + ['ipv6-name', name, 'rule']): + continue + + for rule in config.list_nodes(base + ['ipv6-name', name, 'rule']): + rule_iiface = base + ['ipv6-name', name, 'rule', rule, 'inbound-interface'] + rule_oiface = base + ['ipv6-name', name, 'rule', rule, 'outbound-interface'] + + if config.exists(rule_iiface): + tmp = config.return_value(rule_iiface) + config.delete(rule_iiface) + config.set(rule_iiface + ['interface-name'], value=tmp) + + if config.exists(rule_oiface): + tmp = config.return_value(rule_oiface) + config.delete(rule_oiface) + config.set(rule_oiface + ['interface-name'], value=tmp) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print("Failed to save the modified config: {}".format(e)) + exit(1)
\ No newline at end of file diff --git a/src/migration-scripts/ipsec/9-to-10 b/src/migration-scripts/ipsec/9-to-10 index 1254104cb..de366ef3b 100755 --- a/src/migration-scripts/ipsec/9-to-10 +++ b/src/migration-scripts/ipsec/9-to-10 @@ -85,10 +85,10 @@ if config.exists(base + ['site-to-site', 'peer']): config.rename(peer_base + ['authentication', 'id'], 'local-id') # For the peer '@foo' set remote-id 'foo' if remote-id is not defined - if peer.startswith('@'): - if not config.exists(peer_base + ['authentication', 'remote-id']): - tmp = peer.replace('@', '') - config.set(peer_base + ['authentication', 'remote-id'], value=tmp) + # For the peer '192.0.2.1' set remote-id '192.0.2.1' if remote-id is not defined + if not config.exists(peer_base + ['authentication', 'remote-id']): + tmp = peer.replace('@', '') if peer.startswith('@') else peer + config.set(peer_base + ['authentication', 'remote-id'], value=tmp) # replace: 'peer <tag> force-encapsulation enable' # => 'peer <tag> force-udp-encapsulation' diff --git a/src/migration-scripts/qos/1-to-2 b/src/migration-scripts/qos/1-to-2 new file mode 100755 index 000000000..41026cbd6 --- /dev/null +++ b/src/migration-scripts/qos/1-to-2 @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +from sys import argv,exit + +from vyos.base import Warning +from vyos.configtree import ConfigTree +from vyos.util import read_file + +def bandwidth_percent_to_val(interface, percent) -> int: + speed = read_file(f'/sys/class/net/{interface}/speed') + if not speed.isnumeric(): + Warning('Interface speed cannot be determined (assuming 10 Mbit/s)') + speed = 10 + speed = int(speed) *1000000 # convert to MBit/s + return speed * int(percent) // 100 # integer division + +if (len(argv) < 1): + print("Must specify file name!") + exit(1) + +file_name = argv[1] + +with open(file_name, 'r') as f: + config_file = f.read() + +base = ['traffic-policy'] +config = ConfigTree(config_file) + +if not config.exists(base): + # Nothing to do + exit(0) + +iface_config = {} + +if config.exists(['interfaces']): + def get_qos(config, interface, interface_base): + if config.exists(interface_base): + tmp = { interface : {} } + if config.exists(interface_base + ['in']): + tmp[interface]['ingress'] = config.return_value(interface_base + ['in']) + if config.exists(interface_base + ['out']): + tmp[interface]['egress'] = config.return_value(interface_base + ['out']) + config.delete(interface_base) + return tmp + return None + + # Migrate "interface ethernet eth0 traffic-policy in|out" to "qos interface eth0 ingress|egress" + for type in config.list_nodes(['interfaces']): + for interface in config.list_nodes(['interfaces', type]): + interface_base = ['interfaces', type, interface, 'traffic-policy'] + tmp = get_qos(config, interface, interface_base) + if tmp: iface_config.update(tmp) + + vif_path = ['interfaces', type, interface, 'vif'] + if config.exists(vif_path): + for vif in config.list_nodes(vif_path): + vif_interface_base = vif_path + [vif, 'traffic-policy'] + ifname = f'{interface}.{vif}' + tmp = get_qos(config, ifname, vif_interface_base) + if tmp: iface_config.update(tmp) + + vif_s_path = ['interfaces', type, interface, 'vif-s'] + if config.exists(vif_s_path): + for vif_s in config.list_nodes(vif_s_path): + vif_s_interface_base = vif_s_path + [vif_s, 'traffic-policy'] + ifname = f'{interface}.{vif_s}' + tmp = get_qos(config, ifname, vif_s_interface_base) + if tmp: iface_config.update(tmp) + + # vif-c interfaces MUST be migrated before their parent vif-s + # interface as the migrate_*() functions delete the path! + vif_c_path = ['interfaces', type, interface, 'vif-s', vif_s, 'vif-c'] + if config.exists(vif_c_path): + for vif_c in config.list_nodes(vif_c_path): + vif_c_interface_base = vif_c_path + [vif_c, 'traffic-policy'] + ifname = f'{interface}.{vif_s}.{vif_c}' + tmp = get_qos(config, ifname, vif_s_interface_base) + if tmp: iface_config.update(tmp) + + +# Now we have the information which interface uses which QoS policy. 
+# Interface binding will be moved to the qos CLi tree +config.set(['qos']) +config.copy(base, ['qos', 'policy']) +config.delete(base) + +# Now map the interface policy binding to the new CLI syntax +if len(iface_config): + config.set(['qos', 'interface']) + config.set_tag(['qos', 'interface']) + +for interface, interface_config in iface_config.items(): + config.set(['qos', 'interface', interface]) + config.set_tag(['qos', 'interface', interface]) + if 'ingress' in interface_config: + config.set(['qos', 'interface', interface, 'ingress'], value=interface_config['ingress']) + if 'egress' in interface_config: + config.set(['qos', 'interface', interface, 'egress'], value=interface_config['egress']) + +# Remove "burst" CLI node from network emulator +netem_base = ['qos', 'policy', 'network-emulator'] +if config.exists(netem_base): + for policy_name in config.list_nodes(netem_base): + if config.exists(netem_base + [policy_name, 'burst']): + config.delete(netem_base + [policy_name, 'burst']) + +try: + with open(file_name, 'w') as f: + f.write(config.to_string()) +except OSError as e: + print("Failed to save the modified config: {}".format(e)) + exit(1) diff --git a/src/op_mode/conntrack.py b/src/op_mode/conntrack.py index fff537936..df213cc5a 100755 --- a/src/op_mode/conntrack.py +++ b/src/op_mode/conntrack.py @@ -116,7 +116,7 @@ def get_formatted_output(dict_data): reply_src = f'{reply_src}:{reply_sport}' if reply_sport else reply_src reply_dst = f'{reply_dst}:{reply_dport}' if reply_dport else reply_dst state = meta['state'] if 'state' in meta else '' - mark = meta['mark'] + mark = meta['mark'] if 'mark' in meta else '' zone = meta['zone'] if 'zone' in meta else '' data_entries.append( [conn_id, orig_src, orig_dst, reply_src, reply_dst, proto, state, timeout, mark, zone]) diff --git a/src/op_mode/container.py b/src/op_mode/container.py index ce466ffc1..ecefc556e 100755 --- a/src/op_mode/container.py +++ b/src/op_mode/container.py @@ -23,7 +23,6 @@ from vyos.util import cmd import vyos.opmode - def _get_json_data(command: str) -> list: """ Get container command format JSON @@ -38,7 +37,7 @@ def _get_raw_data(command: str) -> list: def show_container(raw: bool): - command = 'sudo podman ps --all' + command = 'podman ps --all' container_data = _get_raw_data(command) if raw: return container_data @@ -47,8 +46,8 @@ def show_container(raw: bool): def show_image(raw: bool): - command = 'sudo podman image ls' - container_data = _get_raw_data('sudo podman image ls') + command = 'podman image ls' + container_data = _get_raw_data('podman image ls') if raw: return container_data else: @@ -56,7 +55,7 @@ def show_image(raw: bool): def show_network(raw: bool): - command = 'sudo podman network ls' + command = 'podman network ls' container_data = _get_raw_data(command) if raw: return container_data @@ -67,7 +66,7 @@ def show_network(raw: bool): def restart(name: str): from vyos.util import rc_cmd - rc, output = rc_cmd(f'sudo podman restart {name}') + rc, output = rc_cmd(f'systemctl restart vyos-container-{name}.service') if rc != 0: print(output) return None diff --git a/src/op_mode/dhcp.py b/src/op_mode/dhcp.py index 07e9b7d6c..b9e6e7bc9 100755 --- a/src/op_mode/dhcp.py +++ b/src/op_mode/dhcp.py @@ -15,13 +15,14 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import sys -from ipaddress import ip_address import typing from datetime import datetime -from sys import exit -from tabulate import tabulate +from ipaddress import ip_address from isc_dhcp_leases import IscDhcpLeases +from tabulate import tabulate + +import vyos.opmode from vyos.base import Warning from vyos.configquery import ConfigTreeQuery @@ -30,19 +31,10 @@ from vyos.util import cmd from vyos.util import dict_search from vyos.util import is_systemd_service_running -import vyos.opmode - - config = ConfigTreeQuery() -pool_key = "shared-networkname" - - -def _in_pool(lease, pool): - if pool_key in lease.sets: - if lease.sets[pool_key] == pool: - return True - return False - +lease_valid_states = ['all', 'active', 'free', 'expired', 'released', 'abandoned', 'reset', 'backup'] +sort_valid_inet = ['end', 'mac', 'hostname', 'ip', 'pool', 'remaining', 'start', 'state'] +sort_valid_inet6 = ['end', 'iaid_duid', 'ip', 'last_communication', 'pool', 'remaining', 'state', 'type'] def _utc_to_local(utc_dt): return datetime.fromtimestamp((datetime.fromtimestamp(utc_dt) - datetime(1970, 1, 1)).total_seconds()) @@ -71,7 +63,7 @@ def _find_list_of_dict_index(lst, key='ip', value='') -> int: return idx -def _get_raw_server_leases(family, pool=None) -> list: +def _get_raw_server_leases(family='inet', pool=None, sorted=None, state=[]) -> list: """ Get DHCP server leases :return list @@ -79,9 +71,12 @@ def _get_raw_server_leases(family, pool=None) -> list: lease_file = '/config/dhcpdv6.leases' if family == 'inet6' else '/config/dhcpd.leases' data = [] leases = IscDhcpLeases(lease_file).get() - if pool is not None: - if config.exists(f'service dhcp-server shared-network-name {pool}'): - leases = list(filter(lambda x: _in_pool(x, pool), leases)) + + if pool is None: + pool = _get_dhcp_pools(family=family) + else: + pool = [pool] + for lease in leases: data_lease = {} data_lease['ip'] = lease.ip @@ -90,7 +85,7 @@ def _get_raw_server_leases(family, pool=None) -> list: data_lease['end'] = lease.end.timestamp() if family == 'inet': - data_lease['hardware'] = lease.ethernet + data_lease['mac'] = lease.ethernet data_lease['start'] = lease.start.timestamp() data_lease['hostname'] = lease.hostname @@ -110,8 +105,9 @@ def _get_raw_server_leases(family, pool=None) -> list: data_lease['remaining'] = '' # Do not add old leases - if data_lease['remaining'] != '': - data.append(data_lease) + if data_lease['remaining'] != '' and data_lease['pool'] in pool: + if not state or data_lease['state'] in state: + data.append(data_lease) # deduplicate checked = [] @@ -123,15 +119,20 @@ def _get_raw_server_leases(family, pool=None) -> list: idx = _find_list_of_dict_index(data, key='ip', value=addr) data.pop(idx) + if sorted: + if sorted == 'ip': + data.sort(key = lambda x:ip_address(x['ip'])) + else: + data.sort(key = lambda x:x[sorted]) return data -def _get_formatted_server_leases(raw_data, family): +def _get_formatted_server_leases(raw_data, family='inet'): data_entries = [] if family == 'inet': for lease in raw_data: ipaddr = lease.get('ip') - hw_addr = lease.get('hardware') + hw_addr = lease.get('mac') state = lease.get('state') start = lease.get('start') start = _utc_to_local(start).strftime('%Y/%m/%d %H:%M:%S') @@ -142,7 +143,7 @@ def _get_formatted_server_leases(raw_data, family): hostname = lease.get('hostname') data_entries.append([ipaddr, hw_addr, state, start, end, remain, pool, hostname]) - headers = ['IP Address', 'Hardware address', 'State', 'Lease start', 'Lease expiration', 'Remaining', 'Pool', + headers = ['IP 
Address', 'MAC address', 'State', 'Lease start', 'Lease expiration', 'Remaining', 'Pool', 'Hostname'] if family == 'inet6': @@ -256,16 +257,28 @@ def show_pool_statistics(raw: bool, family: str, pool: typing.Optional[str]): @_verify -def show_server_leases(raw: bool, family: str): +def show_server_leases(raw: bool, family: str, pool: typing.Optional[str], + sorted: typing.Optional[str], state: typing.Optional[str]): # if dhcp server is down, inactive leases may still be shown as active, so warn the user. if not is_systemd_service_running('isc-dhcp-server.service'): Warning('DHCP server is configured but not started. Data may be stale.') - leases = _get_raw_server_leases(family) + v = 'v6' if family == 'inet6' else '' + if pool and pool not in _get_dhcp_pools(family=family): + raise vyos.opmode.IncorrectValue(f'DHCP{v} pool "{pool}" does not exist!') + + if state and state not in lease_valid_states: + raise vyos.opmode.IncorrectValue(f'DHCP{v} state "{state}" is invalid!') + + sort_valid = sort_valid_inet6 if family == 'inet6' else sort_valid_inet + if sorted and sorted not in sort_valid: + raise vyos.opmode.IncorrectValue(f'DHCP{v} sort "{sorted}" is invalid!') + + lease_data = _get_raw_server_leases(family=family, pool=pool, sorted=sorted, state=state) if raw: - return leases + return lease_data else: - return _get_formatted_server_leases(leases, family) + return _get_formatted_server_leases(lease_data, family=family) if __name__ == '__main__': diff --git a/src/op_mode/generate_system_login_user.py b/src/op_mode/generate_system_login_user.py new file mode 100755 index 000000000..8f8827b1b --- /dev/null +++ b/src/op_mode/generate_system_login_user.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import argparse +import os + +from vyos.util import popen +from secrets import token_hex +from base64 import b32encode + +if os.geteuid() != 0: + exit("You need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. 
Exiting.") + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("-u", "--username", type=str, help='Username used for authentication', required=True) + parser.add_argument("-l", "--rate_limit", type=str, help='Limit number of logins (rate-limit) per rate-time (default: 3)', default="3", required=False) + parser.add_argument("-t", "--rate_time", type=str, help='Limit number of logins (rate-limit) per rate-time (default: 30)', default="30", required=False) + parser.add_argument("-w", "--window_size", type=str, help='Set window of concurrently valid codes (default: 3)', default="3", required=False) + parser.add_argument("-i", "--interval", type=str, help='Duration of single time interval', default="30", required=False) + parser.add_argument("-d", "--digits", type=str, help='The number of digits in the one-time password', default="6", required=False) + args = parser.parse_args() + + hostname = os.uname()[1] + username = args.username + rate_limit = args.rate_limit + rate_time = args.rate_time + window_size = args.window_size + digits = args.digits + period = args.interval + + # check variables: + if int(rate_limit) < 1 or int(rate_limit) > 10: + print("") + quit("Number of logins (rate-limit) must be between '1' and '10'") + + if int(rate_time) < 15 or int(rate_time) > 600: + print("") + quit("The rate-time must be between '15' and '600' seconds") + + if int(window_size) < 1 or int(window_size) > 21: + print("") + quit("Window of concurrently valid codes must be between '1' and '21' seconds") + + # generate OTP key, URL & QR: + key_hex = token_hex(20) + key_base32 = b32encode(bytes.fromhex(key_hex)).decode() + + otp_url=''.join(["otpauth://totp/",username,"@",hostname,"?secret=",key_base32,"&digits=",digits,"&period=",period]) + qrcode,err = popen('qrencode -t ansiutf8', input=otp_url) + + print("# You can share it with the user, he just needs to scan the QR in his OTP app") + print("# username: ", username) + print("# OTP KEY: ", key_base32) + print("# OTP URL: ", otp_url) + print(qrcode) + print('# To add this OTP key to configuration, run the following commands:') + print(f"set system login user {username} authentication otp key '{key_base32}'") + if rate_limit != "3": + print(f"set system login user {username} authentication otp rate-limit '{rate_limit}'") + if rate_time != "30": + print(f"set system login user {username} authentication otp rate-time '{rate_time}'") + if window_size != "3": + print(f"set system login user {username} authentication otp window-size '{window_size}'") diff --git a/src/op_mode/interfaces.py b/src/op_mode/interfaces.py new file mode 100755 index 000000000..678c74980 --- /dev/null +++ b/src/op_mode/interfaces.py @@ -0,0 +1,412 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# + +import os +import re +import sys +import glob +import json +import typing +from datetime import datetime +from tabulate import tabulate + +import vyos.opmode +from vyos.ifconfig import Section +from vyos.ifconfig import Interface +from vyos.ifconfig import VRRP +from vyos.util import cmd, rc_cmd, call + +def catch_broken_pipe(func): + def wrapped(*args, **kwargs): + try: + func(*args, **kwargs) + except (BrokenPipeError, KeyboardInterrupt): + # Flush output to /dev/null and bail out. + os.dup2(os.open(os.devnull, os.O_WRONLY), sys.stdout.fileno()) + return wrapped + +# The original implementation of filtered_interfaces has signature: +# (ifnames: list, iftypes: typing.Union[str, list], vif: bool, vrrp: bool) -> intf: Interface: +# Arg types allowed in CLI (ifnames: str, iftypes: str) were manually +# re-typed from argparse args. +# We include the function in a general form, however op-mode standard +# functions will restrict to the CLI-allowed arg types, wrapped in Optional. +def filtered_interfaces(ifnames: typing.Union[str, list], + iftypes: typing.Union[str, list], + vif: bool, vrrp: bool) -> Interface: + """ + get all interfaces from the OS and return them; ifnames can be used to + filter which interfaces should be considered + + ifnames: a list of interface names to consider, empty do not filter + + return an instance of the Interface class + """ + if isinstance(ifnames, str): + ifnames = [ifnames] if ifnames else [] + if isinstance(iftypes, list): + for iftype in iftypes: + yield from filtered_interfaces(ifnames, iftype, vif, vrrp) + + for ifname in Section.interfaces(iftypes): + # Bail out early if interface name not part of our search list + if ifnames and ifname not in ifnames: + continue + + # As we are only "reading" from the interface - we must use the + # generic base class which exposes all the data via a common API + interface = Interface(ifname, create=False, debug=False) + + # VLAN interfaces have a '.' in their name by convention + if vif and not '.' in ifname: + continue + + if vrrp: + vrrp_interfaces = VRRP.active_interfaces() + if ifname not in vrrp_interfaces: + continue + + yield interface + +def _split_text(text, used=0): + """ + take a string and attempt to split it to fit with the width of the screen + + text: the string to split + used: number of characted already used in the screen + """ + no_tty = call('tty -s') + + returned = cmd('stty size') if not no_tty else '' + returned = returned.split() + if len(returned) == 2: + _, columns = tuple(int(_) for _ in returned) + else: + _, columns = (40, 80) + + desc_len = columns - used + + line = '' + for word in text.split(): + if len(line) + len(word) < desc_len: + line = f'{line} {word}' + continue + if line: + yield line[1:] + else: + line = f'{line} {word}' + + yield line[1:] + +def _get_counter_val(prev, now): + """ + attempt to correct a counter if it wrapped, copied from perl + + prev: previous counter + now: the current counter + """ + # This function has to deal with both 32 and 64 bit counters + if prev == 0: + return now + + # device is using 64 bit values assume they never wrap + value = now - prev + if (now >> 32) != 0: + return value + + # The counter has rolled. If the counter has rolled + # multiple times since the prev value, then this math + # is meaningless. 
+ if value < 0: + value = (4294967296 - prev) + now + + return value + +def _pppoe(ifname): + out = cmd('ps -C pppd -f') + if ifname in out: + return 'C' + if ifname in [_.split('/')[-1] for _ in glob.glob('/etc/ppp/peers/pppoe*')]: + return 'D' + return '' + +def _find_intf_by_ifname(intf_l: list, name: str): + for d in intf_l: + if d['ifname'] == name: + return d + return {} + +# lifted out of operational.py to separate formatting from data +def _format_stats(stats, indent=4): + stat_names = { + 'rx': ['bytes', 'packets', 'errors', 'dropped', 'overrun', 'mcast'], + 'tx': ['bytes', 'packets', 'errors', 'dropped', 'carrier', 'collisions'], + } + + stats_dir = { + 'rx': ['rx_bytes', 'rx_packets', 'rx_errors', 'rx_dropped', 'rx_over_errors', 'multicast'], + 'tx': ['tx_bytes', 'tx_packets', 'tx_errors', 'tx_dropped', 'tx_carrier_errors', 'collisions'], + } + tabs = [] + for rtx in list(stats_dir): + tabs.append([f'{rtx.upper()}:', ] + stat_names[rtx]) + tabs.append(['', ] + [stats[_] for _ in stats_dir[rtx]]) + + s = tabulate( + tabs, + stralign="right", + numalign="right", + tablefmt="plain" + ) + + p = ' '*indent + return f'{p}' + s.replace('\n', f'\n{p}') + +def _get_raw_data(ifname: typing.Optional[str], + iftype: typing.Optional[str], + vif: bool, vrrp: bool) -> list: + if ifname is None: + ifname = '' + if iftype is None: + iftype = '' + ret =[] + for interface in filtered_interfaces(ifname, iftype, vif, vrrp): + res_intf = {} + cache = interface.operational.load_counters() + + out = cmd(f'ip -json addr show {interface.ifname}') + res_intf_l = json.loads(out) + res_intf = res_intf_l[0] + + if res_intf['link_type'] == 'tunnel6': + # Note that 'ip -6 tun show {interface.ifname}' is not json + # aware, so find in list + out = cmd('ip -json -6 tun show') + tunnel = json.loads(out) + res_intf['tunnel6'] = _find_intf_by_ifname(tunnel, + interface.ifname) + if 'ip6_tnl_f_use_orig_tclass' in res_intf['tunnel6']: + res_intf['tunnel6']['tclass'] = 'inherit' + del res_intf['tunnel6']['ip6_tnl_f_use_orig_tclass'] + + res_intf['counters_last_clear'] = int(cache.get('timestamp', 0)) + + res_intf['description'] = interface.get_alias() + + res_intf['stats'] = interface.operational.get_stats() + + ret.append(res_intf) + + # find pppoe interfaces that are in a transitional/dead state + if ifname.startswith('pppoe') and not _find_intf_by_ifname(ret, ifname): + pppoe_intf = {} + pppoe_intf['unhandled'] = None + pppoe_intf['ifname'] = ifname + pppoe_intf['state'] = _pppoe(ifname) + ret.append(pppoe_intf) + + return ret + +def _get_summary_data(ifname: typing.Optional[str], + iftype: typing.Optional[str], + vif: bool, vrrp: bool) -> list: + if ifname is None: + ifname = '' + if iftype is None: + iftype = '' + ret = [] + for interface in filtered_interfaces(ifname, iftype, vif, vrrp): + res_intf = {} + + res_intf['ifname'] = interface.ifname + res_intf['oper_state'] = interface.operational.get_state() + res_intf['admin_state'] = interface.get_admin_state() + res_intf['addr'] = [_ for _ in interface.get_addr() if not _.startswith('fe80::')] + res_intf['description'] = interface.get_alias() + + ret.append(res_intf) + + # find pppoe interfaces that are in a transitional/dead state + if ifname.startswith('pppoe') and not _find_intf_by_ifname(ret, ifname): + pppoe_intf = {} + pppoe_intf['unhandled'] = None + pppoe_intf['ifname'] = ifname + pppoe_intf['state'] = _pppoe(ifname) + ret.append(pppoe_intf) + + return ret + +def _get_counter_data(ifname: typing.Optional[str], + iftype: typing.Optional[str], + vif: bool, 
vrrp: bool) -> list: + if ifname is None: + ifname = '' + if iftype is None: + iftype = '' + ret = [] + for interface in filtered_interfaces(ifname, iftype, vif, vrrp): + res_intf = {} + + oper = interface.operational.get_state() + + if oper not in ('up','unknown'): + continue + + stats = interface.operational.get_stats() + cache = interface.operational.load_counters() + res_intf['ifname'] = interface.ifname + res_intf['rx_packets'] = _get_counter_val(cache['rx_packets'], stats['rx_packets']) + res_intf['rx_bytes'] = _get_counter_val(cache['rx_bytes'], stats['rx_bytes']) + res_intf['tx_packets'] = _get_counter_val(cache['tx_packets'], stats['tx_packets']) + res_intf['tx_bytes'] = _get_counter_val(cache['tx_bytes'], stats['tx_bytes']) + + ret.append(res_intf) + + return ret + +@catch_broken_pipe +def _format_show_data(data: list): + unhandled = [] + for intf in data: + if 'unhandled' in intf: + unhandled.append(intf) + continue + # instead of reformatting data, call non-json output: + rc, out = rc_cmd(f"ip addr show {intf['ifname']}") + if rc != 0: + continue + out = re.sub('^\d+:\s+','',out) + # add additional data already collected + if 'tunnel6' in intf: + t6_d = intf['tunnel6'] + t6_str = 'encaplimit %s hoplimit %s tclass %s flowlabel %s (flowinfo %s)' % ( + t6_d.get('encap_limit', ''), t6_d.get('hoplimit', ''), + t6_d.get('tclass', ''), t6_d.get('flowlabel', ''), + t6_d.get('flowinfo', '')) + out = re.sub('(\n\s+)(link/tunnel6)', f'\g<1>{t6_str}\g<1>\g<2>', out) + print(out) + ts = intf.get('counters_last_clear', 0) + if ts: + when = datetime.fromtimestamp(ts).strftime("%a %b %d %R:%S %Z %Y") + print(f' Last clear: {when}') + description = intf.get('description', '') + if description: + print(f' Description: {description}') + + stats = intf.get('stats', {}) + if stats: + print() + print(_format_stats(stats)) + + for intf in unhandled: + string = { + 'C': 'Coming up', + 'D': 'Link down' + }[intf['state']] + print(f"{intf['ifname']}: {string}") + + return 0 + +@catch_broken_pipe +def _format_show_summary(data): + format1 = '%-16s %-33s %-4s %s' + format2 = '%-16s %s' + + print('Codes: S - State, L - Link, u - Up, D - Down, A - Admin Down') + print(format1 % ("Interface", "IP Address", "S/L", "Description")) + print(format1 % ("---------", "----------", "---", "-----------")) + + unhandled = [] + for intf in data: + if 'unhandled' in intf: + unhandled.append(intf) + continue + ifname = [intf['ifname'],] + oper = ['u',] if intf['oper_state'] in ('up', 'unknown') else ['D',] + admin = ['u',] if intf['admin_state'] in ('up', 'unknown') else ['A',] + addrs = intf['addr'] or ['-',] + descs = list(_split_text(intf['description'], 0)) + + while ifname or oper or admin or addrs or descs: + i = ifname.pop(0) if ifname else '' + a = addrs.pop(0) if addrs else '' + d = descs.pop(0) if descs else '' + s = [admin.pop(0)] if admin else [] + l = [oper.pop(0)] if oper else [] + if len(a) < 33: + print(format1 % (i, a, '/'.join(s+l), d)) + else: + print(format2 % (i, a)) + print(format1 % ('', '', '/'.join(s+l), d)) + + for intf in unhandled: + string = { + 'C': 'u/D', + 'D': 'A/D' + }[intf['state']] + print(format1 % (ifname, '', string, '')) + + return 0 + +@catch_broken_pipe +def _format_show_counters(data: list): + formatting = '%-12s %10s %10s %10s %10s' + print(formatting % ('Interface', 'Rx Packets', 'Rx Bytes', 'Tx Packets', 'Tx Bytes')) + + for intf in data: + print(formatting % ( + intf['ifname'], + intf['rx_packets'], + intf['rx_bytes'], + intf['tx_packets'], + intf['tx_bytes'] + )) + + return 
0 + +def show(raw: bool, intf_name: typing.Optional[str], + intf_type: typing.Optional[str], + vif: bool, vrrp: bool): + data = _get_raw_data(intf_name, intf_type, vif, vrrp) + if raw: + return data + return _format_show_data(data) + +def show_summary(raw: bool, intf_name: typing.Optional[str], + intf_type: typing.Optional[str], + vif: bool, vrrp: bool): + data = _get_summary_data(intf_name, intf_type, vif, vrrp) + if raw: + return data + return _format_show_summary(data) + +def show_counters(raw: bool, intf_name: typing.Optional[str], + intf_type: typing.Optional[str], + vif: bool, vrrp: bool): + data = _get_counter_data(intf_name, intf_type, vif, vrrp) + if raw: + return data + return _format_show_counters(data) + +if __name__ == '__main__': + try: + res = vyos.opmode.run(sys.modules[__name__]) + if res: + print(res) + except (ValueError, vyos.opmode.Error) as e: + print(e) + sys.exit(1) diff --git a/src/op_mode/nat.py b/src/op_mode/nat.py index f899eb3dc..cf06de0e9 100755 --- a/src/op_mode/nat.py +++ b/src/op_mode/nat.py @@ -18,23 +18,21 @@ import jmespath import json import sys import xmltodict +import typing -from sys import exit from tabulate import tabulate -from vyos.configquery import ConfigTreeQuery +import vyos.opmode +from vyos.configquery import ConfigTreeQuery from vyos.util import cmd from vyos.util import dict_search -import vyos.opmode - - base = 'nat' unconf_message = 'NAT is not configured' -def _get_xml_translation(direction, family): +def _get_xml_translation(direction, family, address=None): """ Get conntrack XML output --src-nat|--dst-nat """ @@ -42,7 +40,10 @@ def _get_xml_translation(direction, family): opt = '--src-nat' if direction == 'destination': opt = '--dst-nat' - return cmd(f'sudo conntrack --dump --family {family} {opt} --output xml') + tmp = f'conntrack --dump --family {family} {opt} --output xml' + if address: + tmp += f' --src {address}' + return cmd(tmp) def _xml_to_dict(xml): @@ -66,7 +67,7 @@ def _get_json_data(direction, family): if direction == 'destination': chain = 'PREROUTING' family = 'ip6' if family == 'inet6' else 'ip' - return cmd(f'sudo nft --json list chain {family} vyos_nat {chain}') + return cmd(f'nft --json list chain {family} vyos_nat {chain}') def _get_raw_data_rules(direction, family): @@ -82,11 +83,11 @@ def _get_raw_data_rules(direction, family): return rules -def _get_raw_translation(direction, family): +def _get_raw_translation(direction, family, address=None): """ Return: dictionary """ - xml = _get_xml_translation(direction, family) + xml = _get_xml_translation(direction, family, address) if len(xml) == 0: output = {'conntrack': { @@ -231,7 +232,7 @@ def _get_formatted_output_statistics(data, direction): return output -def _get_formatted_translation(dict_data, nat_direction, family): +def _get_formatted_translation(dict_data, nat_direction, family, verbose): data_entries = [] if 'error' in dict_data['conntrack']: return 'Entries not found' @@ -269,14 +270,14 @@ def _get_formatted_translation(dict_data, nat_direction, family): reply_src = f'{reply_src}:{reply_sport}' if reply_sport else reply_src reply_dst = f'{reply_dst}:{reply_dport}' if reply_dport else reply_dst state = meta['state'] if 'state' in meta else '' - mark = meta['mark'] + mark = meta.get('mark', '') zone = meta['zone'] if 'zone' in meta else '' if nat_direction == 'source': - data_entries.append( - [orig_src, reply_dst, proto, timeout, mark, zone]) + tmp = [orig_src, reply_dst, proto, timeout, mark, zone] + data_entries.append(tmp) elif nat_direction == 
'destination': - data_entries.append( - [orig_dst, reply_src, proto, timeout, mark, zone]) + tmp = [orig_dst, reply_src, proto, timeout, mark, zone] + data_entries.append(tmp) headers = ["Pre-NAT", "Post-NAT", "Proto", "Timeout", "Mark", "Zone"] output = tabulate(data_entries, headers, numalign="left") @@ -315,13 +316,20 @@ def show_statistics(raw: bool, direction: str, family: str): @_verify -def show_translations(raw: bool, direction: str, family: str): +def show_translations(raw: bool, direction: + str, family: str, + address: typing.Optional[str], + verbose: typing.Optional[bool]): family = 'ipv6' if family == 'inet6' else 'ipv4' - nat_translation = _get_raw_translation(direction, family) + nat_translation = _get_raw_translation(direction, + family=family, + address=address) + if raw: return nat_translation else: - return _get_formatted_translation(nat_translation, direction, family) + return _get_formatted_translation(nat_translation, direction, family, + verbose) if __name__ == '__main__': diff --git a/src/op_mode/show_acceleration.py b/src/op_mode/show_acceleration.py index 752db3deb..48c31d4d9 100755 --- a/src/op_mode/show_acceleration.py +++ b/src/op_mode/show_acceleration.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019 VyOS maintainers and contributors +# Copyright (C) 2019-2022 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -13,7 +13,6 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -# import sys import os @@ -24,12 +23,11 @@ from vyos.config import Config from vyos.util import popen from vyos.util import call - def detect_qat_dev(): - output, err = popen('sudo lspci -nn', decode='utf-8') + output, err = popen('lspci -nn', decode='utf-8') if not err: data = re.findall('(8086:19e2)|(8086:37c8)|(8086:0435)|(8086:6f54)', output) - #If QAT devices found + # QAT devices found if data: return print("\t No QAT device found") @@ -44,11 +42,11 @@ def show_qat_status(): sys.exit(1) # Show QAT service - call('sudo /etc/init.d/qat_service status') + call('/etc/init.d/qat_service status') # Return QAT devices def get_qat_devices(): - data_st, err = popen('sudo /etc/init.d/qat_service status', decode='utf-8') + data_st, err = popen('/etc/init.d/qat_service status', decode='utf-8') if not err: elm_lst = re.findall('qat_dev\d', data_st) print('\n'.join(elm_lst)) @@ -57,7 +55,7 @@ def get_qat_devices(): def get_qat_proc_path(qat_dev): q_type = "" q_bsf = "" - output, err = popen('sudo /etc/init.d/qat_service status', decode='utf-8') + output, err = popen('/etc/init.d/qat_service status', decode='utf-8') if not err: # Parse QAT service output data_st = output.split("\n") @@ -95,20 +93,20 @@ args = parser.parse_args() if args.hw: detect_qat_dev() # Show availible Intel QAT devices - call('sudo lspci -nn | egrep -e \'8086:37c8|8086:19e2|8086:0435|8086:6f54\'') + call('lspci -nn | egrep -e \'8086:37c8|8086:19e2|8086:0435|8086:6f54\'') elif args.flow and args.dev: check_qat_if_conf() - call('sudo cat '+get_qat_proc_path(args.dev)+"fw_counters") + call('cat '+get_qat_proc_path(args.dev)+"fw_counters") elif args.interrupts: check_qat_if_conf() # Delete _dev from args.dev - call('sudo cat /proc/interrupts | grep qat') + call('cat /proc/interrupts | grep qat') elif args.status: check_qat_if_conf() show_qat_status() elif args.conf and args.dev: 
check_qat_if_conf() - call('sudo cat '+get_qat_proc_path(args.dev)+"dev_cfg") + call('cat '+get_qat_proc_path(args.dev)+"dev_cfg") elif args.dev_list: get_qat_devices() else: diff --git a/src/op_mode/show_dhcp.py b/src/op_mode/show_dhcp.py deleted file mode 100755 index 4b1758eea..000000000 --- a/src/op_mode/show_dhcp.py +++ /dev/null @@ -1,260 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2018-2021 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -# -# TODO: merge with show_dhcpv6.py - -from json import dumps -from argparse import ArgumentParser -from ipaddress import ip_address -from tabulate import tabulate -from sys import exit -from collections import OrderedDict -from datetime import datetime - -from isc_dhcp_leases import Lease, IscDhcpLeases - -from vyos.base import Warning -from vyos.config import Config -from vyos.util import is_systemd_service_running - -lease_file = "/config/dhcpd.leases" -pool_key = "shared-networkname" - -lease_display_fields = OrderedDict() -lease_display_fields['ip'] = 'IP address' -lease_display_fields['hardware_address'] = 'Hardware address' -lease_display_fields['state'] = 'State' -lease_display_fields['start'] = 'Lease start' -lease_display_fields['end'] = 'Lease expiration' -lease_display_fields['remaining'] = 'Remaining' -lease_display_fields['pool'] = 'Pool' -lease_display_fields['hostname'] = 'Hostname' - -lease_valid_states = ['all', 'active', 'free', 'expired', 'released', 'abandoned', 'reset', 'backup'] - -def in_pool(lease, pool): - if pool_key in lease.sets: - if lease.sets[pool_key] == pool: - return True - - return False - -def utc_to_local(utc_dt): - return datetime.fromtimestamp((utc_dt - datetime(1970,1,1)).total_seconds()) - -def get_lease_data(lease): - data = {} - - # isc-dhcp lease times are in UTC so we need to convert them to local time to display - try: - data["start"] = utc_to_local(lease.start).strftime("%Y/%m/%d %H:%M:%S") - except: - data["start"] = "" - - try: - data["end"] = utc_to_local(lease.end).strftime("%Y/%m/%d %H:%M:%S") - except: - data["end"] = "" - - try: - data["remaining"] = lease.end - datetime.utcnow() - # negative timedelta prints wrong so bypass it - if (data["remaining"].days >= 0): - # substraction gives us a timedelta object which can't be formatted with strftime - # so we use str(), split gets rid of the microseconds - data["remaining"] = str(data["remaining"]).split('.')[0] - else: - data["remaining"] = "" - except: - data["remaining"] = "" - - # currently not used but might come in handy - # todo: parse into datetime string - for prop in ['tstp', 'tsfp', 'atsfp', 'cltt']: - if prop in lease.data: - data[prop] = lease.data[prop] - else: - data[prop] = '' - - data["hardware_address"] = lease.ethernet - data["hostname"] = lease.hostname - - data["state"] = lease.binding_state - data["ip"] = lease.ip - - try: - data["pool"] = lease.sets[pool_key] - except: - data["pool"] = "" - - return data - -def get_leases(config, leases, state, 
pool=None, sort='ip'): - # get leases from file - leases = IscDhcpLeases(lease_file).get() - - # filter leases by state - if 'all' not in state: - leases = list(filter(lambda x: x.binding_state in state, leases)) - - # filter leases by pool name - if pool is not None: - if config.exists_effective("service dhcp-server shared-network-name {0}".format(pool)): - leases = list(filter(lambda x: in_pool(x, pool), leases)) - else: - print("Pool {0} does not exist.".format(pool)) - exit(0) - - # should maybe filter all state=active by lease.valid here? - - # sort by start time to dedupe (newest lease overrides older) - leases = sorted(leases, key = lambda lease: lease.start) - - # dedupe by converting to dict - leases_dict = {} - for lease in leases: - # dedupe by IP - leases_dict[lease.ip] = lease - - # convert the lease data - leases = list(map(get_lease_data, leases_dict.values())) - - # apply output/display sort - if sort == 'ip': - leases = sorted(leases, key = lambda lease: int(ip_address(lease['ip']))) - else: - leases = sorted(leases, key = lambda lease: lease[sort]) - - return leases - -def show_leases(leases): - lease_list = [] - for l in leases: - lease_list_params = [] - for k in lease_display_fields.keys(): - lease_list_params.append(l[k]) - lease_list.append(lease_list_params) - - output = tabulate(lease_list, lease_display_fields.values()) - - print(output) - -def get_pool_size(config, pool): - size = 0 - subnets = config.list_effective_nodes("service dhcp-server shared-network-name {0} subnet".format(pool)) - for s in subnets: - ranges = config.list_effective_nodes("service dhcp-server shared-network-name {0} subnet {1} range".format(pool, s)) - for r in ranges: - start = config.return_effective_value("service dhcp-server shared-network-name {0} subnet {1} range {2} start".format(pool, s, r)) - stop = config.return_effective_value("service dhcp-server shared-network-name {0} subnet {1} range {2} stop".format(pool, s, r)) - - # Add +1 because both range boundaries are inclusive - size += int(ip_address(stop)) - int(ip_address(start)) + 1 - - return size - -def show_pool_stats(stats): - headers = ["Pool", "Size", "Leases", "Available", "Usage"] - output = tabulate(stats, headers) - - print(output) - -if __name__ == '__main__': - parser = ArgumentParser() - - group = parser.add_mutually_exclusive_group() - group.add_argument("-l", "--leases", action="store_true", help="Show DHCP leases") - group.add_argument("-s", "--statistics", action="store_true", help="Show DHCP statistics") - group.add_argument("--allowed", type=str, choices=["sort", "state"], help="Show allowed values for argument") - - parser.add_argument("-p", "--pool", type=str, help="Show lease for specific pool") - parser.add_argument("-S", "--sort", type=str, default='ip', help="Sort by") - parser.add_argument("-t", "--state", type=str, nargs="+", default=["active"], help="Lease state to show (can specify multiple with spaces)") - parser.add_argument("-j", "--json", action="store_true", default=False, help="Produce JSON output") - - args = parser.parse_args() - - conf = Config() - - if args.allowed == 'sort': - print(' '.join(lease_display_fields.keys())) - exit(0) - elif args.allowed == 'state': - print(' '.join(lease_valid_states)) - exit(0) - elif args.allowed: - parser.print_help() - exit(1) - - if args.sort not in lease_display_fields.keys(): - print(f'Invalid sort key, choose from: {list(lease_display_fields.keys())}') - exit(0) - - if not set(args.state) < set(lease_valid_states): - print(f'Invalid lease state, 
choose from: {lease_valid_states}') - exit(0) - - # Do nothing if service is not configured - if not conf.exists_effective('service dhcp-server'): - print("DHCP service is not configured.") - exit(0) - - # if dhcp server is down, inactive leases may still be shown as active, so warn the user. - if not is_systemd_service_running('isc-dhcp-server.service'): - Warning('DHCP server is configured but not started. Data may be stale.') - - if args.leases: - leases = get_leases(conf, lease_file, args.state, args.pool, args.sort) - - if args.json: - print(dumps(leases, indent=4)) - else: - show_leases(leases) - - elif args.statistics: - pools = [] - - # Get relevant pools - if args.pool: - pools = [args.pool] - else: - pools = conf.list_effective_nodes("service dhcp-server shared-network-name") - - # Get pool usage stats - stats = [] - for p in pools: - size = get_pool_size(conf, p) - leases = len(get_leases(conf, lease_file, state='active', pool=p)) - - use_percentage = round(leases / size * 100) if size != 0 else 0 - - if args.json: - pool_stats = {"pool": p, "size": size, "leases": leases, - "available": (size - leases), "percentage": use_percentage} - else: - # For tabulate - pool_stats = [p, size, leases, size - leases, "{0}%".format(use_percentage)] - stats.append(pool_stats) - - # Print stats - if args.json: - print(dumps(stats, indent=4)) - else: - show_pool_stats(stats) - - else: - parser.print_help() - exit(1) diff --git a/src/op_mode/show_dhcpv6.py b/src/op_mode/show_dhcpv6.py deleted file mode 100755 index b34b730e6..000000000 --- a/src/op_mode/show_dhcpv6.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2018-2021 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
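Aside (not part of the patch): both legacy scripts removed in this change (show_dhcp.py above, show_dhcpv6.py below) and their replacement in op_mode/dhcp.py sort leases through ipaddress.ip_address rather than by plain string comparison. A small illustration of why, using made-up addresses:

    from ipaddress import ip_address

    leases = [{'ip': '192.168.0.10'}, {'ip': '192.168.0.2'}]

    # A lexicographic sort would place '192.168.0.10' before '192.168.0.2';
    # converting the key to ip_address sorts numerically instead.
    leases.sort(key=lambda x: ip_address(x['ip']))
    # -> [{'ip': '192.168.0.2'}, {'ip': '192.168.0.10'}]
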
-# -# TODO: merge with show_dhcp.py - -from json import dumps -from argparse import ArgumentParser -from ipaddress import ip_address -from tabulate import tabulate -from sys import exit -from collections import OrderedDict -from datetime import datetime - -from isc_dhcp_leases import Lease, IscDhcpLeases - -from vyos.base import Warning -from vyos.config import Config -from vyos.util import is_systemd_service_running - -lease_file = "/config/dhcpdv6.leases" -pool_key = "shared-networkname" - -lease_display_fields = OrderedDict() -lease_display_fields['ip'] = 'IPv6 address' -lease_display_fields['state'] = 'State' -lease_display_fields['last_comm'] = 'Last communication' -lease_display_fields['expires'] = 'Lease expiration' -lease_display_fields['remaining'] = 'Remaining' -lease_display_fields['type'] = 'Type' -lease_display_fields['pool'] = 'Pool' -lease_display_fields['iaid_duid'] = 'IAID_DUID' - -lease_valid_states = ['all', 'active', 'free', 'expired', 'released', 'abandoned', 'reset', 'backup'] - -def in_pool(lease, pool): - if pool_key in lease.sets: - if lease.sets[pool_key] == pool: - return True - - return False - -def format_hex_string(in_str): - out_str = "" - - # if input is divisible by 2, add : every 2 chars - if len(in_str) > 0 and len(in_str) % 2 == 0: - out_str = ':'.join(a+b for a,b in zip(in_str[::2], in_str[1::2])) - else: - out_str = in_str - - return out_str - -def utc_to_local(utc_dt): - return datetime.fromtimestamp((utc_dt - datetime(1970,1,1)).total_seconds()) - -def get_lease_data(lease): - data = {} - - # isc-dhcp lease times are in UTC so we need to convert them to local time to display - try: - data["expires"] = utc_to_local(lease.end).strftime("%Y/%m/%d %H:%M:%S") - except: - data["expires"] = "" - - try: - data["last_comm"] = utc_to_local(lease.last_communication).strftime("%Y/%m/%d %H:%M:%S") - except: - data["last_comm"] = "" - - try: - data["remaining"] = lease.end - datetime.utcnow() - # negative timedelta prints wrong so bypass it - if (data["remaining"].days >= 0): - # substraction gives us a timedelta object which can't be formatted with strftime - # so we use str(), split gets rid of the microseconds - data["remaining"] = str(data["remaining"]).split('.')[0] - else: - data["remaining"] = "" - except: - data["remaining"] = "" - - # isc-dhcp records lease declarations as ia_{na|ta|pd} IAID_DUID {...} - # where IAID_DUID is the combined IAID and DUID - data["iaid_duid"] = format_hex_string(lease.host_identifier_string) - - lease_types_long = {"na": "non-temporary", "ta": "temporary", "pd": "prefix delegation"} - data["type"] = lease_types_long[lease.type] - - data["state"] = lease.binding_state - data["ip"] = lease.ip - - try: - data["pool"] = lease.sets[pool_key] - except: - data["pool"] = "" - - return data - -def get_leases(config, leases, state, pool=None, sort='ip'): - leases = IscDhcpLeases(lease_file).get() - - # filter leases by state - if 'all' not in state: - leases = list(filter(lambda x: x.binding_state in state, leases)) - - # filter leases by pool name - if pool is not None: - if config.exists_effective("service dhcp-server shared-network-name {0}".format(pool)): - leases = list(filter(lambda x: in_pool(x, pool), leases)) - else: - print("Pool {0} does not exist.".format(pool)) - exit(0) - - # should maybe filter all state=active by lease.valid here? 
- - # sort by last_comm time to dedupe (newest lease overrides older) - leases = sorted(leases, key = lambda lease: lease.last_communication) - - # dedupe by converting to dict - leases_dict = {} - for lease in leases: - # dedupe by IP - leases_dict[lease.ip] = lease - - # convert the lease data - leases = list(map(get_lease_data, leases_dict.values())) - - # apply output/display sort - if sort == 'ip': - leases = sorted(leases, key = lambda k: int(ip_address(k['ip'].split('/')[0]))) - else: - leases = sorted(leases, key = lambda k: k[sort]) - - return leases - -def show_leases(leases): - lease_list = [] - for l in leases: - lease_list_params = [] - for k in lease_display_fields.keys(): - lease_list_params.append(l[k]) - lease_list.append(lease_list_params) - - output = tabulate(lease_list, lease_display_fields.values()) - - print(output) - -if __name__ == '__main__': - parser = ArgumentParser() - - group = parser.add_mutually_exclusive_group() - group.add_argument("-l", "--leases", action="store_true", help="Show DHCPv6 leases") - group.add_argument("-s", "--statistics", action="store_true", help="Show DHCPv6 statistics") - group.add_argument("--allowed", type=str, choices=["pool", "sort", "state"], help="Show allowed values for argument") - - parser.add_argument("-p", "--pool", type=str, help="Show lease for specific pool") - parser.add_argument("-S", "--sort", type=str, default='ip', help="Sort by") - parser.add_argument("-t", "--state", type=str, nargs="+", default=["active"], help="Lease state to show (can specify multiple with spaces)") - parser.add_argument("-j", "--json", action="store_true", default=False, help="Produce JSON output") - - args = parser.parse_args() - - conf = Config() - - if args.allowed == 'pool': - if conf.exists_effective('service dhcpv6-server'): - print(' '.join(conf.list_effective_nodes("service dhcpv6-server shared-network-name"))) - exit(0) - elif args.allowed == 'sort': - print(' '.join(lease_display_fields.keys())) - exit(0) - elif args.allowed == 'state': - print(' '.join(lease_valid_states)) - exit(0) - elif args.allowed: - parser.print_help() - exit(1) - - if args.sort not in lease_display_fields.keys(): - print(f'Invalid sort key, choose from: {list(lease_display_fields.keys())}') - exit(0) - - if not set(args.state) < set(lease_valid_states): - print(f'Invalid lease state, choose from: {lease_valid_states}') - exit(0) - - # Do nothing if service is not configured - if not conf.exists_effective('service dhcpv6-server'): - print("DHCPv6 service is not configured") - exit(0) - - # if dhcp server is down, inactive leases may still be shown as active, so warn the user. - if not is_systemd_service_running('isc-dhcp-server6.service'): - Warning('DHCPv6 server is configured but not started. 
Data may be stale.') - - if args.leases: - leases = get_leases(conf, lease_file, args.state, args.pool, args.sort) - - if args.json: - print(dumps(leases, indent=4)) - else: - show_leases(leases) - elif args.statistics: - print("DHCPv6 statistics option is not available") - else: - parser.print_help() - exit(1) diff --git a/src/op_mode/show_ipsec_sa.py b/src/op_mode/show_ipsec_sa.py deleted file mode 100755 index 5b8f00dba..000000000 --- a/src/op_mode/show_ipsec_sa.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2022 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -from re import split as re_split -from sys import exit - -from hurry import filesize -from tabulate import tabulate -from vici import Session as vici_session - -from vyos.util import seconds_to_human - - -def convert(text): - return int(text) if text.isdigit() else text.lower() - - -def alphanum_key(key): - return [convert(c) for c in re_split('([0-9]+)', str(key))] - - -def format_output(sas): - sa_data = [] - - for sa in sas: - for parent_sa in sa.values(): - # create an item for each child-sa - for child_sa in parent_sa.get('child-sas', {}).values(): - # prepare a list for output data - sa_out_name = sa_out_state = sa_out_uptime = sa_out_bytes = sa_out_packets = sa_out_remote_addr = sa_out_remote_id = sa_out_proposal = 'N/A' - - # collect raw data - sa_name = child_sa.get('name') - sa_state = child_sa.get('state') - sa_uptime = child_sa.get('install-time') - sa_bytes_in = child_sa.get('bytes-in') - sa_bytes_out = child_sa.get('bytes-out') - sa_packets_in = child_sa.get('packets-in') - sa_packets_out = child_sa.get('packets-out') - sa_remote_addr = parent_sa.get('remote-host') - sa_remote_id = parent_sa.get('remote-id') - sa_proposal_encr_alg = child_sa.get('encr-alg') - sa_proposal_integ_alg = child_sa.get('integ-alg') - sa_proposal_encr_keysize = child_sa.get('encr-keysize') - sa_proposal_dh_group = child_sa.get('dh-group') - - # format data to display - if sa_name: - sa_out_name = sa_name.decode() - if sa_state: - if sa_state == b'INSTALLED': - sa_out_state = 'up' - else: - sa_out_state = 'down' - if sa_uptime: - sa_out_uptime = seconds_to_human(sa_uptime.decode()) - if sa_bytes_in and sa_bytes_out: - bytes_in = filesize.size(int(sa_bytes_in.decode())) - bytes_out = filesize.size(int(sa_bytes_out.decode())) - sa_out_bytes = f'{bytes_in}/{bytes_out}' - if sa_packets_in and sa_packets_out: - packets_in = filesize.size(int(sa_packets_in.decode()), - system=filesize.si) - packets_out = filesize.size(int(sa_packets_out.decode()), - system=filesize.si) - sa_out_packets = f'{packets_in}/{packets_out}' - if sa_remote_addr: - sa_out_remote_addr = sa_remote_addr.decode() - if sa_remote_id: - sa_out_remote_id = sa_remote_id.decode() - # format proposal - if sa_proposal_encr_alg: - sa_out_proposal = sa_proposal_encr_alg.decode() - if sa_proposal_encr_keysize: - sa_proposal_encr_keysize_str = sa_proposal_encr_keysize.decode() - 
sa_out_proposal = f'{sa_out_proposal}_{sa_proposal_encr_keysize_str}' - if sa_proposal_integ_alg: - sa_proposal_integ_alg_str = sa_proposal_integ_alg.decode() - sa_out_proposal = f'{sa_out_proposal}/{sa_proposal_integ_alg_str}' - if sa_proposal_dh_group: - sa_proposal_dh_group_str = sa_proposal_dh_group.decode() - sa_out_proposal = f'{sa_out_proposal}/{sa_proposal_dh_group_str}' - - # add a new item to output data - sa_data.append([ - sa_out_name, sa_out_state, sa_out_uptime, sa_out_bytes, - sa_out_packets, sa_out_remote_addr, sa_out_remote_id, - sa_out_proposal - ]) - - # return output data - return sa_data - - -if __name__ == '__main__': - try: - session = vici_session() - sas = list(session.list_sas()) - - sa_data = format_output(sas) - sa_data = sorted(sa_data, key=alphanum_key) - - headers = [ - "Connection", "State", "Uptime", "Bytes In/Out", "Packets In/Out", - "Remote address", "Remote ID", "Proposal" - ] - output = tabulate(sa_data, headers) - print(output) - except PermissionError: - print("You do not have a permission to connect to the IPsec daemon") - exit(1) - except ConnectionRefusedError: - print("IPsec is not runing") - exit(1) - except Exception as e: - print("An error occured: {0}".format(e)) - exit(1) diff --git a/src/op_mode/show_nat66_statistics.py b/src/op_mode/show_nat66_statistics.py deleted file mode 100755 index cb10aed9f..000000000 --- a/src/op_mode/show_nat66_statistics.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2018 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
- -import jmespath -import json - -from argparse import ArgumentParser -from jinja2 import Template -from sys import exit -from vyos.util import cmd - -OUT_TMPL_SRC=""" -rule pkts bytes interface ----- ---- ----- --------- -{% for r in output %} -{% if r.comment %} -{% set packets = r.counter.packets %} -{% set bytes = r.counter.bytes %} -{% set interface = r.interface %} -{# remove rule comment prefix #} -{% set comment = r.comment | replace('SRC-NAT66-', '') | replace('DST-NAT66-', '') %} -{{ "%-4s" | format(comment) }} {{ "%9s" | format(packets) }} {{ "%12s" | format(bytes) }} {{ interface }} -{% endif %} -{% endfor %} -""" - -parser = ArgumentParser() -group = parser.add_mutually_exclusive_group() -group.add_argument("--source", help="Show statistics for configured source NAT rules", action="store_true") -group.add_argument("--destination", help="Show statistics for configured destination NAT rules", action="store_true") -args = parser.parse_args() - -if args.source or args.destination: - tmp = cmd('sudo nft -j list table ip6 vyos_nat') - tmp = json.loads(tmp) - - source = r"nftables[?rule.chain=='POSTROUTING'].rule.{chain: chain, handle: handle, comment: comment, counter: expr[].counter | [0], interface: expr[].match.right | [0] }" - destination = r"nftables[?rule.chain=='PREROUTING'].rule.{chain: chain, handle: handle, comment: comment, counter: expr[].counter | [0], interface: expr[].match.right | [0] }" - data = { - 'output' : jmespath.search(source if args.source else destination, tmp), - 'direction' : 'source' if args.source else 'destination' - } - - tmpl = Template(OUT_TMPL_SRC, lstrip_blocks=True) - print(tmpl.render(data)) - exit(0) -else: - parser.print_help() - exit(1) - diff --git a/src/op_mode/show_nat66_translations.py b/src/op_mode/show_nat66_translations.py deleted file mode 100755 index 045d64065..000000000 --- a/src/op_mode/show_nat66_translations.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2020 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
- -''' -show nat translations -''' - -import os -import sys -import ipaddress -import argparse -import xmltodict - -from vyos.util import popen -from vyos.util import DEVNULL - -conntrack = '/usr/sbin/conntrack' - -verbose_format = "%-20s %-18s %-20s %-18s" -normal_format = "%-20s %-20s %-4s %-8s %s" - - -def headers(verbose, pipe): - if verbose: - return verbose_format % ('Pre-NAT src', 'Pre-NAT dst', 'Post-NAT src', 'Post-NAT dst') - return normal_format % ('Pre-NAT', 'Post-NAT', 'Prot', 'Timeout', 'Type' if pipe else '') - - -def command(srcdest, proto, ipaddr): - command = f'{conntrack} -o xml -L -f ipv6' - - if proto: - command += f' -p {proto}' - - if srcdest == 'source': - command += ' -n' - if ipaddr: - command += f' --orig-src {ipaddr}' - if srcdest == 'destination': - command += ' -g' - if ipaddr: - command += f' --orig-dst {ipaddr}' - - return command - - -def run(command): - xml, code = popen(command,stderr=DEVNULL) - if code: - sys.exit('conntrack failed') - return xml - - -def content(xmlfile): - xml = '' - with open(xmlfile,'r') as r: - xml += r.read() - return xml - - -def pipe(): - xml = '' - while True: - line = sys.stdin.readline() - xml += line - if '</conntrack>' in line: - break - - sys.stdin = open('/dev/tty') - return xml - - -def process(data, stats, protocol, pipe, verbose, flowtype=''): - if not data: - return - - parsed = xmltodict.parse(data) - - print(headers(verbose, pipe)) - - # to help the linter to detect typos - ORIGINAL = 'original' - REPLY = 'reply' - INDEPENDANT = 'independent' - SPORT = 'sport' - DPORT = 'dport' - SRC = 'src' - DST = 'dst' - - for rule in parsed['conntrack']['flow']: - src, dst, sport, dport, proto = {}, {}, {}, {}, {} - packet_count, byte_count = {}, {} - timeout, use = 0, 0 - - rule_type = rule.get('type', '') - - for meta in rule['meta']: - # print(meta) - direction = meta['@direction'] - - if direction in (ORIGINAL, REPLY): - if 'layer3' in meta: - l3 = meta['layer3'] - src[direction] = l3[SRC] - dst[direction] = l3[DST] - - if 'layer4' in meta: - l4 = meta['layer4'] - sp = l4.get(SPORT, '') - dp = l4.get(DPORT, '') - if sp: - sport[direction] = sp - if dp: - dport[direction] = dp - proto[direction] = l4.get('@protoname','') - - if stats and 'counters' in meta: - packet_count[direction] = meta['packets'] - byte_count[direction] = meta['bytes'] - continue - - if direction == INDEPENDANT: - timeout = meta['timeout'] - use = meta['use'] - continue - - in_src = '%s:%s' % (src[ORIGINAL], sport[ORIGINAL]) if ORIGINAL in sport else src[ORIGINAL] - in_dst = '%s:%s' % (dst[ORIGINAL], dport[ORIGINAL]) if ORIGINAL in dport else dst[ORIGINAL] - - # inverted the the perl code !!? 
- out_dst = '%s:%s' % (dst[REPLY], dport[REPLY]) if REPLY in dport else dst[REPLY] - out_src = '%s:%s' % (src[REPLY], sport[REPLY]) if REPLY in sport else src[REPLY] - - if flowtype == 'source': - v = ORIGINAL in sport and REPLY in dport - f = '%s:%s' % (src[ORIGINAL], sport[ORIGINAL]) if v else src[ORIGINAL] - t = '%s:%s' % (dst[REPLY], dport[REPLY]) if v else dst[REPLY] - else: - v = ORIGINAL in dport and REPLY in sport - f = '%s:%s' % (dst[ORIGINAL], dport[ORIGINAL]) if v else dst[ORIGINAL] - t = '%s:%s' % (src[REPLY], sport[REPLY]) if v else src[REPLY] - - # Thomas: I do not believe proto should be an option - p = proto.get('original', '') - if protocol and p != protocol: - continue - - if verbose: - msg = verbose_format % (in_src, in_dst, out_dst, out_src) - p = f'{p}: ' if p else '' - msg += f'\n {p}{f} ==> {t}' - msg += f' timeout: {timeout}' if timeout else '' - msg += f' use: {use} ' if use else '' - msg += f' type: {rule_type}' if rule_type else '' - print(msg) - else: - print(normal_format % (f, t, p, timeout, rule_type if rule_type else '')) - - if stats: - for direction in ('original', 'reply'): - if direction in packet_count: - print(' %-8s: packets %s, bytes %s' % direction, packet_count[direction], byte_count[direction]) - - -def main(): - parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__) - parser.add_argument('--verbose', help='provide more details about the flows', action='store_true') - parser.add_argument('--proto', help='filter by protocol', default='', type=str) - parser.add_argument('--file', help='read the conntrack xml from a file', type=str) - parser.add_argument('--stats', help='add usage statistics', action='store_true') - parser.add_argument('--type', help='NAT type (source, destination)', required=True, type=str) - parser.add_argument('--ipaddr', help='source ip address to filter on', type=ipaddress.ip_address) - parser.add_argument('--pipe', help='read conntrack xml data from stdin', action='store_true') - - arg = parser.parse_args() - - if arg.type not in ('source', 'destination'): - sys.exit('Unknown NAT type!') - - if arg.pipe: - process(pipe(), arg.stats, arg.proto, arg.pipe, arg.verbose, arg.type) - elif arg.file: - process(content(arg.file), arg.stats, arg.proto, arg.pipe, arg.verbose, arg.type) - else: - try: - process(run(command(arg.type, arg.proto, arg.ipaddr)), arg.stats, arg.proto, arg.pipe, arg.verbose, arg.type) - except: - pass - -if __name__ == '__main__': - main() diff --git a/src/op_mode/show_nat_statistics.py b/src/op_mode/show_nat_statistics.py deleted file mode 100755 index be41e083b..000000000 --- a/src/op_mode/show_nat_statistics.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2018 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
- -import jmespath -import json - -from argparse import ArgumentParser -from jinja2 import Template -from sys import exit -from vyos.util import cmd - -OUT_TMPL_SRC=""" -rule pkts bytes interface ----- ---- ----- --------- -{% for r in output %} -{% if r.comment %} -{% set packets = r.counter.packets %} -{% set bytes = r.counter.bytes %} -{% set interface = r.interface %} -{# remove rule comment prefix #} -{% set comment = r.comment | replace('SRC-NAT-', '') | replace('DST-NAT-', '') | replace(' tcp_udp', '') %} -{{ "%-4s" | format(comment) }} {{ "%9s" | format(packets) }} {{ "%12s" | format(bytes) }} {{ interface }} -{% endif %} -{% endfor %} -""" - -parser = ArgumentParser() -group = parser.add_mutually_exclusive_group() -group.add_argument("--source", help="Show statistics for configured source NAT rules", action="store_true") -group.add_argument("--destination", help="Show statistics for configured destination NAT rules", action="store_true") -args = parser.parse_args() - -if args.source or args.destination: - tmp = cmd('sudo nft -j list table ip vyos_nat') - tmp = json.loads(tmp) - - source = r"nftables[?rule.chain=='POSTROUTING'].rule.{chain: chain, handle: handle, comment: comment, counter: expr[].counter | [0], interface: expr[].match.right | [0] }" - destination = r"nftables[?rule.chain=='PREROUTING'].rule.{chain: chain, handle: handle, comment: comment, counter: expr[].counter | [0], interface: expr[].match.right | [0] }" - data = { - 'output' : jmespath.search(source if args.source else destination, tmp), - 'direction' : 'source' if args.source else 'destination' - } - - tmpl = Template(OUT_TMPL_SRC, lstrip_blocks=True) - print(tmpl.render(data)) - exit(0) -else: - parser.print_help() - exit(1) - diff --git a/src/op_mode/show_nat_translations.py b/src/op_mode/show_nat_translations.py deleted file mode 100755 index 508845e23..000000000 --- a/src/op_mode/show_nat_translations.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2020-2022 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
- -''' -show nat translations -''' - -import os -import sys -import ipaddress -import argparse -import xmltodict - -from vyos.util import popen -from vyos.util import DEVNULL - -conntrack = '/usr/sbin/conntrack' - -verbose_format = "%-20s %-18s %-20s %-18s" -normal_format = "%-20s %-20s %-4s %-8s %s" - - -def headers(verbose, pipe): - if verbose: - return verbose_format % ('Pre-NAT src', 'Pre-NAT dst', 'Post-NAT src', 'Post-NAT dst') - return normal_format % ('Pre-NAT', 'Post-NAT', 'Prot', 'Timeout', 'Type' if pipe else '') - - -def command(srcdest, proto, ipaddr): - command = f'{conntrack} -o xml -L' - - if proto: - command += f' -p {proto}' - - if srcdest == 'source': - command += ' -n' - if ipaddr: - command += f' --orig-src {ipaddr}' - if srcdest == 'destination': - command += ' -g' - if ipaddr: - command += f' --orig-dst {ipaddr}' - - return command - - -def run(command): - xml, code = popen(command,stderr=DEVNULL) - if code: - sys.exit('conntrack failed') - return xml - - -def content(xmlfile): - xml = '' - with open(xmlfile,'r') as r: - xml += r.read() - return xml - - -def pipe(): - xml = '' - while True: - line = sys.stdin.readline() - xml += line - if '</conntrack>' in line: - break - - sys.stdin = open('/dev/tty') - return xml - - -def xml_to_dict(xml): - """ - Convert XML to dictionary - Return: dictionary - """ - parse = xmltodict.parse(xml) - # If only one NAT entry we must change dict T4499 - if 'meta' in parse['conntrack']['flow']: - return dict(conntrack={'flow': [parse['conntrack']['flow']]}) - return parse - - -def process(data, stats, protocol, pipe, verbose, flowtype=''): - if not data: - return - - parsed = xml_to_dict(data) - - print(headers(verbose, pipe)) - - # to help the linter to detect typos - ORIGINAL = 'original' - REPLY = 'reply' - INDEPENDANT = 'independent' - SPORT = 'sport' - DPORT = 'dport' - SRC = 'src' - DST = 'dst' - - for rule in parsed['conntrack']['flow']: - src, dst, sport, dport, proto = {}, {}, {}, {}, {} - packet_count, byte_count = {}, {} - timeout, use = 0, 0 - - rule_type = rule.get('type', '') - - for meta in rule['meta']: - # print(meta) - direction = meta['@direction'] - - if direction in (ORIGINAL, REPLY): - if 'layer3' in meta: - l3 = meta['layer3'] - src[direction] = l3[SRC] - dst[direction] = l3[DST] - - if 'layer4' in meta: - l4 = meta['layer4'] - sp = l4.get(SPORT, '') - dp = l4.get(DPORT, '') - if sp: - sport[direction] = sp - if dp: - dport[direction] = dp - proto[direction] = l4.get('@protoname','') - - if stats and 'counters' in meta: - packet_count[direction] = meta['packets'] - byte_count[direction] = meta['bytes'] - continue - - if direction == INDEPENDANT: - timeout = meta['timeout'] - use = meta['use'] - continue - - in_src = '%s:%s' % (src[ORIGINAL], sport[ORIGINAL]) if ORIGINAL in sport else src[ORIGINAL] - in_dst = '%s:%s' % (dst[ORIGINAL], dport[ORIGINAL]) if ORIGINAL in dport else dst[ORIGINAL] - - # inverted the the perl code !!? 
-        out_dst = '%s:%s' % (dst[REPLY], dport[REPLY]) if REPLY in dport else dst[REPLY]
-        out_src = '%s:%s' % (src[REPLY], sport[REPLY]) if REPLY in sport else src[REPLY]
-
-        if flowtype == 'source':
-            v = ORIGINAL in sport and REPLY in dport
-            f = '%s:%s' % (src[ORIGINAL], sport[ORIGINAL]) if v else src[ORIGINAL]
-            t = '%s:%s' % (dst[REPLY], dport[REPLY]) if v else dst[REPLY]
-        else:
-            v = ORIGINAL in dport and REPLY in sport
-            f = '%s:%s' % (dst[ORIGINAL], dport[ORIGINAL]) if v else dst[ORIGINAL]
-            t = '%s:%s' % (src[REPLY], sport[REPLY]) if v else src[REPLY]
-
-        # Thomas: I do not believe proto should be an option
-        p = proto.get('original', '')
-        if protocol and p != protocol:
-            continue
-
-        if verbose:
-            msg = verbose_format % (in_src, in_dst, out_dst, out_src)
-            p = f'{p}: ' if p else ''
-            msg += f'\n {p}{f} ==> {t}'
-            msg += f' timeout: {timeout}' if timeout else ''
-            msg += f' use: {use} ' if use else ''
-            msg += f' type: {rule_type}' if rule_type else ''
-            print(msg)
-        else:
-            print(normal_format % (f, t, p, timeout, rule_type if rule_type else ''))
-
-        if stats:
-            for direction in ('original', 'reply'):
-                if direction in packet_count:
-                    print(' %-8s: packets %s, bytes %s' % direction, packet_count[direction], byte_count[direction])
-
-
-def main():
-    parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__)
-    parser.add_argument('--verbose', help='provide more details about the flows', action='store_true')
-    parser.add_argument('--proto', help='filter by protocol', default='', type=str)
-    parser.add_argument('--file', help='read the conntrack xml from a file', type=str)
-    parser.add_argument('--stats', help='add usage statistics', action='store_true')
-    parser.add_argument('--type', help='NAT type (source, destination)', required=True, type=str)
-    parser.add_argument('--ipaddr', help='source ip address to filter on', type=ipaddress.ip_address)
-    parser.add_argument('--pipe', help='read conntrack xml data from stdin', action='store_true')
-
-    arg = parser.parse_args()
-
-    if arg.type not in ('source', 'destination'):
-        sys.exit('Unknown NAT type!')
-
-    if arg.pipe:
-        process(pipe(), arg.stats, arg.proto, arg.pipe, arg.verbose, arg.type)
-    elif arg.file:
-        process(content(arg.file), arg.stats, arg.proto, arg.pipe, arg.verbose, arg.type)
-    else:
-        try:
-            process(run(command(arg.type, arg.proto, arg.ipaddr)), arg.stats, arg.proto, arg.pipe, arg.verbose, arg.type)
-        except:
-            pass
-
-if __name__ == '__main__':
-    main()
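The translation viewer deleted above leans on xmltodict to turn `conntrack -o xml -L` output into dictionaries, and its T4499 special case wraps a lone flow in a list so the iteration code stays uniform. A minimal sketch of that normalization, run against a hand-written single-flow fragment rather than real conntrack output:

#!/usr/bin/env python3
# Illustrative sketch only: mirrors the removed xml_to_dict() helper.
# SINGLE_FLOW is a trimmed, hand-written stand-in for 'conntrack -o xml -L'.
import xmltodict

SINGLE_FLOW = """
<conntrack>
  <flow>
    <meta direction="original">
      <layer3><src>192.0.2.10</src><dst>198.51.100.1</dst></layer3>
    </meta>
  </flow>
</conntrack>
"""

def xml_to_dict(xml):
    parsed = xmltodict.parse(xml)
    # With exactly one flow, xmltodict returns a dict instead of a list of
    # flows, so wrap it to keep the caller's loop uniform (T4499).
    if 'meta' in parsed['conntrack']['flow']:
        return dict(conntrack={'flow': [parsed['conntrack']['flow']]})
    return parsed

flows = xml_to_dict(SINGLE_FLOW)['conntrack']['flow']
print(len(flows), flows[0]['meta']['@direction'])   # 1 original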
diff --git a/src/op_mode/show_raid.sh b/src/op_mode/show_raid.sh
index ba4174692..ab5d4d50f 100755
--- a/src/op_mode/show_raid.sh
+++ b/src/op_mode/show_raid.sh
@@ -1,5 +1,13 @@
 #!/bin/bash
 
+if [ "$EUID" -ne 0 ]; then
+    # This should work without sudo because we have read
+    # access to the dev, but for some reason mdadm must be
+    # run as root in order to succeed.
+    echo "Please run as root"
+    exit 1
+fi
+
 raid_set_name=$1
 raid_sets=`cat /proc/partitions | grep md | awk '{ print $4 }'`
 valid_set=`echo $raid_sets | grep $raid_set_name`
@@ -10,7 +18,7 @@ else
     # This should work without sudo because we have read
     # access to the dev, but for some reason mdadm must be
     # run as root in order to succeed.
-    sudo /sbin/mdadm --detail /dev/${raid_set_name}
+    mdadm --detail /dev/${raid_set_name}
   else
     echo "Must be administrator or root to display RAID status"
   fi
diff --git a/src/op_mode/vpn_ipsec.py b/src/op_mode/vpn_ipsec.py
index 68dc5bc45..2392cfe92 100755
--- a/src/op_mode/vpn_ipsec.py
+++ b/src/op_mode/vpn_ipsec.py
@@ -48,8 +48,8 @@ def reset_peer(peer, tunnel):
     result = True
     for conn in conns:
         try:
-            call(f'sudo /usr/sbin/ipsec down {conn}{{*}}', timeout = 10)
-            call(f'sudo /usr/sbin/ipsec up {conn}', timeout = 10)
+            call(f'/usr/sbin/ipsec down {conn}{{*}}', timeout = 10)
+            call(f'/usr/sbin/ipsec up {conn}', timeout = 10)
         except TimeoutExpired as e:
             print(f'Timed out while resetting {conn}')
             result = False
@@ -81,8 +81,8 @@ def reset_profile(profile, tunnel):
         print('Profile not found, aborting')
         return
 
-    call(f'sudo /usr/sbin/ipsec down {conn}')
-    result = call(f'sudo /usr/sbin/ipsec up {conn}')
+    call(f'/usr/sbin/ipsec down {conn}')
+    result = call(f'/usr/sbin/ipsec up {conn}')
 
     print('Profile reset result: ' + ('success' if result == 0 else 'failed'))
@@ -90,17 +90,17 @@ def debug_peer(peer, tunnel):
     peer = peer.replace(':', '-')
     if not peer or peer == "all":
         debug_commands = [
-            "sudo ipsec statusall",
-            "sudo swanctl -L",
-            "sudo swanctl -l",
-            "sudo swanctl -P",
-            "sudo ip x sa show",
-            "sudo ip x policy show",
-            "sudo ip tunnel show",
-            "sudo ip address",
-            "sudo ip rule show",
-            "sudo ip route | head -100",
-            "sudo ip route show table 220"
+            "ipsec statusall",
+            "swanctl -L",
+            "swanctl -l",
+            "swanctl -P",
+            "ip x sa show",
+            "ip x policy show",
+            "ip tunnel show",
+            "ip address",
+            "ip rule show",
+            "ip route | head -100",
+            "ip route show table 220"
         ]
         for debug_cmd in debug_commands:
            print(f'\n### {debug_cmd} ###')
@@ -117,7 +117,7 @@ def debug_peer(peer, tunnel):
         return
 
     for conn in conns:
-        call(f'sudo /usr/sbin/ipsec statusall | grep {conn}')
+        call(f'/usr/sbin/ipsec statusall | grep {conn}')
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
diff --git a/src/op_mode/webproxy_update_blacklist.sh b/src/op_mode/webproxy_update_blacklist.sh
index d5f301b75..4fb9a54c6 100755
--- a/src/op_mode/webproxy_update_blacklist.sh
+++ b/src/op_mode/webproxy_update_blacklist.sh
@@ -18,6 +18,23 @@ blacklist_url='ftp://ftp.univ-tlse1.fr/pub/reseau/cache/squidguard_contrib/black
 data_dir="/opt/vyatta/etc/config/url-filtering"
 archive="${data_dir}/squidguard/archive"
 db_dir="${data_dir}/squidguard/db"
+conf_file="/etc/squidguard/squidGuard.conf"
+tmp_conf_file="/tmp/sg_update_db.conf"
+
+#$1-category
+#$2-type
+#$3-list
+create_sg_db ()
+{
+    FILE=$db_dir/$1/$2
+    if test -f "$FILE"; then
+        rm -f ${tmp_conf_file}
+        printf "dbhome $db_dir\ndest $1 {\n $3 $1/$2\n}\nacl {\n default {\n pass any\n }\n}" >> ${tmp_conf_file}
+        /usr/bin/squidGuard -b -c ${tmp_conf_file} -C $FILE
+        rm -f ${tmp_conf_file}
+    fi
+
+}
 
 while [ $# -gt 0 ]
 do
@@ -88,6 +105,16 @@ if [[ -n $update ]] && [[ $update -eq "yes" ]]; then
 
     # fix permissions
     chown -R proxy:proxy ${db_dir}
+
+    #create db
+    category_list=(`find $db_dir -type d -exec basename {} \; `)
+    for category in ${category_list[@]}
+    do
+        create_sg_db $category "domains" "domainlist"
+        create_sg_db $category "urls" "urllist"
+        create_sg_db $category "expressions" "expressionlist"
+    done
+
     chown -R proxy:proxy ${db_dir}
     chmod 755 ${db_dir}
 
     logger --priority WARNING "webproxy blacklist entries updated (${count_before}/${count_after})"
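For readers unfamiliar with squidGuard, the create_sg_db helper added above compiles each plain-text blacklist into a database by writing a throw-away configuration and running `squidGuard -b -c <conf> -C <file>` against it. A rough sketch of the temporary configuration that printf produces, using a hypothetical 'ads' category; the category and paths below are example values, not part of the commit:

#!/usr/bin/env python3
# Illustrative sketch only: renders the temporary squidGuard configuration
# that create_sg_db() writes before invoking squidGuard -b -c <conf> -C <file>.
# The 'ads' category is a hypothetical example.
db_dir = '/opt/vyatta/etc/config/url-filtering/squidguard/db'
category, db_type, list_type = 'ads', 'domains', 'domainlist'

tmp_conf = (f"dbhome {db_dir}\n"
            f"dest {category} {{\n"
            f" {list_type} {category}/{db_type}\n"
            f"}}\n"
            "acl {\n"
            " default {\n"
            " pass any\n"
            " }\n"
            "}")

# squidGuard compiles {db_dir}/ads/domains into a .db file using this
# configuration; the real script loops over every category directory.
print(tmp_conf)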
diff --git a/src/services/api/graphql/libs/op_mode.py b/src/services/api/graphql/libs/op_mode.py
index 6939ed5d6..211f8ce19 100644
--- a/src/services/api/graphql/libs/op_mode.py
+++ b/src/services/api/graphql/libs/op_mode.py
@@ -84,7 +84,7 @@ def map_type_name(type_name: type, optional: bool = False) -> str:
     if type_name == int:
         return 'Int!' if not optional else 'Int = null'
     if type_name == bool:
-        return 'Boolean!' if not optional else 'Boolean = false'
+        return 'Boolean = false'
     if typing.get_origin(type_name) == list:
         if not optional:
             return f'[{map_type_name(typing.get_args(type_name)[0])}]!'
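With the map_type_name change above, boolean op-mode parameters always render as a defaulted GraphQL argument rather than a non-null Boolean!. A small sketch of how such a mapping feeds a generated argument list; the surrounding signature and field names are invented for illustration and are not part of the VyOS schema:

#!/usr/bin/env python3
# Illustrative sketch only: reproduces the bool branch of map_type_name()
# after this change and shows the kind of SDL argument list it yields.
# The 'show(family, detail)' signature below is a made-up example.

def map_type_name(type_name: type, optional: bool = False) -> str:
    if type_name == str:
        return 'String!' if not optional else 'String = null'
    if type_name == int:
        return 'Int!' if not optional else 'Int = null'
    if type_name == bool:
        # booleans now always carry a default, so callers may omit them
        return 'Boolean = false'
    raise ValueError(f'unsupported type {type_name}')

args = {'family': (str, False), 'detail': (bool, True)}
sdl = ', '.join(f'{name}: {map_type_name(t, optional)}'
                for name, (t, optional) in args.items())
print(f'show({sdl})')   # show(family: String!, detail: Boolean = false)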