Diffstat (limited to 'src')
-rwxr-xr-x  src/conf_mode/arp.py  |  2
-rwxr-xr-x  src/conf_mode/firewall.py  |  30
-rwxr-xr-x  src/conf_mode/https.py  |  29
-rwxr-xr-x  src/conf_mode/interfaces-ethernet.py  |  23
-rwxr-xr-x  src/conf_mode/interfaces-macsec.py  |  8
-rwxr-xr-x  src/conf_mode/interfaces-pseudo-ethernet.py  |  7
-rwxr-xr-x  src/conf_mode/nat.py  |  8
-rwxr-xr-x  src/conf_mode/ntp.py  |  21
-rwxr-xr-x  src/conf_mode/service_monitoring_telegraf.py  |  80
-rwxr-xr-x  src/conf_mode/service_upnp.py  |  19
-rwxr-xr-x  src/conf_mode/ssh.py  |  23
-rwxr-xr-x  src/conf_mode/system_console.py  |  27
-rwxr-xr-x  src/conf_mode/vpn_ipsec.py  |  8
-rwxr-xr-x  src/conf_mode/vpn_openconnect.py  |  5
-rwxr-xr-x  src/conf_mode/vpn_sstp.py  |  8
-rwxr-xr-x  src/etc/opennhrp/opennhrp-script.py  |  326
-rw-r--r--  src/etc/systemd/system/wpa_supplicant-wired@.service.d/override.conf  |  11
-rwxr-xr-x  src/etc/telegraf/custom_scripts/show_interfaces_input_filter.py  |  16
-rwxr-xr-x  src/op_mode/conntrack.py  |  23
-rwxr-xr-x  src/op_mode/ipsec.py  |  116
-rwxr-xr-x  src/op_mode/nat.py  |  136
-rwxr-xr-x  src/op_mode/openconnect-control.py  |  5
-rwxr-xr-x  src/op_mode/openconnect.py  |  81
-rwxr-xr-x  src/op_mode/restart_dhcp_relay.py  |  4
-rwxr-xr-x  src/op_mode/show_nat66_rules.py  |  102
-rw-r--r--  src/services/api/graphql/bindings.py  |  3
-rw-r--r--  src/services/api/graphql/graphql/errors.py  |  8
-rw-r--r--  src/services/api/graphql/graphql/mutations.py  |  17
-rw-r--r--  src/services/api/graphql/graphql/queries.py  |  17
-rw-r--r--  src/services/api/graphql/session/__init__.py (renamed from src/services/api/graphql/recipes/__init__.py)  |  0
-rwxr-xr-x  src/services/api/graphql/session/composite/system_status.py (renamed from src/services/api/graphql/recipes/queries/system_status.py)  |  0
-rw-r--r--  src/services/api/graphql/session/errors/op_mode_errors.py  |  13
-rw-r--r--  src/services/api/graphql/session/override/remove_firewall_address_group_members.py (renamed from src/services/api/graphql/recipes/remove_firewall_address_group_members.py)  |  0
-rw-r--r--  src/services/api/graphql/session/session.py (renamed from src/services/api/graphql/recipes/session.py)  |  15
-rw-r--r--  src/services/api/graphql/session/templates/create_dhcp_server.tmpl (renamed from src/services/api/graphql/recipes/templates/create_dhcp_server.tmpl)  |  0
-rw-r--r--  src/services/api/graphql/session/templates/create_firewall_address_group.tmpl (renamed from src/services/api/graphql/recipes/templates/create_firewall_address_group.tmpl)  |  0
-rw-r--r--  src/services/api/graphql/session/templates/create_firewall_address_ipv_6_group.tmpl (renamed from src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl)  |  0
-rw-r--r--  src/services/api/graphql/session/templates/create_interface_ethernet.tmpl (renamed from src/services/api/graphql/recipes/templates/create_interface_ethernet.tmpl)  |  0
-rw-r--r--  src/services/api/graphql/session/templates/remove_firewall_address_group_members.tmpl (renamed from src/services/api/graphql/recipes/templates/remove_firewall_address_group_members.tmpl)  |  0
-rw-r--r--  src/services/api/graphql/session/templates/remove_firewall_address_ipv_6_group_members.tmpl (renamed from src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl)  |  0
-rw-r--r--  src/services/api/graphql/session/templates/update_firewall_address_group_members.tmpl (renamed from src/services/api/graphql/recipes/templates/update_firewall_address_group_members.tmpl)  |  0
-rw-r--r--  src/services/api/graphql/session/templates/update_firewall_address_ipv_6_group_members.tmpl (renamed from src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl)  |  0
-rwxr-xr-x  src/services/api/graphql/utils/schema_from_op_mode.py  |  48
-rwxr-xr-x  src/services/vyos-http-api-server  |  1
-rwxr-xr-x  src/system/keepalived-fifo.py  |  12
-rw-r--r--  src/systemd/telegraf.service  |  15
46 files changed, 940 insertions, 327 deletions
diff --git a/src/conf_mode/arp.py b/src/conf_mode/arp.py
index 1cd8f5451..7dc5206e0 100755
--- a/src/conf_mode/arp.py
+++ b/src/conf_mode/arp.py
@@ -61,7 +61,7 @@ def apply(arp):
continue
for address, address_config in interface_config['address'].items():
mac = address_config['mac']
- call(f'ip neigh add {address} lladdr {mac} dev {interface}')
+ call(f'ip neigh replace {address} lladdr {mac} dev {interface}')
if __name__ == '__main__':
try:
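
The arp.py hunk above swaps `ip neigh add` for `ip neigh replace`. `replace` is idempotent, so re-committing a static ARP entry that already exists no longer fails. A minimal stand-alone sketch of the same call, using only the standard library (address, MAC and interface values below are made up):

import subprocess

def set_static_arp(address: str, mac: str, interface: str) -> None:
    # 'replace' creates the neighbour entry if missing and updates it if it
    # already exists; 'add' would exit non-zero on an existing entry.
    subprocess.run(['ip', 'neigh', 'replace', address,
                    'lladdr', mac, 'dev', interface], check=True)

# set_static_arp('192.0.2.1', '00:53:00:00:00:01', 'eth0')
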
diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py
index 07eca722f..f0ea1a1e5 100755
--- a/src/conf_mode/firewall.py
+++ b/src/conf_mode/firewall.py
@@ -206,9 +206,31 @@ def get_config(config=None):
firewall = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True,
no_tag_node_value_mangle=True)
+ # We have gathered the dict representation of the CLI, but there are
+ # default options which we need to update into the dictionary retrieved.
+ # XXX: T2665: we currently have no nice way for defaults under tag
+ # nodes, thus we load the defaults "by hand"
default_values = defaults(base)
+ for tmp in ['name', 'ipv6_name']:
+ if tmp in default_values:
+ del default_values[tmp]
+
firewall = dict_merge(default_values, firewall)
+ # Merge in defaults for IPv4 ruleset
+ if 'name' in firewall:
+ default_values = defaults(base + ['name'])
+ for name in firewall['name']:
+ firewall['name'][name] = dict_merge(default_values,
+ firewall['name'][name])
+
+ # Merge in defaults for IPv6 ruleset
+ if 'ipv6_name' in firewall:
+ default_values = defaults(base + ['ipv6-name'])
+ for ipv6_name in firewall['ipv6_name']:
+ firewall['ipv6_name'][ipv6_name] = dict_merge(default_values,
+ firewall['ipv6_name'][ipv6_name])
+
firewall['policy_resync'] = bool('group' in firewall or node_changed(conf, base + ['group']))
firewall['interfaces'] = get_firewall_interfaces(conf)
firewall['zone_policy'] = get_firewall_zones(conf)
@@ -315,7 +337,7 @@ def verify_nested_group(group_name, group, groups, seen):
if g in seen:
raise ConfigError(f'Group "{group_name}" has a circular reference')
-
+
seen.append(g)
if 'include' in groups[g]:
@@ -378,7 +400,7 @@ def cleanup_commands(firewall):
if firewall['geoip_updated']:
geoip_key = 'deleted_ipv6_name' if table == 'ip6 filter' else 'deleted_name'
geoip_list = dict_search_args(firewall, 'geoip_updated', geoip_key) or []
-
+
json_str = cmd(f'nft -t -j list table {table}')
obj = loads(json_str)
@@ -420,7 +442,7 @@ def cleanup_commands(firewall):
if set_name.startswith('GEOIP_CC_') and set_name in geoip_list:
commands_sets.append(f'delete set {table} {set_name}')
continue
-
+
if set_name.startswith("RECENT_"):
commands_sets.append(f'delete set {table} {set_name}')
continue
@@ -520,7 +542,7 @@ def apply(firewall):
if install_result == 1:
raise ConfigError('Failed to apply firewall')
- # set fireall group domain-group xxx
+ # set firewall group domain-group xxx
if 'group' in firewall:
if 'domain_group' in firewall['group']:
# T970 Enable a resolver (systemd daemon) that checks
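
The firewall.py change works around T2665 (no automatic defaults under tag nodes) by loading the per-rule-set defaults separately and merging them into every entry under 'name' and 'ipv6_name'. A rough illustration of that merge using a plain recursive helper and invented keys (the real code relies on vyos.xml.defaults and vyos.configdict.dict_merge):

def merge_defaults(defaults: dict, config: dict) -> dict:
    # explicit CLI configuration always wins over default values
    merged = dict(defaults)
    for key, value in config.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_defaults(merged[key], value)
        else:
            merged[key] = value
    return merged

name_defaults = {'default_action': 'drop'}   # hypothetical per-rule-set default
firewall = {'name': {'WAN-IN': {'rule': {'10': {'action': 'accept'}}}}}
for name in firewall['name']:
    firewall['name'][name] = merge_defaults(name_defaults, firewall['name'][name])
# every named rule set now carries both the defaults and the CLI settings
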
diff --git a/src/conf_mode/https.py b/src/conf_mode/https.py
index 3057357fc..7cd7ea42e 100755
--- a/src/conf_mode/https.py
+++ b/src/conf_mode/https.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2021 VyOS maintainers and contributors
+# Copyright (C) 2019-2022 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -29,6 +29,8 @@ from vyos.pki import wrap_certificate
from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.util import call
+from vyos.util import check_port_availability
+from vyos.util import is_listen_port_bind_service
from vyos.util import write_file
from vyos import airbag
@@ -107,6 +109,31 @@ def verify(https):
raise ConfigError("At least one 'virtual-host <id> server-name' "
"matching the 'certbot domain-name' is required.")
+ server_block_list = []
+
+ # organize by vhosts
+ vhost_dict = https.get('virtual-host', {})
+
+ if not vhost_dict:
+ # no specified virtual hosts (server blocks); use default
+ server_block_list.append(default_server_block)
+ else:
+ for vhost in list(vhost_dict):
+ server_block = deepcopy(default_server_block)
+ data = vhost_dict.get(vhost, {})
+ server_block['address'] = data.get('listen-address', '*')
+ server_block['port'] = data.get('listen-port', '443')
+ server_block_list.append(server_block)
+
+ for entry in server_block_list:
+ _address = entry.get('address')
+ _address = '0.0.0.0' if _address == '*' else _address
+ _port = entry.get('port')
+ proto = 'tcp'
+ if check_port_availability(_address, int(_port), proto) is not True and \
+ not is_listen_port_bind_service(int(_port), 'nginx'):
+ raise ConfigError(f'"{proto}" port "{_port}" is used by another service')
+
verify_vrf(https)
return None
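
The new https.py verify() step collects one (address, port) pair per server block and only rejects the commit if the port is busy and the listener is not nginx itself, so re-committing an unchanged service stays a no-op. A rough stand-alone equivalent of the availability probe, assuming a plain TCP bind test instead of the real vyos.util.check_port_availability helper:

import socket

def tcp_port_is_free(address: str, port: int) -> bool:
    # binding succeeds only if nothing else is listening on address:port
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sock.bind((address, port))
            return True
        except OSError:
            return False

# tcp_port_is_free('0.0.0.0', 443)
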
diff --git a/src/conf_mode/interfaces-ethernet.py b/src/conf_mode/interfaces-ethernet.py
index 30e7a2af7..e02841831 100755
--- a/src/conf_mode/interfaces-ethernet.py
+++ b/src/conf_mode/interfaces-ethernet.py
@@ -153,11 +153,20 @@ def verify(ethernet):
return None
def generate(ethernet):
- if 'eapol' in ethernet:
- render(wpa_suppl_conf.format(**ethernet),
- 'ethernet/wpa_supplicant.conf.j2', ethernet)
+ # render real configuration file once
+ wpa_supplicant_conf = wpa_suppl_conf.format(**ethernet)
+
+ if 'deleted' in ethernet:
+ # delete configuration on interface removal
+ if os.path.isfile(wpa_supplicant_conf):
+ os.unlink(wpa_supplicant_conf)
+ return None
+ if 'eapol' in ethernet:
ifname = ethernet['ifname']
+
+ render(wpa_supplicant_conf, 'ethernet/wpa_supplicant.conf.j2', ethernet)
+
cert_file_path = os.path.join(cfg_dir, f'{ifname}_cert.pem')
cert_key_path = os.path.join(cfg_dir, f'{ifname}_cert.key')
@@ -184,10 +193,6 @@ def generate(ethernet):
write_file(ca_cert_file_path,
'\n'.join(encode_certificate(c) for c in ca_full_chain))
- else:
- # delete configuration on interface removal
- if os.path.isfile(wpa_suppl_conf.format(**ethernet)):
- os.unlink(wpa_suppl_conf.format(**ethernet))
return None
@@ -203,9 +208,9 @@ def apply(ethernet):
else:
e.update(ethernet)
if 'eapol' in ethernet:
- eapol_action='restart'
+ eapol_action='reload-or-restart'
- call(f'systemctl {eapol_action} wpa_supplicant-macsec@{ifname}')
+ call(f'systemctl {eapol_action} wpa_supplicant-wired@{ifname}')
if __name__ == '__main__':
try:
diff --git a/src/conf_mode/interfaces-macsec.py b/src/conf_mode/interfaces-macsec.py
index 870049a88..649ea8d50 100755
--- a/src/conf_mode/interfaces-macsec.py
+++ b/src/conf_mode/interfaces-macsec.py
@@ -67,7 +67,7 @@ def get_config(config=None):
macsec.update({'shutdown_required': {}})
if 'source_interface' in macsec:
- tmp = is_source_interface(conf, macsec['source_interface'], 'macsec')
+ tmp = is_source_interface(conf, macsec['source_interface'], ['macsec', 'pseudo-ethernet'])
if tmp and tmp != ifname: macsec.update({'is_source_interface' : tmp})
return macsec
@@ -102,12 +102,6 @@ def verify(macsec):
# gcm-aes-128 requires a 128bit long key - 64 characters (string) = 32byte = 256bit
raise ConfigError('gcm-aes-128 requires a 256bit long key!')
- if 'is_source_interface' in macsec:
- tmp = macsec['is_source_interface']
- src_ifname = macsec['source_interface']
- raise ConfigError(f'Can not use source-interface "{src_ifname}", it already ' \
- f'belongs to interface "{tmp}"!')
-
if 'source_interface' in macsec:
# MACsec adds a 40 byte overhead (32 byte MACsec + 8 bytes VLAN 802.1ad
# and 802.1q) - we need to check the underlaying MTU if our configured
diff --git a/src/conf_mode/interfaces-pseudo-ethernet.py b/src/conf_mode/interfaces-pseudo-ethernet.py
index f26a50a0e..20f2b1975 100755
--- a/src/conf_mode/interfaces-pseudo-ethernet.py
+++ b/src/conf_mode/interfaces-pseudo-ethernet.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2020 VyOS maintainers and contributors
+# Copyright (C) 2019-2022 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -19,6 +19,7 @@ from sys import exit
from vyos.config import Config
from vyos.configdict import get_interface_dict
from vyos.configdict import is_node_changed
+from vyos.configdict import is_source_interface
from vyos.configverify import verify_vrf
from vyos.configverify import verify_address
from vyos.configverify import verify_bridge_delete
@@ -51,6 +52,10 @@ def get_config(config=None):
if 'source_interface' in peth:
_, peth['parent'] = get_interface_dict(conf, ['interfaces', 'ethernet'],
peth['source_interface'])
+ # test if source-interface is maybe already used by another interface
+ tmp = is_source_interface(conf, peth['source_interface'], ['macsec'])
+ if tmp and tmp != ifname: peth.update({'is_source_interface' : tmp})
+
return peth
def verify(peth):
diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py
index b76ea9f9e..e75418ba5 100755
--- a/src/conf_mode/nat.py
+++ b/src/conf_mode/nat.py
@@ -44,8 +44,8 @@ if LooseVersion(kernel_version()) > LooseVersion('5.1'):
else:
k_mod = ['nft_nat', 'nft_chain_nat_ipv4']
-nftables_nat_config = '/tmp/vyos-nat-rules.nft'
-nftables_static_nat_conf = '/tmp/vyos-static-nat-rules.nft'
+nftables_nat_config = '/run/nftables_nat.conf'
+nftables_static_nat_conf = '/run/nftables_static-nat-rules.nft'
def get_handler(json, chain, target):
""" Get nftable rule handler number of given chain/target combination.
@@ -199,8 +199,6 @@ def generate(nat):
# dry-run newly generated configuration
tmp = run(f'nft -c -f {nftables_nat_config}')
if tmp > 0:
- if os.path.exists(nftables_nat_config):
- os.unlink(nftables_nat_config)
raise ConfigError('Configuration file errors encountered!')
tmp = run(f'nft -c -f {nftables_nat_config}')
@@ -210,8 +208,6 @@ def generate(nat):
def apply(nat):
cmd(f'nft -f {nftables_nat_config}')
cmd(f'nft -f {nftables_static_nat_conf}')
- if os.path.isfile(nftables_nat_config):
- os.unlink(nftables_nat_config)
return None
diff --git a/src/conf_mode/ntp.py b/src/conf_mode/ntp.py
index 5490a794d..0ecb4d736 100755
--- a/src/conf_mode/ntp.py
+++ b/src/conf_mode/ntp.py
@@ -17,6 +17,7 @@
import os
from vyos.config import Config
+from vyos.configdict import is_node_changed
from vyos.configverify import verify_vrf
from vyos.configverify import verify_interface_exists
from vyos.util import call
@@ -40,6 +41,10 @@ def get_config(config=None):
ntp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
ntp['config_file'] = config_file
+
+ tmp = is_node_changed(conf, base + ['vrf'])
+ if tmp: ntp.update({'restart_required': {}})
+
return ntp
def verify(ntp):
@@ -78,19 +83,25 @@ def generate(ntp):
return None
def apply(ntp):
+ systemd_service = 'ntp.service'
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+
if not ntp:
# NTP support is removed in the commit
- call('systemctl stop ntp.service')
+ call(f'systemctl stop {systemd_service}')
if os.path.exists(config_file):
os.unlink(config_file)
if os.path.isfile(systemd_override):
os.unlink(systemd_override)
+ return
- # Reload systemd manager configuration
- call('systemctl daemon-reload')
- if ntp:
- call('systemctl restart ntp.service')
+ # we need to restart the service if e.g. the VRF name changed
+ systemd_action = 'reload-or-restart'
+ if 'restart_required' in ntp:
+ systemd_action = 'restart'
+ call(f'systemctl {systemd_action} {systemd_service}')
return None
if __name__ == '__main__':
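
ntp.py (like the ssh.py and telegraf changes further down) now records in get_config() whether the 'vrf' node changed and then picks 'restart' over the gentler 'reload-or-restart' in apply(). A condensed sketch of that apply step, assuming the same dict layout the script builds:

from subprocess import run

def apply_service(config: dict, unit: str) -> None:
    run(['systemctl', 'daemon-reload'])      # pick up changed override files
    if not config:
        run(['systemctl', 'stop', unit])     # service was deleted from the CLI
        return
    # moving the daemon into another VRF re-binds its sockets,
    # so a plain reload is not enough in that case
    action = 'restart' if 'restart_required' in config else 'reload-or-restart'
    run(['systemctl', action, unit])

# apply_service(ntp, 'ntp.service')
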
diff --git a/src/conf_mode/service_monitoring_telegraf.py b/src/conf_mode/service_monitoring_telegraf.py
index 62f5e1ddf..53df006a4 100755
--- a/src/conf_mode/service_monitoring_telegraf.py
+++ b/src/conf_mode/service_monitoring_telegraf.py
@@ -22,6 +22,8 @@ from shutil import rmtree
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configdict import is_node_changed
+from vyos.configverify import verify_vrf
from vyos.ifconfig import Section
from vyos.template import render
from vyos.util import call
@@ -32,39 +34,14 @@ from vyos import ConfigError
from vyos import airbag
airbag.enable()
-
-base_dir = '/run/telegraf'
cache_dir = f'/etc/telegraf/.cache'
-config_telegraf = f'{base_dir}/vyos-telegraf.conf'
+config_telegraf = f'/run/telegraf/telegraf.conf'
custom_scripts_dir = '/etc/telegraf/custom_scripts'
syslog_telegraf = '/etc/rsyslog.d/50-telegraf.conf'
-systemd_telegraf_service = '/etc/systemd/system/vyos-telegraf.service'
-systemd_telegraf_override_dir = '/etc/systemd/system/vyos-telegraf.service.d'
-systemd_override = f'{systemd_telegraf_override_dir}/10-override.conf'
-
-
-def get_interfaces(type='', vlan=True):
- """
- Get interfaces
- get_interfaces()
- ['dum0', 'eth0', 'eth1', 'eth1.5', 'lo', 'tun0']
-
- get_interfaces("dummy")
- ['dum0']
- """
- interfaces = []
- ifaces = Section.interfaces(type)
- for iface in ifaces:
- if vlan == False and '.' in iface:
- continue
- interfaces.append(iface)
-
- return interfaces
+systemd_override = '/etc/systemd/system/telegraf.service.d/10-override.conf'
def get_nft_filter_chains():
- """
- Get nft chains for table filter
- """
+ """ Get nft chains for table filter """
nft = cmd('nft --json list table ip filter')
nft = json.loads(nft)
chain_list = []
@@ -76,9 +53,7 @@ def get_nft_filter_chains():
return chain_list
-
def get_config(config=None):
-
if config:
conf = config
else:
@@ -87,8 +62,12 @@ def get_config(config=None):
if not conf.exists(base):
return None
- monitoring = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True,
- no_tag_node_value_mangle=True)
+ monitoring = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ tmp = is_node_changed(conf, base + ['vrf'])
+ if tmp: monitoring.update({'restart_required': {}})
# We have gathered the dict representation of the CLI, but there are default
# options which we need to update into the dictionary retrived.
@@ -96,7 +75,7 @@ def get_config(config=None):
monitoring = dict_merge(default_values, monitoring)
monitoring['custom_scripts_dir'] = custom_scripts_dir
- monitoring['interfaces_ethernet'] = get_interfaces('ethernet', vlan=False)
+ monitoring['interfaces_ethernet'] = Section.interfaces('ethernet', vlan=False)
monitoring['nft_chains'] = get_nft_filter_chains()
# Redefine azure group-metrics 'single-table' and 'table-per-metric'
@@ -131,6 +110,8 @@ def verify(monitoring):
if not monitoring:
return None
+ verify_vrf(monitoring)
+
# Verify influxdb
if 'influxdb' in monitoring:
if 'authentication' not in monitoring['influxdb'] or \
@@ -173,7 +154,7 @@ def verify(monitoring):
def generate(monitoring):
if not monitoring:
# Delete config and systemd files
- config_files = [config_telegraf, systemd_telegraf_service, systemd_override, syslog_telegraf]
+ config_files = [config_telegraf, systemd_override, syslog_telegraf]
for file in config_files:
if os.path.isfile(file):
os.unlink(file)
@@ -190,33 +171,34 @@ def generate(monitoring):
chown(cache_dir, 'telegraf', 'telegraf')
- # Create systemd override dir
- if not os.path.exists(systemd_telegraf_override_dir):
- os.mkdir(systemd_telegraf_override_dir)
-
# Create custome scripts dir
if not os.path.exists(custom_scripts_dir):
os.mkdir(custom_scripts_dir)
# Render telegraf configuration and systemd override
- render(config_telegraf, 'monitoring/telegraf.j2', monitoring)
- render(systemd_telegraf_service, 'monitoring/systemd_vyos_telegraf_service.j2', monitoring)
- render(systemd_override, 'monitoring/override.conf.j2', monitoring, permission=0o640)
- render(syslog_telegraf, 'monitoring/syslog_telegraf.j2', monitoring)
-
- chown(base_dir, 'telegraf', 'telegraf')
+ render(config_telegraf, 'telegraf/telegraf.j2', monitoring, user='telegraf', group='telegraf')
+ render(systemd_override, 'telegraf/override.conf.j2', monitoring)
+ render(syslog_telegraf, 'telegraf/syslog_telegraf.j2', monitoring)
return None
def apply(monitoring):
# Reload systemd manager configuration
+ systemd_service = 'telegraf.service'
call('systemctl daemon-reload')
- if monitoring:
- call('systemctl restart vyos-telegraf.service')
- else:
- call('systemctl stop vyos-telegraf.service')
+ if not monitoring:
+ call(f'systemctl stop {systemd_service}')
+ return
+
+ # we need to restart the service if e.g. the VRF name changed
+ systemd_action = 'reload-or-restart'
+ if 'restart_required' in monitoring:
+ systemd_action = 'restart'
+
+ call(f'systemctl {systemd_action} {systemd_service}')
+
# Telegraf include custom rsyslog config changes
- call('systemctl restart rsyslog')
+ call('systemctl reload-or-restart rsyslog')
if __name__ == '__main__':
try:
diff --git a/src/conf_mode/service_upnp.py b/src/conf_mode/service_upnp.py
index 36f3e18a7..c798fd515 100755
--- a/src/conf_mode/service_upnp.py
+++ b/src/conf_mode/service_upnp.py
@@ -24,8 +24,6 @@ from ipaddress import IPv6Network
from vyos.config import Config
from vyos.configdict import dict_merge
-from vyos.configdict import get_interface_dict
-from vyos.configverify import verify_vrf
from vyos.util import call
from vyos.template import render
from vyos.template import is_ipv4
@@ -113,19 +111,28 @@ def verify(upnpd):
listen_dev = []
system_addrs_cidr = get_all_interface_addr(True, [], [netifaces.AF_INET, netifaces.AF_INET6])
system_addrs = get_all_interface_addr(False, [], [netifaces.AF_INET, netifaces.AF_INET6])
+ if 'listen' not in upnpd:
+ raise ConfigError(f'Listen address or interface is required!')
for listen_if_or_addr in upnpd['listen']:
if listen_if_or_addr not in netifaces.interfaces():
listen_dev.append(listen_if_or_addr)
- if (listen_if_or_addr not in system_addrs) and (listen_if_or_addr not in system_addrs_cidr) and (listen_if_or_addr not in netifaces.interfaces()):
+ if (listen_if_or_addr not in system_addrs) and (listen_if_or_addr not in system_addrs_cidr) and \
+ (listen_if_or_addr not in netifaces.interfaces()):
if is_ipv4(listen_if_or_addr) and IPv4Network(listen_if_or_addr).is_multicast:
- raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed to listen on. It is not an interface address nor a multicast address!')
+ raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed '
+ f'to listen on. It is not an interface address nor a multicast address!')
if is_ipv6(listen_if_or_addr) and IPv6Network(listen_if_or_addr).is_multicast:
- raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed to listen on. It is not an interface address nor a multicast address!')
+ raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed '
+ f'to listen on. It is not an interface address nor a multicast address!')
system_listening_dev_addrs_cidr = get_all_interface_addr(True, listen_dev, [netifaces.AF_INET6])
system_listening_dev_addrs = get_all_interface_addr(False, listen_dev, [netifaces.AF_INET6])
for listen_if_or_addr in upnpd['listen']:
- if listen_if_or_addr not in netifaces.interfaces() and (listen_if_or_addr not in system_listening_dev_addrs_cidr) and (listen_if_or_addr not in system_listening_dev_addrs) and is_ipv6(listen_if_or_addr) and (not IPv6Network(listen_if_or_addr).is_multicast):
+ if listen_if_or_addr not in netifaces.interfaces() and \
+ (listen_if_or_addr not in system_listening_dev_addrs_cidr) and \
+ (listen_if_or_addr not in system_listening_dev_addrs) and \
+ is_ipv6(listen_if_or_addr) and \
+ (not IPv6Network(listen_if_or_addr).is_multicast):
raise ConfigError(f'{listen_if_or_addr} must listen on the interface of the network card')
def generate(upnpd):
diff --git a/src/conf_mode/ssh.py b/src/conf_mode/ssh.py
index 28669694b..2bbd7142a 100755
--- a/src/conf_mode/ssh.py
+++ b/src/conf_mode/ssh.py
@@ -22,6 +22,7 @@ from syslog import LOG_INFO
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configdict import is_node_changed
from vyos.configverify import verify_vrf
from vyos.util import call
from vyos.template import render
@@ -50,6 +51,10 @@ def get_config(config=None):
return None
ssh = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+
+ tmp = is_node_changed(conf, base + ['vrf'])
+ if tmp: ssh.update({'restart_required': {}})
+
# We have gathered the dict representation of the CLI, but there are default
# options which we need to update into the dictionary retrived.
default_values = defaults(base)
@@ -104,17 +109,25 @@ def generate(ssh):
return None
def apply(ssh):
+ systemd_service_ssh = 'ssh.service'
+ systemd_service_sshguard = 'sshguard.service'
if not ssh:
# SSH access is removed in the commit
- call('systemctl stop ssh.service')
- call('systemctl stop sshguard.service')
+ call(f'systemctl stop {systemd_service_ssh}')
+ call(f'systemctl stop {systemd_service_sshguard}')
return None
+
if 'dynamic_protection' not in ssh:
- call('systemctl stop sshguard.service')
+ call(f'systemctl stop {systemd_service_sshguard}')
else:
- call('systemctl restart sshguard.service')
+ call(f'systemctl reload-or-restart {systemd_service_sshguard}')
+
+ # we need to restart the service if e.g. the VRF name changed
+ systemd_action = 'reload-or-restart'
+ if 'restart_required' in ssh:
+ systemd_action = 'restart'
- call('systemctl restart ssh.service')
+ call(f'systemctl {systemd_action} {systemd_service_ssh}')
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/system_console.py b/src/conf_mode/system_console.py
index 86985d765..e922edc4e 100755
--- a/src/conf_mode/system_console.py
+++ b/src/conf_mode/system_console.py
@@ -16,6 +16,7 @@
import os
import re
+from pathlib import Path
from vyos.config import Config
from vyos.configdict import dict_merge
@@ -68,18 +69,15 @@ def verify(console):
# amount of connected devices. We will resolve the fixed device name
# to its dynamic device file - and create a new dict entry for it.
by_bus_device = f'{by_bus_dir}/{device}'
- if os.path.isdir(by_bus_dir) and os.path.exists(by_bus_device):
- device = os.path.basename(os.readlink(by_bus_device))
-
- # If the device name still starts with usbXXX no matching tty was found
- # and it can not be used as a serial interface
- if device.startswith('usb'):
- raise ConfigError(f'Device {device} does not support beeing used as tty')
+ # If the device name still starts with usbXXX no matching tty was found
+ # and it can not be used as a serial interface
+ if not os.path.isdir(by_bus_dir) or not os.path.exists(by_bus_device):
+ raise ConfigError(f'Device {device} does not support being used as tty')
return None
def generate(console):
- base_dir = '/etc/systemd/system'
+ base_dir = '/run/systemd/system'
# Remove all serial-getty configuration files in advance
for root, dirs, files in os.walk(base_dir):
for basename in files:
@@ -90,7 +88,8 @@ def generate(console):
if not console or 'device' not in console:
return None
- for device, device_config in console['device'].items():
+ # replace keys in the config for ttyUSB items to use them in `apply()` later
+ for device in console['device'].copy():
if device.startswith('usb'):
# It is much easiert to work with the native ttyUSBn name when using
# getty, but that name may change across reboots - depending on the
@@ -98,9 +97,17 @@ def generate(console):
# to its dynamic device file - and create a new dict entry for it.
by_bus_device = f'{by_bus_dir}/{device}'
if os.path.isdir(by_bus_dir) and os.path.exists(by_bus_device):
- device = os.path.basename(os.readlink(by_bus_device))
+ device_updated = os.path.basename(os.readlink(by_bus_device))
+
+ # replace keys in the config to use them in `apply()` later
+ console['device'][device_updated] = console['device'][device]
+ del console['device'][device]
+ else:
+ raise ConfigError(f'Device {device} does not support being used as tty')
+ for device, device_config in console['device'].items():
config_file = base_dir + f'/serial-getty@{device}.service'
+ Path(f'{base_dir}/getty.target.wants').mkdir(exist_ok=True)
getty_wants_symlink = base_dir + f'/getty.target.wants/serial-getty@{device}.service'
render(config_file, 'getty/serial-getty.service.j2', device_config)
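
system_console.py now rewrites usbXXX keys to their resolved ttyUSBn names before rendering the getty units into /run/systemd/system, so apply() can address the real device files. The resolution itself is a symlink lookup; a small sketch, assuming by_bus_dir points at /dev/serial/by-bus as in the VyOS script and using an invented device name:

import os

by_bus_dir = '/dev/serial/by-bus'

def resolve_usb_tty(device: str) -> str:
    # /dev/serial/by-bus/usbXXX is a symlink to the dynamic ttyUSBn node
    by_bus_device = f'{by_bus_dir}/{device}'
    if os.path.isdir(by_bus_dir) and os.path.exists(by_bus_device):
        return os.path.basename(os.readlink(by_bus_device))
    raise RuntimeError(f'Device {device} cannot be used as a tty')

# resolve_usb_tty('usb0b1.1p1.0')  ->  e.g. 'ttyUSB0'
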
diff --git a/src/conf_mode/vpn_ipsec.py b/src/conf_mode/vpn_ipsec.py
index bad9cfbd8..5ca32d23e 100755
--- a/src/conf_mode/vpn_ipsec.py
+++ b/src/conf_mode/vpn_ipsec.py
@@ -595,13 +595,11 @@ def wait_for_vici_socket(timeout=5, sleep_interval=0.1):
sleep(sleep_interval)
def apply(ipsec):
+ systemd_service = 'strongswan-starter.service'
if not ipsec:
- call('sudo ipsec stop')
+ call(f'systemctl stop {systemd_service}')
else:
- call('sudo ipsec restart')
- call('sudo ipsec rereadall')
- call('sudo ipsec reload')
-
+ call(f'systemctl reload-or-restart {systemd_service}')
if wait_for_vici_socket():
call('sudo swanctl -q')
diff --git a/src/conf_mode/vpn_openconnect.py b/src/conf_mode/vpn_openconnect.py
index a3e774678..240546817 100755
--- a/src/conf_mode/vpn_openconnect.py
+++ b/src/conf_mode/vpn_openconnect.py
@@ -25,6 +25,7 @@ from vyos.template import render
from vyos.util import call
from vyos.util import check_port_availability
from vyos.util import is_systemd_service_running
+from vyos.util import is_listen_port_bind_service
from vyos.util import dict_search
from vyos.xml import defaults
from vyos import ConfigError
@@ -77,8 +78,10 @@ def verify(ocserv):
if ocserv is None:
return None
# Check if listen-ports not binded other services
+ # Only 'ocserv-main' itself is allowed to listen on them
for proto, port in ocserv.get('listen_ports').items():
- if check_port_availability('0.0.0.0', int(port), proto) is not True:
+ if check_port_availability('0.0.0.0', int(port), proto) is not True and \
+ not is_listen_port_bind_service(int(port), 'ocserv-main'):
raise ConfigError(f'"{proto}" port "{port}" is used by another service')
# Check authentication
if "authentication" in ocserv:
diff --git a/src/conf_mode/vpn_sstp.py b/src/conf_mode/vpn_sstp.py
index 23e5162ba..2949ab290 100755
--- a/src/conf_mode/vpn_sstp.py
+++ b/src/conf_mode/vpn_sstp.py
@@ -26,7 +26,9 @@ from vyos.pki import wrap_certificate
from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.util import call
+from vyos.util import check_port_availability
from vyos.util import dict_search
+from vyos.util import is_listen_port_bind_service
from vyos.util import write_file
from vyos import ConfigError
from vyos import airbag
@@ -62,6 +64,12 @@ def verify(sstp):
if not sstp:
return None
+ port = sstp.get('port')
+ proto = 'tcp'
+ if check_port_availability('0.0.0.0', int(port), proto) is not True and \
+ not is_listen_port_bind_service(int(port), 'accel-pppd'):
+ raise ConfigError(f'"{proto}" port "{port}" is used by another service')
+
verify_accel_ppp_base_service(sstp)
if 'client_ip_pool' not in sstp and 'client_ipv6_pool' not in sstp:
diff --git a/src/etc/opennhrp/opennhrp-script.py b/src/etc/opennhrp/opennhrp-script.py
index 8274e6564..bf25a7331 100755
--- a/src/etc/opennhrp/opennhrp-script.py
+++ b/src/etc/opennhrp/opennhrp-script.py
@@ -18,44 +18,126 @@ import os
import re
import sys
import vici
+
from json import loads
+from pathlib import Path
+from vyos.logger import getLogger
from vyos.util import cmd
from vyos.util import process_named_running
-NHRP_CONFIG = "/run/opennhrp/opennhrp.conf"
+NHRP_CONFIG: str = '/run/opennhrp/opennhrp.conf'
+
+
+def vici_get_ipsec_uniqueid(conn: str, src_nbma: str,
+ dst_nbma: str) -> list[str]:
+ """ Find and return IKE SAs by src nbma and dst nbma
+
+ Args:
+ conn (str): a connection name
+ src_nbma (str): an IP address of NBMA source
+ dst_nbma (str): an IP address of NBMA destination
+
+ Returns:
+ list: a list of IKE connections that match a criteria
+ """
+ if not conn or not src_nbma or not dst_nbma:
+ logger.error(
+ f'Incomplete input data for resolving IKE unique ids: '
+ f'conn: {conn}, src_nbma: {src_nbma}, dst_nbma: {dst_nbma}')
+ return []
+
+ try:
+ logger.info(
+ f'Resolving IKE unique ids for: conn: {conn}, '
+ f'src_nbma: {src_nbma}, dst_nbma: {dst_nbma}')
+ session: vici.Session = vici.Session()
+ list_ikeid: list[str] = []
+ list_sa = session.list_sas({'ike': conn})
+ for sa in list_sa:
+ if sa[conn]['local-host'].decode('ascii') == src_nbma \
+ and sa[conn]['remote-host'].decode('ascii') == dst_nbma:
+ list_ikeid.append(sa[conn]['uniqueid'].decode('ascii'))
+ return list_ikeid
+ except Exception as err:
+ logger.error(f'Unable to find unique ids for IKE: {err}')
+ return []
+
+
+def vici_ike_terminate(list_ikeid: list[str]) -> bool:
+ """Terminating IKE SAs by list of IKE IDs
+
+ Args:
+ list_ikeid (list[str]): a list of IKE ids to terminate
+
+ Returns:
+ bool: result of termination action
+ """
+ if not list_ikeid:
+ logger.warning('An empty list for termination was provided')
+ return False
+
+ try:
+ session = vici.Session()
+ for ikeid in list_ikeid:
+ logger.info(f'Terminating IKE SA with id {ikeid}')
+ session_generator = session.terminate(
+ {'ike-id': ikeid, 'timeout': '-1'})
+ # a dummy `for` loop is required because of requirements
+ # from vici. Without a full iteration on the output, the
+ # command to vici may not be executed completely
+ for _ in session_generator:
+ pass
+ return True
+ except Exception as err:
+ logger.error(f'Failed to terminate SA for IKE ids {list_ikeid}: {err}')
+ return False
+
+def parse_type_ipsec(interface: str) -> tuple[str, str]:
+ """Get DMVPN Type and NHRP Profile from the configuration
-def parse_type_ipsec(interface):
- with open(NHRP_CONFIG, 'r') as f:
- lines = f.readlines()
- match = rf'^interface {interface} #(hub|spoke)(?:\s([\w-]+))?$'
- for line in lines:
- m = re.match(match, line)
- if m:
- return m[1], m[2]
- return None, None
+ Args:
+ interface (str): a name of interface
+
+ Returns:
+ tuple[str, str]: `peer_type` and `profile_name`
+ """
+ if not interface:
+ logger.error('Cannot find peer type - no input provided')
+ return '', ''
+
+ config_file: str = Path(NHRP_CONFIG).read_text()
+ regex: str = rf'^interface {interface} #(?P<peer_type>hub|spoke) ?(?P<profile_name>[^\n]*)$'
+ match = re.search(regex, config_file, re.M)
+ if match:
+ return match.groupdict()['peer_type'], match.groupdict()[
+ 'profile_name']
+ return '', ''
def add_peer_route(nbma_src: str, nbma_dst: str, mtu: str) -> None:
"""Add a route to a NBMA peer
Args:
- nmba_src (str): a local IP address
+ nbma_src (str): a local IP address
nbma_dst (str): a remote IP address
mtu (str): a MTU for a route
"""
+ logger.info(f'Adding route from {nbma_src} to {nbma_dst} with MTU {mtu}')
# Find routes to a peer
- route_get_cmd = f'sudo ip -j route get {nbma_dst} from {nbma_src}'
+ route_get_cmd: str = f'sudo ip --json route get {nbma_dst} from {nbma_src}'
try:
route_info_data = loads(cmd(route_get_cmd))
except Exception as err:
- print(f'Unable to find a route to {nbma_dst}: {err}')
+ logger.error(f'Unable to find a route to {nbma_dst}: {err}')
+ return
# Check if an output has an expected format
if not isinstance(route_info_data, list):
- print(f'Garbage returned from the "{route_get_cmd}" command: \
- {route_info_data}')
+ logger.error(
+ f'Garbage returned from the "{route_get_cmd}" '
+ f'command: {route_info_data}')
return
# Add static routes to a peer
@@ -76,104 +158,222 @@ def add_peer_route(nbma_src: str, nbma_dst: str, mtu: str) -> None:
try:
cmd(route_add_cmd)
except Exception as err:
- print(f'Unable to add a route using command "{route_add_cmd}": \
- {err}')
+ logger.error(
+ f'Unable to add a route using command "{route_add_cmd}": '
+ f'{err}')
-def vici_initiate(conn, child_sa, src_addr, dest_addr):
- try:
- session = vici.Session()
- logs = session.initiate({
- 'ike': conn,
- 'child': child_sa,
- 'timeout': '-1',
- 'my-host': src_addr,
- 'other-host': dest_addr
- })
- for log in logs:
- message = log['msg'].decode('ascii')
- print('INIT LOG:', message)
- return True
- except:
- return None
+def vici_initiate(conn: str, child_sa: str, src_addr: str,
+ dest_addr: str) -> bool:
+ """Initiate IKE SA connection with specific peer
+ Args:
+ conn (str): an IKE connection name
+ child_sa (str): a child SA profile name
+ src_addr (str): NBMA local address
+ dest_addr (str): NBMA address of a peer
-def vici_terminate(conn, child_sa, src_addr, dest_addr):
+ Returns:
+ bool: a result of initiation command
+ """
+ logger.info(
+ f'Trying to initiate connection. Name: {conn}, child sa: {child_sa}, '
+ f'src_addr: {src_addr}, dst_addr: {dest_addr}')
try:
session = vici.Session()
- logs = session.terminate({
+ session_generator = session.initiate({
'ike': conn,
'child': child_sa,
'timeout': '-1',
'my-host': src_addr,
'other-host': dest_addr
})
- for log in logs:
- message = log['msg'].decode('ascii')
- print('TERM LOG:', message)
+ # a dummy `for` loop is required because of requirements
+ # from vici. Without a full iteration on the output, the
+ # command to vici may not be executed completely
+ for _ in session_generator:
+ pass
return True
- except:
- return None
+ except Exception as err:
+ logger.error(f'Unable to initiate connection {err}')
+ return False
+
+
+def vici_terminate(conn: str, src_addr: str, dest_addr: str) -> None:
+ """Find and terminate IKE SAs by local NBMA and remote NBMA addresses
+
+ Args:
+ conn (str): IKE connection name
+ src_addr (str): NBMA local address
+ dest_addr (str): NBMA address of a peer
+ """
+ logger.info(
+ f'Terminating IKE connection {conn} between {src_addr} '
+ f'and {dest_addr}')
+ ikeid_list: list[str] = vici_get_ipsec_uniqueid(conn, src_addr, dest_addr)
-def iface_up(interface):
- cmd(f'sudo ip route flush proto 42 dev {interface}')
- cmd(f'sudo ip neigh flush dev {interface}')
+ if not ikeid_list:
+ logger.warning(
+ f'No active sessions found for IKE profile {conn}, '
+ f'local NBMA {src_addr}, remote NBMA {dest_addr}')
+ else:
+ vici_ike_terminate(ikeid_list)
-def peer_up(dmvpn_type, conn):
- # src_addr = os.getenv('NHRP_SRCADDR')
+def iface_up(interface: str) -> None:
+ """Proceed tunnel interface UP event
+
+ Args:
+ interface (str): an interface name
+ """
+ if not interface:
+ logger.warning('No interface name provided for UP event')
+
+ logger.info(f'Turning up interface {interface}')
+ try:
+ cmd(f'sudo ip route flush proto 42 dev {interface}')
+ cmd(f'sudo ip neigh flush dev {interface}')
+ except Exception as err:
+ logger.error(
+ f'Unable to flush route on interface "{interface}": {err}')
+
+
+def peer_up(dmvpn_type: str, conn: str) -> None:
+ """Proceed NHRP peer UP event
+
+ Args:
+ dmvpn_type (str): a type of peer
+ conn (str): an IKE profile name
+ """
+ logger.info(f'Peer UP event for {dmvpn_type} using IKE profile {conn}')
src_nbma = os.getenv('NHRP_SRCNBMA')
- # dest_addr = os.getenv('NHRP_DESTADDR')
dest_nbma = os.getenv('NHRP_DESTNBMA')
dest_mtu = os.getenv('NHRP_DESTMTU')
+ if not src_nbma or not dest_nbma:
+ logger.error(
+ f'Can not get NHRP NBMA addresses: local {src_nbma}, '
+ f'remote {dest_nbma}')
+ return
+
+ logger.info(f'NBMA addresses: local {src_nbma}, remote {dest_nbma}')
if dest_mtu:
add_peer_route(src_nbma, dest_nbma, dest_mtu)
-
if conn and dmvpn_type == 'spoke' and process_named_running('charon'):
- vici_terminate(conn, 'dmvpn', src_nbma, dest_nbma)
+ vici_terminate(conn, src_nbma, dest_nbma)
vici_initiate(conn, 'dmvpn', src_nbma, dest_nbma)
-def peer_down(dmvpn_type, conn):
+def peer_down(dmvpn_type: str, conn: str) -> None:
+ """Proceed NHRP peer DOWN event
+
+ Args:
+ dmvpn_type (str): a type of peer
+ conn (str): an IKE profile name
+ """
+ logger.info(f'Peer DOWN event for {dmvpn_type} using IKE profile {conn}')
+
src_nbma = os.getenv('NHRP_SRCNBMA')
dest_nbma = os.getenv('NHRP_DESTNBMA')
+ if not src_nbma or not dest_nbma:
+ logger.error(
+ f'Can not get NHRP NBMA addresses: local {src_nbma}, '
+ f'remote {dest_nbma}')
+ return
+
+ logger.info(f'NBMA addresses: local {src_nbma}, remote {dest_nbma}')
if conn and dmvpn_type == 'spoke' and process_named_running('charon'):
- vici_terminate(conn, 'dmvpn', src_nbma, dest_nbma)
+ vici_terminate(conn, src_nbma, dest_nbma)
+ try:
+ cmd(f'sudo ip route del {dest_nbma} src {src_nbma} proto 42')
+ except Exception as err:
+ logger.error(
+ f'Unable to del route from {src_nbma} to {dest_nbma}: {err}')
- cmd(f'sudo ip route del {dest_nbma} src {src_nbma} proto 42')
+def route_up(interface: str) -> None:
+ """Proceed NHRP route UP event
+
+ Args:
+ interface (str): an interface name
+ """
+ logger.info(f'Route UP event for interface {interface}')
-def route_up(interface):
dest_addr = os.getenv('NHRP_DESTADDR')
dest_prefix = os.getenv('NHRP_DESTPREFIX')
next_hop = os.getenv('NHRP_NEXTHOP')
- cmd(f'sudo ip route replace {dest_addr}/{dest_prefix} proto 42 \
- via {next_hop} dev {interface}')
- cmd('sudo ip route flush cache')
+ if not dest_addr or not dest_prefix or not next_hop:
+ logger.error(
+ f'Can not get route details: dest_addr {dest_addr}, '
+ f'dest_prefix {dest_prefix}, next_hop {next_hop}')
+ return
+
+ logger.info(
+ f'Route details: dest_addr {dest_addr}, dest_prefix {dest_prefix}, '
+ f'next_hop {next_hop}')
+ try:
+ cmd(f'sudo ip route replace {dest_addr}/{dest_prefix} proto 42 \
+ via {next_hop} dev {interface}')
+ cmd('sudo ip route flush cache')
+ except Exception as err:
+ logger.error(
+ f'Unable to replace or flush route to {dest_addr}/{dest_prefix} '
+ f'via {next_hop} dev {interface}: {err}')
+
+
+def route_down(interface: str) -> None:
+ """Proceed NHRP route DOWN event
+
+ Args:
+ interface (str): an interface name
+ """
+ logger.info(f'Route DOWN event for interface {interface}')
-def route_down(interface):
dest_addr = os.getenv('NHRP_DESTADDR')
dest_prefix = os.getenv('NHRP_DESTPREFIX')
- cmd(f'sudo ip route del {dest_addr}/{dest_prefix} proto 42')
- cmd('sudo ip route flush cache')
+ if not dest_addr or not dest_prefix:
+ logger.error(
+ f'Can not get route details: dest_addr {dest_addr}, '
+ f'dest_prefix {dest_prefix}')
+ return
+
+ logger.info(
+ f'Route details: dest_addr {dest_addr}, dest_prefix {dest_prefix}')
+ try:
+ cmd(f'sudo ip route del {dest_addr}/{dest_prefix} proto 42')
+ cmd('sudo ip route flush cache')
+ except Exception as err:
+ logger.error(
+ f'Unable to delete or flush route to {dest_addr}/{dest_prefix}: '
+ f'{err}')
if __name__ == '__main__':
+ logger = getLogger('opennhrp-script', syslog=True)
+ logger.debug(
+ f'Running script with arguments: {sys.argv}, '
+ f'environment: {os.environ}')
+
action = sys.argv[1]
interface = os.getenv('NHRP_INTERFACE')
- dmvpn_type, profile_name = parse_type_ipsec(interface)
- dmvpn_conn = None
+ if not interface:
+ logger.error('Can not get NHRP interface name')
+ sys.exit(1)
- if profile_name:
- dmvpn_conn = f'dmvpn-{profile_name}-{interface}'
+ dmvpn_type, profile_name = parse_type_ipsec(interface)
+ if not dmvpn_type:
+ logger.info(f'Interface {interface} is not NHRP tunnel')
+ sys.exit()
+ dmvpn_conn: str = ''
+ if profile_name:
+ dmvpn_conn: str = f'dmvpn-{profile_name}-{interface}'
if action == 'interface-up':
iface_up(interface)
elif action == 'peer-register':
@@ -186,3 +386,5 @@ if __name__ == '__main__':
route_up(interface)
elif action == 'route-down':
route_down(interface)
+
+ sys.exit()
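
The rewritten opennhrp-script.py terminates IKE SAs by their unique id instead of by connection/child name, and every vici call drains the returned generator so the daemon actually executes the command. The essential shape of that pattern, reusing the vici calls exactly as the script above does (connection name and NBMA addresses are placeholders):

import vici

def terminate_by_nbma(conn: str, src_nbma: str, dst_nbma: str) -> None:
    session = vici.Session()
    # find the unique ids of IKE SAs between the two NBMA addresses
    ids = [sa[conn]['uniqueid'].decode('ascii')
           for sa in session.list_sas({'ike': conn})
           if sa[conn]['local-host'].decode('ascii') == src_nbma
           and sa[conn]['remote-host'].decode('ascii') == dst_nbma]
    # terminate each one, iterating the generator so vici completes the call
    for ikeid in ids:
        for _ in session.terminate({'ike-id': ikeid, 'timeout': '-1'}):
            pass

# terminate_by_nbma('dmvpn-NHRPVPN-tun10', '192.0.2.1', '198.51.100.2')
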
diff --git a/src/etc/systemd/system/wpa_supplicant-wired@.service.d/override.conf b/src/etc/systemd/system/wpa_supplicant-wired@.service.d/override.conf
new file mode 100644
index 000000000..030b89a2b
--- /dev/null
+++ b/src/etc/systemd/system/wpa_supplicant-wired@.service.d/override.conf
@@ -0,0 +1,11 @@
+[Unit]
+After=
+After=vyos-router.service
+
+[Service]
+WorkingDirectory=
+WorkingDirectory=/run/wpa_supplicant
+PIDFile=/run/wpa_supplicant/%I.pid
+ExecStart=
+ExecStart=/sbin/wpa_supplicant -c/run/wpa_supplicant/%I.conf -Dwired -P/run/wpa_supplicant/%I.pid -i%I
+ExecReload=/bin/kill -HUP $MAINPID
diff --git a/src/etc/telegraf/custom_scripts/show_interfaces_input_filter.py b/src/etc/telegraf/custom_scripts/show_interfaces_input_filter.py
index 0c7474156..6f14d6a8e 100755
--- a/src/etc/telegraf/custom_scripts/show_interfaces_input_filter.py
+++ b/src/etc/telegraf/custom_scripts/show_interfaces_input_filter.py
@@ -5,20 +5,6 @@ from vyos.ifconfig import Interface
import time
-def get_interfaces(type='', vlan=True):
- """
- Get interfaces:
- ['dum0', 'eth0', 'eth1', 'eth1.5', 'lo', 'tun0']
- """
- interfaces = []
- ifaces = Section.interfaces(type)
- for iface in ifaces:
- if vlan == False and '.' in iface:
- continue
- interfaces.append(iface)
-
- return interfaces
-
def get_interface_addresses(iface, link_local_v6=False):
"""
Get IP and IPv6 addresses from interface in one string
@@ -77,7 +63,7 @@ def get_interface_oper_state(iface):
return oper_state
-interfaces = get_interfaces()
+interfaces = Section.interfaces('')
for iface in interfaces:
print(f'show_interfaces,interface={iface} '
diff --git a/src/op_mode/conntrack.py b/src/op_mode/conntrack.py
index 036226418..b27aa6060 100755
--- a/src/op_mode/conntrack.py
+++ b/src/op_mode/conntrack.py
@@ -51,6 +51,21 @@ def _get_raw_data(family):
return _xml_to_dict(xml)
+def _get_raw_statistics():
+ entries = []
+ data = cmd('sudo conntrack -S')
+ data = data.replace(' \t', '').split('\n')
+ for entry in data:
+ entries.append(entry.split())
+ return entries
+
+
+def get_formatted_statistics(entries):
+ headers = ["CPU", "Found", "Invalid", "Insert", "Insert fail", "Drop", "Early drop", "Errors", "Search restart"]
+ output = tabulate(entries, headers, numalign="left")
+ return output
+
+
def get_formatted_output(dict_data):
"""
:param xml:
@@ -111,6 +126,14 @@ def show(raw: bool, family: str):
return get_formatted_output(conntrack_data)
+def show_statistics(raw: bool):
+ conntrack_statistics = _get_raw_statistics()
+ if raw:
+ return conntrack_statistics
+ else:
+ return get_formatted_statistics(conntrack_statistics)
+
+
if __name__ == '__main__':
try:
res = vyos.opmode.run(sys.modules[__name__])
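
The new show_statistics in conntrack.py simply splits the plain-text output of 'conntrack -S' into rows and hands them to tabulate. A self-contained sketch of that parsing on a captured sample line (the sample text is illustrative, not real router output):

from tabulate import tabulate

sample = ('cpu=0 found=0 invalid=11 insert=0 insert_failed=0 '
          'drop=0 early_drop=0 error=0 search_restart=22')

rows = [line.split() for line in sample.splitlines() if line]
headers = ['CPU', 'Found', 'Invalid', 'Insert', 'Insert fail', 'Drop',
           'Early drop', 'Errors', 'Search restart']
print(tabulate(rows, headers, numalign='left'))
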
diff --git a/src/op_mode/ipsec.py b/src/op_mode/ipsec.py
index 49c8e6142..a4d1b4cb1 100755
--- a/src/op_mode/ipsec.py
+++ b/src/op_mode/ipsec.py
@@ -16,13 +16,122 @@
import re
import sys
+
+from collections import OrderedDict
+from hurry import filesize
+from re import split as re_split
+from tabulate import tabulate
+
from vyos.util import call
+from vyos.util import convert_data
+from vyos.util import seconds_to_human
+
import vyos.opmode
SWANCTL_CONF = '/etc/swanctl/swanctl.conf'
+def _convert(text):
+ return int(text) if text.isdigit() else text.lower()
+
+
+def _alphanum_key(key):
+ return [_convert(c) for c in re_split('([0-9]+)', str(key))]
+
+
+def _get_vici_sas():
+ from vici import Session as vici_session
+
+ session = vici_session()
+ sas = list(session.list_sas())
+ return sas
+
+
+def _get_raw_data_sas():
+ get_sas = _get_vici_sas()
+ sas = convert_data(get_sas)
+ return sas
+
+
+def _get_formatted_output_sas(sas):
+ sa_data = []
+ for sa in sas:
+ for parent_sa in sa.values():
+ # create an item for each child-sa
+ for child_sa in parent_sa.get('child-sas', {}).values():
+ # prepare a list for output data
+ sa_out_name = sa_out_state = sa_out_uptime = sa_out_bytes = sa_out_packets = sa_out_remote_addr = sa_out_remote_id = sa_out_proposal = 'N/A'
+
+ # collect raw data
+ sa_name = child_sa.get('name')
+ sa_state = child_sa.get('state')
+ sa_uptime = child_sa.get('install-time')
+ sa_bytes_in = child_sa.get('bytes-in')
+ sa_bytes_out = child_sa.get('bytes-out')
+ sa_packets_in = child_sa.get('packets-in')
+ sa_packets_out = child_sa.get('packets-out')
+ sa_remote_addr = parent_sa.get('remote-host')
+ sa_remote_id = parent_sa.get('remote-id')
+ sa_proposal_encr_alg = child_sa.get('encr-alg')
+ sa_proposal_integ_alg = child_sa.get('integ-alg')
+ sa_proposal_encr_keysize = child_sa.get('encr-keysize')
+ sa_proposal_dh_group = child_sa.get('dh-group')
+
+ # format data to display
+ if sa_name:
+ sa_out_name = sa_name
+ if sa_state:
+ if sa_state == 'INSTALLED':
+ sa_out_state = 'up'
+ else:
+ sa_out_state = 'down'
+ if sa_uptime:
+ sa_out_uptime = seconds_to_human(sa_uptime)
+ if sa_bytes_in and sa_bytes_out:
+ bytes_in = filesize.size(int(sa_bytes_in))
+ bytes_out = filesize.size(int(sa_bytes_out))
+ sa_out_bytes = f'{bytes_in}/{bytes_out}'
+ if sa_packets_in and sa_packets_out:
+ packets_in = filesize.size(int(sa_packets_in),
+ system=filesize.si)
+ packets_out = filesize.size(int(sa_packets_out),
+ system=filesize.si)
+ packets_str = f'{packets_in}/{packets_out}'
+ sa_out_packets = re.sub(r'B', r'', packets_str)
+ if sa_remote_addr:
+ sa_out_remote_addr = sa_remote_addr
+ if sa_remote_id:
+ sa_out_remote_id = sa_remote_id
+ # format proposal
+ if sa_proposal_encr_alg:
+ sa_out_proposal = sa_proposal_encr_alg
+ if sa_proposal_encr_keysize:
+ sa_proposal_encr_keysize_str = sa_proposal_encr_keysize
+ sa_out_proposal = f'{sa_out_proposal}_{sa_proposal_encr_keysize_str}'
+ if sa_proposal_integ_alg:
+ sa_proposal_integ_alg_str = sa_proposal_integ_alg
+ sa_out_proposal = f'{sa_out_proposal}/{sa_proposal_integ_alg_str}'
+ if sa_proposal_dh_group:
+ sa_proposal_dh_group_str = sa_proposal_dh_group
+ sa_out_proposal = f'{sa_out_proposal}/{sa_proposal_dh_group_str}'
+
+ # add a new item to output data
+ sa_data.append([
+ sa_out_name, sa_out_state, sa_out_uptime, sa_out_bytes,
+ sa_out_packets, sa_out_remote_addr, sa_out_remote_id,
+ sa_out_proposal
+ ])
+
+ headers = [
+ "Connection", "State", "Uptime", "Bytes In/Out", "Packets In/Out",
+ "Remote address", "Remote ID", "Proposal"
+ ]
+ sa_data = sorted(sa_data, key=_alphanum_key)
+ output = tabulate(sa_data, headers)
+ return output
+
+
def get_peer_connections(peer, tunnel, return_all = False):
peer = peer.replace(':', '-')
search = rf'^[\s]*(peer_{peer}_(tunnel_[\d]+|vti)).*'
@@ -61,6 +170,13 @@ def reset_peer(peer: str, tunnel:str):
print('Peer reset result: ' + ('success' if result else 'failed'))
+def show_sa(raw: bool):
+ sa_data = _get_raw_data_sas()
+ if raw:
+ return sa_data
+ return _get_formatted_output_sas(sa_data)
+
+
if __name__ == '__main__':
try:
res = vyos.opmode.run(sys.modules[__name__])
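
ipsec.py sorts the SA table with a natural (alphanumeric) key so that, for example, peer_10 sorts after peer_2 instead of before it. The _convert/_alphanum_key pair is generic; a quick stand-alone demonstration with made-up connection names:

from re import split as re_split

def _convert(text):
    return int(text) if text.isdigit() else text.lower()

def _alphanum_key(key):
    # 'peer_10' -> ['peer_', 10, ''] so numeric chunks compare as numbers
    return [_convert(c) for c in re_split('([0-9]+)', str(key))]

names = ['peer_10_tunnel_1', 'peer_2_tunnel_10', 'peer_2_tunnel_2']
print(sorted(names, key=_alphanum_key))
# ['peer_2_tunnel_2', 'peer_2_tunnel_10', 'peer_10_tunnel_1']
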
diff --git a/src/op_mode/nat.py b/src/op_mode/nat.py
index 12fc4c782..1339d5b92 100755
--- a/src/op_mode/nat.py
+++ b/src/op_mode/nat.py
@@ -17,6 +17,7 @@
import jmespath
import json
import sys
+import xmltodict
from sys import exit
from tabulate import tabulate
@@ -27,7 +28,30 @@ from vyos.util import dict_search
import vyos.opmode
-def _get_json_data(direction):
+def _get_xml_translation(direction, family):
+ """
+ Get conntrack XML output --src-nat|--dst-nat
+ """
+ if direction == 'source':
+ opt = '--src-nat'
+ if direction == 'destination':
+ opt = '--dst-nat'
+ return cmd(f'sudo conntrack --dump --family {family} {opt} --output xml')
+
+
+def _xml_to_dict(xml):
+ """
+ Convert XML to dictionary
+ Return: dictionary
+ """
+ parse = xmltodict.parse(xml, attr_prefix='')
+ # If only one conntrack entry we must change dict
+ if 'meta' in parse['conntrack']['flow']:
+ return dict(conntrack={'flow': [parse['conntrack']['flow']]})
+ return parse
+
+
+def _get_json_data(direction, family):
"""
Get NAT format JSON
"""
@@ -35,14 +59,15 @@ def _get_json_data(direction):
chain = 'POSTROUTING'
if direction == 'destination':
chain = 'PREROUTING'
- return cmd(f'sudo nft --json list chain ip nat {chain}')
+ family = 'ip6' if family == 'inet6' else 'ip'
+ return cmd(f'sudo nft --json list chain {family} nat {chain}')
-def _get_raw_data_rules(direction):
+def _get_raw_data_rules(direction, family):
"""Get interested rules
:returns dict
"""
- data = _get_json_data(direction)
+ data = _get_json_data(direction, family)
data_dict = json.loads(data)
rules = []
for rule in data_dict['nftables']:
@@ -51,10 +76,28 @@ def _get_raw_data_rules(direction):
return rules
-def _get_formatted_output_rules(data, direction):
+def _get_raw_translation(direction, family):
+ """
+ Return: dictionary
+ """
+ xml = _get_xml_translation(direction, family)
+ if len(xml) == 0:
+ output = {'conntrack':
+ {
+ 'error': True,
+ 'reason': 'entries not found'
+ }
+ }
+ return output
+ return _xml_to_dict(xml)
+
+
+def _get_formatted_output_rules(data, direction, family):
# Add default values before loop
sport, dport, proto = 'any', 'any', 'any'
- saddr, daddr = '0.0.0.0/0', '0.0.0.0/0'
+ saddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
+ daddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
+
data_entries = []
for rule in data:
if 'comment' in rule['rule']:
@@ -69,11 +112,13 @@ def _get_formatted_output_rules(data, direction):
if 'prefix' in match['right'] or 'set' in match['right']:
# Merge dict src/dst l3_l4 parameters
my_dict = {**match['left']['payload'], **match['right']}
+ my_dict['op'] = match['op']
+ op = '!' if my_dict.get('op') == '!=' else ''
proto = my_dict.get('protocol').upper()
if my_dict['field'] == 'saddr':
- saddr = f'{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
+ saddr = f'{op}{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
elif my_dict['field'] == 'daddr':
- daddr = f'{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
+ daddr = f'{op}{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
elif my_dict['field'] == 'sport':
# Port range or single port
if jmespath.search('set[*].range', my_dict):
@@ -96,8 +141,8 @@ def _get_formatted_output_rules(data, direction):
if jmespath.search('left.payload.field', match) == 'daddr':
daddr = match.get('right')
else:
- saddr = '0.0.0.0/0'
- daddr = '0.0.0.0/0'
+ saddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
+ daddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
sport = 'any'
dport = 'any'
proto = 'any'
@@ -175,22 +220,83 @@ def _get_formatted_output_statistics(data, direction):
return output
-def show_rules(raw: bool, direction: str):
- nat_rules = _get_raw_data_rules(direction)
+def _get_formatted_translation(dict_data, nat_direction, family):
+ data_entries = []
+ if 'error' in dict_data['conntrack']:
+ return 'Entries not found'
+ for entry in dict_data['conntrack']['flow']:
+ orig_src, orig_dst, orig_sport, orig_dport = {}, {}, {}, {}
+ reply_src, reply_dst, reply_sport, reply_dport = {}, {}, {}, {}
+ proto = {}
+ for meta in entry['meta']:
+ direction = meta['direction']
+ if direction in ['original']:
+ if 'layer3' in meta:
+ orig_src = meta['layer3']['src']
+ orig_dst = meta['layer3']['dst']
+ if 'layer4' in meta:
+ if meta.get('layer4').get('sport'):
+ orig_sport = meta['layer4']['sport']
+ if meta.get('layer4').get('dport'):
+ orig_dport = meta['layer4']['dport']
+ proto = meta['layer4']['protoname']
+ if direction in ['reply']:
+ if 'layer3' in meta:
+ reply_src = meta['layer3']['src']
+ reply_dst = meta['layer3']['dst']
+ if 'layer4' in meta:
+ if meta.get('layer4').get('sport'):
+ reply_sport = meta['layer4']['sport']
+ if meta.get('layer4').get('dport'):
+ reply_dport = meta['layer4']['dport']
+ proto = meta['layer4']['protoname']
+ if direction == 'independent':
+ conn_id = meta['id']
+ timeout = meta['timeout']
+ orig_src = f'{orig_src}:{orig_sport}' if orig_sport else orig_src
+ orig_dst = f'{orig_dst}:{orig_dport}' if orig_dport else orig_dst
+ reply_src = f'{reply_src}:{reply_sport}' if reply_sport else reply_src
+ reply_dst = f'{reply_dst}:{reply_dport}' if reply_dport else reply_dst
+ state = meta['state'] if 'state' in meta else ''
+ mark = meta['mark']
+ zone = meta['zone'] if 'zone' in meta else ''
+ if nat_direction == 'source':
+ data_entries.append(
+ [orig_src, reply_dst, proto, timeout, mark, zone])
+ elif nat_direction == 'destination':
+ data_entries.append(
+ [orig_dst, reply_src, proto, timeout, mark, zone])
+
+ headers = ["Pre-NAT", "Post-NAT", "Proto", "Timeout", "Mark", "Zone"]
+ output = tabulate(data_entries, headers, numalign="left")
+ return output
+
+
+def show_rules(raw: bool, direction: str, family: str):
+ nat_rules = _get_raw_data_rules(direction, family)
if raw:
return nat_rules
else:
- return _get_formatted_output_rules(nat_rules, direction)
+ return _get_formatted_output_rules(nat_rules, direction, family)
-def show_statistics(raw: bool, direction: str):
- nat_statistics = _get_raw_data_rules(direction)
+def show_statistics(raw: bool, direction: str, family: str):
+ nat_statistics = _get_raw_data_rules(direction, family)
if raw:
return nat_statistics
else:
return _get_formatted_output_statistics(nat_statistics, direction)
+def show_translations(raw: bool, direction: str, family: str):
+ family = 'ipv6' if family == 'inet6' else 'ipv4'
+ nat_translation = _get_raw_translation(direction, family)
+ if raw:
+ return nat_translation
+ else:
+ return _get_formatted_translation(nat_translation, direction, family)
+
+
if __name__ == '__main__':
try:
res = vyos.opmode.run(sys.modules[__name__])
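
nat.py now reads live translations from conntrack's XML dump; xmltodict returns a single dict instead of a list when only one flow is present, which is what the re-wrapping in _xml_to_dict handles. A small reproduction of that normalization (the XML snippet is a trimmed, invented example):

import xmltodict

single_flow = """
<conntrack>
  <flow>
    <meta direction="original"/>
    <meta direction="reply"/>
  </flow>
</conntrack>
"""

parsed = xmltodict.parse(single_flow, attr_prefix='')
flow = parsed['conntrack']['flow']
if isinstance(flow, dict):          # one entry only: wrap it in a list
    parsed = {'conntrack': {'flow': [flow]}}
# downstream code can then always iterate parsed['conntrack']['flow']
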
diff --git a/src/op_mode/openconnect-control.py b/src/op_mode/openconnect-control.py
index a128cc011..20c50e779 100755
--- a/src/op_mode/openconnect-control.py
+++ b/src/op_mode/openconnect-control.py
@@ -19,7 +19,6 @@ import argparse
import json
from vyos.config import Config
-from vyos.util import commit_in_progress
from vyos.util import popen
from vyos.util import run
from vyos.util import DEVNULL
@@ -60,10 +59,6 @@ def main():
# Check is Openconnect server configured
is_ocserv_configured()
- if commit_in_progress():
- print('Cannot restart openconnect while a commit is in progress')
- exit(1)
-
if args.action == "restart":
run("sudo systemctl restart ocserv.service")
sys.exit(0)
diff --git a/src/op_mode/openconnect.py b/src/op_mode/openconnect.py
new file mode 100755
index 000000000..00992c66a
--- /dev/null
+++ b/src/op_mode/openconnect.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import json
+
+from tabulate import tabulate
+from vyos.configquery import ConfigTreeQuery
+from vyos.util import rc_cmd
+
+import vyos.opmode
+
+
+occtl = '/usr/bin/occtl'
+occtl_socket = '/run/ocserv/occtl.socket'
+
+
+def _get_raw_data_sessions():
+ rc, out = rc_cmd(f'sudo {occtl} --json --socket-file {occtl_socket} show users')
+ if rc != 0:
+ output = {'openconnect':
+ {
+ 'configured': False,
+ 'return_code': rc,
+ 'reason': out
+ }
+ }
+ return output
+
+ sessions = json.loads(out)
+ return sessions
+
+
+def _get_formatted_sessions(data):
+ headers = ["Interface", "Username", "IP", "Remote IP", "RX", "TX", "State", "Uptime"]
+ ses_list = []
+ for ses in data:
+ ses_list.append([
+ ses["Device"], ses["Username"], ses["IPv4"], ses["Remote IP"],
+ ses["_RX"], ses["_TX"], ses["State"], ses["_Connected at"]
+ ])
+ if len(ses_list) > 0:
+ output = tabulate(ses_list, headers)
+ else:
+ output = 'No active openconnect sessions'
+ return output
+
+
+def show_sessions(raw: bool):
+ config = ConfigTreeQuery()
+ if not config.exists('vpn openconnect') and not raw:
+ print('Openconnect is not configured')
+ exit(0)
+
+ openconnect_data = _get_raw_data_sessions()
+ if raw:
+ return openconnect_data
+ return _get_formatted_sessions(openconnect_data)
+
+
+if __name__ == '__main__':
+ try:
+ res = vyos.opmode.run(sys.modules[__name__])
+ if res:
+ print(res)
+ except (ValueError, vyos.opmode.Error) as e:
+ print(e)
+ sys.exit(1)
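As an illustration only (not part of the patch), _get_formatted_sessions() expects a list of occtl user records carrying at least the keys shown below; the values here are invented.

    sample_sessions = [{
        'Device': 'sslvpn0',        # hypothetical interface name
        'Username': 'alice',
        'IPv4': '100.64.0.2',
        'Remote IP': '192.0.2.50',
        '_RX': '1.2 MB',
        '_TX': '5.6 MB',
        'State': 'connected',
        '_Connected at': '1h10m',
    }]

    # _get_formatted_sessions(sample_sessions) returns a tabulate() table with the
    # headers Interface, Username, IP, Remote IP, RX, TX, State, Uptime; an empty
    # list yields 'No active openconnect sessions'.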
diff --git a/src/op_mode/restart_dhcp_relay.py b/src/op_mode/restart_dhcp_relay.py
index db5a48970..9203c009f 100755
--- a/src/op_mode/restart_dhcp_relay.py
+++ b/src/op_mode/restart_dhcp_relay.py
@@ -43,7 +43,7 @@ if __name__ == '__main__':
if commit_in_progress():
print('Cannot restart DHCP relay while a commit is in progress')
exit(1)
- call('systemctl restart isc-dhcp-server.service')
+ call('systemctl restart isc-dhcp-relay.service')
sys.exit(0)
elif args.ipv6:
@@ -54,7 +54,7 @@ if __name__ == '__main__':
if commit_in_progress():
print('Cannot restart DHCPv6 relay while commit is in progress')
exit(1)
- call('systemctl restart isc-dhcp-server6.service')
+ call('systemctl restart isc-dhcp-relay6.service')
sys.exit(0)
else:
diff --git a/src/op_mode/show_nat66_rules.py b/src/op_mode/show_nat66_rules.py
deleted file mode 100755
index 967ec9d37..000000000
--- a/src/op_mode/show_nat66_rules.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2021 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import jmespath
-import json
-
-from argparse import ArgumentParser
-from jinja2 import Template
-from sys import exit
-from vyos.util import cmd
-from vyos.util import dict_search
-
-parser = ArgumentParser()
-group = parser.add_mutually_exclusive_group()
-group.add_argument("--source", help="Show statistics for configured source NAT rules", action="store_true")
-group.add_argument("--destination", help="Show statistics for configured destination NAT rules", action="store_true")
-args = parser.parse_args()
-
-if args.source or args.destination:
- tmp = cmd('sudo nft -j list table ip6 nat')
- tmp = json.loads(tmp)
-
- format_nat66_rule = '{0: <10} {1: <50} {2: <50} {3: <10}'
- print(format_nat66_rule.format("Rule", "Source" if args.source else "Destination", "Translation", "Outbound Interface" if args.source else "Inbound Interface"))
- print(format_nat66_rule.format("----", "------" if args.source else "-----------", "-----------", "------------------" if args.source else "-----------------"))
-
- data_json = jmespath.search('nftables[?rule].rule[?chain]', tmp)
- for idx in range(0, len(data_json)):
- data = data_json[idx]
-
- # The following key values must exist
- # When the rule JSON does not have some keys, this is not a rule we can work with
- continue_rule = False
- for key in ['comment', 'chain', 'expr']:
- if key not in data:
- continue_rule = True
- continue
- if continue_rule:
- continue
-
- comment = data['comment']
-
- # Check the annotation to see if the annotation format is created by VYOS
- continue_rule = True
- for comment_prefix in ['SRC-NAT66-', 'DST-NAT66-']:
- if comment_prefix in comment:
- continue_rule = False
- if continue_rule:
- continue
-
- # When log is detected from the second index of expr, then this rule should be ignored
- if 'log' in data['expr'][2]:
- continue
-
- rule = comment.replace('SRC-NAT66-','')
- rule = rule.replace('DST-NAT66-','')
- chain = data['chain']
- if not ((args.source and chain == 'POSTROUTING') or (not args.source and chain == 'PREROUTING')):
- continue
- interface = dict_search('match.right', data['expr'][0])
- srcdest = dict_search('match.right.prefix.addr', data['expr'][2])
- if srcdest:
- addr_tmp = dict_search('match.right.prefix.len', data['expr'][2])
- if addr_tmp:
- srcdest = srcdest + '/' + str(addr_tmp)
- else:
- srcdest = dict_search('match.right', data['expr'][2])
-
- tran_addr_json = dict_search('snat.addr' if args.source else 'dnat.addr', data['expr'][3])
- if tran_addr_json:
- if isinstance(srcdest_json,str):
- tran_addr = tran_addr_json
-
- if 'prefix' in tran_addr_json:
- addr_tmp = dict_search('snat.addr.prefix.addr' if args.source else 'dnat.addr.prefix.addr', data['expr'][3])
- len_tmp = dict_search('snat.addr.prefix.len' if args.source else 'dnat.addr.prefix.len', data['expr'][3])
- if addr_tmp:
- tran_addr = addr_tmp + '/' + str(len_tmp)
- else:
- if 'masquerade' in data['expr'][3]:
- tran_addr = 'masquerade'
-
- print(format_nat66_rule.format(rule, srcdest, tran_addr, interface))
-
- exit(0)
-else:
- parser.print_help()
- exit(1)
-
diff --git a/src/services/api/graphql/bindings.py b/src/services/api/graphql/bindings.py
index 049d59de7..0b1260912 100644
--- a/src/services/api/graphql/bindings.py
+++ b/src/services/api/graphql/bindings.py
@@ -17,6 +17,7 @@ import vyos.defaults
from . graphql.queries import query
from . graphql.mutations import mutation
from . graphql.directives import directives_dict
+from . graphql.errors import op_mode_error
from . utils.schema_from_op_mode import generate_op_mode_definitions
from ariadne import make_executable_schema, load_schema_from_path, snake_case_fallback_resolvers
@@ -27,6 +28,6 @@ def generate_schema():
type_defs = load_schema_from_path(api_schema_dir)
- schema = make_executable_schema(type_defs, query, mutation, snake_case_fallback_resolvers, directives=directives_dict)
+ schema = make_executable_schema(type_defs, query, op_mode_error, mutation, snake_case_fallback_resolvers, directives=directives_dict)
return schema
diff --git a/src/services/api/graphql/graphql/errors.py b/src/services/api/graphql/graphql/errors.py
new file mode 100644
index 000000000..1066300e0
--- /dev/null
+++ b/src/services/api/graphql/graphql/errors.py
@@ -0,0 +1,8 @@
+
+from ariadne import InterfaceType
+
+op_mode_error = InterfaceType("OpModeError")
+
+@op_mode_error.type_resolver
+def resolve_op_mode_error(obj, *_):
+ return obj['name']
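A short sketch of what the interface resolver sees: the resolvers in queries.py and mutations.py (later in this change) place the exception class name under 'name', and resolve_op_mode_error() returns that string so ariadne can select the matching concrete type.

    # Illustration only; the concrete type name must exist in the generated
    # op_mode_error.graphql schema.
    err = {'name': 'DataUnavailable',
           'message': 'data currently unavailable',
           'vyos_code': 2001}
    # resolve_op_mode_error(err) -> 'DataUnavailable'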
diff --git a/src/services/api/graphql/graphql/mutations.py b/src/services/api/graphql/graphql/mutations.py
index 3e89fb239..1b77cff87 100644
--- a/src/services/api/graphql/graphql/mutations.py
+++ b/src/services/api/graphql/graphql/mutations.py
@@ -21,7 +21,9 @@ from makefun import with_signature
from .. import state
from .. import key_auth
-from api.graphql.recipes.session import Session
+from api.graphql.session.session import Session
+from api.graphql.session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code
+from vyos.opmode import Error as OpModeError
mutation = ObjectType("Mutation")
@@ -71,7 +73,7 @@ def make_mutation_resolver(mutation_name, class_name, session_func):
# one may override the session functions with a local subclass
try:
- mod = import_module(f'api.graphql.recipes.{func_base_name}')
+ mod = import_module(f'api.graphql.session.override.{func_base_name}')
klass = getattr(mod, class_name)
except ImportError:
# otherwise, dynamically generate subclass to invoke subclass
@@ -86,10 +88,19 @@ def make_mutation_resolver(mutation_name, class_name, session_func):
"success": True,
"data": data
}
+ except OpModeError as e:
+ typename = type(e).__name__
+ return {
+ "success": False,
+ "errore": ['op_mode_error'],
+ "op_mode_error": {"name": f"{typename}",
+ "message": op_mode_err_msg.get(typename, "Unknown"),
+ "vyos_code": op_mode_err_code.get(typename, 9999)}
+ }
except Exception as error:
return {
"success": False,
- "errors": [str(error)]
+ "errors": [repr(error)]
}
return func_impl
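For illustration, assuming the underlying op-mode call raises vyos.opmode.DataUnavailable, the resolver above returns a payload shaped like the following, which ariadne serializes into the generated {Name}Result type:

    # Example resolver return value on an op-mode error (values follow
    # op_mode_errors.py below; not a captured response).
    example_result = {
        "success": False,
        "errors": ["op_mode_error"],
        "op_mode_error": {
            "name": "DataUnavailable",
            "message": "data currently unavailable",
            "vyos_code": 2001,
        },
    }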
diff --git a/src/services/api/graphql/graphql/queries.py b/src/services/api/graphql/graphql/queries.py
index f6544709e..8ae61b704 100644
--- a/src/services/api/graphql/graphql/queries.py
+++ b/src/services/api/graphql/graphql/queries.py
@@ -21,7 +21,9 @@ from makefun import with_signature
from .. import state
from .. import key_auth
-from api.graphql.recipes.session import Session
+from api.graphql.session.session import Session
+from api.graphql.session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code
+from vyos.opmode import Error as OpModeError
query = ObjectType("Query")
@@ -71,7 +73,7 @@ def make_query_resolver(query_name, class_name, session_func):
# one may override the session functions with a local subclass
try:
- mod = import_module(f'api.graphql.recipes.{func_base_name}')
+ mod = import_module(f'api.graphql.session.override.{func_base_name}')
klass = getattr(mod, class_name)
except ImportError:
# otherwise, dynamically generate subclass to invoke subclass
@@ -86,10 +88,19 @@ def make_query_resolver(query_name, class_name, session_func):
"success": True,
"data": data
}
+ except OpModeError as e:
+ typename = type(e).__name__
+ return {
+ "success": False,
+ "errors": ['op_mode_error'],
+ "op_mode_error": {"name": f"{typename}",
+ "message": op_mode_err_msg.get(typename, "Unknown"),
+ "vyos_code": op_mode_err_code.get(typename, 9999)}
+ }
except Exception as error:
return {
"success": False,
- "errors": [str(error)]
+ "errors": [repr(error)]
}
return func_impl
diff --git a/src/services/api/graphql/recipes/__init__.py b/src/services/api/graphql/session/__init__.py
index e69de29bb..e69de29bb 100644
--- a/src/services/api/graphql/recipes/__init__.py
+++ b/src/services/api/graphql/session/__init__.py
diff --git a/src/services/api/graphql/recipes/queries/system_status.py b/src/services/api/graphql/session/composite/system_status.py
index 8dadcc9f3..8dadcc9f3 100755
--- a/src/services/api/graphql/recipes/queries/system_status.py
+++ b/src/services/api/graphql/session/composite/system_status.py
diff --git a/src/services/api/graphql/session/errors/op_mode_errors.py b/src/services/api/graphql/session/errors/op_mode_errors.py
new file mode 100644
index 000000000..7ba75455d
--- /dev/null
+++ b/src/services/api/graphql/session/errors/op_mode_errors.py
@@ -0,0 +1,13 @@
+
+
+op_mode_err_msg = {
+ "UnconfiguredSubsystem": "subsystem is not configured or not running",
+ "DataUnavailable": "data currently unavailable",
+ "PermissionDenied": "client does not have permission"
+}
+
+op_mode_err_code = {
+ "UnconfiguredSubsystem": 2000,
+ "DataUnavailable": 2001,
+ "PermissionDenied": 1003
+}
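The resolvers consult these tables with a fallback, so an error class that has not yet been added here still produces a well-formed response; a minimal sketch:

    # Assuming: from api.graphql.session.errors.op_mode_errors import (
    #     op_mode_err_msg, op_mode_err_code)
    typename = 'SomeFutureError'  # hypothetical, not a real vyos.opmode class
    message = op_mode_err_msg.get(typename, "Unknown")   # -> "Unknown"
    code = op_mode_err_code.get(typename, 9999)          # -> 9999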
diff --git a/src/services/api/graphql/recipes/remove_firewall_address_group_members.py b/src/services/api/graphql/session/override/remove_firewall_address_group_members.py
index b91932e14..b91932e14 100644
--- a/src/services/api/graphql/recipes/remove_firewall_address_group_members.py
+++ b/src/services/api/graphql/session/override/remove_firewall_address_group_members.py
diff --git a/src/services/api/graphql/recipes/session.py b/src/services/api/graphql/session/session.py
index ac185beb7..93e1c328e 100644
--- a/src/services/api/graphql/recipes/session.py
+++ b/src/services/api/graphql/session/session.py
@@ -22,6 +22,7 @@ from vyos.config import Config
from vyos.configtree import ConfigTree
from vyos.defaults import directories
from vyos.template import render
+from vyos.opmode import Error as OpModeError
from api.graphql.utils.util import load_op_mode_as_module, split_compound_op_mode_name
@@ -149,7 +150,7 @@ class Session:
return res
def system_status(self):
- import api.graphql.recipes.queries.system_status as system_status
+ import api.graphql.session.composite.system_status as system_status
session = self._session
data = self._data
@@ -177,10 +178,10 @@ class Session:
mod = load_op_mode_as_module(f'{scriptname}')
func = getattr(mod, func_name)
- if len(list(data)) > 0:
+ try:
res = func(True, **data)
- else:
- res = func(True)
+ except OpModeError as e:
+ raise e
return res
@@ -199,9 +200,9 @@ class Session:
mod = load_op_mode_as_module(f'{scriptname}')
func = getattr(mod, func_name)
- if len(list(data)) > 0:
+ try:
res = func(**data)
- else:
- res = func()
+ except OpModeError as e:
+ raise e
return res
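A small aside on why the len(list(data)) branches could be dropped here: unpacking an empty dict passes no keyword arguments, so the single call form covers both cases. A minimal sketch:

    def func(raw, **kwargs):
        return raw, kwargs

    # Identical results whether or not extra data is present:
    assert func(True, **{}) == func(True) == (True, {})
    assert func(True, **{'family': 'inet'}) == (True, {'family': 'inet'})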
diff --git a/src/services/api/graphql/recipes/templates/create_dhcp_server.tmpl b/src/services/api/graphql/session/templates/create_dhcp_server.tmpl
index 70de43183..70de43183 100644
--- a/src/services/api/graphql/recipes/templates/create_dhcp_server.tmpl
+++ b/src/services/api/graphql/session/templates/create_dhcp_server.tmpl
diff --git a/src/services/api/graphql/recipes/templates/create_firewall_address_group.tmpl b/src/services/api/graphql/session/templates/create_firewall_address_group.tmpl
index a890d0086..a890d0086 100644
--- a/src/services/api/graphql/recipes/templates/create_firewall_address_group.tmpl
+++ b/src/services/api/graphql/session/templates/create_firewall_address_group.tmpl
diff --git a/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl b/src/services/api/graphql/session/templates/create_firewall_address_ipv_6_group.tmpl
index e9b660722..e9b660722 100644
--- a/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl
+++ b/src/services/api/graphql/session/templates/create_firewall_address_ipv_6_group.tmpl
diff --git a/src/services/api/graphql/recipes/templates/create_interface_ethernet.tmpl b/src/services/api/graphql/session/templates/create_interface_ethernet.tmpl
index d9d7ed691..d9d7ed691 100644
--- a/src/services/api/graphql/recipes/templates/create_interface_ethernet.tmpl
+++ b/src/services/api/graphql/session/templates/create_interface_ethernet.tmpl
diff --git a/src/services/api/graphql/recipes/templates/remove_firewall_address_group_members.tmpl b/src/services/api/graphql/session/templates/remove_firewall_address_group_members.tmpl
index 458f3e5fc..458f3e5fc 100644
--- a/src/services/api/graphql/recipes/templates/remove_firewall_address_group_members.tmpl
+++ b/src/services/api/graphql/session/templates/remove_firewall_address_group_members.tmpl
diff --git a/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/session/templates/remove_firewall_address_ipv_6_group_members.tmpl
index 0efa0b226..0efa0b226 100644
--- a/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl
+++ b/src/services/api/graphql/session/templates/remove_firewall_address_ipv_6_group_members.tmpl
diff --git a/src/services/api/graphql/recipes/templates/update_firewall_address_group_members.tmpl b/src/services/api/graphql/session/templates/update_firewall_address_group_members.tmpl
index f56c61231..f56c61231 100644
--- a/src/services/api/graphql/recipes/templates/update_firewall_address_group_members.tmpl
+++ b/src/services/api/graphql/session/templates/update_firewall_address_group_members.tmpl
diff --git a/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/session/templates/update_firewall_address_ipv_6_group_members.tmpl
index f98a5517c..f98a5517c 100644
--- a/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl
+++ b/src/services/api/graphql/session/templates/update_firewall_address_ipv_6_group_members.tmpl
diff --git a/src/services/api/graphql/utils/schema_from_op_mode.py b/src/services/api/graphql/utils/schema_from_op_mode.py
index d27586747..379d15250 100755
--- a/src/services/api/graphql/utils/schema_from_op_mode.py
+++ b/src/services/api/graphql/utils/schema_from_op_mode.py
@@ -21,17 +21,21 @@
import os
import json
import typing
-from inspect import signature, getmembers, isfunction
+from inspect import signature, getmembers, isfunction, isclass, getmro
from jinja2 import Template
from vyos.defaults import directories
-from . util import load_as_module, is_op_mode_function_name, is_show_function_name
+if __package__ is None or __package__ == '':
+ from util import load_as_module, is_op_mode_function_name, is_show_function_name
+else:
+ from . util import load_as_module, is_op_mode_function_name, is_show_function_name
OP_MODE_PATH = directories['op_mode']
SCHEMA_PATH = directories['api_schema']
DATA_DIR = directories['data']
op_mode_include_file = os.path.join(DATA_DIR, 'op-mode-standardized.json')
+op_mode_error_schema = 'op_mode_error.graphql'
schema_data: dict = {'schema_name': '',
'schema_fields': []}
@@ -50,6 +54,7 @@ type {{ schema_name }} {
type {{ schema_name }}Result {
data: {{ schema_name }}
+ op_mode_error: OpModeError
success: Boolean!
errors: [String]
}
@@ -73,6 +78,7 @@ type {{ schema_name }} {
type {{ schema_name }}Result {
data: {{ schema_name }}
+ op_mode_error: OpModeError
success: Boolean!
errors: [String]
}
@@ -82,6 +88,21 @@ extend type Mutation {
}
"""
+error_template = """
+interface OpModeError {
+ name: String!
+ message: String!
+ vyos_code: Int!
+}
+{% for name in error_names %}
+type {{ name }} implements OpModeError {
+ name: String!
+ message: String!
+ vyos_code: Int!
+}
+{%- endfor %}
+"""
+
def _snake_to_pascal_case(name: str) -> str:
res = ''.join(map(str.title, name.split('_')))
return res
@@ -133,7 +154,30 @@ def create_schema(func_name: str, base_name: str, func: callable) -> str:
return res
+def create_error_schema():
+ from vyos import opmode
+
+ e = Exception
+ err_types = getmembers(opmode, isclass)
+ err_types = [k for k in err_types if issubclass(k[1], e)]
+ # drop base class, to be replaced by interface type. Find the class
+ # programmatically, in case the base class name changes.
+ for i in range(len(err_types)):
+ if err_types[i][1] in getmro(err_types[i-1][1]):
+ del err_types[i]
+ break
+ err_names = [k[0] for k in err_types]
+ error_data = {'error_names': err_names}
+ j2_template = Template(error_template)
+ res = j2_template.render(error_data)
+
+ return res
+
def generate_op_mode_definitions():
+ out = create_error_schema()
+ with open(f'{SCHEMA_PATH}/{op_mode_error_schema}', 'w') as f:
+ f.write(out)
+
with open(op_mode_include_file) as f:
op_mode_files = json.load(f)
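To see what create_error_schema() writes to op_mode_error.graphql, one can render error_template directly; the template below is copied from the hunk above, the class names are taken from op_mode_errors.py in this change, and the real list is discovered from vyos.opmode at generation time.

    from jinja2 import Template

    error_template = """
    interface OpModeError {
        name: String!
        message: String!
        vyos_code: Int!
    }
    {% for name in error_names %}
    type {{ name }} implements OpModeError {
        name: String!
        message: String!
        vyos_code: Int!
    }
    {%- endfor %}
    """

    names = ['DataUnavailable', 'PermissionDenied', 'UnconfiguredSubsystem']
    print(Template(error_template).render(error_names=names))
    # Prints the OpModeError interface followed by one
    # "type <Name> implements OpModeError { ... }" block per name.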
diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server
index af8837e1e..190f3409d 100755
--- a/src/services/vyos-http-api-server
+++ b/src/services/vyos-http-api-server
@@ -678,6 +678,7 @@ if __name__ == '__main__':
server_config = load_server_config()
except Exception as err:
logger.critical(f"Failed to load the HTTP API server config: {err}")
+ sys.exit(1)
config_session = ConfigSession(os.getpid())
diff --git a/src/system/keepalived-fifo.py b/src/system/keepalived-fifo.py
index a8df232ae..a0fccd1d0 100755
--- a/src/system/keepalived-fifo.py
+++ b/src/system/keepalived-fifo.py
@@ -30,6 +30,7 @@ from vyos.ifconfig.vrrp import VRRP
from vyos.configquery import ConfigTreeQuery
from vyos.util import cmd
from vyos.util import dict_search
+from vyos.util import commit_in_progress
# configure logging
logger = logging.getLogger(__name__)
@@ -63,6 +64,17 @@ class KeepalivedFifo:
# load configuration
def _config_load(self):
+ # For VRRP configuration to be read, the commit must be finished
+ count = 1
+ while commit_in_progress():
+ if count <= 40:
+ logger.debug(f'commit in progress try: {count}')
+ else:
+ logger.error(f'commit still in progress after {count} attempts, continuing anyway')
+ break
+ count += 1
+ time.sleep(0.5)
+
try:
base = ['high-availability', 'vrrp']
conf = ConfigTreeQuery()
diff --git a/src/systemd/telegraf.service b/src/systemd/telegraf.service
new file mode 100644
index 000000000..553942ac6
--- /dev/null
+++ b/src/systemd/telegraf.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=The plugin-driven server agent for reporting metrics into InfluxDB
+Documentation=https://github.com/influxdata/telegraf
+After=network.target
+
+[Service]
+EnvironmentFile=-/etc/default/telegraf
+ExecStart=/usr/bin/telegraf --config /run/telegraf/vyos-telegraf.conf --config-directory /etc/telegraf/telegraf.d
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=on-failure
+RestartForceExitStatus=SIGPIPE
+KillMode=control-group
+
+[Install]
+WantedBy=multi-user.target