Diffstat (limited to 'src')
-rwxr-xr-x  src/activation-scripts/20-ethernet_offload.py              | 103
-rwxr-xr-x  src/completion/list_esi.sh                                  |  20
-rwxr-xr-x  src/completion/list_vni.sh                                  |  20
-rw-r--r--  src/completion/qos/list_traffic_match_group.py              |  35
-rwxr-xr-x  src/conf_mode/container.py                                  |  21
-rwxr-xr-x  src/conf_mode/interfaces_openvpn.py                         |   8
-rwxr-xr-x  src/conf_mode/interfaces_tunnel.py                          |  19
-rwxr-xr-x  src/conf_mode/load-balancing_reverse-proxy.py               |  86
-rwxr-xr-x  src/conf_mode/nat.py                                        |  18
-rwxr-xr-x  src/conf_mode/nat64.py                                      |  10
-rwxr-xr-x  src/conf_mode/nat66.py                                      |  22
-rwxr-xr-x  src/conf_mode/nat_cgnat.py                                  | 125
-rwxr-xr-x  src/conf_mode/protocols_bfd.py                              |   2
-rwxr-xr-x  src/conf_mode/qos.py                                        |  77
-rwxr-xr-x  src/conf_mode/service_dhcpv6-server.py                      |   8
-rwxr-xr-x  src/conf_mode/service_dns_forwarding.py                     |  15
-rwxr-xr-x  src/conf_mode/service_suricata.py                           | 161
-rwxr-xr-x  src/conf_mode/service_upnp.py                               | 157
-rw-r--r--  src/etc/systemd/system/suricata.service.d/10-override.conf  |   9
-rwxr-xr-x  src/helpers/run-config-activation.py                        |  83
-rwxr-xr-x  src/init/vyos-router                                        |  23
-rwxr-xr-x  src/migration-scripts/nat/7-to-8                            |  62
-rwxr-xr-x  src/migration-scripts/reverse-proxy/0-to-1                  |  48
-rwxr-xr-x  src/op_mode/cgnat.py                                        |  96
-rw-r--r--  src/op_mode/evpn.py                                         |  46
-rwxr-xr-x  src/op_mode/ikev2_profile_generator.py                      |  19
-rwxr-xr-x  src/op_mode/image_installer.py                              |  34
-rwxr-xr-x  src/op_mode/nat.py                                          |  35
-rwxr-xr-x  src/op_mode/pki.py                                          |  13
-rwxr-xr-x  src/op_mode/snmp_v3.py                                      |   3
-rwxr-xr-x  src/op_mode/version.py                                      |   6
-rwxr-xr-x  src/services/vyos-http-api-server                           |  46
-rw-r--r--  src/systemd/miniupnpd.service                               |  13
-rwxr-xr-x  src/validators/port-range-exclude                           |   7
34 files changed, 1117 insertions(+), 333 deletions(-)
diff --git a/src/activation-scripts/20-ethernet_offload.py b/src/activation-scripts/20-ethernet_offload.py
new file mode 100755
index 000000000..33b0ea469
--- /dev/null
+++ b/src/activation-scripts/20-ethernet_offload.py
@@ -0,0 +1,103 @@
+# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+# T3619: mirror Linux Kernel defaults for ethernet offloading options into VyOS
+# CLI. See https://vyos.dev/T3619#102254 for all the details.
+# T3787: Remove deprecated UDP fragmentation offloading option
+# T6006: add to activation-scripts: migration-scripts/interfaces/20-to-21
+
+from vyos.ethtool import Ethtool
+from vyos.configtree import ConfigTree
+
+def activate(config: ConfigTree):
+ base = ['interfaces', 'ethernet']
+
+ if not config.exists(base):
+ return
+
+ for ifname in config.list_nodes(base):
+ eth = Ethtool(ifname)
+
+ # If GRO is enabled by the Kernel - we reflect this on the CLI. If GRO is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'gro'])
+ enabled, fixed = eth.get_generic_receive_offload()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'gro'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'gro'])
+
+ # If GSO is enabled by the Kernel - we reflect this on the CLI. If GSO is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'gso'])
+ enabled, fixed = eth.get_generic_segmentation_offload()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'gso'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'gso'])
+
+ # If LRO is enabled by the Kernel - we reflect this on the CLI. If LRO is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'lro'])
+ enabled, fixed = eth.get_large_receive_offload()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'lro'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'lro'])
+
+ # If SG is enabled by the Kernel - we reflect this on the CLI. If SG is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'sg'])
+ enabled, fixed = eth.get_scatter_gather()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'sg'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'sg'])
+
+ # If TSO is enabled by the Kernel - we reflect this on the CLI. If TSO is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'tso'])
+ enabled, fixed = eth.get_tcp_segmentation_offload()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'tso'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'tso'])
+
+ # Remove deprecated UDP fragmentation offloading option
+ if config.exists(base + [ifname, 'offload', 'ufo']):
+ config.delete(base + [ifname, 'offload', 'ufo'])
+
+ # Also while processing the interface configuration, not all adapters support
+ # changing the speed and duplex settings. If the desired speed and duplex
+ # values do not work for the NIC driver, we change them back to the default
+ # value of "auto" - which will be applied if the CLI node is deleted.
+ speed_path = base + [ifname, 'speed']
+ duplex_path = base + [ifname, 'duplex']
+ # speed and duplex must always be set at the same time if not set to "auto"
+ if config.exists(speed_path) and config.exists(duplex_path):
+ speed = config.return_value(speed_path)
+ duplex = config.return_value(duplex_path)
+ if speed != 'auto' and duplex != 'auto':
+ if not eth.check_speed_duplex(speed, duplex):
+ config.delete(speed_path)
+ config.delete(duplex_path)
+
+ # Also while processing the interface configuration, not all adapters support
+ # changing disabling flow-control - or change this setting. If disabling
+ # flow-control is not supported by the NIC, we remove the setting from CLI
+ flow_control_path = base + [ifname, 'disable-flow-control']
+ if config.exists(flow_control_path):
+ if not eth.check_flow_control():
+ config.delete(flow_control_path)
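The five offload options above all follow the same decision rule: drop the CLI node when it is configured but the driver reports the feature as fixed (unchangeable), and add the CLI node when the kernel already enables a feature the driver does allow toggling. A minimal standalone sketch of that rule, assuming the (enabled, fixed) tuple returned by the Ethtool getters shown above; the helper name is hypothetical and not part of this commit:

    # Hypothetical illustration of the per-option sync rule used in the script above.
    def sync_offload_node(configured: bool, enabled: bool, fixed: bool) -> str:
        """Return the CLI action for one offload option (gro/gso/lro/sg/tso)."""
        if configured and fixed:
            return 'delete'   # CLI requests it, but the driver cannot change it
        if enabled and not fixed:
            return 'set'      # kernel default is on and togglable - reflect it on the CLI
        return 'keep'         # leave the CLI node untouched

    assert sync_offload_node(configured=True, enabled=True, fixed=True) == 'delete'
    assert sync_offload_node(configured=False, enabled=True, fixed=False) == 'set'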
diff --git a/src/completion/list_esi.sh b/src/completion/list_esi.sh
new file mode 100755
index 000000000..b8373fa57
--- /dev/null
+++ b/src/completion/list_esi.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This script is a completion helper to list all valid ESIs that are visible to FRR
+
+esiJson=$(vtysh -c 'show evpn es json')
+echo "$(echo "$esiJson" | jq -r '.[] | .esi')"
diff --git a/src/completion/list_vni.sh b/src/completion/list_vni.sh
new file mode 100755
index 000000000..f8bd4a993
--- /dev/null
+++ b/src/completion/list_vni.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This script is a completion helper to list all configured VNIs that are visible to FRR
+
+vniJson=$(vtysh -c 'show evpn vni json')
+echo "$(echo "$vniJson" | jq -r 'keys | .[]')"
diff --git a/src/completion/qos/list_traffic_match_group.py b/src/completion/qos/list_traffic_match_group.py
new file mode 100644
index 000000000..015d7ada9
--- /dev/null
+++ b/src/completion/qos/list_traffic_match_group.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from vyos.config import Config
+
+
+def get_qos_traffic_match_group():
+ config = Config()
+ base = ['qos', 'traffic-match-group']
+ conf = config.get_config_dict(base, key_mangling=('-', '_'))
+ groups = []
+
+ for group in conf.get('traffic_match_group', []):
+ groups.append(group)
+
+ return groups
+
+
+if __name__ == "__main__":
+ groups = get_qos_traffic_match_group()
+ print(" ".join(groups))
+
diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py
index a73a18ffa..3efeb9b40 100755
--- a/src/conf_mode/container.py
+++ b/src/conf_mode/container.py
@@ -16,6 +16,7 @@
import os
+from decimal import Decimal
from hashlib import sha256
from ipaddress import ip_address
from ipaddress import ip_network
@@ -28,6 +29,7 @@ from vyos.configdict import node_changed
from vyos.configdict import is_node_changed
from vyos.configverify import verify_vrf
from vyos.ifconfig import Interface
+from vyos.cpu import get_core_count
from vyos.utils.file import write_file
from vyos.utils.process import call
from vyos.utils.process import cmd
@@ -127,6 +129,11 @@ def verify(container):
f'locally. Please use "add container image {image}" to add it '\
f'to the system! Container "{name}" will not be started!')
+ if 'cpu_quota' in container_config:
+ cores = get_core_count()
+ if Decimal(container_config['cpu_quota']) > cores:
+ raise ConfigError(f'Cannot set limit to more cores than available "{name}"!')
+
if 'network' in container_config:
if len(container_config['network']) > 1:
raise ConfigError(f'Only one network can be specified for container "{name}"!')
@@ -257,6 +264,7 @@ def verify(container):
def generate_run_arguments(name, container_config):
image = container_config['image']
+ cpu_quota = container_config['cpu_quota']
memory = container_config['memory']
shared_memory = container_config['shared_memory']
restart = container_config['restart']
@@ -329,9 +337,13 @@ def generate_run_arguments(name, container_config):
prop = vol_config['propagation']
volume += f' --volume {svol}:{dvol}:{mode},{prop}'
- container_base_cmd = f'--detach --interactive --tty --replace {capabilities} ' \
+ host_pid = ''
+ if 'allow_host_pid' in container_config:
+ host_pid = '--pid host'
+
+ container_base_cmd = f'--detach --interactive --tty --replace {capabilities} --cpus {cpu_quota} ' \
f'--memory {memory}m --shm-size {shared_memory}m --memory-swap 0 --restart {restart} ' \
- f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label} {uid}'
+ f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label} {uid} {host_pid}'
entrypoint = ''
if 'entrypoint' in container_config:
@@ -339,11 +351,6 @@ def generate_run_arguments(name, container_config):
entrypoint = json_write(container_config['entrypoint'].split()).replace('"', "&quot;")
entrypoint = f'--entrypoint &apos;{entrypoint}&apos;'
- hostname = ''
- if 'host_name' in container_config:
- hostname = container_config['host_name']
- hostname = f'--hostname {hostname}'
-
command = ''
if 'command' in container_config:
command = container_config['command'].strip()
diff --git a/src/conf_mode/interfaces_openvpn.py b/src/conf_mode/interfaces_openvpn.py
index 0ecffd3be..627cc90ba 100755
--- a/src/conf_mode/interfaces_openvpn.py
+++ b/src/conf_mode/interfaces_openvpn.py
@@ -168,6 +168,14 @@ def verify_pki(openvpn):
'verification, consult the documentation for details.')
if tls:
+ if mode == 'site-to-site':
+ # XXX: site-to-site with PSKs is the only mode that can work without TLS,
+ # so 'tls role' is not mandatory for it,
+ # but we need to check that if it uses peer certificate fingerprints rather than PSKs,
+ # then the TLS role is set
+ if ('shared_secret_key' not in tls) and ('role' not in tls):
+ raise ConfigError('"tls role" is required for site-to-site OpenVPN with TLS')
+
if (mode in ['server', 'client']) and ('ca_certificate' not in tls):
raise ConfigError(f'Must specify "tls ca-certificate" on openvpn interface {interface},\
it is required in server and client modes')
diff --git a/src/conf_mode/interfaces_tunnel.py b/src/conf_mode/interfaces_tunnel.py
index 43ba72857..98ef98d12 100755
--- a/src/conf_mode/interfaces_tunnel.py
+++ b/src/conf_mode/interfaces_tunnel.py
@@ -145,11 +145,20 @@ def verify(tunnel):
# If no IP GRE key is defined we cannot have more than one GRE tunnel
# bound to any one interface/IP address and the same remote. This will
# result in an OS PermissionError: add tunnel "gre0" failed: File exists
- if (their_address == our_address or our_source_if == their_source_if) and \
- our_remote == their_remote:
- raise ConfigError(f'Missing required "ip key" parameter when '\
- 'running more then one GRE based tunnel on the '\
- 'same source-interface/source-address')
+ if our_remote == their_remote:
+ if our_address is not None and their_address == our_address:
+ # If set to the same values, this is always a fail
+ raise ConfigError(f'Missing required "ip key" parameter when '\
+ 'running more than one GRE based tunnel on the '\
+ 'same source-address')
+
+ if their_source_if == our_source_if and their_address == our_address:
+ # Note that lack of None check on these is deliberate.
+ # source-if and source-ip matching while unset (all None) is a fail
+ # source-ifs set and matching with unset source-ips is a fail
+ raise ConfigError(f'Missing required "ip key" parameter when '\
+ 'running more than one GRE based tunnel on the '\
+ 'same source-interface')
# Keys are not allowed with ipip and sit tunnels
if tunnel['encapsulation'] in ['ipip', 'sit']:
diff --git a/src/conf_mode/load-balancing_reverse-proxy.py b/src/conf_mode/load-balancing_reverse-proxy.py
index 1569d8d71..09c68dadd 100755
--- a/src/conf_mode/load-balancing_reverse-proxy.py
+++ b/src/conf_mode/load-balancing_reverse-proxy.py
@@ -26,9 +26,13 @@ from vyos.utils.dict import dict_search
from vyos.utils.process import call
from vyos.utils.network import check_port_availability
from vyos.utils.network import is_listen_port_bind_service
-from vyos.pki import wrap_certificate
-from vyos.pki import wrap_private_key
+from vyos.pki import find_chain
+from vyos.pki import load_certificate
+from vyos.pki import load_private_key
+from vyos.pki import encode_certificate
+from vyos.pki import encode_private_key
from vyos.template import render
+from vyos.utils.file import write_file
from vyos import ConfigError
from vyos import airbag
airbag.enable()
@@ -75,12 +79,21 @@ def verify(lb):
raise ConfigError(f'"TCP" port "{tmp_port}" is used by another service')
for back, back_config in lb['backend'].items():
- if 'http-check' in back_config:
- http_check = back_config['http-check']
+ if 'http_check' in back_config:
+ http_check = back_config['http_check']
if 'expect' in http_check and 'status' in http_check['expect'] and 'string' in http_check['expect']:
raise ConfigError(f'"expect status" and "expect string" can not be configured together!')
+
+ if 'health_check' in back_config:
+ if 'mode' not in back_config or back_config['mode'] != 'tcp':
+ raise ConfigError(f'backend "{back}" can only be configured with {back_config["health_check"]} ' +
+ f'health-check whilst in TCP mode!')
+ if 'http_check' in back_config:
+ raise ConfigError(f'backend "{back}" cannot be configured with both http-check and health-check!')
+
if 'server' not in back_config:
raise ConfigError(f'"{back} server" must be configured!')
+
for bk_server, bk_server_conf in back_config['server'].items():
if 'address' not in bk_server_conf or 'port' not in bk_server_conf:
raise ConfigError(f'"backend {back} server {bk_server} address and port" must be configured!')
@@ -92,12 +105,18 @@ def verify(lb):
if {'no_verify', 'ca_certificate'} <= set(back_config['ssl']):
raise ConfigError(f'backend {back} cannot have both ssl options no-verify and ca-certificate set!')
+ # Check if http-response-headers are configured in any frontend/backend where mode != http
+ for group in ['service', 'backend']:
+ for config_name, config in lb[group].items():
+ if 'http_response_headers' in config and ('mode' not in config or config['mode'] != 'http'):
+ raise ConfigError(f'{group} {config_name} must be set to http mode to use http_response_headers!')
+
for front, front_config in lb['service'].items():
for cert in dict_search('ssl.certificate', front_config) or []:
verify_pki_certificate(lb, cert)
for back, back_config in lb['backend'].items():
- tmp = dict_search('ssl.ca_certificate', front_config)
+ tmp = dict_search('ssl.ca_certificate', back_config)
if tmp: verify_pki_ca_certificate(lb, tmp)
@@ -118,51 +137,54 @@ def generate(lb):
if not os.path.isdir(load_balancing_dir):
os.mkdir(load_balancing_dir)
+ loaded_ca_certs = {load_certificate(c['certificate'])
+ for c in lb['pki']['ca'].values()} if 'ca' in lb['pki'] else {}
+
# SSL Certificates for frontend
for front, front_config in lb['service'].items():
- if 'ssl' in front_config:
-
- if 'certificate' in front_config['ssl']:
- cert_names = front_config['ssl']['certificate']
+ if 'ssl' not in front_config:
+ continue
- for cert_name in cert_names:
- pki_cert = lb['pki']['certificate'][cert_name]
- cert_file_path = os.path.join(load_balancing_dir, f'{cert_name}.pem')
- cert_key_path = os.path.join(load_balancing_dir, f'{cert_name}.pem.key')
+ if 'certificate' in front_config['ssl']:
+ cert_names = front_config['ssl']['certificate']
- with open(cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_cert['certificate']))
+ for cert_name in cert_names:
+ pki_cert = lb['pki']['certificate'][cert_name]
+ cert_file_path = os.path.join(load_balancing_dir, f'{cert_name}.pem')
+ cert_key_path = os.path.join(load_balancing_dir, f'{cert_name}.pem.key')
- if 'private' in pki_cert and 'key' in pki_cert['private']:
- with open(cert_key_path, 'w') as f:
- f.write(wrap_private_key(pki_cert['private']['key']))
+ loaded_pki_cert = load_certificate(pki_cert['certificate'])
+ cert_full_chain = find_chain(loaded_pki_cert, loaded_ca_certs)
- if 'ca_certificate' in front_config['ssl']:
- ca_name = front_config['ssl']['ca_certificate']
- pki_ca_cert = lb['pki']['ca'][ca_name]
- ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
+ write_file(cert_file_path,
+ '\n'.join(encode_certificate(c) for c in cert_full_chain))
- with open(ca_cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_ca_cert['certificate']))
+ if 'private' in pki_cert and 'key' in pki_cert['private']:
+ loaded_key = load_private_key(pki_cert['private']['key'], passphrase=None, wrap_tags=True)
+ key_pem = encode_private_key(loaded_key, passphrase=None)
+ write_file(cert_key_path, key_pem)
# SSL Certificates for backend
for back, back_config in lb['backend'].items():
- if 'ssl' in back_config:
+ if 'ssl' not in back_config:
+ continue
- if 'ca_certificate' in back_config['ssl']:
- ca_name = back_config['ssl']['ca_certificate']
- pki_ca_cert = lb['pki']['ca'][ca_name]
- ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
+ if 'ca_certificate' in back_config['ssl']:
+ ca_name = back_config['ssl']['ca_certificate']
+ ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
+ ca_chains = []
- with open(ca_cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_ca_cert['certificate']))
+ pki_ca_cert = lb['pki']['ca'][ca_name]
+ loaded_ca_cert = load_certificate(pki_ca_cert['certificate'])
+ ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs)
+ ca_chains.append('\n'.join(encode_certificate(c) for c in ca_full_chain))
+ write_file(ca_cert_file_path, '\n'.join(ca_chains))
render(load_balancing_conf_file, 'load-balancing/haproxy.cfg.j2', lb)
render(systemd_override, 'load-balancing/override_haproxy.conf.j2', lb)
return None
-
def apply(lb):
call('systemctl daemon-reload')
if not lb:
diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py
index 4cd9b570d..f74bb217e 100755
--- a/src/conf_mode/nat.py
+++ b/src/conf_mode/nat.py
@@ -17,7 +17,6 @@
import os
from sys import exit
-from netifaces import interfaces
from vyos.base import Warning
from vyos.config import Config
@@ -30,6 +29,7 @@ from vyos.utils.dict import dict_search_args
from vyos.utils.process import cmd
from vyos.utils.process import run
from vyos.utils.network import is_addr_assigned
+from vyos.utils.network import interface_exists
from vyos import ConfigError
from vyos import airbag
@@ -149,8 +149,12 @@ def verify(nat):
if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']:
raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"')
elif 'name' in config['outbound_interface']:
- if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces():
- Warning(f'NAT interface "{config["outbound_interface"]["name"]}" for source NAT rule "{rule}" does not exist!')
+ interface_name = config['outbound_interface']['name']
+ if interface_name not in 'any':
+ if interface_name.startswith('!'):
+ interface_name = interface_name[1:]
+ if not interface_exists(interface_name):
+ Warning(f'Interface "{interface_name}" for source NAT rule "{rule}" does not exist!')
else:
group_name = config['outbound_interface']['group']
if group_name[0] == '!':
@@ -182,8 +186,12 @@ def verify(nat):
if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']:
raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"')
elif 'name' in config['inbound_interface']:
- if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces():
- Warning(f'NAT interface "{config["inbound_interface"]["name"]}" for destination NAT rule "{rule}" does not exist!')
+ interface_name = config['inbound_interface']['name']
+ if interface_name not in 'any':
+ if interface_name.startswith('!'):
+ interface_name = interface_name[1:]
+ if not interface_exists(interface_name):
+ Warning(f'Interface "{interface_name}" for destination NAT rule "{rule}" does not exist!')
else:
group_name = config['inbound_interface']['group']
if group_name[0] == '!':
diff --git a/src/conf_mode/nat64.py b/src/conf_mode/nat64.py
index c1e7ebf85..32a1c47d1 100755
--- a/src/conf_mode/nat64.py
+++ b/src/conf_mode/nat64.py
@@ -20,7 +20,7 @@ import csv
import os
import re
-from ipaddress import IPv6Network
+from ipaddress import IPv6Network, IPv6Address
from json import dumps as json_write
from vyos import ConfigError
@@ -103,8 +103,14 @@ def verify(nat64) -> None:
# Verify that source.prefix is set and is a /96
if not dict_search("source.prefix", instance):
raise ConfigError(f"Source NAT64 rule {rule} missing source prefix")
- if IPv6Network(instance["source"]["prefix"]).prefixlen != 96:
+ src_prefix = IPv6Network(instance["source"]["prefix"])
+ if src_prefix.prefixlen != 96:
raise ConfigError(f"Source NAT64 rule {rule} source prefix must be /96")
+ if (int(src_prefix[0]) & int(IPv6Address('0:0:0:0:ff00::'))) != 0:
+ raise ConfigError(
+ f'Source NAT64 rule {rule} source prefix is not RFC6052-compliant: '
+ 'bits 64 to 71 (9th octet) must be zeroed'
+ )
pools = dict_search("translation.pool", instance)
if pools:
diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py
index fe017527d..075738dad 100755
--- a/src/conf_mode/nat66.py
+++ b/src/conf_mode/nat66.py
@@ -17,15 +17,15 @@
import os
from sys import exit
-from netifaces import interfaces
from vyos.base import Warning
from vyos.config import Config
from vyos.configdep import set_dependents, call_dependents
from vyos.template import render
-from vyos.utils.process import cmd
-from vyos.utils.kernel import check_kmod
from vyos.utils.dict import dict_search
+from vyos.utils.kernel import check_kmod
+from vyos.utils.network import interface_exists
+from vyos.utils.process import cmd
from vyos.template import is_ipv6
from vyos import ConfigError
from vyos import airbag
@@ -64,8 +64,12 @@ def verify(nat):
if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']:
raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"')
elif 'name' in config['outbound_interface']:
- if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces():
- Warning(f'NAT66 interface "{config["outbound_interface"]["name"]}" for source NAT66 rule "{rule}" does not exist!')
+ interface_name = config['outbound_interface']['name']
+ if interface_name not in 'any':
+ if interface_name.startswith('!'):
+ interface_name = interface_name[1:]
+ if not interface_exists(interface_name):
+ Warning(f'Interface "{interface_name}" for source NAT66 rule "{rule}" does not exist!')
addr = dict_search('translation.address', config)
if addr != None:
@@ -88,8 +92,12 @@ def verify(nat):
if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']:
raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"')
elif 'name' in config['inbound_interface']:
- if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces():
- Warning(f'NAT66 interface "{config["inbound_interface"]["name"]}" for destination NAT66 rule "{rule}" does not exist!')
+ interface_name = config['inbound_interface']['name']
+ if interface_name not in 'any':
+ if interface_name.startswith('!'):
+ interface_name = interface_name[1:]
+ if not interface_exists(interface_name):
+ Warning(f'Interface "{interface_name}" for destination NAT66 rule "{rule}" does not exist!')
return None
diff --git a/src/conf_mode/nat_cgnat.py b/src/conf_mode/nat_cgnat.py
index f41d66c66..957b12c28 100755
--- a/src/conf_mode/nat_cgnat.py
+++ b/src/conf_mode/nat_cgnat.py
@@ -189,11 +189,6 @@ def verify(config):
if 'rule' not in config:
raise ConfigError(f'Rule must be defined!')
- # As PoC allow only one rule for CGNAT translations
- # one internal pool and one external pool
- if len(config['rule']) > 1:
- raise ConfigError(f'Only one rule is allowed for translations!')
-
for pool in ('external', 'internal'):
if pool not in config['pool']:
raise ConfigError(f'{pool} pool must be defined!')
@@ -203,6 +198,13 @@ def verify(config):
f'Range for "{pool} pool {pool_name}" must be defined!'
)
+ external_pools_query = "keys(pool.external)"
+ external_pools: list = jmespath.search(external_pools_query, config)
+ internal_pools_query = "keys(pool.internal)"
+ internal_pools: list = jmespath.search(internal_pools_query, config)
+
+ used_external_pools = {}
+ used_internal_pools = {}
for rule, rule_config in config['rule'].items():
if 'source' not in rule_config:
raise ConfigError(f'Rule "{rule}" source pool must be defined!')
@@ -212,49 +214,86 @@ def verify(config):
if 'translation' not in rule_config:
raise ConfigError(f'Rule "{rule}" translation pool must be defined!')
+ # Check if pool exists
+ internal_pool = rule_config['source']['pool']
+ if internal_pool not in internal_pools:
+ raise ConfigError(f'Internal pool "{internal_pool}" does not exist!')
+ external_pool = rule_config['translation']['pool']
+ if external_pool not in external_pools:
+ raise ConfigError(f'External pool "{external_pool}" does not exist!')
+
+ # Check pool duplication in different rules
+ if external_pool in used_external_pools:
+ raise ConfigError(
+ f'External pool "{external_pool}" is already used in rule '
+ f'{used_external_pools[external_pool]} and cannot be used in '
+ f'rule {rule}!'
+ )
+
+ if internal_pool in used_internal_pools:
+ raise ConfigError(
+ f'Internal pool "{internal_pool}" is already used in rule '
+ f'{used_internal_pools[internal_pool]} and cannot be used in '
+ f'rule {rule}!'
+ )
+
+ used_external_pools[external_pool] = rule
+ used_internal_pools[internal_pool] = rule
+
def generate(config):
if not config:
return None
- # first external pool as we allow only one as PoC
- ext_pool_name = jmespath.search("rule.*.translation | [0]", config).get('pool')
- int_pool_name = jmespath.search("rule.*.source | [0]", config).get('pool')
- ext_query = f"pool.external.{ext_pool_name}.range | keys(@)"
- int_query = f"pool.internal.{int_pool_name}.range"
- external_ranges = jmespath.search(ext_query, config)
- internal_ranges = [jmespath.search(int_query, config)]
-
- external_list_count = []
- external_list_hosts = []
- internal_list_count = []
- internal_list_hosts = []
- for ext_range in external_ranges:
- # External hosts count
- e_count = IPOperations(ext_range).get_ips_count()
- external_list_count.append(e_count)
- # External hosts list
- e_hosts = IPOperations(ext_range).convert_prefix_to_list_ips()
- external_list_hosts.extend(e_hosts)
- for int_range in internal_ranges:
- # Internal hosts count
- i_count = IPOperations(int_range).get_ips_count()
- internal_list_count.append(i_count)
- # Internal hosts list
- i_hosts = IPOperations(int_range).convert_prefix_to_list_ips()
- internal_list_hosts.extend(i_hosts)
-
- external_host_count = sum(external_list_count)
- internal_host_count = sum(internal_list_count)
- ports_per_user = int(
- jmespath.search(f'pool.external.{ext_pool_name}.per_user_limit.port', config)
- )
- external_port_range: str = jmespath.search(
- f'pool.external.{ext_pool_name}.external_port_range', config
- )
- proto_maps, other_maps = generate_port_rules(
- external_list_hosts, internal_list_hosts, ports_per_user, external_port_range
- )
+ proto_maps = []
+ other_maps = []
+
+ for rule, rule_config in config['rule'].items():
+ ext_pool_name: str = rule_config['translation']['pool']
+ int_pool_name: str = rule_config['source']['pool']
+
+ # Sort the external ranges by sequence
+ external_ranges: list = sorted(
+ config['pool']['external'][ext_pool_name]['range'],
+ key=lambda r: int(config['pool']['external'][ext_pool_name]['range'][r].get('seq', 999999))
+ )
+ internal_ranges: list = [range for range in config['pool']['internal'][int_pool_name]['range']]
+ external_list_hosts_count = []
+ external_list_hosts = []
+ internal_list_hosts_count = []
+ internal_list_hosts = []
+
+ for ext_range in external_ranges:
+ # External hosts count
+ e_count = IPOperations(ext_range).get_ips_count()
+ external_list_hosts_count.append(e_count)
+ # External hosts list
+ e_hosts = IPOperations(ext_range).convert_prefix_to_list_ips()
+ external_list_hosts.extend(e_hosts)
+
+ for int_range in internal_ranges:
+ # Internal hosts count
+ i_count = IPOperations(int_range).get_ips_count()
+ internal_list_hosts_count.append(i_count)
+ # Internal hosts list
+ i_hosts = IPOperations(int_range).convert_prefix_to_list_ips()
+ internal_list_hosts.extend(i_hosts)
+
+ external_host_count = sum(external_list_hosts_count)
+ internal_host_count = sum(internal_list_hosts_count)
+ ports_per_user = int(
+ jmespath.search(f'pool.external."{ext_pool_name}".per_user_limit.port', config)
+ )
+ external_port_range: str = jmespath.search(
+ f'pool.external."{ext_pool_name}".external_port_range', config
+ )
+
+ rule_proto_maps, rule_other_maps = generate_port_rules(
+ external_list_hosts, internal_list_hosts, ports_per_user, external_port_range
+ )
+
+ proto_maps.extend(rule_proto_maps)
+ other_maps.extend(rule_other_maps)
config['proto_map_elements'] = ', '.join(proto_maps)
config['other_map_elements'] = ', '.join(other_maps)
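Per the loop above, each rule's external ranges are ordered by their 'seq' value before hosts are expanded, with un-sequenced ranges sorting last. A small self-contained illustration of that ordering (pool prefixes and sequence numbers below are invented):

    # Shaped like config['pool']['external'][ext_pool_name]['range']; values are examples only.
    ranges = {
        '192.0.2.0/30':    {'seq': '20'},
        '198.51.100.0/30': {'seq': '10'},
        '203.0.113.0/30':  {},             # no 'seq' -> defaults to 999999, sorts last
    }
    ordered = sorted(ranges, key=lambda r: int(ranges[r].get('seq', 999999)))
    assert ordered == ['198.51.100.0/30', '192.0.2.0/30', '203.0.113.0/30']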
diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py
index 1c01a9013..1361bb1a9 100755
--- a/src/conf_mode/protocols_bfd.py
+++ b/src/conf_mode/protocols_bfd.py
@@ -49,7 +49,7 @@ def verify(bfd):
for peer, peer_config in bfd['peer'].items():
# IPv6 link local peers require an explicit local address/interface
if is_ipv6_link_local(peer):
- if 'source' not in peer_config or len(peer_config['source'] < 2):
+ if 'source' not in peer_config or len(peer_config['source']) < 2:
raise ConfigError('BFD IPv6 link-local peers require explicit local address and interface setting')
# IPv6 peers require an explicit local address
diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py
index 8a590cbc6..45248fb4a 100755
--- a/src/conf_mode/qos.py
+++ b/src/conf_mode/qos.py
@@ -17,6 +17,7 @@
from sys import exit
from netifaces import interfaces
+from vyos.base import Warning
from vyos.config import Config
from vyos.configdep import set_dependents
from vyos.configdep import call_dependents
@@ -89,6 +90,36 @@ def _clean_conf_dict(conf):
return conf
+def _get_group_filters(config: dict, group_name: str, visited=None) -> dict:
+ filters = dict()
+ if not visited:
+ visited = [group_name, ]
+ else:
+ if group_name in visited:
+ return filters
+ visited.append(group_name)
+
+ for filter, filter_config in config.get(group_name, {}).items():
+ if filter == 'match':
+ for match, match_config in filter_config.items():
+ filters[f'{group_name}-{match}'] = match_config
+ elif filter == 'match_group':
+ for group in filter_config:
+ filters.update(_get_group_filters(config, group, visited))
+
+ return filters
+
+
+def _get_group_match(config:dict, group_name:str) -> dict:
+ match = dict()
+ for key, val in _get_group_filters(config, group_name).items():
+ # delete duplicate matches
+ if val not in match.values():
+ match[key] = val
+
+ return match
+
+
def get_config(config=None):
if config:
conf = config
@@ -135,11 +166,27 @@ def get_config(config=None):
qos = conf.merge_defaults(qos, recursive=True)
+ if 'traffic_match_group' in qos:
+ for group, group_config in qos['traffic_match_group'].items():
+ if 'match_group' in group_config:
+ qos['traffic_match_group'][group]['match'] = _get_group_match(qos['traffic_match_group'], group)
+
for policy in qos.get('policy', []):
for p_name, p_config in qos['policy'][policy].items():
# cleanup empty match config
if 'class' in p_config:
for cls, cls_config in p_config['class'].items():
+ if 'match_group' in cls_config:
+ # merge group match to match
+ for group in cls_config['match_group']:
+ for match, match_conf in qos['traffic_match_group'].get(group, {'match': {}})['match'].items():
+ if 'match' not in cls_config:
+ cls_config['match'] = dict()
+ if match in cls_config['match']:
+ cls_config['match'][f'{group}-{match}'] = match_conf
+ else:
+ cls_config['match'][match] = match_conf
+
if 'match' in cls_config:
cls_config['match'] = _clean_conf_dict(cls_config['match'])
if cls_config['match'] == {}:
@@ -147,6 +194,22 @@ def get_config(config=None):
return qos
+
+def _verify_match(cls_config: dict) -> None:
+ if 'match' in cls_config:
+ for match, match_config in cls_config['match'].items():
+ if {'ip', 'ipv6'} <= set(match_config):
+ raise ConfigError(
+ f'Can not use both IPv6 and IPv4 in one match ({match})!')
+
+
+def _verify_match_group_exist(cls_config, qos):
+ if 'match_group' in cls_config:
+ for group in cls_config['match_group']:
+ if 'traffic_match_group' not in qos or group not in qos['traffic_match_group']:
+ Warning(f'Match group "{group}" does not exist!')
+
+
def verify(qos):
if not qos or 'interface' not in qos:
return None
@@ -174,11 +237,8 @@ def verify(qos):
# bandwidth is not mandatory for priority-queue - that is why this is on the exception list
if 'bandwidth' not in cls_config and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']:
raise ConfigError(f'Bandwidth must be defined for policy "{policy}" class "{cls}"!')
- if 'match' in cls_config:
- for match, match_config in cls_config['match'].items():
- if {'ip', 'ipv6'} <= set(match_config):
- raise ConfigError(f'Can not use both IPv6 and IPv4 in one match ({match})!')
-
+ _verify_match(cls_config)
+ _verify_match_group_exist(cls_config, qos)
if policy_type in ['random_detect']:
if 'precedence' in policy_config:
for precedence, precedence_config in policy_config['precedence'].items():
@@ -216,8 +276,14 @@ def verify(qos):
if direction not in tmp:
raise ConfigError(f'Selected QoS policy on interface "{interface}" only supports "{tmp}"!')
+ if 'traffic_match_group' in qos:
+ for group, group_config in qos['traffic_match_group'].items():
+ _verify_match(group_config)
+ _verify_match_group_exist(group_config, qos)
+
return None
+
def generate(qos):
if not qos or 'interface' not in qos:
return None
@@ -254,6 +320,7 @@ def apply(qos):
return None
+
if __name__ == '__main__':
try:
c = get_config()
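The helpers added above flatten nested traffic-match-groups: _get_group_filters() walks match-group references depth-first, prefixes each inherited match with its group name, and uses the visited list to stop on circular references, while _get_group_match() drops duplicate match bodies. A rough illustration on an invented group dictionary (group and match names below are made up):

    # Shaped like qos['traffic_match_group'] after key mangling; contents are illustrative only.
    groups = {
        'PARENT': {'match': {'web': {'ip': {'destination': {'port': '80'}}}},
                   'match_group': ['CHILD']},
        'CHILD':  {'match': {'dns': {'ip': {'destination': {'port': '53'}}}},
                   'match_group': ['PARENT']},   # deliberate cycle back to PARENT
    }
    # _get_group_filters(groups, 'PARENT') would yield {'PARENT-web': ..., 'CHILD-dns': ...};
    # the reference from CHILD back to PARENT is skipped because PARENT is already in 'visited'.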
diff --git a/src/conf_mode/service_dhcpv6-server.py b/src/conf_mode/service_dhcpv6-server.py
index c7333dd3a..7af88007c 100755
--- a/src/conf_mode/service_dhcpv6-server.py
+++ b/src/conf_mode/service_dhcpv6-server.py
@@ -106,14 +106,14 @@ def verify(dhcpv6):
# Stop address must be greater or equal to start address
if not ip_address(stop) >= ip_address(start):
- raise ConfigError(f'Range stop address "{stop}" must be greater then or equal ' \
+ raise ConfigError(f'Range stop address "{stop}" must be greater than or equal ' \
f'to the range start address "{start}"!')
# DHCPv6 range start address must be unique - two ranges can't
# start with the same address - makes no sense
if start in range6_start:
raise ConfigError(f'Conflicting DHCPv6 lease range: '\
- f'Pool start address "{start}" defined multipe times!')
+ f'Pool start address "{start}" defined multiple times!')
range6_start.append(start)
@@ -121,7 +121,7 @@ def verify(dhcpv6):
# end with the same address - makes no sense
if stop in range6_stop:
raise ConfigError(f'Conflicting DHCPv6 lease range: '\
- f'Pool stop address "{stop}" defined multipe times!')
+ f'Pool stop address "{stop}" defined multiple times!')
range6_stop.append(stop)
@@ -180,7 +180,7 @@ def verify(dhcpv6):
if 'option' in subnet_config:
if 'vendor_option' in subnet_config['option']:
if len(dict_search('option.vendor_option.cisco.tftp_server', subnet_config)) > 2:
- raise ConfigError(f'No more then two Cisco tftp-servers should be defined for subnet "{subnet}"!')
+ raise ConfigError(f'No more than two Cisco tftp-servers should be defined for subnet "{subnet}"!')
# Subnets must be unique
if subnet in subnets:
diff --git a/src/conf_mode/service_dns_forwarding.py b/src/conf_mode/service_dns_forwarding.py
index 7e863073a..70686534f 100755
--- a/src/conf_mode/service_dns_forwarding.py
+++ b/src/conf_mode/service_dns_forwarding.py
@@ -102,7 +102,7 @@ def get_config(config=None):
'ttl': rdata['ttl'],
'value': address
})
- elif rtype in ['cname', 'ptr', 'ns']:
+ elif rtype in ['cname', 'ptr']:
if not 'target' in rdata:
dns['authoritative_zone_errors'].append(f'{subnode}.{node}: target is required')
continue
@@ -113,6 +113,19 @@ def get_config(config=None):
'ttl': rdata['ttl'],
'value': '{}.'.format(rdata['target'])
})
+ elif rtype == 'ns':
+ if not 'target' in rdata:
+ dns['authoritative_zone_errors'].append(f'{subnode}.{node}: at least one target is required')
+ continue
+
+ for target in rdata['target']:
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': f'{target}.'
+ })
+
elif rtype == 'mx':
if not 'server' in rdata:
dns['authoritative_zone_errors'].append(f'{subnode}.{node}: at least one server is required')
diff --git a/src/conf_mode/service_suricata.py b/src/conf_mode/service_suricata.py
new file mode 100755
index 000000000..69b369e0b
--- /dev/null
+++ b/src/conf_mode/service_suricata.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+
+from vyos.base import Warning
+from vyos.config import Config
+from vyos.template import render
+from vyos.utils.process import call
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+config_file = '/run/suricata/suricata.yaml'
+rotate_file = '/etc/logrotate.d/suricata'
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['service', 'suricata']
+
+ if not conf.exists(base):
+ return None
+
+ suricata = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True, with_recursive_defaults=True)
+
+ return suricata
+
+# https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search
+def topological_sort(source):
+ sorted_nodes = []
+ permanent_marks = set()
+ temporary_marks = set()
+
+ def visit(n, v):
+ if n in permanent_marks:
+ return
+ if n in temporary_marks:
+ raise ConfigError('At least one cycle exists in the referenced groups')
+
+ temporary_marks.add(n)
+
+ for m in v.get('group', []):
+ m = m.lstrip('!')
+ if m not in source:
+ raise ConfigError(f'Undefined referenced group "{m}"')
+ visit(m, source[m])
+
+ temporary_marks.remove(n)
+ permanent_marks.add(n)
+ sorted_nodes.append((n, v))
+
+ while len(permanent_marks) < len(source):
+ n = next(n for n in source.keys() if n not in permanent_marks)
+ visit(n, source[n])
+
+ return sorted_nodes
+
+def verify(suricata):
+ if not suricata:
+ return None
+
+ if 'interface' not in suricata:
+ raise ConfigError('No interfaces configured!')
+
+ if 'address_group' not in suricata:
+ raise ConfigError('No address-group configured!')
+
+ if 'port_group' not in suricata:
+ raise ConfigError('No port-group configured!')
+
+ try:
+ topological_sort(suricata['address_group'])
+ except (ConfigError,StopIteration) as e:
+ raise ConfigError(f'Invalid address-group: {e}')
+
+ try:
+ topological_sort(suricata['port_group'])
+ except (ConfigError,StopIteration) as e:
+ raise ConfigError(f'Invalid port-group: {e}')
+
+def generate(suricata):
+ if not suricata:
+ for file in [config_file, rotate_file]:
+ if os.path.isfile(file):
+ os.unlink(file)
+
+ return None
+
+ # Config-related formatters
+ def to_var(s:str):
+ return s.replace('-','_').upper()
+
+ def to_val(s:str):
+ return s.replace('-',':')
+
+ def to_ref(s:str):
+ if s[0] == '!':
+ return '!$' + to_var(s[1:])
+ return '$' + to_var(s)
+
+ def to_config(kind:str):
+ def format_group(group):
+ (name, value) = group
+ property = [to_val(property) for property in value.get(kind,[])]
+ group = [to_ref(group) for group in value.get('group',[])]
+ return (to_var(name), property + group)
+ return format_group
+
+ # Format the address group
+ suricata['address_group'] = map(to_config('address'),
+ topological_sort(suricata['address_group']))
+
+ # Format the port group
+ suricata['port_group'] = map(to_config('port'),
+ topological_sort(suricata['port_group']))
+
+ render(config_file, 'ids/suricata.j2', {'suricata': suricata})
+ render(rotate_file, 'ids/suricata_logrotate.j2', suricata)
+ return None
+
+def apply(suricata):
+ systemd_service = 'suricata.service'
+ if not suricata or 'interface' not in suricata:
+ # Stop suricata service if removed
+ call(f'systemctl stop {systemd_service}')
+ else:
+ Warning('To fetch the latest rules, use "update suricata"; '
+ 'To periodically fetch the latest rules, '
+ 'use the task scheduler!')
+ call(f'systemctl restart {systemd_service}')
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
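topological_sort() above emits every address-group or port-group after the groups it references (DFS post-order), strips a leading '!' before resolving a negated reference, and raises ConfigError on cycles or undefined group names. A hedged walk-through on a toy address-group mapping (names and addresses are invented):

    # Shaped like suricata['address_group']; contents are illustrative only.
    address_group = {
        'LAN':     {'address': ['192.168.0.0-192.168.0.255']},
        'SERVERS': {'address': ['192.168.0.10'], 'group': ['LAN']},
        'MONITOR': {'group': ['!SERVERS']},   # negated reference; '!' is stripped for the sort
    }
    # topological_sort(address_group) returns LAN, then SERVERS, then MONITOR, so the
    # rendered suricata.yaml can define $LAN before $SERVERS before $MONITOR.
    # Adding 'group': ['MONITOR'] to LAN would raise "At least one cycle exists ...";
    # referencing an unknown name raises 'Undefined referenced group "<name>"'.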
diff --git a/src/conf_mode/service_upnp.py b/src/conf_mode/service_upnp.py
deleted file mode 100755
index 0df8dc09e..000000000
--- a/src/conf_mode/service_upnp.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2021-2022 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from sys import exit
-import uuid
-import netifaces
-from ipaddress import IPv4Network
-from ipaddress import IPv6Network
-
-from vyos.config import Config
-from vyos.utils.process import call
-from vyos.template import render
-from vyos.template import is_ipv4
-from vyos.template import is_ipv6
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-config_file = r'/run/upnp/miniupnp.conf'
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
-
- base = ['service', 'upnp']
- upnpd = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
-
- if not upnpd:
- return None
-
- upnpd = conf.merge_defaults(upnpd, recursive=True)
-
- uuidgen = uuid.uuid1()
- upnpd.update({'uuid': uuidgen})
-
- return upnpd
-
-def get_all_interface_addr(prefix, filter_dev, filter_family):
- list_addr = []
- for interface in netifaces.interfaces():
- if filter_dev and interface in filter_dev:
- continue
- addrs = netifaces.ifaddresses(interface)
- if netifaces.AF_INET in addrs.keys():
- if netifaces.AF_INET in filter_family:
- for addr in addrs[netifaces.AF_INET]:
- if prefix:
- # we need to manually assemble a list of IPv4 address/prefix
- prefix = '/' + \
- str(IPv4Network('0.0.0.0/' + addr['netmask']).prefixlen)
- list_addr.append(addr['addr'] + prefix)
- else:
- list_addr.append(addr['addr'])
- if netifaces.AF_INET6 in addrs.keys():
- if netifaces.AF_INET6 in filter_family:
- for addr in addrs[netifaces.AF_INET6]:
- if prefix:
- # we need to manually assemble a list of IPv4 address/prefix
- bits = bin(int(addr['netmask'].replace(':', '').split('/')[0], 16)).count('1')
- prefix = '/' + str(bits)
- list_addr.append(addr['addr'] + prefix)
- else:
- list_addr.append(addr['addr'])
-
- return list_addr
-
-def verify(upnpd):
- if not upnpd:
- return None
-
- if 'wan_interface' not in upnpd:
- raise ConfigError('To enable UPNP, you must have the "wan-interface" option!')
-
- if 'rule' in upnpd:
- for rule, rule_config in upnpd['rule'].items():
- for option in ['external_port_range', 'internal_port_range', 'ip', 'action']:
- if option not in rule_config:
- tmp = option.replace('_', '-')
- raise ConfigError(f'Every UPNP rule requires "{tmp}" to be set!')
-
- if 'stun' in upnpd:
- for option in ['host', 'port']:
- if option not in upnpd['stun']:
- raise ConfigError(f'A UPNP stun support must have an "{option}" option!')
-
- # Check the validity of the IP address
- listen_dev = []
- system_addrs_cidr = get_all_interface_addr(True, [], [netifaces.AF_INET, netifaces.AF_INET6])
- system_addrs = get_all_interface_addr(False, [], [netifaces.AF_INET, netifaces.AF_INET6])
- if 'listen' not in upnpd:
- raise ConfigError(f'Listen address or interface is required!')
- for listen_if_or_addr in upnpd['listen']:
- if listen_if_or_addr not in netifaces.interfaces():
- listen_dev.append(listen_if_or_addr)
- if (listen_if_or_addr not in system_addrs) and (listen_if_or_addr not in system_addrs_cidr) and \
- (listen_if_or_addr not in netifaces.interfaces()):
- if is_ipv4(listen_if_or_addr) and IPv4Network(listen_if_or_addr).is_multicast:
- raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed'
- f'to listen on. It is not an interface address nor a multicast address!')
- if is_ipv6(listen_if_or_addr) and IPv6Network(listen_if_or_addr).is_multicast:
- raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed'
- f'to listen on. It is not an interface address nor a multicast address!')
-
- system_listening_dev_addrs_cidr = get_all_interface_addr(True, listen_dev, [netifaces.AF_INET6])
- system_listening_dev_addrs = get_all_interface_addr(False, listen_dev, [netifaces.AF_INET6])
- for listen_if_or_addr in upnpd['listen']:
- if listen_if_or_addr not in netifaces.interfaces() and \
- (listen_if_or_addr not in system_listening_dev_addrs_cidr) and \
- (listen_if_or_addr not in system_listening_dev_addrs) and \
- is_ipv6(listen_if_or_addr) and \
- (not IPv6Network(listen_if_or_addr).is_multicast):
- raise ConfigError(f'{listen_if_or_addr} must listen on the interface of the network card')
-
-def generate(upnpd):
- if not upnpd:
- return None
-
- if os.path.isfile(config_file):
- os.unlink(config_file)
-
- render(config_file, 'firewall/upnpd.conf.j2', upnpd)
-
-def apply(upnpd):
- systemd_service_name = 'miniupnpd.service'
- if not upnpd:
- # Stop the UPNP service
- call(f'systemctl stop {systemd_service_name}')
- else:
- # Start the UPNP service
- call(f'systemctl restart {systemd_service_name}')
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/etc/systemd/system/suricata.service.d/10-override.conf b/src/etc/systemd/system/suricata.service.d/10-override.conf
new file mode 100644
index 000000000..781256cf5
--- /dev/null
+++ b/src/etc/systemd/system/suricata.service.d/10-override.conf
@@ -0,0 +1,9 @@
+[Service]
+ExecStart=
+ExecStart=/usr/bin/suricata -D --af-packet -c /run/suricata/suricata.yaml --pidfile /run/suricata/suricata.pid
+PIDFile=
+PIDFile=/run/suricata/suricata.pid
+ExecReload=
+ExecReload=/usr/bin/suricatasc -c reload-rules /run/suricata/suricata.socket ; /bin/kill -HUP $MAINPID
+ExecStop=
+ExecStop=/usr/bin/suricatasc -c shutdown /run/suricata/suricata.socket
diff --git a/src/helpers/run-config-activation.py b/src/helpers/run-config-activation.py
new file mode 100755
index 000000000..58293702a
--- /dev/null
+++ b/src/helpers/run-config-activation.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import logging
+from pathlib import Path
+from argparse import ArgumentParser
+
+from vyos.compose_config import ComposeConfig
+from vyos.compose_config import ComposeConfigError
+from vyos.defaults import directories
+
+parser = ArgumentParser()
+parser.add_argument('config_file', type=str,
+ help="configuration file to modify with system-specific settings")
+parser.add_argument('--test-script', type=str,
+ help="test effect of named script")
+
+args = parser.parse_args()
+
+checkpoint_file = '/run/vyos-activate-checkpoint'
+log_file = Path(directories['config']).joinpath('vyos-activate.log')
+
+logger = logging.getLogger(__name__)
+fh = logging.FileHandler(log_file)
+formatter = logging.Formatter('%(message)s')
+fh.setFormatter(formatter)
+logger.addHandler(fh)
+
+if 'vyos-activate-debug' in Path('/proc/cmdline').read_text():
+ print(f'\nactivate-debug enabled: file {checkpoint_file}_* on error')
+ debug = checkpoint_file
+ logger.setLevel(logging.DEBUG)
+else:
+ debug = None
+ logger.setLevel(logging.INFO)
+
+def sort_key(s: Path):
+ s = s.stem
+ pre, rem = re.match(r'(\d*)(?:-)?(.+)', s).groups()
+ return int(pre or 0), rem
+
+def file_ext(file_name: str) -> str:
+ """Return an identifier from file name for checkpoint file extension.
+ """
+ return Path(file_name).stem
+
+script_dir = Path(directories['activate'])
+
+if args.test_script:
+ script_list = [script_dir.joinpath(args.test_script)]
+else:
+ script_list = sorted(script_dir.glob('*.py'), key=sort_key)
+
+config_file = args.config_file
+config_str = Path(config_file).read_text()
+
+compose = ComposeConfig(config_str, checkpoint_file=debug)
+
+for file in script_list:
+ file = file.as_posix()
+ logger.info(f'calling {file}')
+ try:
+ compose.apply_file(file, func_name='activate')
+ except ComposeConfigError as e:
+ if debug:
+ compose.write(f'{compose.checkpoint_file}_{file_ext(file)}')
+ logger.error(f'config-activation error in {file}: {e}')
+
+compose.write(config_file, with_version=True)
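sort_key() above orders activation scripts by their numeric prefix, with prefix-less names sorting first as 0; run-config-activation.py then applies them in that order against the boot configuration. A quick, runnable illustration of the split the regex performs (file names other than 20-ethernet_offload.py are invented):

    import re
    from pathlib import Path

    def sort_key(s: Path):
        s = s.stem
        pre, rem = re.match(r'(\d*)(?:-)?(.+)', s).groups()
        return int(pre or 0), rem

    scripts = [Path('20-ethernet_offload.py'), Path('5-example.py'), Path('no_prefix.py')]
    # Keys: (0, 'no_prefix'), (5, 'example'), (20, 'ethernet_offload')
    assert [p.name for p in sorted(scripts, key=sort_key)] == \
        ['no_prefix.py', '5-example.py', '20-ethernet_offload.py']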
diff --git a/src/init/vyos-router b/src/init/vyos-router
index 15e37df07..59004fdc1 100755
--- a/src/init/vyos-router
+++ b/src/init/vyos-router
@@ -22,6 +22,7 @@ declare progname=${0##*/}
declare action=$1; shift
declare -x BOOTFILE=$vyatta_sysconfdir/config/config.boot
+declare -x DEFAULT_BOOTFILE=$vyatta_sysconfdir/config.boot.default
# If vyos-config= boot option is present, use that file instead
for x in $(cat /proc/cmdline); do
@@ -129,9 +130,16 @@ unmount_encrypted_config() {
# if necessary, provide initial config
init_bootfile () {
+ # define and version default boot config if not present
+ if [ ! -r $DEFAULT_BOOTFILE ]; then
+ if [ -f $vyos_data_dir/config.boot.default ]; then
+ cp $vyos_data_dir/config.boot.default $DEFAULT_BOOTFILE
+ $vyos_libexec_dir/system-versions-foot.py >> $DEFAULT_BOOTFILE
+ fi
+ fi
if [ ! -r $BOOTFILE ] ; then
- if [ -f $vyatta_sysconfdir/config.boot.default ]; then
- cp $vyatta_sysconfdir/config.boot.default $BOOTFILE
+ if [ -f $DEFAULT_BOOTFILE ]; then
+ cp $DEFAULT_BOOTFILE $BOOTFILE
else
$vyos_libexec_dir/system-versions-foot.py > $BOOTFILE
fi
@@ -149,6 +157,15 @@ migrate_bootfile ()
fi
}
+# configure system-specific settings
+system_config ()
+{
+ if [ -x $vyos_libexec_dir/run-config-activation.py ]; then
+ log_progress_msg system
+ sg ${GROUP} -c "$vyos_libexec_dir/run-config-activation.py $BOOTFILE"
+ fi
+}
+
# load the initial config
load_bootfile ()
{
@@ -493,6 +510,8 @@ start ()
update_interface_config
+ disabled system_config || system_config
+
for s in ${subinit[@]} ; do
if ! disabled $s; then
log_progress_msg $s
diff --git a/src/migration-scripts/nat/7-to-8 b/src/migration-scripts/nat/7-to-8
new file mode 100755
index 000000000..ab2ffa6d3
--- /dev/null
+++ b/src/migration-scripts/nat/7-to-8
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# T6345: random - In kernel 5.0 and newer this is the same as fully-random.
+# In earlier kernels the port mapping will be randomized using a seeded
+# MD5 hash mix using source and destination address and destination port.
+# drop fully-random from CLI
+
+from sys import argv,exit
+from vyos.configtree import ConfigTree
+
+if len(argv) < 2:
+ print("Must specify file name!")
+ exit(1)
+
+file_name = argv[1]
+
+with open(file_name, 'r') as f:
+ config_file = f.read()
+
+config = ConfigTree(config_file)
+
+if not config.exists(['nat']):
+ # Nothing to do
+ exit(0)
+
+for direction in ['source', 'destination']:
+ # If a node doesn't exist, we obviously have nothing to do.
+ if not config.exists(['nat', direction]):
+ continue
+
+    # A 'source' or 'destination' node may exist without any rules under it;
+    # skip it in that case as well.
+    if not config.exists(['nat', direction, 'rule']):
+ continue
+
+ for rule in config.list_nodes(['nat', direction, 'rule']):
+ port_mapping = ['nat', direction, 'rule', rule, 'translation', 'options', 'port-mapping']
+ if config.exists(port_mapping):
+ tmp = config.return_value(port_mapping)
+ if tmp == 'fully-random':
+ config.set(port_mapping, value='random')
+
+try:
+ with open(file_name, 'w') as f:
+ f.write(config.to_string())
+except OSError as e:
+ print(f'Failed to save the modified config: {e}')
+ exit(1)
diff --git a/src/migration-scripts/reverse-proxy/0-to-1 b/src/migration-scripts/reverse-proxy/0-to-1
new file mode 100755
index 000000000..d61493815
--- /dev/null
+++ b/src/migration-scripts/reverse-proxy/0-to-1
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# T6409: Remove unused 'backend <name> parameters' node
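+# e.g. 'load-balancing reverse-proxy backend bk-example parameters' and everything
+# under it is deleted ('bk-example' is an illustrative backend name).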
+
+from sys import argv, exit
+from vyos.configtree import ConfigTree
+
+if len(argv) < 2:
+ print("Must specify file name!")
+ exit(1)
+
+file_name = argv[1]
+
+with open(file_name, 'r') as f:
+ config_file = f.read()
+
+config = ConfigTree(config_file)
+base = ['load-balancing', 'reverse-proxy', 'backend']
+if not config.exists(base):
+ # Nothing to do
+ exit(0)
+
+# we need to run this for every configured backend
+for backend in config.list_nodes(base):
+ param_node = base + [backend, 'parameters']
+ if config.exists(param_node):
+ config.delete(param_node)
+
+try:
+ with open(file_name, 'w') as f:
+ f.write(config.to_string())
+except OSError as e:
+    print(f'Failed to save the modified config: {e}')
+ exit(1)
diff --git a/src/op_mode/cgnat.py b/src/op_mode/cgnat.py
new file mode 100755
index 000000000..9ad8f92f9
--- /dev/null
+++ b/src/op_mode/cgnat.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import sys
+import typing
+
+from tabulate import tabulate
+
+import vyos.opmode
+
+from vyos.configquery import ConfigTreeQuery
+from vyos.utils.process import cmd
+
+CGNAT_TABLE = 'cgnat'
+
+
+def _get_raw_data(external_address: str = '', internal_address: str = '') -> list[dict]:
+    """Return CGNAT allocations as a list of dicts, filtered by external or internal address if provided."""
+ cmd_output = cmd(f'nft --json list table ip {CGNAT_TABLE}')
+ data = json.loads(cmd_output)
+
+ elements = data['nftables'][2]['map']['elem']
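+    # Each map element is expected to look like (illustrative addresses):
+    # ['100.64.0.1', {'concat': ['192.0.2.1', {'range': [1024, 1055]}]}]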
+ allocations = []
+ for elem in elements:
+ internal = elem[0] # internal
+ external = elem[1]['concat'][0] # external
+ start_port = elem[1]['concat'][1]['range'][0]
+ end_port = elem[1]['concat'][1]['range'][1]
+ port_range = f'{start_port}-{end_port}'
+
+ if (internal_address and internal != internal_address) or (
+ external_address and external != external_address
+ ):
+ continue
+
+ allocations.append(
+ {
+ 'internal_address': internal,
+ 'external_address': external,
+ 'port_range': port_range,
+ }
+ )
+
+ return allocations
+
+
+def _get_formatted_output(allocations: list[dict]) -> str:
+ # Convert the list of dictionaries to a list of tuples for tabulate
+ headers = ['Internal IP', 'External IP', 'Port range']
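+    # Sample formatted output (illustrative):
+    # Internal IP    External IP    Port range
+    # 100.64.0.1     192.0.2.1      1024-1055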
+ data = [
+ (alloc['internal_address'], alloc['external_address'], alloc['port_range'])
+ for alloc in allocations
+ ]
+ output = tabulate(data, headers, numalign="left")
+ return output
+
+
+def show_allocation(
+ raw: bool,
+ external_address: typing.Optional[str],
+ internal_address: typing.Optional[str],
+) -> str:
+ config = ConfigTreeQuery()
+ if not config.exists('nat cgnat'):
+ raise vyos.opmode.UnconfiguredSubsystem('CGNAT is not configured')
+
+    raw_data = _get_raw_data(external_address, internal_address)
+    if raw:
+        return raw_data
+    return _get_formatted_output(raw_data)
+
+
+if __name__ == '__main__':
+ try:
+ res = vyos.opmode.run(sys.modules[__name__])
+ if res:
+ print(res)
+ except (ValueError, vyos.opmode.Error) as e:
+ print(e)
+ sys.exit(1)
diff --git a/src/op_mode/evpn.py b/src/op_mode/evpn.py
new file mode 100644
index 000000000..cae4ab9f5
--- /dev/null
+++ b/src/op_mode/evpn.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2016-2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This script is a helper to run VTYSH commands for "show evpn", allowing for the --raw flag to output JSON
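+# In raw mode ' json' is appended to the vtysh command, e.g. 'show evpn vni'
+# becomes 'show evpn vni json' (the command shown is illustrative).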
+
+import sys
+import typing
+import json
+
+import vyos.opmode
+from vyos.utils.process import cmd
+
+def show_evpn(raw: bool, command: typing.Optional[str]):
+    if raw:
+        command = f"{command} json"
+        evpn_dict = {}
+        try:
+            evpn_dict['evpn'] = json.loads(cmd(f"vtysh -c '{command}'"))
+        except Exception:
+            raise vyos.opmode.DataUnavailable(f"\"{command.replace(' json', '')}\" is invalid or has no JSON option")
+
+        return evpn_dict
+    else:
+        return cmd(f"vtysh -c '{command}'")
+
+if __name__ == '__main__':
+ try:
+ res = vyos.opmode.run(sys.modules[__name__])
+ if res:
+ print(res)
+ except (ValueError, vyos.opmode.Error) as e:
+ print(e)
+ sys.exit(1)
diff --git a/src/op_mode/ikev2_profile_generator.py b/src/op_mode/ikev2_profile_generator.py
index 2b29f94bf..4ac4fb14a 100755
--- a/src/op_mode/ikev2_profile_generator.py
+++ b/src/op_mode/ikev2_profile_generator.py
@@ -144,15 +144,22 @@ tmp = reversed(tmp)
data['rfqdn'] = '.'.join(tmp)
pki = conf.get_config_dict(pki_base, get_first_key=True)
-ca_name = data['authentication']['x509']['ca_certificate']
cert_name = data['authentication']['x509']['certificate']
-ca_cert = load_certificate(pki['ca'][ca_name]['certificate'])
-cert = load_certificate(pki['certificate'][cert_name]['certificate'])
+data['certs'] = []
+
+for ca_name in data['authentication']['x509']['ca_certificate']:
+ tmp = {}
+ ca_cert = load_certificate(pki['ca'][ca_name]['certificate'])
+ cert = load_certificate(pki['certificate'][cert_name]['certificate'])
+
+ tmp['ca_cn'] = ca_cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
+ tmp['cert_cn'] = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
+ tmp['ca_cert'] = conf.value(pki_base + ['ca', ca_name, 'certificate'])
+
+ data['certs'].append(tmp)
-data['ca_cn'] = ca_cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
-data['cert_cn'] = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
-data['ca_cert'] = conf.value(pki_base + ['ca', ca_name, 'certificate'])
esp_proposals = conf.get_config_dict(ipsec_base + ['esp-group', data['esp_group'], 'proposal'],
key_mangling=('-', '_'), get_first_key=True)
diff --git a/src/op_mode/image_installer.py b/src/op_mode/image_installer.py
index 0d2d7076c..bdc16de15 100755
--- a/src/op_mode/image_installer.py
+++ b/src/op_mode/image_installer.py
@@ -40,13 +40,14 @@ from vyos.template import render
from vyos.utils.io import ask_input, ask_yes_no, select_entry
from vyos.utils.file import chmod_2775
from vyos.utils.process import cmd, run
-from vyos.version import get_remote_version
+from vyos.version import get_remote_version, get_version_data
# define text messages
MSG_ERR_NOT_LIVE: str = 'The system is already installed. Please use "add system image" instead.'
MSG_ERR_LIVE: str = 'The system is in live-boot mode. Please use "install image" instead.'
MSG_ERR_NO_DISK: str = 'No suitable disk was found. There must be at least one disk of 2GB or greater size.'
MSG_ERR_IMPROPER_IMAGE: str = 'Missing sha256sum.txt.\nEither this image is corrupted, or of era 1.2.x (md5sum) and would downgrade image tools;\ndisallowed in either case.'
+MSG_ERR_ARCHITECTURE_MISMATCH: str = 'Upgrading to a different image architecture will break your system.'
MSG_INFO_INSTALL_WELCOME: str = 'Welcome to VyOS installation!\nThis command will install VyOS to your permanent storage.'
MSG_INFO_INSTALL_EXIT: str = 'Exiting from VyOS installation'
MSG_INFO_INSTALL_SUCCESS: str = 'The image installed successfully; please reboot now.'
@@ -79,6 +80,9 @@ MSG_WARN_ROOT_SIZE_TOOSMALL: str = 'The size is too small. Try again'
MSG_WARN_IMAGE_NAME_WRONG: str = 'The suggested name is unsupported!\n'\
'It must be between 1 and 64 characters long and contains only the next characters: .+-_ a-z A-Z 0-9'
MSG_WARN_PASSWORD_CONFIRM: str = 'The entered values did not match. Try again'
+MSG_WARN_FLAVOR_MISMATCH: str = 'The running image flavor is "{0}". The new image flavor is "{1}".\n' \
+'Installing a different image flavor may cause functionality degradation or break your system.\n' \
+'Do you want to continue with installation?'
CONST_MIN_DISK_SIZE: int = 2147483648 # 2 GB
CONST_MIN_ROOT_SIZE: int = 1610612736 # 1.5 GB
# a reserved space: 2MB for header, 1 MB for BIOS partition, 256 MB for EFI
@@ -693,6 +697,31 @@ def is_raid_install(install_object: Union[disk.DiskDetails, raid.RaidDetails]) -
return False
+def validate_compatibility(iso_path: str) -> None:
+ """Check architecture and flavor compatibility with the running image
+
+ Args:
+ iso_path (str): a path to the mounted ISO image
+ """
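+    # version.json in the ISO is expected to carry 'architecture' and 'flavor';
+    # for the running image the architecture falls back to 'dpkg --print-architecture'.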
+ old_data = get_version_data()
+ old_flavor = old_data.get('flavor', '')
+ old_architecture = old_data.get('architecture') or cmd('dpkg --print-architecture')
+
+ new_data = get_version_data(f'{iso_path}/version.json')
+ new_flavor = new_data.get('flavor', '')
+ new_architecture = new_data.get('architecture', '')
+
+    if old_architecture != new_architecture:
+ print(MSG_ERR_ARCHITECTURE_MISMATCH)
+ cleanup()
+ exit(MSG_INFO_INSTALL_EXIT)
+
+    if old_flavor != new_flavor:
+ if not ask_yes_no(MSG_WARN_FLAVOR_MISMATCH.format(old_flavor, new_flavor), default=False):
+ cleanup()
+ exit(MSG_INFO_INSTALL_EXIT)
+
+
def install_image() -> None:
"""Install an image to a disk
"""
@@ -876,6 +905,9 @@ def add_image(image_path: str, vrf: str = None, username: str = '',
Path(DIR_ISO_MOUNT).mkdir(mode=0o755, parents=True)
disk.partition_mount(iso_path, DIR_ISO_MOUNT, 'iso9660')
+ print('Validating image compatibility')
+ validate_compatibility(DIR_ISO_MOUNT)
+
# check sums
print('Validating image checksums')
if not Path(DIR_ISO_MOUNT).joinpath('sha256sum.txt').exists():
diff --git a/src/op_mode/nat.py b/src/op_mode/nat.py
index 2bc7e24fe..16a545cda 100755
--- a/src/op_mode/nat.py
+++ b/src/op_mode/nat.py
@@ -99,6 +99,23 @@ def _get_raw_translation(direction, family, address=None):
def _get_formatted_output_rules(data, direction, family):
+    def _get_ports_for_output(my_dict):
+        # Build a comma separated string of all configured ports or port
+        # ranges, e.g. {'op': '!=', 'set': [80, {'range': [8000, 8080]}]}
+        # renders as '!80,8000-8080' (illustrative values).
+        ports = []
+        for port in my_dict['set']:
+            if isinstance(port, dict) and 'range' in port:
+                # Port range, e.g. 8000-8080
+                ports.append('-'.join(map(str, port['range'])))
+            else:
+                # Single port
+                ports.append(str(port))
+        output = ','.join(ports)
+        # Handle case where configured ports are a negated list
+        if my_dict['op'] == '!=':
+            output = '!' + output
+        return output
+
# Add default values before loop
sport, dport, proto = 'any', 'any', 'any'
saddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
@@ -126,21 +143,9 @@ def _get_formatted_output_rules(data, direction, family):
elif my_dict['field'] == 'daddr':
daddr = f'{op}{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
elif my_dict['field'] == 'sport':
- # Port range or single port
- if jmespath.search('set[*].range', my_dict):
- sport = my_dict['set'][0]['range']
- sport = '-'.join(map(str, sport))
- else:
- sport = my_dict.get('set')
- sport = ','.join(map(str, sport))
+ sport = _get_ports_for_output(my_dict)
elif my_dict['field'] == 'dport':
- # Port range or single port
- if jmespath.search('set[*].range', my_dict):
- dport = my_dict["set"][0]["range"]
- dport = '-'.join(map(str, dport))
- else:
- dport = my_dict.get('set')
- dport = ','.join(map(str, dport))
+ dport = _get_ports_for_output(my_dict)
else:
field = jmespath.search('left.payload.field', match)
if field == 'saddr':
@@ -263,7 +268,7 @@ def _get_formatted_translation(dict_data, nat_direction, family, verbose):
proto = meta['layer4']['protoname']
if direction == 'independent':
conn_id = meta['id']
- timeout = meta['timeout']
+ timeout = meta.get('timeout', 'n/a')
orig_src = f'{orig_src}:{orig_sport}' if orig_sport else orig_src
orig_dst = f'{orig_dst}:{orig_dport}' if orig_dport else orig_dst
reply_src = f'{reply_src}:{reply_sport}' if reply_sport else reply_src
diff --git a/src/op_mode/pki.py b/src/op_mode/pki.py
index b1ca6ee29..361b60e0e 100755
--- a/src/op_mode/pki.py
+++ b/src/op_mode/pki.py
@@ -876,7 +876,7 @@ def show_certificate_authority(name=None, pem=False):
print("Certificate Authorities:")
print(tabulate.tabulate(data, headers))
-def show_certificate(name=None, pem=False):
+def show_certificate(name=None, pem=False, fingerprint_hash=None):
headers = ['Name', 'Type', 'Subject CN', 'Issuer CN', 'Issued', 'Expiry', 'Revoked', 'Private Key', 'CA Present']
data = []
certs = get_config_certificate()
@@ -897,6 +897,9 @@ def show_certificate(name=None, pem=False):
if name and pem:
print(encode_certificate(cert))
return
+ elif name and fingerprint_hash:
+ print(get_certificate_fingerprint(cert, fingerprint_hash))
+ return
ca_name = get_certificate_ca(cert, ca_certs)
cert_subject_cn = cert.subject.rfc4514_string().split(",")[0]
@@ -923,12 +926,6 @@ def show_certificate(name=None, pem=False):
print("Certificates:")
print(tabulate.tabulate(data, headers))
-def show_certificate_fingerprint(name, hash):
- cert = get_config_certificate(name=name)
- cert = load_certificate(cert['certificate'])
-
- print(get_certificate_fingerprint(cert, hash))
-
def show_crl(name=None, pem=False):
headers = ['CA Name', 'Updated', 'Revokes']
data = []
@@ -1074,7 +1071,7 @@ if __name__ == '__main__':
if args.fingerprint is None:
show_certificate(None if args.certificate == 'all' else args.certificate, args.pem)
else:
- show_certificate_fingerprint(args.certificate, args.fingerprint)
+ show_certificate(args.certificate, fingerprint_hash=args.fingerprint)
elif args.crl:
show_crl(None if args.crl == 'all' else args.crl, args.pem)
else:
diff --git a/src/op_mode/snmp_v3.py b/src/op_mode/snmp_v3.py
index a1f76f0bc..abeb524dd 100755
--- a/src/op_mode/snmp_v3.py
+++ b/src/op_mode/snmp_v3.py
@@ -85,7 +85,7 @@ if __name__ == '__main__':
'user': [],
'view': []
}
-
+
if c.exists_effective('service snmp v3 group'):
for g in c.list_effective_nodes('service snmp v3 group'):
group = {
@@ -146,7 +146,6 @@ if __name__ == '__main__':
data['trap'].append(trap)
- print(data)
if args.all:
# Special case, print all templates !
tmpl = jinja2.Template(GROUP_OUTP_TMPL_SRC)
diff --git a/src/op_mode/version.py b/src/op_mode/version.py
index ad0293aca..09d69ad1d 100755
--- a/src/op_mode/version.py
+++ b/src/op_mode/version.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2016-2022 VyOS maintainers and contributors
+# Copyright (C) 2016-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -30,11 +30,15 @@ from jinja2 import Template
version_output_tmpl = """
Version: VyOS {{version}}
Release train: {{release_train}}
+Release flavor: {{flavor}}
Built by: {{built_by}}
Built on: {{built_on}}
Build UUID: {{build_uuid}}
Build commit ID: {{build_git}}
+{%- if build_comment %}
+Build comment: {{build_comment}}
+{% endif %}
Architecture: {{system_arch}}
Boot via: {{boot_via}}
diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server
index ecbf6fcf9..7f5233c6b 100755
--- a/src/services/vyos-http-api-server
+++ b/src/services/vyos-http-api-server
@@ -23,16 +23,17 @@ import logging
import signal
import traceback
import threading
+from enum import Enum
from time import sleep
-from typing import List, Union, Callable, Dict
+from typing import List, Union, Callable, Dict, Self
from fastapi import FastAPI, Depends, Request, Response, HTTPException
from fastapi import BackgroundTasks
from fastapi.responses import HTMLResponse
from fastapi.exceptions import RequestValidationError
from fastapi.routing import APIRoute
-from pydantic import BaseModel, StrictStr, validator
+from pydantic import BaseModel, StrictStr, validator, model_validator
from starlette.middleware.cors import CORSMiddleware
from starlette.datastructures import FormData
from starlette.formparsers import FormParser, MultiPartParser
@@ -177,16 +178,35 @@ class ConfigFileModel(ApiModel):
}
}
+
+class ImageOp(str, Enum):
+ add = "add"
+ delete = "delete"
+ show = "show"
+ set_default = "set_default"
+
+
class ImageModel(ApiModel):
- op: StrictStr
+ op: ImageOp
url: StrictStr = None
name: StrictStr = None
+ @model_validator(mode='after')
+ def check_data(self) -> Self:
+ if self.op == 'add':
+ if not self.url:
+ raise ValueError("Missing required field \"url\"")
+ elif self.op in ['delete', 'set_default']:
+ if not self.name:
+ raise ValueError("Missing required field \"name\"")
+
+ return self
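+    # e.g. {"op": "add"} without "url" is rejected, as is {"op": "delete"} or
+    # {"op": "set_default"} without "name"; "show" needs no additional fields.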
+
class Config:
schema_extra = {
"example": {
"key": "id_key",
- "op": "add | delete",
+ "op": "add | delete | show | set_default",
"url": "imagelocation",
"name": "imagename",
}
@@ -668,19 +688,13 @@ def image_op(data: ImageModel):
try:
if op == 'add':
- if data.url:
- url = data.url
- else:
- return error(400, "Missing required field \"url\"")
- res = session.install_image(url)
+ res = session.install_image(data.url)
elif op == 'delete':
- if data.name:
- name = data.name
- else:
- return error(400, "Missing required field \"name\"")
- res = session.remove_image(name)
- else:
- return error(400, f"'{op}' is not a valid operation")
+ res = session.remove_image(data.name)
+ elif op == 'show':
+ res = session.show(["system", "image"])
+ elif op == 'set_default':
+ res = session.set_default_image(data.name)
except ConfigSessionError as e:
return error(400, str(e))
except Exception as e:
diff --git a/src/systemd/miniupnpd.service b/src/systemd/miniupnpd.service
deleted file mode 100644
index 51cb2eed8..000000000
--- a/src/systemd/miniupnpd.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=UPnP service
-ConditionPathExists=/run/upnp/miniupnp.conf
-After=vyos-router.service
-StartLimitIntervalSec=0
-
-[Service]
-WorkingDirectory=/run/upnp
-Type=simple
-ExecStart=/usr/sbin/miniupnpd -d -f /run/upnp/miniupnp.conf
-PrivateTmp=yes
-PIDFile=/run/miniupnpd.pid
-Restart=on-failure
diff --git a/src/validators/port-range-exclude b/src/validators/port-range-exclude
new file mode 100755
index 000000000..4c049e98f
--- /dev/null
+++ b/src/validators/port-range-exclude
@@ -0,0 +1,7 @@
+#!/bin/sh
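+# Accept a negated port or port range, e.g. '!1000-2000': the leading '!' is
+# required and the remainder is checked by the port-range validator.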
+arg="$1"
+case "$arg" in
+    "!"*) ;;
+    *) exit 1 ;;
+esac
+path=$(dirname "$0")
+${path}/port-range "${arg#!}"