Diffstat (limited to 'src/conf_mode')
-rwxr-xr-x  src/conf_mode/container.py | 36
-rwxr-xr-x  src/conf_mode/dns_dynamic.py | 151
-rwxr-xr-x  src/conf_mode/firewall.py | 83
-rwxr-xr-x  src/conf_mode/http-api.py | 150
-rwxr-xr-x  src/conf_mode/https.py | 266
-rwxr-xr-x  src/conf_mode/interfaces-ethernet.py | 225
-rwxr-xr-x  src/conf_mode/interfaces_bonding.py (renamed from src/conf_mode/interfaces-bonding.py) | 59
-rwxr-xr-x  src/conf_mode/interfaces_bridge.py (renamed from src/conf_mode/interfaces-bridge.py) | 20
-rwxr-xr-x  src/conf_mode/interfaces_dummy.py (renamed from src/conf_mode/interfaces-dummy.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_ethernet.py | 391
-rwxr-xr-x  src/conf_mode/interfaces_geneve.py (renamed from src/conf_mode/interfaces-geneve.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_input.py (renamed from src/conf_mode/interfaces-input.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_l2tpv3.py (renamed from src/conf_mode/interfaces-l2tpv3.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_loopback.py (renamed from src/conf_mode/interfaces-loopback.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_macsec.py (renamed from src/conf_mode/interfaces-macsec.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_openvpn.py (renamed from src/conf_mode/interfaces-openvpn.py) | 16
-rwxr-xr-x  src/conf_mode/interfaces_pppoe.py (renamed from src/conf_mode/interfaces-pppoe.py) | 11
-rwxr-xr-x  src/conf_mode/interfaces_pseudo-ethernet.py (renamed from src/conf_mode/interfaces-pseudo-ethernet.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_sstpc.py (renamed from src/conf_mode/interfaces-sstpc.py) | 6
-rwxr-xr-x  src/conf_mode/interfaces_tunnel.py (renamed from src/conf_mode/interfaces-tunnel.py) | 4
-rwxr-xr-x  src/conf_mode/interfaces_virtual-ethernet.py (renamed from src/conf_mode/interfaces-virtual-ethernet.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_vti.py (renamed from src/conf_mode/interfaces-vti.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_vxlan.py (renamed from src/conf_mode/interfaces-vxlan.py) | 62
-rwxr-xr-x  src/conf_mode/interfaces_wireguard.py (renamed from src/conf_mode/interfaces-wireguard.py) | 33
-rwxr-xr-x  src/conf_mode/interfaces_wireless.py (renamed from src/conf_mode/interfaces-wireless.py) | 0
-rwxr-xr-x  src/conf_mode/interfaces_wwan.py (renamed from src/conf_mode/interfaces-wwan.py) | 0
-rwxr-xr-x  src/conf_mode/le_cert.py | 115
-rwxr-xr-x  src/conf_mode/load-balancing_reverse-proxy.py (renamed from src/conf_mode/load-balancing-haproxy.py) | 45
-rwxr-xr-x  src/conf_mode/load-balancing_wan.py (renamed from src/conf_mode/load-balancing-wan.py) | 0
-rwxr-xr-x  src/conf_mode/nat.py | 32
-rwxr-xr-x  src/conf_mode/nat64.py | 216
-rwxr-xr-x  src/conf_mode/nat66.py | 34
-rwxr-xr-x  src/conf_mode/netns.py | 5
-rwxr-xr-x  src/conf_mode/pki.py | 235
-rwxr-xr-x  src/conf_mode/policy_local-route.py (renamed from src/conf_mode/policy-local-route.py) | 79
-rwxr-xr-x  src/conf_mode/policy_route.py (renamed from src/conf_mode/policy-route.py) | 4
-rwxr-xr-x  src/conf_mode/protocols_bfd.py | 3
-rwxr-xr-x  src/conf_mode/protocols_bgp.py | 47
-rwxr-xr-x  src/conf_mode/protocols_igmp-proxy.py (renamed from src/conf_mode/igmp_proxy.py) | 0
-rwxr-xr-x  src/conf_mode/protocols_igmp.py | 140
-rwxr-xr-x  src/conf_mode/protocols_isis.py | 50
-rwxr-xr-x  src/conf_mode/protocols_nhrp.py | 2
-rwxr-xr-x  src/conf_mode/protocols_ospf.py | 15
-rwxr-xr-x  src/conf_mode/protocols_pim.py | 207
-rwxr-xr-x  src/conf_mode/protocols_pim6.py | 57
-rwxr-xr-x  src/conf_mode/protocols_rpki.py | 6
-rwxr-xr-x  src/conf_mode/protocols_segment-routing.py | 118
-rwxr-xr-x  src/conf_mode/protocols_static_arp.py (renamed from src/conf_mode/arp.py) | 0
-rwxr-xr-x  src/conf_mode/protocols_static_neighbor-proxy.py | 95
-rwxr-xr-x  src/conf_mode/qos.py | 4
-rwxr-xr-x  src/conf_mode/service_broadcast-relay.py (renamed from src/conf_mode/bcast_relay.py) | 0
-rwxr-xr-x  src/conf_mode/service_config-sync.py (renamed from src/conf_mode/service_config_sync.py) | 0
-rwxr-xr-x  src/conf_mode/service_conntrack-sync.py (renamed from src/conf_mode/conntrack_sync.py) | 0
-rwxr-xr-x  src/conf_mode/service_dhcp-relay.py (renamed from src/conf_mode/dhcp_relay.py) | 0
-rwxr-xr-x  src/conf_mode/service_dhcp-server.py (renamed from src/conf_mode/dhcp_server.py) | 184
-rwxr-xr-x  src/conf_mode/service_dhcpv6-relay.py (renamed from src/conf_mode/dhcpv6_relay.py) | 0
-rwxr-xr-x  src/conf_mode/service_dhcpv6-server.py (renamed from src/conf_mode/dhcpv6_server.py) | 117
-rwxr-xr-x  src/conf_mode/service_dns_dynamic.py | 192
-rwxr-xr-x  src/conf_mode/service_dns_forwarding.py (renamed from src/conf_mode/dns_forwarding.py) | 36
-rwxr-xr-x  src/conf_mode/service_event-handler.py (renamed from src/conf_mode/service_event_handler.py) | 0
-rwxr-xr-x  src/conf_mode/service_https.py | 238
-rwxr-xr-x  src/conf_mode/service_ids_ddos-protection.py (renamed from src/conf_mode/service_ids_fastnetmon.py) | 0
-rwxr-xr-x  src/conf_mode/service_ipoe-server.py | 107
-rwxr-xr-x  src/conf_mode/service_lldp.py (renamed from src/conf_mode/lldp.py) | 5
-rwxr-xr-x  src/conf_mode/service_mdns_repeater.py (renamed from src/conf_mode/service_mdns-repeater.py) | 14
-rwxr-xr-x  src/conf_mode/service_ndp-proxy.py | 91
-rwxr-xr-x  src/conf_mode/service_ntp.py (renamed from src/conf_mode/ntp.py) | 4
-rwxr-xr-x  src/conf_mode/service_pppoe-server.py | 26
-rwxr-xr-x  src/conf_mode/service_salt-minion.py (renamed from src/conf_mode/salt-minion.py) | 0
-rwxr-xr-x  src/conf_mode/service_snmp.py (renamed from src/conf_mode/snmp.py) | 16
-rwxr-xr-x  src/conf_mode/service_ssh.py (renamed from src/conf_mode/ssh.py) | 0
-rwxr-xr-x  src/conf_mode/service_tftp-server.py (renamed from src/conf_mode/tftp_server.py) | 0
-rwxr-xr-x  src/conf_mode/system_acceleration.py (renamed from src/conf_mode/intel_qat.py) | 0
-rwxr-xr-x  src/conf_mode/system_config-management.py (renamed from src/conf_mode/config_mgmt.py) | 0
-rwxr-xr-x  src/conf_mode/system_conntrack.py (renamed from src/conf_mode/conntrack.py) | 7
-rwxr-xr-x  src/conf_mode/system_console.py | 26
-rwxr-xr-x  src/conf_mode/system_flow-accounting.py (renamed from src/conf_mode/flow_accounting_conf.py) | 34
-rwxr-xr-x  src/conf_mode/system_frr.py | 4
-rwxr-xr-x  src/conf_mode/system_host-name.py (renamed from src/conf_mode/host_name.py) | 5
-rwxr-xr-x  src/conf_mode/system_ip.py (renamed from src/conf_mode/system-ip.py) | 0
-rwxr-xr-x  src/conf_mode/system_ipv6.py (renamed from src/conf_mode/system-ipv6.py) | 0
-rwxr-xr-x  src/conf_mode/system_login.py (renamed from src/conf_mode/system-login.py) | 19
-rwxr-xr-x  src/conf_mode/system_login_banner.py (renamed from src/conf_mode/system-login-banner.py) | 0
-rwxr-xr-x  src/conf_mode/system_logs.py (renamed from src/conf_mode/system-logs.py) | 0
-rwxr-xr-x  src/conf_mode/system_option.py (renamed from src/conf_mode/system-option.py) | 11
-rwxr-xr-x  src/conf_mode/system_proxy.py (renamed from src/conf_mode/system-proxy.py) | 0
-rwxr-xr-x  src/conf_mode/system_sflow.py | 11
-rwxr-xr-x  src/conf_mode/system_syslog.py (renamed from src/conf_mode/system-syslog.py) | 0
-rwxr-xr-x  src/conf_mode/system_task-scheduler.py (renamed from src/conf_mode/task_scheduler.py) | 0
-rwxr-xr-x  src/conf_mode/system_timezone.py (renamed from src/conf_mode/system-timezone.py) | 0
-rwxr-xr-x  src/conf_mode/system_update-check.py (renamed from src/conf_mode/system_update_check.py) | 0
-rwxr-xr-x  src/conf_mode/vpn_ipsec.py | 33
-rwxr-xr-x  src/conf_mode/vpn_l2tp.py | 382
-rwxr-xr-x  src/conf_mode/vpn_openconnect.py | 10
-rwxr-xr-x  src/conf_mode/vpn_pptp.py | 256
-rwxr-xr-x  src/conf_mode/vpn_sstp.py | 22
-rwxr-xr-x  src/conf_mode/vpp.py | 207
-rwxr-xr-x  src/conf_mode/vrf.py | 45
98 files changed, 2656 insertions, 2468 deletions
diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py
index daad9186e..321d00abf 100755
--- a/src/conf_mode/container.py
+++ b/src/conf_mode/container.py
@@ -142,11 +142,17 @@ def verify(container):
for address in container_config['network'][network_name]['address']:
network = None
if is_ipv4(address):
- network = [x for x in container['network'][network_name]['prefix'] if is_ipv4(x)][0]
- cnt_ipv4 += 1
+ try:
+ network = [x for x in container['network'][network_name]['prefix'] if is_ipv4(x)][0]
+ cnt_ipv4 += 1
+ except:
+ raise ConfigError(f'Network "{network_name}" does not contain an IPv4 prefix!')
elif is_ipv6(address):
- network = [x for x in container['network'][network_name]['prefix'] if is_ipv6(x)][0]
- cnt_ipv6 += 1
+ try:
+ network = [x for x in container['network'][network_name]['prefix'] if is_ipv6(x)][0]
+ cnt_ipv6 += 1
+ except:
+ raise ConfigError(f'Network "{network_name}" does not contain an IPv6 prefix!')
# Specified container IP address must belong to network prefix
if ip_address(address) not in ip_network(network):
@@ -208,6 +214,10 @@ def verify(container):
if {'allow_host_networks', 'network'} <= set(container_config):
raise ConfigError(f'"allow-host-networks" and "network" for "{name}" cannot be both configured at the same time!')
+ # gid cannot be set without uid
+ if 'gid' in container_config and 'uid' not in container_config:
+ raise ConfigError(f'Cannot set "gid" without "uid" for container')
+
# Add new network
if 'network' in container:
for network, network_config in container['network'].items():
@@ -232,9 +242,9 @@ def verify(container):
# A network attached to a container can not be deleted
if {'network_remove', 'name'} <= set(container):
for network in container['network_remove']:
- for container, container_config in container['name'].items():
- if 'network' in container_config and network in container_config['network']:
- raise ConfigError(f'Can not remove network "{network}", used by container "{container}"!')
+ for c, c_config in container['name'].items():
+ if 'network' in c_config and network in c_config['network']:
+ raise ConfigError(f'Can not remove network "{network}", used by container "{c}"!')
if 'registry' in container:
for registry, registry_config in container['registry'].items():
@@ -302,6 +312,14 @@ def generate_run_arguments(name, container_config):
# If listen_addresses is empty, just include the standard publish command
port += f' --publish {sport}:{dport}/{protocol}'
+ # Set uid and gid
+ uid = ''
+ if 'uid' in container_config:
+ uid = container_config['uid']
+ if 'gid' in container_config:
+ uid += ':' + container_config['gid']
+ uid = f'--user {uid}'
+
# Bind volume
volume = ''
if 'volume' in container_config:
@@ -314,7 +332,7 @@ def generate_run_arguments(name, container_config):
container_base_cmd = f'--detach --interactive --tty --replace {cap_add} ' \
f'--memory {memory}m --shm-size {shared_memory}m --memory-swap 0 --restart {restart} ' \
- f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label}'
+ f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label} {uid}'
entrypoint = ''
if 'entrypoint' in container_config:
@@ -349,7 +367,7 @@ def generate_run_arguments(name, container_config):
else:
ip_param += f' --ip {address}'
- return f'{container_base_cmd} --net {networks} {ip_param} {entrypoint} {image} {command} {command_arguments}'.strip()
+ return f'{container_base_cmd} --no-healthcheck --net {networks} {ip_param} {entrypoint} {image} {command} {command_arguments}'.strip()
def generate(container):
# bail out early - looks like removal from running config
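For illustration, a minimal standalone sketch of the --user argument assembly added to generate_run_arguments() above (plain Python, not the conf-mode script itself; the container_config dict shape is assumed from the hunk):

def user_argument(container_config: dict) -> str:
    # gid is only honoured together with uid, yielding "--user <uid>[:<gid>]"
    if 'uid' not in container_config:
        return ''
    user = container_config['uid']
    if 'gid' in container_config:
        user += ':' + container_config['gid']
    return f'--user {user}'

# e.g. user_argument({'uid': '1001', 'gid': '1001'}) == '--user 1001:1001'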
diff --git a/src/conf_mode/dns_dynamic.py b/src/conf_mode/dns_dynamic.py
deleted file mode 100755
index 874c4b689..000000000
--- a/src/conf_mode/dns_dynamic.py
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2018-2023 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from sys import exit
-
-from vyos.config import Config
-from vyos.configverify import verify_interface_exists
-from vyos.template import render
-from vyos.utils.process import call
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-config_file = r'/run/ddclient/ddclient.conf'
-systemd_override = r'/run/systemd/system/ddclient.service.d/override.conf'
-
-# Protocols that require zone
-zone_necessary = ['cloudflare', 'godaddy', 'hetzner', 'gandi', 'nfsn']
-
-# Protocols that do not require username
-username_unnecessary = ['1984', 'cloudflare', 'cloudns', 'duckdns', 'freemyip', 'hetzner', 'keysystems', 'njalla']
-
-# Protocols that support TTL
-ttl_supported = ['cloudflare', 'gandi', 'hetzner', 'dnsexit', 'godaddy', 'nfsn']
-
-# Protocols that support both IPv4 and IPv6
-dualstack_supported = ['cloudflare', 'dyndns2', 'freedns', 'njalla']
-
-# dyndns2 protocol in ddclient honors dual stack for selective servers
-# because of the way it is implemented in ddclient
-dyndns_dualstack_servers = ['members.dyndns.org', 'dynv6.com']
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
-
- base = ['service', 'dns', 'dynamic']
- if not conf.exists(base):
- return None
-
- dyndns = conf.get_config_dict(base, key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True,
- with_recursive_defaults=True)
-
- dyndns['config_file'] = config_file
- return dyndns
-
-def verify(dyndns):
- # bail out early - looks like removal from running config
- if not dyndns or 'address' not in dyndns:
- return None
-
- for address in dyndns['address']:
- # If dyndns address is an interface, ensure it exists
- if address != 'web':
- verify_interface_exists(address)
-
- # RFC2136 - configuration validation
- if 'rfc2136' in dyndns['address'][address]:
- for config in dyndns['address'][address]['rfc2136'].values():
- for field in ['host_name', 'zone', 'server', 'key']:
- if field not in config:
- raise ConfigError(f'"{field.replace("_", "-")}" is required for RFC2136 '
- f'based Dynamic DNS service on "{address}"')
-
- # Dynamic DNS service provider - configuration validation
- if 'service' in dyndns['address'][address]:
- for service, config in dyndns['address'][address]['service'].items():
- error_msg = f'is required for Dynamic DNS service "{service}" on "{address}"'
-
- for field in ['host_name', 'password', 'protocol']:
- if field not in config:
- raise ConfigError(f'"{field.replace("_", "-")}" {error_msg}')
-
- if config['protocol'] in zone_necessary and 'zone' not in config:
- raise ConfigError(f'"zone" {error_msg}')
-
- if config['protocol'] not in zone_necessary and 'zone' in config:
- raise ConfigError(f'"{config["protocol"]}" does not support "zone"')
-
- if config['protocol'] not in username_unnecessary and 'username' not in config:
- raise ConfigError(f'"username" {error_msg}')
-
- if config['protocol'] not in ttl_supported and 'ttl' in config:
- raise ConfigError(f'"{config["protocol"]}" does not support "ttl"')
-
- if config['ip_version'] == 'both':
- if config['protocol'] not in dualstack_supported:
- raise ConfigError(f'"{config["protocol"]}" does not support '
- f'both IPv4 and IPv6 at the same time')
- # dyndns2 protocol in ddclient honors dual stack only for dyn.com (dyndns.org)
- if config['protocol'] == 'dyndns2' and 'server' in config and config['server'] not in dyndns_dualstack_servers:
- raise ConfigError(f'"{config["protocol"]}" does not support '
- f'both IPv4 and IPv6 at the same time for "{config["server"]}"')
-
- if {'wait_time', 'expiry_time'} <= config.keys() and int(config['expiry_time']) < int(config['wait_time']):
- raise ConfigError(f'"expiry-time" must be greater than "wait-time"')
-
- return None
-
-def generate(dyndns):
- # bail out early - looks like removal from running config
- if not dyndns or 'address' not in dyndns:
- return None
-
- render(config_file, 'dns-dynamic/ddclient.conf.j2', dyndns, permission=0o600)
- render(systemd_override, 'dns-dynamic/override.conf.j2', dyndns)
- return None
-
-def apply(dyndns):
- systemd_service = 'ddclient.service'
- # Reload systemd manager configuration
- call('systemctl daemon-reload')
-
- # bail out early - looks like removal from running config
- if not dyndns or 'address' not in dyndns:
- call(f'systemctl stop {systemd_service}')
- if os.path.exists(config_file):
- os.unlink(config_file)
- else:
- call(f'systemctl reload-or-restart {systemd_service}')
-
- return None
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py
index f6480ab0a..acb7dfa41 100755
--- a/src/conf_mode/firewall.py
+++ b/src/conf_mode/firewall.py
@@ -23,7 +23,7 @@ from sys import exit
from vyos.base import Warning
from vyos.config import Config
-from vyos.configdict import node_changed
+from vyos.configdict import is_node_changed
from vyos.configdiff import get_config_diff, Diff
from vyos.configdep import set_dependents, call_dependents
from vyos.configverify import verify_interface_exists
@@ -42,9 +42,6 @@ from vyos import airbag
airbag.enable()
-nat_conf_script = 'nat.py'
-policy_route_conf_script = 'policy-route.py'
-
nftables_conf = '/run/nftables.conf'
sysfs_config = {
@@ -133,7 +130,7 @@ def get_config(config=None):
with_recursive_defaults=True)
- firewall['group_resync'] = bool('group' in firewall or node_changed(conf, base + ['group']))
+ firewall['group_resync'] = bool('group' in firewall or is_node_changed(conf, base + ['group']))
if firewall['group_resync']:
# Update nat and policy-route as firewall groups were updated
set_dependents('group_resync', conf)
@@ -272,7 +269,7 @@ def verify_rule(firewall, rule_conf, ipv6):
raise ConfigError(f'{side} port-group and port cannot both be defined')
if 'log_options' in rule_conf:
- if 'log' not in rule_conf or 'enable' not in rule_conf['log']:
+ if 'log' not in rule_conf:
raise ConfigError('log-options defined, but log is not enable')
if 'snapshot_length' in rule_conf['log_options'] and 'group' not in rule_conf['log_options']:
@@ -283,8 +280,8 @@ def verify_rule(firewall, rule_conf, ipv6):
for direction in ['inbound_interface','outbound_interface']:
if direction in rule_conf:
- if 'interface_name' in rule_conf[direction] and 'interface_group' in rule_conf[direction]:
- raise ConfigError(f'Cannot specify both interface-group and interface-name for {direction}')
+ if 'name' in rule_conf[direction] and 'group' in rule_conf[direction]:
+ raise ConfigError(f'Cannot specify both interface group and interface name for {direction}')
def verify_nested_group(group_name, group, groups, seen):
if 'include' not in group:
@@ -374,12 +371,82 @@ def verify(firewall):
for rule_id, rule_conf in name_conf['rule'].items():
verify_rule(firewall, rule_conf, True)
+ #### ZONESSSS
+ local_zone = False
+ zone_interfaces = []
+
+ if 'zone' in firewall:
+ for zone, zone_conf in firewall['zone'].items():
+ if 'local_zone' not in zone_conf and 'interface' not in zone_conf:
+ raise ConfigError(f'Zone "{zone}" has no interfaces and is not the local zone')
+
+ if 'local_zone' in zone_conf:
+ if local_zone:
+ raise ConfigError('There cannot be multiple local zones')
+ if 'interface' in zone_conf:
+ raise ConfigError('Local zone cannot have interfaces assigned')
+ if 'intra_zone_filtering' in zone_conf:
+ raise ConfigError('Local zone cannot use intra-zone-filtering')
+ local_zone = True
+
+ if 'interface' in zone_conf:
+ found_duplicates = [intf for intf in zone_conf['interface'] if intf in zone_interfaces]
+
+ if found_duplicates:
+ raise ConfigError(f'Interfaces cannot be assigned to multiple zones')
+
+ zone_interfaces += zone_conf['interface']
+
+ if 'intra_zone_filtering' in zone_conf:
+ intra_zone = zone_conf['intra_zone_filtering']
+
+ if len(intra_zone) > 1:
+ raise ConfigError('Only one intra-zone-filtering action must be specified')
+
+ if 'firewall' in intra_zone:
+ v4_name = dict_search_args(intra_zone, 'firewall', 'name')
+ if v4_name and not dict_search_args(firewall, 'ipv4', 'name', v4_name):
+ raise ConfigError(f'Firewall name "{v4_name}" does not exist')
+
+ v6_name = dict_search_args(intra_zone, 'firewall', 'ipv6_name')
+ if v6_name and not dict_search_args(firewall, 'ipv6', 'name', v6_name):
+ raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist')
+
+ if not v4_name and not v6_name:
+ raise ConfigError('No firewall names specified for intra-zone-filtering')
+
+ if 'from' in zone_conf:
+ for from_zone, from_conf in zone_conf['from'].items():
+ if from_zone not in firewall['zone']:
+ raise ConfigError(f'Zone "{zone}" refers to a non-existent or deleted zone "{from_zone}"')
+
+ v4_name = dict_search_args(from_conf, 'firewall', 'name')
+ if v4_name and not dict_search_args(firewall, 'ipv4', 'name', v4_name):
+ raise ConfigError(f'Firewall name "{v4_name}" does not exist')
+
+ v6_name = dict_search_args(from_conf, 'firewall', 'ipv6_name')
+ if v6_name and not dict_search_args(firewall, 'ipv6', 'name', v6_name):
+ raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist')
+
return None
def generate(firewall):
if not os.path.exists(nftables_conf):
firewall['first_install'] = True
+ if 'zone' in firewall:
+ for local_zone, local_zone_conf in firewall['zone'].items():
+ if 'local_zone' not in local_zone_conf:
+ continue
+
+ local_zone_conf['from_local'] = {}
+
+ for zone, zone_conf in firewall['zone'].items():
+ if zone == local_zone or 'from' not in zone_conf:
+ continue
+ if local_zone in zone_conf['from']:
+ local_zone_conf['from_local'][zone] = zone_conf['from'][local_zone]
+
render(nftables_conf, 'firewall/nftables.j2', firewall)
return None
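For illustration, a standalone sketch of the from_local mapping that the new generate() code above builds for the local zone, assuming the zone dictionary shape used by the script:

def build_from_local(zones: dict) -> dict:
    # Collect, for the local zone, every other zone's 'from <local>' rule set,
    # so the template can render rules for locally generated traffic.
    from_local = {}
    local = next((z for z, c in zones.items() if 'local_zone' in c), None)
    if local is None:
        return from_local
    for zone, conf in zones.items():
        if zone == local or 'from' not in conf:
            continue
        if local in conf['from']:
            from_local[zone] = conf['from'][local]
    return from_local

# e.g. zones = {'LOC': {'local_zone': {}},
#               'LAN': {'interface': ['eth1'],
#                       'from': {'LOC': {'firewall': {'name': 'LOC-TO-LAN'}}}}}
# build_from_local(zones) == {'LAN': {'firewall': {'name': 'LOC-TO-LAN'}}}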
diff --git a/src/conf_mode/http-api.py b/src/conf_mode/http-api.py
deleted file mode 100755
index 793a90d88..000000000
--- a/src/conf_mode/http-api.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2019-2021 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import os
-import json
-
-from time import sleep
-from copy import deepcopy
-
-import vyos.defaults
-
-from vyos.config import Config
-from vyos.configdep import set_dependents, call_dependents
-from vyos.template import render
-from vyos.utils.process import call
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-api_conf_file = '/etc/vyos/http-api.conf'
-systemd_service = '/run/systemd/system/vyos-http-api.service'
-
-vyos_conf_scripts_dir=vyos.defaults.directories['conf_mode']
-
-def _translate_values_to_boolean(d: dict) -> dict:
- for k in list(d):
- if d[k] == {}:
- d[k] = True
- elif isinstance(d[k], dict):
- _translate_values_to_boolean(d[k])
- else:
- pass
-
-def get_config(config=None):
- http_api = deepcopy(vyos.defaults.api_data)
- x = http_api.get('api_keys')
- if x is None:
- default_key = None
- else:
- default_key = x[0]
- keys_added = False
-
- if config:
- conf = config
- else:
- conf = Config()
-
- # reset on creation/deletion of 'api' node
- https_base = ['service', 'https']
- if conf.exists(https_base):
- set_dependents("https", conf)
-
- base = ['service', 'https', 'api']
- if not conf.exists(base):
- return None
-
- api_dict = conf.get_config_dict(base, key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True,
- with_recursive_defaults=True)
-
- # One needs to 'flatten' the keys dict from the config into the
- # http-api.conf format for api_keys:
- if 'keys' in api_dict:
- api_dict['api_keys'] = []
- for el in list(api_dict['keys'].get('id', {})):
- key = api_dict['keys']['id'][el].get('key', '')
- if key:
- api_dict['api_keys'].append({'id': el, 'key': key})
- del api_dict['keys']
-
- # Do we run inside a VRF context?
- vrf_path = ['service', 'https', 'vrf']
- if conf.exists(vrf_path):
- http_api['vrf'] = conf.return_value(vrf_path)
-
- if 'api_keys' in api_dict:
- keys_added = True
-
- if api_dict.from_defaults(['graphql']):
- del api_dict['graphql']
-
- http_api.update(api_dict)
-
- if keys_added and default_key:
- if default_key in http_api['api_keys']:
- http_api['api_keys'].remove(default_key)
-
- # Finally, translate entries in http_api into boolean settings for
- # backwards compatability of JSON http-api.conf file
- _translate_values_to_boolean(http_api)
-
- return http_api
-
-def verify(http_api):
- return None
-
-def generate(http_api):
- if http_api is None:
- if os.path.exists(systemd_service):
- os.unlink(systemd_service)
- return None
-
- if not os.path.exists('/etc/vyos'):
- os.mkdir('/etc/vyos')
-
- with open(api_conf_file, 'w') as f:
- json.dump(http_api, f, indent=2)
-
- render(systemd_service, 'https/vyos-http-api.service.j2', http_api)
- return None
-
-def apply(http_api):
- # Reload systemd manager configuration
- call('systemctl daemon-reload')
- service_name = 'vyos-http-api.service'
-
- if http_api is not None:
- call(f'systemctl restart {service_name}')
- else:
- call(f'systemctl stop {service_name}')
-
- # Let uvicorn settle before restarting Nginx
- sleep(1)
-
- call_dependents()
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- sys.exit(1)
diff --git a/src/conf_mode/https.py b/src/conf_mode/https.py
deleted file mode 100755
index 010490c7e..000000000
--- a/src/conf_mode/https.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2019-2022 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import sys
-
-from copy import deepcopy
-
-import vyos.defaults
-import vyos.certbot_util
-
-from vyos.config import Config
-from vyos.configverify import verify_vrf
-from vyos import ConfigError
-from vyos.pki import wrap_certificate
-from vyos.pki import wrap_private_key
-from vyos.template import render
-from vyos.utils.process import call
-from vyos.utils.network import check_port_availability
-from vyos.utils.network import is_listen_port_bind_service
-from vyos.utils.file import write_file
-
-from vyos import airbag
-airbag.enable()
-
-config_file = '/etc/nginx/sites-available/default'
-systemd_override = r'/run/systemd/system/nginx.service.d/override.conf'
-cert_dir = '/etc/ssl/certs'
-key_dir = '/etc/ssl/private'
-certbot_dir = vyos.defaults.directories['certbot']
-
-# https config needs to coordinate several subsystems: api, certbot,
-# self-signed certificate, as well as the virtual hosts defined within the
-# https config definition itself. Consequently, one needs a general dict,
-# encompassing the https and other configs, and a list of such virtual hosts
-# (server blocks in nginx terminology) to pass to the jinja2 template.
-default_server_block = {
- 'id' : '',
- 'address' : '*',
- 'port' : '443',
- 'name' : ['_'],
- 'api' : {},
- 'vyos_cert' : {},
- 'certbot' : False
-}
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
-
- base = ['service', 'https']
- if not conf.exists(base):
- return None
-
- https = conf.get_config_dict(base, get_first_key=True)
-
- if https:
- https['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
-
- return https
-
-def verify(https):
- if https is None:
- return None
-
- if 'certificates' in https:
- certificates = https['certificates']
-
- if 'certificate' in certificates:
- if not https['pki']:
- raise ConfigError("PKI is not configured")
-
- cert_name = certificates['certificate']
-
- if cert_name not in https['pki']['certificate']:
- raise ConfigError("Invalid certificate on https configuration")
-
- pki_cert = https['pki']['certificate'][cert_name]
-
- if 'certificate' not in pki_cert:
- raise ConfigError("Missing certificate on https configuration")
-
- if 'private' not in pki_cert or 'key' not in pki_cert['private']:
- raise ConfigError("Missing certificate private key on https configuration")
-
- if 'certbot' in https['certificates']:
- vhost_names = []
- for vh, vh_conf in https.get('virtual-host', {}).items():
- vhost_names += vh_conf.get('server-name', [])
- domains = https['certificates']['certbot'].get('domain-name', [])
- domains_found = [domain for domain in domains if domain in vhost_names]
- if not domains_found:
- raise ConfigError("At least one 'virtual-host <id> server-name' "
- "matching the 'certbot domain-name' is required.")
-
- server_block_list = []
-
- # organize by vhosts
- vhost_dict = https.get('virtual-host', {})
-
- if not vhost_dict:
- # no specified virtual hosts (server blocks); use default
- server_block_list.append(default_server_block)
- else:
- for vhost in list(vhost_dict):
- server_block = deepcopy(default_server_block)
- data = vhost_dict.get(vhost, {})
- server_block['address'] = data.get('listen-address', '*')
- server_block['port'] = data.get('listen-port', '443')
- server_block_list.append(server_block)
-
- for entry in server_block_list:
- _address = entry.get('address')
- _address = '0.0.0.0' if _address == '*' else _address
- _port = entry.get('port')
- proto = 'tcp'
- if check_port_availability(_address, int(_port), proto) is not True and \
- not is_listen_port_bind_service(int(_port), 'nginx'):
- raise ConfigError(f'"{proto}" port "{_port}" is used by another service')
-
- verify_vrf(https)
- return None
-
-def generate(https):
- if https is None:
- return None
-
- server_block_list = []
-
- # organize by vhosts
-
- vhost_dict = https.get('virtual-host', {})
-
- if not vhost_dict:
- # no specified virtual hosts (server blocks); use default
- server_block_list.append(default_server_block)
- else:
- for vhost in list(vhost_dict):
- server_block = deepcopy(default_server_block)
- server_block['id'] = vhost
- data = vhost_dict.get(vhost, {})
- server_block['address'] = data.get('listen-address', '*')
- server_block['port'] = data.get('listen-port', '443')
- name = data.get('server-name', ['_'])
- server_block['name'] = name
- allow_client = data.get('allow-client', {})
- server_block['allow_client'] = allow_client.get('address', [])
- server_block_list.append(server_block)
-
- # get certificate data
-
- cert_dict = https.get('certificates', {})
-
- if 'certificate' in cert_dict:
- cert_name = cert_dict['certificate']
- pki_cert = https['pki']['certificate'][cert_name]
-
- cert_path = os.path.join(cert_dir, f'{cert_name}.pem')
- key_path = os.path.join(key_dir, f'{cert_name}.pem')
-
- server_cert = str(wrap_certificate(pki_cert['certificate']))
- if 'ca-certificate' in cert_dict:
- ca_cert = cert_dict['ca-certificate']
- server_cert += '\n' + str(wrap_certificate(https['pki']['ca'][ca_cert]['certificate']))
-
- write_file(cert_path, server_cert)
- write_file(key_path, wrap_private_key(pki_cert['private']['key']))
-
- vyos_cert_data = {
- 'crt': cert_path,
- 'key': key_path
- }
-
- for block in server_block_list:
- block['vyos_cert'] = vyos_cert_data
-
- # letsencrypt certificate using certbot
-
- certbot = False
- cert_domains = cert_dict.get('certbot', {}).get('domain-name', [])
- if cert_domains:
- certbot = True
- for domain in cert_domains:
- sub_list = vyos.certbot_util.choose_server_block(server_block_list,
- domain)
- if sub_list:
- for sb in sub_list:
- sb['certbot'] = True
- sb['certbot_dir'] = certbot_dir
- # certbot organizes certificates by first domain
- sb['certbot_domain_dir'] = cert_domains[0]
-
- # get api data
-
- api_set = False
- api_data = {}
- if 'api' in list(https):
- api_set = True
- api_data = vyos.defaults.api_data
- api_settings = https.get('api', {})
- if api_settings:
- port = api_settings.get('port', '')
- if port:
- api_data['port'] = port
- vhosts = https.get('api-restrict', {}).get('virtual-host', [])
- if vhosts:
- api_data['vhost'] = vhosts[:]
- if 'socket' in list(api_settings):
- api_data['socket'] = True
-
- if api_data:
- vhost_list = api_data.get('vhost', [])
- if not vhost_list:
- for block in server_block_list:
- block['api'] = api_data
- else:
- for block in server_block_list:
- if block['id'] in vhost_list:
- block['api'] = api_data
-
- if 'server_block_list' not in https or not https['server_block_list']:
- https['server_block_list'] = [default_server_block]
-
- data = {
- 'server_block_list': server_block_list,
- 'api_set': api_set,
- 'certbot': certbot
- }
-
- render(config_file, 'https/nginx.default.j2', data)
- render(systemd_override, 'https/override.conf.j2', https)
- return None
-
-def apply(https):
- # Reload systemd manager configuration
- call('systemctl daemon-reload')
- if https is not None:
- call('systemctl restart nginx.service')
- else:
- call('systemctl stop nginx.service')
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- sys.exit(1)
diff --git a/src/conf_mode/interfaces-ethernet.py b/src/conf_mode/interfaces-ethernet.py
deleted file mode 100755
index f3e65ad5e..000000000
--- a/src/conf_mode/interfaces-ethernet.py
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2019-2021 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from glob import glob
-from sys import exit
-
-from vyos.base import Warning
-from vyos.config import Config
-from vyos.configdict import get_interface_dict
-from vyos.configdict import is_node_changed
-from vyos.configverify import verify_address
-from vyos.configverify import verify_dhcpv6
-from vyos.configverify import verify_eapol
-from vyos.configverify import verify_interface_exists
-from vyos.configverify import verify_mirror_redirect
-from vyos.configverify import verify_mtu
-from vyos.configverify import verify_mtu_ipv6
-from vyos.configverify import verify_vlan_config
-from vyos.configverify import verify_vrf
-from vyos.configverify import verify_bond_bridge_member
-from vyos.ethtool import Ethtool
-from vyos.ifconfig import EthernetIf
-from vyos.pki import find_chain
-from vyos.pki import encode_certificate
-from vyos.pki import load_certificate
-from vyos.pki import wrap_private_key
-from vyos.template import render
-from vyos.utils.process import call
-from vyos.utils.dict import dict_search
-from vyos.utils.file import write_file
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-# XXX: wpa_supplicant works on the source interface
-cfg_dir = '/run/wpa_supplicant'
-wpa_suppl_conf = '/run/wpa_supplicant/{ifname}.conf'
-
-def get_config(config=None):
- """
- Retrive CLI config as dictionary. Dictionary can never be empty, as at least the
- interface name will be added or a deleted flag
- """
- if config:
- conf = config
- else:
- conf = Config()
-
- # This must be called prior to get_interface_dict(), as this function will
- # alter the config level (config.set_level())
- pki = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
-
- base = ['interfaces', 'ethernet']
- ifname, ethernet = get_interface_dict(conf, base)
-
- if 'deleted' not in ethernet:
- if pki: ethernet['pki'] = pki
-
- tmp = is_node_changed(conf, base + [ifname, 'speed'])
- if tmp: ethernet.update({'speed_duplex_changed': {}})
-
- tmp = is_node_changed(conf, base + [ifname, 'duplex'])
- if tmp: ethernet.update({'speed_duplex_changed': {}})
-
- return ethernet
-
-def verify(ethernet):
- if 'deleted' in ethernet:
- return None
-
- ifname = ethernet['ifname']
- verify_interface_exists(ifname)
- verify_mtu(ethernet)
- verify_mtu_ipv6(ethernet)
- verify_dhcpv6(ethernet)
- verify_address(ethernet)
- verify_vrf(ethernet)
- verify_bond_bridge_member(ethernet)
- verify_eapol(ethernet)
- verify_mirror_redirect(ethernet)
-
- ethtool = Ethtool(ifname)
- # No need to check speed and duplex keys as both have default values.
- if ((ethernet['speed'] == 'auto' and ethernet['duplex'] != 'auto') or
- (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')):
- raise ConfigError('Speed/Duplex missmatch. Must be both auto or manually configured')
-
- if ethernet['speed'] != 'auto' and ethernet['duplex'] != 'auto':
- # We need to verify if the requested speed and duplex setting is
- # supported by the underlaying NIC.
- speed = ethernet['speed']
- duplex = ethernet['duplex']
- if not ethtool.check_speed_duplex(speed, duplex):
- raise ConfigError(f'Adapter does not support changing speed and duplex '\
- f'settings to: {speed}/{duplex}!')
-
- if 'disable_flow_control' in ethernet:
- if not ethtool.check_flow_control():
- raise ConfigError('Adapter does not support changing flow-control settings!')
-
- if 'ring_buffer' in ethernet:
- max_rx = ethtool.get_ring_buffer_max('rx')
- if not max_rx:
- raise ConfigError('Driver does not support RX ring-buffer configuration!')
-
- max_tx = ethtool.get_ring_buffer_max('tx')
- if not max_tx:
- raise ConfigError('Driver does not support TX ring-buffer configuration!')
-
- rx = dict_search('ring_buffer.rx', ethernet)
- if rx and int(rx) > int(max_rx):
- raise ConfigError(f'Driver only supports a maximum RX ring-buffer '\
- f'size of "{max_rx}" bytes!')
-
- tx = dict_search('ring_buffer.tx', ethernet)
- if tx and int(tx) > int(max_tx):
- raise ConfigError(f'Driver only supports a maximum TX ring-buffer '\
- f'size of "{max_tx}" bytes!')
-
- # verify offloading capabilities
- if dict_search('offload.rps', ethernet) != None:
- if not os.path.exists(f'/sys/class/net/{ifname}/queues/rx-0/rps_cpus'):
- raise ConfigError('Interface does not suport RPS!')
-
- driver = ethtool.get_driver_name()
- # T3342 - Xen driver requires special treatment
- if driver == 'vif':
- if int(ethernet['mtu']) > 1500 and dict_search('offload.sg', ethernet) == None:
- raise ConfigError('Xen netback drivers requires scatter-gatter offloading '\
- 'for MTU size larger then 1500 bytes')
-
- if {'is_bond_member', 'mac'} <= set(ethernet):
- Warning(f'changing mac address "{mac}" will be ignored as "{ifname}" ' \
- f'is a member of bond "{is_bond_member}"'.format(**ethernet))
-
- # use common function to verify VLAN configuration
- verify_vlan_config(ethernet)
- return None
-
-def generate(ethernet):
- # render real configuration file once
- wpa_supplicant_conf = wpa_suppl_conf.format(**ethernet)
-
- if 'deleted' in ethernet:
- # delete configuration on interface removal
- if os.path.isfile(wpa_supplicant_conf):
- os.unlink(wpa_supplicant_conf)
- return None
-
- if 'eapol' in ethernet:
- ifname = ethernet['ifname']
-
- render(wpa_supplicant_conf, 'ethernet/wpa_supplicant.conf.j2', ethernet)
-
- cert_file_path = os.path.join(cfg_dir, f'{ifname}_cert.pem')
- cert_key_path = os.path.join(cfg_dir, f'{ifname}_cert.key')
-
- cert_name = ethernet['eapol']['certificate']
- pki_cert = ethernet['pki']['certificate'][cert_name]
-
- loaded_pki_cert = load_certificate(pki_cert['certificate'])
- loaded_ca_certs = {load_certificate(c['certificate'])
- for c in ethernet['pki']['ca'].values()} if 'ca' in ethernet['pki'] else {}
-
- cert_full_chain = find_chain(loaded_pki_cert, loaded_ca_certs)
-
- write_file(cert_file_path,
- '\n'.join(encode_certificate(c) for c in cert_full_chain))
- write_file(cert_key_path, wrap_private_key(pki_cert['private']['key']))
-
- if 'ca_certificate' in ethernet['eapol']:
- ca_cert_file_path = os.path.join(cfg_dir, f'{ifname}_ca.pem')
- ca_chains = []
-
- for ca_cert_name in ethernet['eapol']['ca_certificate']:
- pki_ca_cert = ethernet['pki']['ca'][ca_cert_name]
- loaded_ca_cert = load_certificate(pki_ca_cert['certificate'])
- ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs)
- ca_chains.append('\n'.join(encode_certificate(c) for c in ca_full_chain))
-
- write_file(ca_cert_file_path, '\n'.join(ca_chains))
-
- return None
-
-def apply(ethernet):
- ifname = ethernet['ifname']
- # take care about EAPoL supplicant daemon
- eapol_action='stop'
-
- e = EthernetIf(ifname)
- if 'deleted' in ethernet:
- # delete interface
- e.remove()
- else:
- e.update(ethernet)
- if 'eapol' in ethernet:
- eapol_action='reload-or-restart'
-
- call(f'systemctl {eapol_action} wpa_supplicant-wired@{ifname}')
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/conf_mode/interfaces-bonding.py b/src/conf_mode/interfaces_bonding.py
index 0bd306ed0..8184d8415 100755
--- a/src/conf_mode/interfaces-bonding.py
+++ b/src/conf_mode/interfaces_bonding.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2022 VyOS maintainers and contributors
+# Copyright (C) 2019-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -18,7 +18,6 @@ import os
from sys import exit
from netifaces import interfaces
-
from vyos.config import Config
from vyos.configdict import get_interface_dict
from vyos.configdict import is_node_changed
@@ -34,11 +33,16 @@ from vyos.configverify import verify_source_interface
from vyos.configverify import verify_vlan_config
from vyos.configverify import verify_vrf
from vyos.ifconfig import BondIf
+from vyos.ifconfig.ethernet import EthernetIf
from vyos.ifconfig import Section
+from vyos.template import render_to_string
from vyos.utils.dict import dict_search
+from vyos.utils.dict import dict_to_paths_values
from vyos.configdict import has_address_configured
from vyos.configdict import has_vrf_configured
+from vyos.configdep import set_dependents, call_dependents
from vyos import ConfigError
+from vyos import frr
from vyos import airbag
airbag.enable()
@@ -90,7 +94,6 @@ def get_config(config=None):
# determine which members have been removed
interfaces_removed = leaf_node_changed(conf, base + [ifname, 'member', 'interface'])
-
# Reset config level to interfaces
old_level = conf.get_level()
conf.set_level(['interfaces'])
@@ -102,6 +105,10 @@ def get_config(config=None):
tmp = {}
for interface in interfaces_removed:
+ # if member is deleted from bond, add dependencies to call
+ # ethernet commit again in apply function
+ # to apply options under ethernet section
+ set_dependents('ethernet', conf, interface)
section = Section.section(interface) # this will be 'ethernet' for 'eth0'
if conf.exists([section, interface, 'disable']):
tmp[interface] = {'disable': ''}
@@ -116,9 +123,21 @@ def get_config(config=None):
if dict_search('member.interface', bond):
for interface, interface_config in bond['member']['interface'].items():
+
+ interface_ethernet_config = conf.get_config_dict(
+ ['interfaces', 'ethernet', interface],
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_defaults=False,
+ with_recursive_defaults=False)
+
+ interface_config['config_paths'] = dict_to_paths_values(interface_ethernet_config)
+
# Check if member interface is a new member
if not conf.exists_effective(base + [ifname, 'member', 'interface', interface]):
bond['shutdown_required'] = {}
+ interface_config['new_added'] = {}
# Check if member interface is disabled
conf.set_level(['interfaces'])
@@ -151,7 +170,6 @@ def get_config(config=None):
# bond members must not have a VRF attached
tmp = has_vrf_configured(conf, interface)
if tmp: interface_config['has_vrf'] = {}
-
return bond
@@ -212,6 +230,14 @@ def verify(bond):
if 'has_vrf' in interface_config:
raise ConfigError(error_msg + 'it has a VRF assigned!')
+ if 'new_added' in interface_config and 'config_paths' in interface_config:
+ for option_path, option_value in interface_config['config_paths'].items():
+ if option_path in EthernetIf.get_bond_member_allowed_options() :
+ continue
+ if option_path in BondIf.get_inherit_bond_options():
+ continue
+ raise ConfigError(error_msg + f'it has a "{option_path.replace(".", " ")}" assigned!')
+
if 'primary' in bond:
if bond['primary'] not in bond['member']['interface']:
raise ConfigError(f'Primary interface of bond "{bond_name}" must be a member interface')
@@ -223,17 +249,38 @@ def verify(bond):
return None
def generate(bond):
+ bond['frr_zebra_config'] = ''
+ if 'deleted' not in bond:
+ bond['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', bond)
return None
def apply(bond):
- b = BondIf(bond['ifname'])
-
+ ifname = bond['ifname']
+ b = BondIf(ifname)
if 'deleted' in bond:
# delete interface
b.remove()
else:
b.update(bond)
+ if dict_search('member.interface_remove', bond):
+ try:
+ call_dependents()
+ except ConfigError:
+ raise ConfigError('Error in updating ethernet interface '
+ 'after deleting it from bond')
+
+ zebra_daemon = 'zebra'
+ # Save original configuration prior to starting any commit actions
+ frr_cfg = frr.FRRConfig()
+
+ # The route-map used for the FIB (zebra) is part of the zebra daemon
+ frr_cfg.load_configuration(zebra_daemon)
+ frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True)
+ if 'frr_zebra_config' in bond:
+ frr_cfg.add_before(frr.default_add_before, bond['frr_zebra_config'])
+ frr_cfg.commit_configuration(zebra_daemon)
+
return None
if __name__ == '__main__':
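For illustration, a standalone sketch of the new-member option check added to verify() above; the allowed/inherited sets are placeholders standing in for EthernetIf.get_bond_member_allowed_options() and BondIf.get_inherit_bond_options():

def check_new_member_options(config_paths: dict, allowed: set, inherited: set) -> None:
    # A newly added bond member may only carry options that are allowed on
    # members or inherited from the bond itself; anything else fails commit.
    for option_path in config_paths:
        if option_path in allowed or option_path in inherited:
            continue
        raise ValueError(f'"{option_path.replace(".", " ")}" must be removed '
                         'from the member interface first')

# e.g. (hypothetical option sets, not the real EthernetIf/BondIf lists)
# check_new_member_options({'offload.gro': 'on'}, allowed={'offload.gro'}, inherited={'mtu'})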
diff --git a/src/conf_mode/interfaces-bridge.py b/src/conf_mode/interfaces_bridge.py
index c82f01e53..29991e2da 100755
--- a/src/conf_mode/interfaces-bridge.py
+++ b/src/conf_mode/interfaces_bridge.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2020 VyOS maintainers and contributors
+# Copyright (C) 2019-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -28,7 +28,8 @@ from vyos.configverify import verify_vrf
from vyos.ifconfig import BridgeIf
from vyos.configdict import has_address_configured
from vyos.configdict import has_vrf_configured
-
+from vyos.configdep import set_dependents
+from vyos.configdep import call_dependents
from vyos.utils.dict import dict_search
from vyos import ConfigError
@@ -48,7 +49,7 @@ def get_config(config=None):
ifname, bridge = get_interface_dict(conf, base)
# determine which members have been removed
- tmp = node_changed(conf, base + [ifname, 'member', 'interface'], key_mangling=('-', '_'))
+ tmp = node_changed(conf, base + [ifname, 'member', 'interface'])
if tmp:
if 'member' in bridge:
bridge['member'].update({'interface_remove' : tmp })
@@ -83,6 +84,12 @@ def get_config(config=None):
if 'enable_vlan' in bridge and tmp:
bridge['member']['interface'][interface].update({'has_vlan' : ''})
+ # When using VXLAN member interfaces that are configured for Single
+ # VXLAN Device (SVD) we need to call the VXLAN conf-mode script to re-create
+ # VLAN to VNI mappings if required
+ if interface.startswith('vxlan'):
+ set_dependents('vxlan', conf, interface)
+
# delete empty dictionary keys - no need to run code paths if nothing is there to do
if 'member' in bridge:
if 'interface' in bridge['member'] and len(bridge['member']['interface']) == 0:
@@ -159,6 +166,13 @@ def apply(bridge):
else:
br.update(bridge)
+ for interface in dict_search('member.interface', bridge) or []:
+ if interface.startswith('vxlan'):
+ try:
+ call_dependents()
+ except ConfigError:
+ raise ConfigError('Error in updating VXLAN interface after changing bridge!')
+
return None
if __name__ == '__main__':
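For illustration, a standalone sketch of the member filter used in apply() above to decide which bridge members need their VXLAN conf-mode script re-run (Single VXLAN Device setups); the bridge dict shape is assumed from get_interface_dict():

def vxlan_members(bridge: dict) -> list:
    # Only VXLAN members need a dependent re-run to restore VLAN-to-VNI mappings
    members = bridge.get('member', {}).get('interface', {})
    return [intf for intf in members if intf.startswith('vxlan')]

# e.g. vxlan_members({'member': {'interface': {'eth1': {}, 'vxlan0': {}}}}) == ['vxlan0']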
diff --git a/src/conf_mode/interfaces-dummy.py b/src/conf_mode/interfaces_dummy.py
index db768b94d..db768b94d 100755
--- a/src/conf_mode/interfaces-dummy.py
+++ b/src/conf_mode/interfaces_dummy.py
diff --git a/src/conf_mode/interfaces_ethernet.py b/src/conf_mode/interfaces_ethernet.py
new file mode 100755
index 000000000..2c0f846c3
--- /dev/null
+++ b/src/conf_mode/interfaces_ethernet.py
@@ -0,0 +1,391 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2019-2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+
+from glob import glob
+from sys import exit
+
+from vyos.base import Warning
+from vyos.config import Config
+from vyos.configdict import get_interface_dict
+from vyos.configdict import is_node_changed
+from vyos.configverify import verify_address
+from vyos.configverify import verify_dhcpv6
+from vyos.configverify import verify_eapol
+from vyos.configverify import verify_interface_exists
+from vyos.configverify import verify_mirror_redirect
+from vyos.configverify import verify_mtu
+from vyos.configverify import verify_mtu_ipv6
+from vyos.configverify import verify_vlan_config
+from vyos.configverify import verify_vrf
+from vyos.configverify import verify_bond_bridge_member
+from vyos.ethtool import Ethtool
+from vyos.ifconfig import EthernetIf
+from vyos.ifconfig import BondIf
+from vyos.pki import find_chain
+from vyos.pki import encode_certificate
+from vyos.pki import load_certificate
+from vyos.pki import wrap_private_key
+from vyos.template import render
+from vyos.utils.process import call
+from vyos.utils.dict import dict_search
+from vyos.utils.dict import dict_to_paths_values
+from vyos.utils.dict import dict_set
+from vyos.utils.dict import dict_delete
+from vyos.utils.file import write_file
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+# XXX: wpa_supplicant works on the source interface
+cfg_dir = '/run/wpa_supplicant'
+wpa_suppl_conf = '/run/wpa_supplicant/{ifname}.conf'
+
+def update_bond_options(conf: Config, eth_conf: dict) -> list:
+ """
+ Return list of blocked options if interface is a bond member
+ :param conf: Config object
+ :type conf: Config
+ :param eth_conf: Ethernet config dictionary
+ :type eth_conf: dict
+ :return: List of blocked options
+ :rtype: list
+ """
+ blocked_list = []
+ bond_name = list(eth_conf['is_bond_member'].keys())[0]
+ config_without_defaults = conf.get_config_dict(
+ ['interfaces', 'ethernet', eth_conf['ifname']],
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_defaults=False,
+ with_recursive_defaults=False)
+ config_with_defaults = conf.get_config_dict(
+ ['interfaces', 'ethernet', eth_conf['ifname']],
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_defaults=True,
+ with_recursive_defaults=True)
+ bond_config_with_defaults = conf.get_config_dict(
+ ['interfaces', 'bonding', bond_name],
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_defaults=True,
+ with_recursive_defaults=True)
+ eth_dict_paths = dict_to_paths_values(config_without_defaults)
+ eth_path_base = ['interfaces', 'ethernet', eth_conf['ifname']]
+
+ #if option is configured under ethernet section
+ for option_path, option_value in eth_dict_paths.items():
+ bond_option_value = dict_search(option_path, bond_config_with_defaults)
+
+ #If option is allowed for changing then continue
+ if option_path in EthernetIf.get_bond_member_allowed_options():
+ continue
+ # if option is inherited from bond then set valued from bond interface
+ if option_path in BondIf.get_inherit_bond_options():
+ # If option equals to bond option then do nothing
+ if option_value == bond_option_value:
+ continue
+ else:
+ # if ethernet has option and bond interface has
+ # then copy it from bond
+ if bond_option_value is not None:
+ if is_node_changed(conf, eth_path_base + option_path.split('.')):
+ Warning(
+ f'Cannot apply "{option_path.replace(".", " ")}" to "{option_value}".' \
+ f' Interface "{eth_conf["ifname"]}" is a bond member.' \
+ f' Option is inherited from bond "{bond_name}"')
+ dict_set(option_path, bond_option_value, eth_conf)
+ continue
+ # if ethernet has option and bond interface does not have
+ # then delete it form dict and do not apply it
+ else:
+ if is_node_changed(conf, eth_path_base + option_path.split('.')):
+ Warning(
+ f'Cannot apply "{option_path.replace(".", " ")}".' \
+ f' Interface "{eth_conf["ifname"]}" is a bond member.' \
+ f' Option is inherited from bond "{bond_name}"')
+ dict_delete(option_path, eth_conf)
+ blocked_list.append(option_path)
+
+ # if inherited option is not configured under ethernet section but configured under bond section
+ for option_path in BondIf.get_inherit_bond_options():
+ bond_option_value = dict_search(option_path, bond_config_with_defaults)
+ if bond_option_value is not None:
+ if option_path not in eth_dict_paths:
+ if is_node_changed(conf, eth_path_base + option_path.split('.')):
+ Warning(
+ f'Cannot apply "{option_path.replace(".", " ")}" to "{dict_search(option_path, config_with_defaults)}".' \
+ f' Interface "{eth_conf["ifname"]}" is a bond member. ' \
+ f'Option is inherited from bond "{bond_name}"')
+ dict_set(option_path, bond_option_value, eth_conf)
+ eth_conf['bond_blocked_changes'] = blocked_list
+ return None
+
+def get_config(config=None):
+ """
+ Retrive CLI config as dictionary. Dictionary can never be empty, as at least the
+ interface name will be added or a deleted flag
+ """
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['interfaces', 'ethernet']
+ ifname, ethernet = get_interface_dict(conf, base, with_pki=True)
+
+ if 'is_bond_member' in ethernet:
+ update_bond_options(conf, ethernet)
+
+ tmp = is_node_changed(conf, base + [ifname, 'speed'])
+ if tmp: ethernet.update({'speed_duplex_changed': {}})
+
+ tmp = is_node_changed(conf, base + [ifname, 'duplex'])
+ if tmp: ethernet.update({'speed_duplex_changed': {}})
+
+ return ethernet
+
+def verify_speed_duplex(ethernet: dict, ethtool: Ethtool):
+ """
+ Verify speed and duplex
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ :param ethtool: Ethernet object
+ :type ethtool: Ethtool
+ """
+ if ((ethernet['speed'] == 'auto' and ethernet['duplex'] != 'auto') or
+ (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')):
+ raise ConfigError(
+ 'Speed/Duplex missmatch. Must be both auto or manually configured')
+
+ if ethernet['speed'] != 'auto' and ethernet['duplex'] != 'auto':
+ # We need to verify if the requested speed and duplex setting is
+ # supported by the underlaying NIC.
+ speed = ethernet['speed']
+ duplex = ethernet['duplex']
+ if not ethtool.check_speed_duplex(speed, duplex):
+ raise ConfigError(
+ f'Adapter does not support changing speed ' \
+ f'and duplex settings to: {speed}/{duplex}!')
+
+
+def verify_flow_control(ethernet: dict, ethtool: Ethtool):
+ """
+ Verify flow control
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ :param ethtool: Ethernet object
+ :type ethtool: Ethtool
+ """
+ if 'disable_flow_control' in ethernet:
+ if not ethtool.check_flow_control():
+ raise ConfigError(
+ 'Adapter does not support changing flow-control settings!')
+
+
+def verify_ring_buffer(ethernet: dict, ethtool: Ethtool):
+ """
+ Verify ring buffer
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ :param ethtool: Ethernet object
+ :type ethtool: Ethtool
+ """
+ if 'ring_buffer' in ethernet:
+ max_rx = ethtool.get_ring_buffer_max('rx')
+ if not max_rx:
+ raise ConfigError(
+ 'Driver does not support RX ring-buffer configuration!')
+
+ max_tx = ethtool.get_ring_buffer_max('tx')
+ if not max_tx:
+ raise ConfigError(
+ 'Driver does not support TX ring-buffer configuration!')
+
+ rx = dict_search('ring_buffer.rx', ethernet)
+ if rx and int(rx) > int(max_rx):
+ raise ConfigError(f'Driver only supports a maximum RX ring-buffer ' \
+ f'size of "{max_rx}" bytes!')
+
+ tx = dict_search('ring_buffer.tx', ethernet)
+ if tx and int(tx) > int(max_tx):
+ raise ConfigError(f'Driver only supports a maximum TX ring-buffer ' \
+ f'size of "{max_tx}" bytes!')
+
+
+def verify_offload(ethernet: dict, ethtool: Ethtool):
+ """
+ Verify offloading capabilities
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ :param ethtool: Ethernet object
+ :type ethtool: Ethtool
+ """
+ if dict_search('offload.rps', ethernet) != None:
+ if not os.path.exists(f'/sys/class/net/{ethernet["ifname"]}/queues/rx-0/rps_cpus'):
+ raise ConfigError('Interface does not support RPS!')
+ driver = ethtool.get_driver_name()
+ # T3342 - Xen driver requires special treatment
+ if driver == 'vif':
+ if int(ethernet['mtu']) > 1500 and dict_search('offload.sg', ethernet) == None:
+ raise ConfigError('Xen netback driver requires scatter-gather offloading '\
+ 'for MTU sizes larger than 1500 bytes')
+
+
+def verify_allowedbond_changes(ethernet: dict):
+ """
+ Verify changed options if interface is in bonding
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ """
+ if 'bond_blocked_changes' in ethernet:
+ for option in ethernet['bond_blocked_changes']:
+ raise ConfigError(f'Cannot configure "{option.replace(".", " ")}"' \
+ f' on interface "{ethernet["ifname"]}".' \
+ f' Interface is a bond member')
+
+
+def verify(ethernet):
+ if 'deleted' in ethernet:
+ return None
+ if 'is_bond_member' in ethernet:
+ verify_bond_member(ethernet)
+ else:
+ verify_ethernet(ethernet)
+
+
+def verify_bond_member(ethernet):
+ """
+ Verification function for ethernet interface which is in bonding
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ """
+ ifname = ethernet['ifname']
+ verify_interface_exists(ifname)
+ verify_eapol(ethernet)
+ verify_mirror_redirect(ethernet)
+ ethtool = Ethtool(ifname)
+ verify_speed_duplex(ethernet, ethtool)
+ verify_flow_control(ethernet, ethtool)
+ verify_ring_buffer(ethernet, ethtool)
+ verify_offload(ethernet, ethtool)
+ verify_allowedbond_changes(ethernet)
+
+def verify_ethernet(ethernet):
+ """
+ Verification function for simple ethernet interface
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ """
+ ifname = ethernet['ifname']
+ verify_interface_exists(ifname)
+ verify_mtu(ethernet)
+ verify_mtu_ipv6(ethernet)
+ verify_dhcpv6(ethernet)
+ verify_address(ethernet)
+ verify_vrf(ethernet)
+ verify_bond_bridge_member(ethernet)
+ verify_eapol(ethernet)
+ verify_mirror_redirect(ethernet)
+ ethtool = Ethtool(ifname)
+ # No need to check speed and duplex keys as both have default values.
+ verify_speed_duplex(ethernet, ethtool)
+ verify_flow_control(ethernet, ethtool)
+ verify_ring_buffer(ethernet, ethtool)
+ verify_offload(ethernet, ethtool)
+ # use common function to verify VLAN configuration
+ verify_vlan_config(ethernet)
+ return None
+
+
+def generate(ethernet):
+ # render real configuration file once
+ wpa_supplicant_conf = wpa_suppl_conf.format(**ethernet)
+
+ if 'deleted' in ethernet:
+ # delete configuration on interface removal
+ if os.path.isfile(wpa_supplicant_conf):
+ os.unlink(wpa_supplicant_conf)
+ return None
+
+ if 'eapol' in ethernet:
+ ifname = ethernet['ifname']
+
+ render(wpa_supplicant_conf, 'ethernet/wpa_supplicant.conf.j2', ethernet)
+
+ cert_file_path = os.path.join(cfg_dir, f'{ifname}_cert.pem')
+ cert_key_path = os.path.join(cfg_dir, f'{ifname}_cert.key')
+
+ cert_name = ethernet['eapol']['certificate']
+ pki_cert = ethernet['pki']['certificate'][cert_name]
+
+ loaded_pki_cert = load_certificate(pki_cert['certificate'])
+ loaded_ca_certs = {load_certificate(c['certificate'])
+ for c in ethernet['pki']['ca'].values()} if 'ca' in ethernet['pki'] else {}
+
+ cert_full_chain = find_chain(loaded_pki_cert, loaded_ca_certs)
+
+ write_file(cert_file_path,
+ '\n'.join(encode_certificate(c) for c in cert_full_chain))
+ write_file(cert_key_path, wrap_private_key(pki_cert['private']['key']))
+
+ if 'ca_certificate' in ethernet['eapol']:
+ ca_cert_file_path = os.path.join(cfg_dir, f'{ifname}_ca.pem')
+ ca_chains = []
+
+ for ca_cert_name in ethernet['eapol']['ca_certificate']:
+ pki_ca_cert = ethernet['pki']['ca'][ca_cert_name]
+ loaded_ca_cert = load_certificate(pki_ca_cert['certificate'])
+ ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs)
+ ca_chains.append(
+ '\n'.join(encode_certificate(c) for c in ca_full_chain))
+
+ write_file(ca_cert_file_path, '\n'.join(ca_chains))
+
+ return None
+
+def apply(ethernet):
+ ifname = ethernet['ifname']
+ # take care of the EAPoL supplicant daemon
+ eapol_action='stop'
+
+ e = EthernetIf(ifname)
+ if 'deleted' in ethernet:
+ # delete interface
+ e.remove()
+ else:
+ e.update(ethernet)
+ if 'eapol' in ethernet:
+ eapol_action='reload-or-restart'
+
+ call(f'systemctl {eapol_action} wpa_supplicant-wired@{ifname}')
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
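
For orientation, the bond-option inheritance performed by update_bond_options() above works on dotted option paths, copying values from the bond onto its member interface. Below is a minimal, self-contained sketch of that override semantics using plain dicts; the helpers and the option name are stand-ins for illustration only, not the real vyos.utils.dict API:

# Sketch only: mimics the dotted-path get/set used when a bond member
# inherits options from its parent bond interface.
def get_path(path: str, data: dict):
    for key in path.split('.'):
        if not isinstance(data, dict) or key not in data:
            return None
        data = data[key]
    return data

def set_path(path: str, value, data: dict) -> None:
    keys = path.split('.')
    for key in keys[:-1]:
        data = data.setdefault(key, {})
    data[keys[-1]] = value

bond = {'mtu': '9000'}                    # value configured on the bond
eth = {'ifname': 'eth1', 'mtu': '1500'}   # member has its own CLI value
for option in ['mtu']:                    # inherited options (example only)
    bond_value = get_path(option, bond)
    if bond_value is not None:
        set_path(option, bond_value, eth) # the bond value wins for members
assert eth['mtu'] == '9000'
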
diff --git a/src/conf_mode/interfaces-geneve.py b/src/conf_mode/interfaces_geneve.py
index f6694ddde..f6694ddde 100755
--- a/src/conf_mode/interfaces-geneve.py
+++ b/src/conf_mode/interfaces_geneve.py
diff --git a/src/conf_mode/interfaces-input.py b/src/conf_mode/interfaces_input.py
index ad248843d..ad248843d 100755
--- a/src/conf_mode/interfaces-input.py
+++ b/src/conf_mode/interfaces_input.py
diff --git a/src/conf_mode/interfaces-l2tpv3.py b/src/conf_mode/interfaces_l2tpv3.py
index e1db3206e..e1db3206e 100755
--- a/src/conf_mode/interfaces-l2tpv3.py
+++ b/src/conf_mode/interfaces_l2tpv3.py
diff --git a/src/conf_mode/interfaces-loopback.py b/src/conf_mode/interfaces_loopback.py
index 08d34477a..08d34477a 100755
--- a/src/conf_mode/interfaces-loopback.py
+++ b/src/conf_mode/interfaces_loopback.py
diff --git a/src/conf_mode/interfaces-macsec.py b/src/conf_mode/interfaces_macsec.py
index 0a927ac88..0a927ac88 100755
--- a/src/conf_mode/interfaces-macsec.py
+++ b/src/conf_mode/interfaces_macsec.py
diff --git a/src/conf_mode/interfaces-openvpn.py b/src/conf_mode/interfaces_openvpn.py
index bdeb44837..45569dd21 100755
--- a/src/conf_mode/interfaces-openvpn.py
+++ b/src/conf_mode/interfaces_openvpn.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2023 VyOS maintainers and contributors
+# Copyright (C) 2019-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -89,16 +89,12 @@ def get_config(config=None):
conf = Config()
base = ['interfaces', 'openvpn']
- ifname, openvpn = get_interface_dict(conf, base)
+ ifname, openvpn = get_interface_dict(conf, base, with_pki=True)
openvpn['auth_user_pass_file'] = '/run/openvpn/{ifname}.pw'.format(**openvpn)
if 'deleted' in openvpn:
return openvpn
- openvpn['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True,
- no_tag_node_value_mangle=True)
-
if is_node_changed(conf, base + [ifname, 'openvpn-option']):
openvpn.update({'restart_required': {}})
if is_node_changed(conf, base + [ifname, 'enable-dco']):
@@ -167,9 +163,10 @@ def verify_pki(openvpn):
raise ConfigError(f'Invalid shared-secret on openvpn interface {interface}')
# If PSK settings are correct, warn about its deprecation
- DeprecationWarning("OpenVPN shared-secret support will be removed in future VyOS versions.\n\
- Please migrate your site-to-site tunnels to TLS.\n\
- You can use self-signed certificates with peer fingerprint verification, consult the documentation for details.")
+ DeprecationWarning('OpenVPN shared-secret support will be removed in future '\
+ 'VyOS versions. Please migrate your site-to-site tunnels to '\
+ 'TLS. You can use self-signed certificates with peer fingerprint '\
+ 'verification, consult the documentation for details.')
if tls:
if (mode in ['server', 'client']) and ('ca_certificate' not in tls):
@@ -729,4 +726,3 @@ if __name__ == '__main__':
except ConfigError as e:
print(e)
exit(1)
-
diff --git a/src/conf_mode/interfaces-pppoe.py b/src/conf_mode/interfaces_pppoe.py
index fca91253c..42f084309 100755
--- a/src/conf_mode/interfaces-pppoe.py
+++ b/src/conf_mode/interfaces_pppoe.py
@@ -61,6 +61,12 @@ def get_config(config=None):
# bail out early - no need to further process other nodes
break
+ if 'deleted' not in pppoe:
+ # We always set the MRU value to the MTU size. This code path only re-creates
+ # the old behavior if MRU is not set on the CLI.
+ if 'mru' not in pppoe:
+ pppoe['mru'] = pppoe['mtu']
+
return pppoe
def verify(pppoe):
@@ -77,6 +83,11 @@ def verify(pppoe):
if {'connect_on_demand', 'vrf'} <= set(pppoe):
raise ConfigError('On-demand dialing and VRF can not be used at the same time')
+ # both MTU and MRU have default values, thus we do not need to check
+ # if the key exists
+ if int(pppoe['mru']) > int(pppoe['mtu']):
+ raise ConfigError('PPPoE MRU must not be larger than the MTU!')
+
return None
def generate(pppoe):
diff --git a/src/conf_mode/interfaces-pseudo-ethernet.py b/src/conf_mode/interfaces_pseudo-ethernet.py
index dce5c2358..dce5c2358 100755
--- a/src/conf_mode/interfaces-pseudo-ethernet.py
+++ b/src/conf_mode/interfaces_pseudo-ethernet.py
diff --git a/src/conf_mode/interfaces-sstpc.py b/src/conf_mode/interfaces_sstpc.py
index b588910dc..b9d7a74fb 100755
--- a/src/conf_mode/interfaces-sstpc.py
+++ b/src/conf_mode/interfaces_sstpc.py
@@ -45,7 +45,7 @@ def get_config(config=None):
else:
conf = Config()
base = ['interfaces', 'sstpc']
- ifname, sstpc = get_interface_dict(conf, base)
+ ifname, sstpc = get_interface_dict(conf, base, with_pki=True)
# We should only terminate the SSTP client session if critical parameters
# change. All parameters that can be changed on-the-fly (like interface
@@ -57,10 +57,6 @@ def get_config(config=None):
# bail out early - no need to further process other nodes
break
- # Load PKI certificates for later processing
- sstpc['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True,
- no_tag_node_value_mangle=True)
return sstpc
def verify(sstpc):
diff --git a/src/conf_mode/interfaces-tunnel.py b/src/conf_mode/interfaces_tunnel.py
index 91aed9cc3..efa5ebc64 100755
--- a/src/conf_mode/interfaces-tunnel.py
+++ b/src/conf_mode/interfaces_tunnel.py
@@ -24,7 +24,7 @@ from vyos.configdict import get_interface_dict
from vyos.configdict import is_node_changed
from vyos.configverify import verify_address
from vyos.configverify import verify_bridge_delete
-from vyos.configverify import verify_interface_exists
+from vyos.configverify import verify_source_interface
from vyos.configverify import verify_mtu_ipv6
from vyos.configverify import verify_mirror_redirect
from vyos.configverify import verify_vrf
@@ -166,7 +166,7 @@ def verify(tunnel):
verify_mirror_redirect(tunnel)
if 'source_interface' in tunnel:
- verify_interface_exists(tunnel['source_interface'])
+ verify_source_interface(tunnel)
# TTL != 0 and nopmtudisc are incompatible, parameters and ip use default
# values, thus the keys are always present.
diff --git a/src/conf_mode/interfaces-virtual-ethernet.py b/src/conf_mode/interfaces_virtual-ethernet.py
index 8efe89c41..8efe89c41 100755
--- a/src/conf_mode/interfaces-virtual-ethernet.py
+++ b/src/conf_mode/interfaces_virtual-ethernet.py
diff --git a/src/conf_mode/interfaces-vti.py b/src/conf_mode/interfaces_vti.py
index 9871810ae..9871810ae 100755
--- a/src/conf_mode/interfaces-vti.py
+++ b/src/conf_mode/interfaces_vti.py
diff --git a/src/conf_mode/interfaces-vxlan.py b/src/conf_mode/interfaces_vxlan.py
index 05f68112a..4251e611b 100755
--- a/src/conf_mode/interfaces-vxlan.py
+++ b/src/conf_mode/interfaces_vxlan.py
@@ -34,6 +34,7 @@ from vyos.configverify import verify_bond_bridge_member
from vyos.ifconfig import Interface
from vyos.ifconfig import VXLANIf
from vyos.template import is_ipv6
+from vyos.utils.dict import dict_search
from vyos import ConfigError
from vyos import airbag
airbag.enable()
@@ -53,14 +54,20 @@ def get_config(config=None):
# VXLAN interfaces are picky and require recreation if certain parameters
# change. But a VXLAN interface should - of course - not be re-created if
 # its description or IP address is adjusted. Feels somewhat logical, doesn't it?
- for cli_option in ['parameters', 'external', 'gpe', 'group', 'port', 'remote',
+ for cli_option in ['parameters', 'gpe', 'group', 'port', 'remote',
'source-address', 'source-interface', 'vni']:
if is_node_changed(conf, base + [ifname, cli_option]):
vxlan.update({'rebuild_required': {}})
break
+ # When dealing with VNI filtering we need to know what VNI was actually removed,
+ # so build up a dict matching the vlan_to_vni structure but with removed values.
tmp = node_changed(conf, base + [ifname, 'vlan-to-vni'], recursive=True)
- if tmp: vxlan.update({'vlan_to_vni_removed': tmp})
+ if tmp:
+ vxlan.update({'vlan_to_vni_removed': {}})
+ for vlan in tmp:
+ vni = leaf_node_changed(conf, base + [ifname, 'vlan-to-vni', vlan, 'vni'])
+ vxlan['vlan_to_vni_removed'].update({vlan : {'vni' : vni[0]}})
# We need to verify that no other VXLAN tunnel is configured when external
# mode is in use - Linux Kernel limitation
@@ -94,17 +101,34 @@ def verify(vxlan):
if not any(tmp in ['group', 'remote', 'source_address', 'source_interface'] for tmp in vxlan):
raise ConfigError('Group, remote, source-address or source-interface must be configured')
- if 'vni' not in vxlan and 'external' not in vxlan:
- raise ConfigError(
- 'Must either configure VXLAN "vni" or use "external" CLI option!')
-
- if {'external', 'vni'} <= set(vxlan):
- raise ConfigError('Can not specify both "external" and "VNI"!')
-
- if {'external', 'other_tunnels'} <= set(vxlan):
- other_tunnels = ', '.join(vxlan['other_tunnels'])
- raise ConfigError(f'Only one VXLAN tunnel is supported when "external" '\
- f'CLI option is used. Additional tunnels: {other_tunnels}')
+ if 'vni' not in vxlan and dict_search('parameters.external', vxlan) == None:
+ raise ConfigError('Must either configure VXLAN "vni" or use "external" CLI option!')
+
+ if dict_search('parameters.external', vxlan) != None:
+ if 'vni' in vxlan:
+ raise ConfigError('Can not specify both "external" and "VNI"!')
+
+ if 'other_tunnels' in vxlan:
+ # When multiple VXLAN interfaces are defined and "external" is used,
+ # all VXLAN interfaces need to have vni-filter enabled!
+ # See Linux Kernel commit f9c4bb0b245cee35ef66f75bf409c9573d934cf9
+ other_vni_filter = False
+ for tunnel, tunnel_config in vxlan['other_tunnels'].items():
+ if dict_search('parameters.vni_filter', tunnel_config) != None:
+ other_vni_filter = True
+ break
+ # does this VXLAN interface have the vni-filter option enabled?
+ vni_filter = dict_search('parameters.vni_filter', vxlan) is not None
+ # If either one is enabled, so must be the other. Both can be off and both can be on
+ if (vni_filter and not other_vni_filter) or (not vni_filter and other_vni_filter):
+ raise ConfigError(f'Using multiple VXLAN interfaces with "external" '\
+ 'requires all VXLAN interfaces to have "vni-filter" configured!')
+
+ if not vni_filter and not other_vni_filter:
+ other_tunnels = ', '.join(vxlan['other_tunnels'])
+ raise ConfigError(f'Only one VXLAN tunnel is supported when "external" '\
+ f'CLI option is used and "vni-filter" is unset. '\
+ f'Additional tunnels: {other_tunnels}')
if 'gpe' in vxlan and 'external' not in vxlan:
raise ConfigError(f'VXLAN-GPE is only supported when "external" '\
@@ -164,10 +188,22 @@ def verify(vxlan):
raise ConfigError(f'VNI "{vni}" is already assigned to a different VLAN!')
vnis_used.append(vni)
+ if dict_search('parameters.neighbor_suppress', vxlan) != None:
+ if 'is_bridge_member' not in vxlan:
+ raise ConfigError('Neighbor suppression requires that VXLAN interface '\
+ 'is member of a bridge interface!')
+
verify_mtu_ipv6(vxlan)
verify_address(vxlan)
verify_bond_bridge_member(vxlan)
verify_mirror_redirect(vxlan)
+
+ # We use a defaultValue for port, thus it's always safe to use
+ if vxlan['port'] == '8472':
+ Warning('Starting from VyOS 1.4, the default port for VXLAN '\
+ 'has been changed to 4789. This matches the IANA assigned '\
+ 'standard port number!')
+
return None
def generate(vxlan):
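
The vni-filter logic introduced above can be summarised outside of the full verify() flow. The dictionaries below only illustrate the shape of the data and are not taken from a real configuration:

# Sketch of the two vni-filter checks applied when "external" is set and
# other VXLAN tunnels exist (illustrative dict layout only).
def check_external(this_tunnel: dict, other_tunnels: dict) -> str:
    vni_filter = 'vni_filter' in this_tunnel.get('parameters', {})
    other_vni_filter = any('vni_filter' in cfg.get('parameters', {})
                           for cfg in other_tunnels.values())
    if vni_filter != other_vni_filter:
        return 'error: all external VXLAN interfaces need "vni-filter"'
    if not vni_filter and other_tunnels:
        return 'error: only one external VXLAN tunnel without "vni-filter"'
    return 'ok'

print(check_external({'parameters': {'vni_filter': {}}},
                     {'vxlan1': {'parameters': {'vni_filter': {}}}}))  # ok
print(check_external({'parameters': {}},
                     {'vxlan1': {'parameters': {}}}))                  # error
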
diff --git a/src/conf_mode/interfaces-wireguard.py b/src/conf_mode/interfaces_wireguard.py
index 122d9589a..79e5d3f44 100755
--- a/src/conf_mode/interfaces-wireguard.py
+++ b/src/conf_mode/interfaces_wireguard.py
@@ -51,17 +51,9 @@ def get_config(config=None):
tmp = is_node_changed(conf, base + [ifname, 'port'])
if tmp: wireguard['port_changed'] = {}
- # Determine which Wireguard peer has been removed.
- # Peers can only be removed with their public key!
- if 'peer' in wireguard:
- peer_remove = {}
- for peer, peer_config in wireguard['peer'].items():
- # T4702: If anything on a peer changes we remove the peer first and re-add it
- if is_node_changed(conf, base + [ifname, 'peer', peer]):
- if 'public_key' in peer_config:
- peer_remove = dict_merge({'peer_remove' : {peer : peer_config['public_key']}}, peer_remove)
- if peer_remove:
- wireguard.update(peer_remove)
+ # T4702: If anything on a peer changes we remove the peer first and re-add it
+ if is_node_changed(conf, base + [ifname, 'peer']):
+ wireguard.update({'rebuild_required': {}})
return wireguard
@@ -113,12 +105,21 @@ def verify(wireguard):
public_keys.append(peer['public_key'])
def apply(wireguard):
- tmp = WireGuardIf(wireguard['ifname'])
- if 'deleted' in wireguard:
- tmp.remove()
- return None
+ if 'rebuild_required' in wireguard or 'deleted' in wireguard:
+ wg = WireGuardIf(**wireguard)
+ # WireGuard only supports peer removal based on the configured public-key,
+ # by deleting the entire interface this is the shortcut instead of parsing
+ # out all peers and removing them one by one.
+ #
+ # Peer reconfiguration will always come with a short downtime while the
+ # WireGuard interface is recreated (see below)
+ wg.remove()
+
+ # Create the new interface if required
+ if 'deleted' not in wireguard:
+ wg = WireGuardIf(**wireguard)
+ wg.update(wireguard)
- tmp.update(wireguard)
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/interfaces-wireless.py b/src/conf_mode/interfaces_wireless.py
index 02b4a2500..02b4a2500 100755
--- a/src/conf_mode/interfaces-wireless.py
+++ b/src/conf_mode/interfaces_wireless.py
diff --git a/src/conf_mode/interfaces-wwan.py b/src/conf_mode/interfaces_wwan.py
index 2515dc838..2515dc838 100755
--- a/src/conf_mode/interfaces-wwan.py
+++ b/src/conf_mode/interfaces_wwan.py
diff --git a/src/conf_mode/le_cert.py b/src/conf_mode/le_cert.py
deleted file mode 100755
index 06c7e7b72..000000000
--- a/src/conf_mode/le_cert.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2019-2020 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import os
-
-import vyos.defaults
-from vyos.config import Config
-from vyos import ConfigError
-from vyos.utils.process import cmd
-from vyos.utils.process import call
-from vyos.utils.process import is_systemd_service_running
-
-from vyos import airbag
-airbag.enable()
-
-vyos_conf_scripts_dir = vyos.defaults.directories['conf_mode']
-vyos_certbot_dir = vyos.defaults.directories['certbot']
-
-dependencies = [
- 'https.py',
-]
-
-def request_certbot(cert):
- email = cert.get('email')
- if email is not None:
- email_flag = '-m {0}'.format(email)
- else:
- email_flag = ''
-
- domains = cert.get('domains')
- if domains is not None:
- domain_flag = '-d ' + ' -d '.join(domains)
- else:
- domain_flag = ''
-
- certbot_cmd = f'certbot certonly --config-dir {vyos_certbot_dir} -n --nginx --agree-tos --no-eff-email --expand {email_flag} {domain_flag}'
-
- cmd(certbot_cmd,
- raising=ConfigError,
- message="The certbot request failed for the specified domains.")
-
-def get_config():
- conf = Config()
- if not conf.exists('service https certificates certbot'):
- return None
- else:
- conf.set_level('service https certificates certbot')
-
- cert = {}
-
- if conf.exists('domain-name'):
- cert['domains'] = conf.return_values('domain-name')
-
- if conf.exists('email'):
- cert['email'] = conf.return_value('email')
-
- return cert
-
-def verify(cert):
- if cert is None:
- return None
-
- if 'domains' not in cert:
- raise ConfigError("At least one domain name is required to"
- " request a letsencrypt certificate.")
-
- if 'email' not in cert:
- raise ConfigError("An email address is required to request"
- " a letsencrypt certificate.")
-
-def generate(cert):
- if cert is None:
- return None
-
- # certbot will attempt to reload nginx, even with 'certonly';
- # start nginx if not active
- if not is_systemd_service_running('nginx.service'):
- call('systemctl start nginx.service')
-
- request_certbot(cert)
-
-def apply(cert):
- if cert is not None:
- call('systemctl restart certbot.timer')
- else:
- call('systemctl stop certbot.timer')
- return None
-
- for dep in dependencies:
- cmd(f'{vyos_conf_scripts_dir}/{dep}', raising=ConfigError)
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- sys.exit(1)
-
diff --git a/src/conf_mode/load-balancing-haproxy.py b/src/conf_mode/load-balancing_reverse-proxy.py
index 8fe429653..7338fe573 100755
--- a/src/conf_mode/load-balancing-haproxy.py
+++ b/src/conf_mode/load-balancing_reverse-proxy.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2023 VyOS maintainers and contributors
+# Copyright (C) 2023-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -43,17 +43,14 @@ def get_config(config=None):
conf = Config()
base = ['load-balancing', 'reverse-proxy']
+ if not conf.exists(base):
+ return None
lb = conf.get_config_dict(base,
get_first_key=True,
key_mangling=('-', '_'),
- no_tag_node_value_mangle=True)
-
- if lb:
- lb['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
-
- if lb:
- lb = conf.merge_defaults(lb, recursive=True)
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True,
+ with_pki=True)
return lb
@@ -94,8 +91,8 @@ def generate(lb):
if os.path.isfile(file):
os.unlink(file)
# Delete old directories
- #if os.path.isdir(load_balancing_dir):
- # rmtree(load_balancing_dir, ignore_errors=True)
+ if os.path.isdir(load_balancing_dir):
+ rmtree(load_balancing_dir, ignore_errors=True)
return None
@@ -106,26 +103,26 @@ def generate(lb):
# SSL Certificates for frontend
for front, front_config in lb['service'].items():
if 'ssl' in front_config:
- cert_file_path = os.path.join(load_balancing_dir, 'cert.pem')
- cert_key_path = os.path.join(load_balancing_dir, 'cert.pem.key')
- ca_cert_file_path = os.path.join(load_balancing_dir, 'ca.pem')
if 'certificate' in front_config['ssl']:
- #cert_file_path = os.path.join(load_balancing_dir, 'cert.pem')
- #cert_key_path = os.path.join(load_balancing_dir, 'cert.key')
- cert_name = front_config['ssl']['certificate']
- pki_cert = lb['pki']['certificate'][cert_name]
+ cert_names = front_config['ssl']['certificate']
+
+ for cert_name in cert_names:
+ pki_cert = lb['pki']['certificate'][cert_name]
+ cert_file_path = os.path.join(load_balancing_dir, f'{cert_name}.pem')
+ cert_key_path = os.path.join(load_balancing_dir, f'{cert_name}.pem.key')
- with open(cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_cert['certificate']))
+ with open(cert_file_path, 'w') as f:
+ f.write(wrap_certificate(pki_cert['certificate']))
- if 'private' in pki_cert and 'key' in pki_cert['private']:
- with open(cert_key_path, 'w') as f:
- f.write(wrap_private_key(pki_cert['private']['key']))
+ if 'private' in pki_cert and 'key' in pki_cert['private']:
+ with open(cert_key_path, 'w') as f:
+ f.write(wrap_private_key(pki_cert['private']['key']))
if 'ca_certificate' in front_config['ssl']:
ca_name = front_config['ssl']['ca_certificate']
pki_ca_cert = lb['pki']['ca'][ca_name]
+ ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
with open(ca_cert_file_path, 'w') as f:
f.write(wrap_certificate(pki_ca_cert['certificate']))
@@ -133,11 +130,11 @@ def generate(lb):
# SSL Certificates for backend
for back, back_config in lb['backend'].items():
if 'ssl' in back_config:
- ca_cert_file_path = os.path.join(load_balancing_dir, 'ca.pem')
if 'ca_certificate' in back_config['ssl']:
ca_name = back_config['ssl']['ca_certificate']
pki_ca_cert = lb['pki']['ca'][ca_name]
+ ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
with open(ca_cert_file_path, 'w') as f:
f.write(wrap_certificate(pki_ca_cert['certificate']))
diff --git a/src/conf_mode/load-balancing-wan.py b/src/conf_mode/load-balancing_wan.py
index 5da0b906b..5da0b906b 100755
--- a/src/conf_mode/load-balancing-wan.py
+++ b/src/conf_mode/load-balancing_wan.py
diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py
index 52a7a71fd..26822b755 100755
--- a/src/conf_mode/nat.py
+++ b/src/conf_mode/nat.py
@@ -69,6 +69,10 @@ def get_config(config=None):
nat['firewall_group'] = conf.get_config_dict(['firewall', 'group'], key_mangling=('-', '_'), get_first_key=True,
no_tag_node_value_mangle=True)
+ # Remove dynamic firewall groups if present:
+ if 'dynamic_group' in nat['firewall_group']:
+ del nat['firewall_group']['dynamic_group']
+
return nat
def verify_rule(config, err_msg, groups_dict):
@@ -80,15 +84,8 @@ def verify_rule(config, err_msg, groups_dict):
dict_search('source.port', config)):
if config['protocol'] not in ['tcp', 'udp', 'tcp_udp']:
- raise ConfigError(f'{err_msg}\n' \
- 'ports can only be specified when protocol is '\
- 'either tcp, udp or tcp_udp!')
-
- if is_ip_network(dict_search('translation.address', config)):
- raise ConfigError(f'{err_msg}\n' \
- 'Cannot use ports with an IPv4 network as translation address as it\n' \
- 'statically maps a whole network of addresses onto another\n' \
- 'network of addresses')
+ raise ConfigError(f'{err_msg} ports can only be specified when '\
+ 'protocol is either tcp, udp or tcp_udp!')
for side in ['destination', 'source']:
if side in config:
@@ -151,8 +148,11 @@ def verify(nat):
err_msg = f'Source NAT configuration error in rule {rule}:'
if 'outbound_interface' in config:
- if config['outbound_interface'] not in 'any' and config['outbound_interface'] not in interfaces():
- Warning(f'rule "{rule}" interface "{config["outbound_interface"]}" does not exist on this system')
+ if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']:
+ raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"')
+ elif 'name' in config['outbound_interface']:
+ if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces():
+ Warning(f'NAT interface "{config["outbound_interface"]["name"]}" for source NAT rule "{rule}" does not exist!')
if not dict_search('translation.address', config) and not dict_search('translation.port', config):
if 'exclude' not in config and 'backend' not in config['load_balance']:
@@ -172,8 +172,11 @@ def verify(nat):
err_msg = f'Destination NAT configuration error in rule {rule}:'
if 'inbound_interface' in config:
- if config['inbound_interface'] not in 'any' and config['inbound_interface'] not in interfaces():
- Warning(f'rule "{rule}" interface "{config["inbound_interface"]}" does not exist on this system')
+ if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']:
+ raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"')
+ elif 'name' in config['inbound_interface']:
+ if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces():
+ Warning(f'NAT interface "{config["inbound_interface"]["name"]}" for destination NAT rule "{rule}" does not exist!')
if not dict_search('translation.address', config) and not dict_search('translation.port', config) and 'redirect' not in config['translation']:
if 'exclude' not in config and 'backend' not in config['load_balance']:
@@ -187,8 +190,7 @@ def verify(nat):
err_msg = f'Static NAT configuration error in rule {rule}:'
if 'inbound_interface' not in config:
- raise ConfigError(f'{err_msg}\n' \
- 'inbound-interface not specified')
+ raise ConfigError(f'{err_msg} inbound-interface not specified')
# common rule verification
verify_rule(config, err_msg, nat['firewall_group'])
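
Since outbound-interface and inbound-interface are now dictionaries carrying either a name or a group, the reworked checks above branch on which key is present. A small hedged sketch of that shape follows; the rule contents are invented:

# Illustrative only: interface selection in a NAT rule is now either
# {'name': ...} or {'group': ...}; specifying both is a hard error.
def check_iface(rule: dict) -> str:
    iface = rule.get('outbound_interface', {})
    if 'name' in iface and 'group' in iface:
        return 'error: cannot use interface name and interface group together'
    if 'name' in iface and iface['name'] != 'any':
        return f'warn if "{iface["name"]}" does not exist on this system'
    return 'ok'

print(check_iface({'outbound_interface': {'name': 'eth0'}}))
print(check_iface({'outbound_interface': {'group': 'WAN'}}))
print(check_iface({'outbound_interface': {'name': 'eth0', 'group': 'WAN'}}))
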
diff --git a/src/conf_mode/nat64.py b/src/conf_mode/nat64.py
new file mode 100755
index 000000000..6026c61d0
--- /dev/null
+++ b/src/conf_mode/nat64.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# pylint: disable=empty-docstring,missing-module-docstring
+
+import csv
+import os
+import re
+
+from ipaddress import IPv6Network
+from json import dumps as json_write
+
+from vyos import ConfigError
+from vyos import airbag
+from vyos.config import Config
+from vyos.configdict import dict_merge
+from vyos.configdict import is_node_changed
+from vyos.utils.dict import dict_search
+from vyos.utils.file import write_file
+from vyos.utils.kernel import check_kmod
+from vyos.utils.process import cmd
+from vyos.utils.process import run
+
+airbag.enable()
+
+INSTANCE_REGEX = re.compile(r"instance-(\d+)")
+JOOL_CONFIG_DIR = "/run/jool"
+
+
+def get_config(config: Config | None = None) -> None:
+ if config is None:
+ config = Config()
+
+ base = ["nat64"]
+ nat64 = config.get_config_dict(base, key_mangling=("-", "_"), get_first_key=True)
+
+ base_src = base + ["source", "rule"]
+
+ # Load in existing instances so we can destroy any unknown
+ lines = cmd("jool instance display --csv").splitlines()
+ for _, instance, _ in csv.reader(lines):
+ match = INSTANCE_REGEX.fullmatch(instance)
+ if not match:
+ # FIXME: Instances that don't match should be ignored but WARN'ed to the user
+ continue
+ num = match.group(1)
+
+ rules = nat64.setdefault("source", {}).setdefault("rule", {})
+ # Mark it for deletion
+ if num not in rules:
+ rules[num] = {"deleted": True}
+ continue
+
+ # If the user changes the mode, recreate the instance else Jool fails with:
+ # Jool error: Sorry; you can't change an instance's framework for now.
+ if is_node_changed(config, base_src + [f"instance-{num}", "mode"]):
+ rules[num]["recreate"] = True
+
+ # If the user changes the pool6, recreate the instance else Jool fails with:
+ # Jool error: Sorry; you can't change a NAT64 instance's pool6 for now.
+ if dict_search("source.prefix", rules[num]) and is_node_changed(
+ config,
+ base_src + [num, "source", "prefix"],
+ ):
+ rules[num]["recreate"] = True
+
+ return nat64
+
+
+def verify(nat64) -> None:
+ if not nat64:
+ # no need to verify the CLI as nat64 is going to be deactivated
+ return
+
+ if dict_search("source.rule", nat64):
+ # Ensure only 1 netfilter instance per namespace
+ nf_rules = filter(
+ lambda i: "deleted" not in i and i.get('mode') == "netfilter",
+ nat64["source"]["rule"].values(),
+ )
+ next(nf_rules, None) # Discard the first element
+ if next(nf_rules, None) is not None:
+ raise ConfigError(
+ "Jool permits only 1 NAT64 netfilter instance (per network namespace)"
+ )
+
+ for rule, instance in nat64["source"]["rule"].items():
+ if "deleted" in instance:
+ continue
+
+ # Verify that source.prefix is set and is a /96
+ if not dict_search("source.prefix", instance):
+ raise ConfigError(f"Source NAT64 rule {rule} missing source prefix")
+ if IPv6Network(instance["source"]["prefix"]).prefixlen != 96:
+ raise ConfigError(f"Source NAT64 rule {rule} source prefix must be /96")
+
+ pools = dict_search("translation.pool", instance)
+ if pools:
+ for num, pool in pools.items():
+ if "address" not in pool:
+ raise ConfigError(
+ f"Source NAT64 rule {rule} translation pool "
+ f"{num} missing address/prefix"
+ )
+ if "port" not in pool:
+ raise ConfigError(
+ f"Source NAT64 rule {rule} translation pool "
+ f"{num} missing port(-range)"
+ )
+
+
+def generate(nat64) -> None:
+ os.makedirs(JOOL_CONFIG_DIR, exist_ok=True)
+
+ if dict_search("source.rule", nat64):
+ for rule, instance in nat64["source"]["rule"].items():
+ if "deleted" in instance:
+ # Delete the unused instance file
+ os.unlink(os.path.join(JOOL_CONFIG_DIR, f"instance-{rule}.json"))
+ continue
+
+ name = f"instance-{rule}"
+ config = {
+ "instance": name,
+ "framework": "netfilter",
+ "global": {
+ "pool6": instance["source"]["prefix"],
+ "manually-enabled": "disable" not in instance,
+ },
+ # "bib": [],
+ }
+
+ if "description" in instance:
+ config["comment"] = instance["description"]
+
+ if dict_search("translation.pool", instance):
+ pool4 = []
+ # mark
+ mark = ''
+ if dict_search("match.mark", instance):
+ mark = instance["match"]["mark"]
+
+ for pool in instance["translation"]["pool"].values():
+ if "disable" in pool:
+ continue
+
+ protos = pool.get("protocol", {}).keys() or ("tcp", "udp", "icmp")
+ for proto in protos:
+ obj = {
+ "protocol": proto.upper(),
+ "prefix": pool["address"],
+ "port range": pool["port"],
+ }
+ if mark:
+ obj["mark"] = int(mark)
+ if "description" in pool:
+ obj["comment"] = pool["description"]
+
+ pool4.append(obj)
+
+ if pool4:
+ config["pool4"] = pool4
+
+ write_file(f'{JOOL_CONFIG_DIR}/{name}.json', json_write(config, indent=2))
+
+
+def apply(nat64) -> None:
+ if not nat64:
+ return
+
+ if dict_search("source.rule", nat64):
+ # Deletions first to avoid conflicts
+ for rule, instance in nat64["source"]["rule"].items():
+ if not any(k in instance for k in ("deleted", "recreate")):
+ continue
+
+ ret = run(f"jool instance remove instance-{rule}")
+ if ret != 0:
+ raise ConfigError(
+ f"Failed to remove nat64 source rule {rule} (jool instance instance-{rule})"
+ )
+
+ # Now creations
+ for rule, instance in nat64["source"]["rule"].items():
+ if "deleted" in instance:
+ continue
+
+ name = f"instance-{rule}"
+ ret = run(f"jool -i {name} file handle {JOOL_CONFIG_DIR}/{name}.json")
+ if ret != 0:
+ raise ConfigError(f"Failed to set jool instance {name}")
+
+
+if __name__ == "__main__":
+ try:
+ check_kmod(["jool"])
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
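
To visualise what generate() above writes out, a hypothetical NAT64 source rule 10 with pool6 64:ff9b::/96 and a single translation pool would produce roughly the following /run/jool/instance-10.json; all values are examples:

# Roughly the structure serialised by json_write() for one instance;
# addresses, ports and the rule number are placeholders.
from json import dumps

example = {
    'instance': 'instance-10',
    'framework': 'netfilter',
    'global': {'pool6': '64:ff9b::/96', 'manually-enabled': True},
    'pool4': [
        {'protocol': 'TCP', 'prefix': '192.0.2.1/32', 'port range': '1024-65535'},
        {'protocol': 'UDP', 'prefix': '192.0.2.1/32', 'port range': '1024-65535'},
        {'protocol': 'ICMP', 'prefix': '192.0.2.1/32', 'port range': '1024-65535'},
    ],
}
print(dumps(example, indent=2))
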
diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py
index 46d796bc8..4c1ead258 100755
--- a/src/conf_mode/nat66.py
+++ b/src/conf_mode/nat66.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2020-2021 VyOS maintainers and contributors
+# Copyright (C) 2020-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -36,7 +36,6 @@ airbag.enable()
k_mod = ['nft_nat', 'nft_chain_nat']
nftables_nat66_config = '/run/nftables_nat66.nft'
-ndppd_config = '/run/ndppd/ndppd.conf'
def get_config(config=None):
if config:
@@ -62,11 +61,13 @@ def verify(nat):
if dict_search('source.rule', nat):
for rule, config in dict_search('source.rule', nat).items():
err_msg = f'Source NAT66 configuration error in rule {rule}:'
- if 'outbound_interface' not in config:
- raise ConfigError(f'{err_msg} outbound-interface not specified')
- if config['outbound_interface'] not in interfaces():
- raise ConfigError(f'rule "{rule}" interface "{config["outbound_interface"]}" does not exist on this system')
+ if 'outbound_interface' in config:
+ if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']:
+ raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"')
+ elif 'name' in config['outbound_interface']:
+ if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces():
+ Warning(f'NAT66 interface "{config["outbound_interface"]["name"]}" for source NAT66 rule "{rule}" does not exist!')
addr = dict_search('translation.address', config)
if addr != None:
@@ -85,12 +86,12 @@ def verify(nat):
for rule, config in dict_search('destination.rule', nat).items():
err_msg = f'Destination NAT66 configuration error in rule {rule}:'
- if 'inbound_interface' not in config:
- raise ConfigError(f'{err_msg}\n' \
- 'inbound-interface not specified')
- else:
- if config['inbound_interface'] not in 'any' and config['inbound_interface'] not in interfaces():
- Warning(f'rule "{rule}" interface "{config["inbound_interface"]}" does not exist on this system')
+ if 'inbound_interface' in config:
+ if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']:
+ raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"')
+ elif 'name' in config['inbound_interface']:
+ if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces():
+ Warning(f'NAT66 interface "{config["inbound_interface"]["name"]}" for destination NAT66 rule "{rule}" does not exist!')
return None
@@ -99,7 +100,6 @@ def generate(nat):
nat['first_install'] = True
render(nftables_nat66_config, 'firewall/nftables-nat66.j2', nat, permission=0o755)
- render(ndppd_config, 'ndppd/ndppd.conf.j2', nat, permission=0o755)
return None
def apply(nat):
@@ -107,14 +107,6 @@ def apply(nat):
return None
cmd(f'nft -f {nftables_nat66_config}')
-
- if 'deleted' in nat or not dict_search('source.rule', nat):
- cmd('systemctl stop ndppd')
- if os.path.isfile(ndppd_config):
- os.unlink(ndppd_config)
- else:
- cmd('systemctl restart ndppd')
-
call_dependents()
return None
diff --git a/src/conf_mode/netns.py b/src/conf_mode/netns.py
index 95ab83dbc..7cee33bc6 100755
--- a/src/conf_mode/netns.py
+++ b/src/conf_mode/netns.py
@@ -77,8 +77,8 @@ def verify(netns):
if 'netns_remove' in netns:
for name, config in netns['netns_remove'].items():
if 'interface' in config:
- raise ConfigError(f'Can not remove NETNS "{name}", it still has '\
- f'member interfaces!')
+ raise ConfigError(f'Can not remove network namespace "{name}", it '\
+ f'still has member interfaces!')
if 'name' in netns:
for name, config in netns['name'].items():
@@ -87,7 +87,6 @@ def verify(netns):
return None
-
def generate(netns):
if not netns:
return None
diff --git a/src/conf_mode/pki.py b/src/conf_mode/pki.py
index 34ba2fe69..4be40e99e 100755
--- a/src/conf_mode/pki.py
+++ b/src/conf_mode/pki.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2021 VyOS maintainers and contributors
+# Copyright (C) 2021-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -14,59 +14,66 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import os
+
+from sys import argv
from sys import exit
from vyos.config import Config
-from vyos.configdep import set_dependents, call_dependents
+from vyos.config import config_dict_merge
+from vyos.configdep import set_dependents
+from vyos.configdep import call_dependents
from vyos.configdict import node_changed
+from vyos.configdiff import Diff
+from vyos.defaults import directories
from vyos.pki import is_ca_certificate
from vyos.pki import load_certificate
from vyos.pki import load_public_key
from vyos.pki import load_private_key
from vyos.pki import load_crl
from vyos.pki import load_dh_parameters
+from vyos.utils.boot import boot_configuration_complete
+from vyos.utils.dict import dict_search
from vyos.utils.dict import dict_search_args
from vyos.utils.dict import dict_search_recursive
+from vyos.utils.process import call
+from vyos.utils.process import cmd
+from vyos.utils.process import is_systemd_service_active
from vyos import ConfigError
from vyos import airbag
airbag.enable()
-# keys to recursively search for under specified path, script to call if update required
+vyos_certbot_dir = directories['certbot']
+
+# keys to recursively search for under specified path
sync_search = [
{
'keys': ['certificate'],
'path': ['service', 'https'],
- 'script': '/usr/libexec/vyos/conf_mode/https.py'
},
{
'keys': ['certificate', 'ca_certificate'],
'path': ['interfaces', 'ethernet'],
- 'script': '/usr/libexec/vyos/conf_mode/interfaces-ethernet.py'
},
{
'keys': ['certificate', 'ca_certificate', 'dh_params', 'shared_secret_key', 'auth_key', 'crypt_key'],
'path': ['interfaces', 'openvpn'],
- 'script': '/usr/libexec/vyos/conf_mode/interfaces-openvpn.py'
},
{
'keys': ['ca_certificate'],
'path': ['interfaces', 'sstpc'],
- 'script': '/usr/libexec/vyos/conf_mode/interfaces-sstpc.py'
},
{
'keys': ['certificate', 'ca_certificate', 'local_key', 'remote_key'],
'path': ['vpn', 'ipsec'],
- 'script': '/usr/libexec/vyos/conf_mode/vpn_ipsec.py'
},
{
'keys': ['certificate', 'ca_certificate'],
'path': ['vpn', 'openconnect'],
- 'script': '/usr/libexec/vyos/conf_mode/vpn_openconnect.py'
},
{
'keys': ['certificate', 'ca_certificate'],
'path': ['vpn', 'sstp'],
- 'script': '/usr/libexec/vyos/conf_mode/vpn_sstp.py'
}
]
@@ -82,6 +89,33 @@ sync_translate = {
'crypt_key': 'openvpn'
}
+def certbot_delete(certificate):
+ if not boot_configuration_complete():
+ return
+ if os.path.exists(f'{vyos_certbot_dir}/renewal/{certificate}.conf'):
+ cmd(f'certbot delete --non-interactive --config-dir {vyos_certbot_dir} --cert-name {certificate}')
+
+def certbot_request(name: str, config: dict, dry_run: bool=True):
+ # We do not call certbot when booting the system - there is no need to do so and
+ # request new certificates during boot/image upgrade as the certbot configuration
+ # is stored persistent under /config - thus we do not open the door to transient
+ # errors
+ if not boot_configuration_complete():
+ return
+
+ domains = '--domains ' + ' --domains '.join(config['domain_name'])
+ tmp = f'certbot certonly --non-interactive --config-dir {vyos_certbot_dir} --cert-name {name} '\
+ f'--standalone --agree-tos --no-eff-email --expand --server {config["url"]} '\
+ f'--email {config["email"]} --key-type rsa --rsa-key-size {config["rsa_key_size"]} '\
+ f'{domains}'
+ if 'listen_address' in config:
+ tmp += f' --http-01-address {config["listen_address"]}'
+ # verify() does not need to actually request a cert but only test for plausibility
+ if dry_run:
+ tmp += ' --dry-run'
+
+ cmd(tmp, raising=ConfigError, message=f'ACME certbot request failed for "{name}"!')
+
def get_config(config=None):
if config:
conf = config
@@ -93,25 +127,60 @@ def get_config(config=None):
get_first_key=True,
no_tag_node_value_mangle=True)
- pki['changed'] = {}
- tmp = node_changed(conf, base + ['ca'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'ca' : tmp})
+ if len(argv) > 1 and argv[1] == 'certbot_renew':
+ pki['certbot_renew'] = {}
- tmp = node_changed(conf, base + ['certificate'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'certificate' : tmp})
+ tmp = node_changed(conf, base + ['ca'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'ca' : tmp})
- tmp = node_changed(conf, base + ['dh'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'dh' : tmp})
+ tmp = node_changed(conf, base + ['certificate'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'certificate' : tmp})
- tmp = node_changed(conf, base + ['key-pair'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'key_pair' : tmp})
+ tmp = node_changed(conf, base + ['dh'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'dh' : tmp})
- tmp = node_changed(conf, base + ['openvpn', 'shared-secret'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'openvpn' : tmp})
+ tmp = node_changed(conf, base + ['key-pair'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'key_pair' : tmp})
+
+ tmp = node_changed(conf, base + ['openvpn', 'shared-secret'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'openvpn' : tmp})
 # We only merge in the defaults if there is a configuration at all
if conf.exists(base):
- pki = conf.merge_defaults(pki, recursive=True)
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to update into the dictionary retrieved.
+ default_values = conf.get_config_defaults(**pki.kwargs, recursive=True)
+ # remove ACME default configuration if unused by CLI
+ if 'certificate' in pki:
+ for name, cert_config in pki['certificate'].items():
+ if 'acme' not in cert_config:
+ # Remove ACME default values
+ del default_values['certificate'][name]['acme']
+
+ # merge CLI and default dictionary
+ pki = config_dict_merge(default_values, pki)
+
+ # Certbot triggered an external renew of the certificates.
+ # Mark all ACME based certificates as "changed" to trigger
+ # update of dependent services
+ if 'certificate' in pki and 'certbot_renew' in pki:
+ renew = []
+ for name, cert_config in pki['certificate'].items():
+ if 'acme' in cert_config:
+ renew.append(name)
+ # If triggered externally by certbot, certificate key is not present in changed
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'certificate' : renew})
# We need to get the entire system configuration to verify that we are not
# deleting a certificate that is still referenced somewhere!
@@ -119,38 +188,34 @@ def get_config(config=None):
get_first_key=True,
no_tag_node_value_mangle=True)
- if 'changed' in pki:
- for search in sync_search:
- for key in search['keys']:
- changed_key = sync_translate[key]
-
- if changed_key not in pki['changed']:
- continue
-
- for item_name in pki['changed'][changed_key]:
- node_present = False
- if changed_key == 'openvpn':
- node_present = dict_search_args(pki, 'openvpn', 'shared_secret', item_name)
- else:
- node_present = dict_search_args(pki, changed_key, item_name)
-
- if node_present:
- search_dict = dict_search_args(pki['system'], *search['path'])
-
- if not search_dict:
- continue
-
- for found_name, found_path in dict_search_recursive(search_dict, key):
- if found_name == item_name:
- path = search['path']
- path_str = ' '.join(path + found_path)
- print(f'pki: Updating config: {path_str} {found_name}')
-
- if path[0] == 'interfaces':
- ifname = found_path[0]
- set_dependents(path[1], conf, ifname)
- else:
- set_dependents(path[1], conf)
+ for search in sync_search:
+ for key in search['keys']:
+ changed_key = sync_translate[key]
+ if 'changed' not in pki or changed_key not in pki['changed']:
+ continue
+
+ for item_name in pki['changed'][changed_key]:
+ node_present = False
+ if changed_key == 'openvpn':
+ node_present = dict_search_args(pki, 'openvpn', 'shared_secret', item_name)
+ else:
+ node_present = dict_search_args(pki, changed_key, item_name)
+
+ if node_present:
+ search_dict = dict_search_args(pki['system'], *search['path'])
+ if not search_dict:
+ continue
+ for found_name, found_path in dict_search_recursive(search_dict, key):
+ if found_name == item_name:
+ path = search['path']
+ path_str = ' '.join(path + found_path)
+ print(f'PKI: Updating config: {path_str} {found_name}')
+
+ if path[0] == 'interfaces':
+ ifname = found_path[0]
+ set_dependents(path[1], conf, ifname)
+ else:
+ set_dependents(path[1], conf)
return pki
@@ -223,6 +288,22 @@ def verify(pki):
if not is_valid_private_key(private['key'], protected):
raise ConfigError(f'Invalid private key on certificate "{name}"')
+ if 'acme' in cert_conf:
+ if 'domain_name' not in cert_conf['acme']:
+ raise ConfigError(f'At least one domain-name is required to request '\
+ f'certificate for "{name}" via ACME!')
+
+ if 'email' not in cert_conf['acme']:
+ raise ConfigError(f'An email address is required to request '\
+ f'certificate for "{name}" via ACME!')
+
+ if 'certbot_renew' not in pki:
+ # Only run the ACME command if something on this entity changed,
+ # as this is time intensive
+ tmp = dict_search('changed.certificate', pki)
+ if tmp != None and name in tmp:
+ certbot_request(name, cert_conf['acme'])
+
if 'dh' in pki:
for name, dh_conf in pki['dh'].items():
if 'parameters' in dh_conf:
@@ -283,12 +364,58 @@ def generate(pki):
if not pki:
return None
+ # Certbot renewal only needs to re-trigger the services to load up the
+ # new PEM file
+ if 'certbot_renew' in pki:
+ return None
+
+ certbot_list = []
+ certbot_list_on_disk = []
+ if os.path.exists(f'{vyos_certbot_dir}/live'):
+ certbot_list_on_disk = [f.path.split('/')[-1] for f in os.scandir(f'{vyos_certbot_dir}/live') if f.is_dir()]
+
+ if 'certificate' in pki:
+ changed_certificates = dict_search('changed.certificate', pki)
+ for name, cert_conf in pki['certificate'].items():
+ if 'acme' in cert_conf:
+ certbot_list.append(name)
+ # generate certificate if not found on disk
+ if name not in certbot_list_on_disk:
+ certbot_request(name, cert_conf['acme'], dry_run=False)
+ elif changed_certificates != None and name in changed_certificates:
+ # when something for the certificate changed, we should delete it
+ if name in certbot_list_on_disk:
+ certbot_delete(name)
+ certbot_request(name, cert_conf['acme'], dry_run=False)
+
+ # Cleanup certbot configuration and certificates if no longer in use by CLI
+ # Get foldernames under vyos_certbot_dir which each represent a certbot cert
+ if os.path.exists(f'{vyos_certbot_dir}/live'):
+ for cert in certbot_list_on_disk:
+ if cert not in certbot_list:
+ # certificate is no longer active on the CLI - remove it
+ certbot_delete(cert)
+
return None
def apply(pki):
+ systemd_certbot_name = 'certbot.timer'
if not pki:
+ call(f'systemctl stop {systemd_certbot_name}')
return None
+ has_certbot = False
+ if 'certificate' in pki:
+ for name, cert_conf in pki['certificate'].items():
+ if 'acme' in cert_conf:
+ has_certbot = True
+ break
+
+ if not has_certbot:
+ call(f'systemctl stop {systemd_certbot_name}')
+ elif has_certbot and not is_systemd_service_active(systemd_certbot_name):
+ call(f'systemctl restart {systemd_certbot_name}')
+
if 'changed' in pki:
call_dependents()
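
The ACME handling added above boils down to a certbot command line built in certbot_request(). For a hypothetical certificate "web" with one domain it looks approximately like this; the config-dir path and all ACME settings are placeholders:

# Approximation of the string passed to cmd() by certbot_request('web', ...);
# vyos_certbot_dir and the ACME values below are made-up examples.
vyos_certbot_dir = '<certbot config-dir>'
acme = {'domain_name': ['vyos.example.com'],
        'email': 'admin@example.com',
        'url': 'https://acme-v02.api.letsencrypt.org/directory',
        'rsa_key_size': '2048'}
domains = '--domains ' + ' --domains '.join(acme['domain_name'])
command = (f'certbot certonly --non-interactive --config-dir {vyos_certbot_dir} '
           f'--cert-name web --standalone --agree-tos --no-eff-email --expand '
           f'--server {acme["url"]} --email {acme["email"]} '
           f'--key-type rsa --rsa-key-size {acme["rsa_key_size"]} {domains} --dry-run')
print(command)
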
diff --git a/src/conf_mode/policy-local-route.py b/src/conf_mode/policy_local-route.py
index 2e8aabb80..91e4fce2c 100755
--- a/src/conf_mode/policy-local-route.py
+++ b/src/conf_mode/policy_local-route.py
@@ -52,19 +52,28 @@ def get_config(config=None):
if tmp:
for rule in (tmp or []):
src = leaf_node_changed(conf, base_rule + [rule, 'source', 'address'])
+ src_port = leaf_node_changed(conf, base_rule + [rule, 'source', 'port'])
fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark'])
iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface'])
dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address'])
+ dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port'])
+ table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table'])
proto = leaf_node_changed(conf, base_rule + [rule, 'protocol'])
rule_def = {}
if src:
rule_def = dict_merge({'source': {'address': src}}, rule_def)
+ if src_port:
+ rule_def = dict_merge({'source': {'port': src_port}}, rule_def)
if fwmk:
rule_def = dict_merge({'fwmark' : fwmk}, rule_def)
if iif:
rule_def = dict_merge({'inbound_interface' : iif}, rule_def)
if dst:
rule_def = dict_merge({'destination': {'address': dst}}, rule_def)
+ if dst_port:
+ rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def)
+ if table:
+ rule_def = dict_merge({'table' : table}, rule_def)
if proto:
rule_def = dict_merge({'protocol' : proto}, rule_def)
dict = dict_merge({dict_id : {rule : rule_def}}, dict)
@@ -79,9 +88,12 @@ def get_config(config=None):
if 'rule' in pbr[route]:
for rule, rule_config in pbr[route]['rule'].items():
src = leaf_node_changed(conf, base_rule + [rule, 'source', 'address'])
+ src_port = leaf_node_changed(conf, base_rule + [rule, 'source', 'port'])
fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark'])
iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface'])
dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address'])
+ dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port'])
+ table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table'])
proto = leaf_node_changed(conf, base_rule + [rule, 'protocol'])
# keep track of changes in configuration
# otherwise we might remove an existing node although nothing else has changed
@@ -105,14 +117,32 @@ def get_config(config=None):
if len(src) > 0:
rule_def = dict_merge({'source': {'address': src}}, rule_def)
+ # source port
+ if src_port is None:
+ if 'source' in rule_config:
+ if 'port' in rule_config['source']:
+ tmp = rule_config['source']['port']
+ if isinstance(tmp, str):
+ tmp = [tmp]
+ rule_def = dict_merge({'source': {'port': tmp}}, rule_def)
+ else:
+ changed = True
+ if len(src_port) > 0:
+ rule_def = dict_merge({'source': {'port': src_port}}, rule_def)
+
+ # fwmark
if fwmk is None:
if 'fwmark' in rule_config:
- rule_def = dict_merge({'fwmark': rule_config['fwmark']}, rule_def)
+ tmp = rule_config['fwmark']
+ if isinstance(tmp, str):
+ tmp = [tmp]
+ rule_def = dict_merge({'fwmark': tmp}, rule_def)
else:
changed = True
if len(fwmk) > 0:
rule_def = dict_merge({'fwmark' : fwmk}, rule_def)
+ # inbound-interface
if iif is None:
if 'inbound_interface' in rule_config:
rule_def = dict_merge({'inbound_interface': rule_config['inbound_interface']}, rule_def)
@@ -121,6 +151,7 @@ def get_config(config=None):
if len(iif) > 0:
rule_def = dict_merge({'inbound_interface' : iif}, rule_def)
+ # destination address
if dst is None:
if 'destination' in rule_config:
if 'address' in rule_config['destination']:
@@ -130,9 +161,35 @@ def get_config(config=None):
if len(dst) > 0:
rule_def = dict_merge({'destination': {'address': dst}}, rule_def)
+ # destination port
+ if dst_port is None:
+ if 'destination' in rule_config:
+ if 'port' in rule_config['destination']:
+ tmp = rule_config['destination']['port']
+ if isinstance(tmp, str):
+ tmp = [tmp]
+ rule_def = dict_merge({'destination': {'port': tmp}}, rule_def)
+ else:
+ changed = True
+ if len(dst_port) > 0:
+ rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def)
+
+ # table
+ if table is None:
+ if 'set' in rule_config and 'table' in rule_config['set']:
+ rule_def = dict_merge({'table': [rule_config['set']['table']]}, rule_def)
+ else:
+ changed = True
+ if len(table) > 0:
+ rule_def = dict_merge({'table' : table}, rule_def)
+
+ # protocol
if proto is None:
if 'protocol' in rule_config:
- rule_def = dict_merge({'protocol': rule_config['protocol']}, rule_def)
+ tmp = rule_config['protocol']
+ if isinstance(tmp, str):
+ tmp = [tmp]
+ rule_def = dict_merge({'protocol': tmp}, rule_def)
else:
changed = True
if len(proto) > 0:
@@ -192,19 +249,27 @@ def apply(pbr):
for rule, rule_config in pbr[rule_rm].items():
source = rule_config.get('source', {}).get('address', [''])
+ source_port = rule_config.get('source', {}).get('port', [''])
destination = rule_config.get('destination', {}).get('address', [''])
+ destination_port = rule_config.get('destination', {}).get('port', [''])
fwmark = rule_config.get('fwmark', [''])
inbound_interface = rule_config.get('inbound_interface', [''])
protocol = rule_config.get('protocol', [''])
+ table = rule_config.get('table', [''])
- for src, dst, fwmk, iif, proto in product(source, destination, fwmark, inbound_interface, protocol):
+ for src, dst, src_port, dst_port, fwmk, iif, proto, table in product(
+ source, destination, source_port, destination_port,
+ fwmark, inbound_interface, protocol, table):
f_src = '' if src == '' else f' from {src} '
+ f_src_port = '' if src_port == '' else f' sport {src_port} '
f_dst = '' if dst == '' else f' to {dst} '
+ f_dst_port = '' if dst_port == '' else f' dport {dst_port} '
f_fwmk = '' if fwmk == '' else f' fwmark {fwmk} '
f_iif = '' if iif == '' else f' iif {iif} '
f_proto = '' if proto == '' else f' ipproto {proto} '
+ f_table = '' if table == '' else f' lookup {table} '
- call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_fwmk}{f_iif}')
+ call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif}{f_table}')
# Generate new config
for route in ['local_route', 'local_route6']:
@@ -218,7 +283,9 @@ def apply(pbr):
for rule, rule_config in pbr_route['rule'].items():
table = rule_config['set'].get('table', '')
source = rule_config.get('source', {}).get('address', ['all'])
+ source_port = rule_config.get('source', {}).get('port', '')
destination = rule_config.get('destination', {}).get('address', ['all'])
+ destination_port = rule_config.get('destination', {}).get('port', '')
fwmark = rule_config.get('fwmark', '')
inbound_interface = rule_config.get('inbound_interface', '')
protocol = rule_config.get('protocol', '')
@@ -227,11 +294,13 @@ def apply(pbr):
f_src = f' from {src} ' if src else ''
for dst in destination:
f_dst = f' to {dst} ' if dst else ''
+ f_src_port = f' sport {source_port} ' if source_port else ''
+ f_dst_port = f' dport {destination_port} ' if destination_port else ''
f_fwmk = f' fwmark {fwmark} ' if fwmark else ''
f_iif = f' iif {inbound_interface} ' if inbound_interface else ''
f_proto = f' ipproto {protocol} ' if protocol else ''
- call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_fwmk}{f_iif} lookup {table}')
+ call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif} lookup {table}')
return None
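
# A minimal standalone sketch of how the deletion path above expands every
# stored attribute combination into one "ip rule del" command through
# itertools.product(). The values below are hypothetical; the real script
# reads them from the effective VyOS configuration and runs each command
# through call().
from itertools import product

def build_del_commands(prio, rule_config, v6=''):
    source = rule_config.get('source', {}).get('address', [''])
    source_port = rule_config.get('source', {}).get('port', [''])
    destination = rule_config.get('destination', {}).get('address', [''])
    destination_port = rule_config.get('destination', {}).get('port', [''])
    fwmark = rule_config.get('fwmark', [''])
    inbound_interface = rule_config.get('inbound_interface', [''])
    protocol = rule_config.get('protocol', [''])
    table = rule_config.get('table', [''])

    commands = []
    for src, dst, sport, dport, fwmk, iif, proto, tbl in product(
            source, destination, source_port, destination_port,
            fwmark, inbound_interface, protocol, table):
        f_src = f' from {src}' if src else ''
        f_dst = f' to {dst}' if dst else ''
        f_sport = f' sport {sport}' if sport else ''
        f_dport = f' dport {dport}' if dport else ''
        f_fwmk = f' fwmark {fwmk}' if fwmk else ''
        f_iif = f' iif {iif}' if iif else ''
        f_proto = f' ipproto {proto}' if proto else ''
        f_table = f' lookup {tbl}' if tbl else ''
        commands.append(f'ip{v6} rule del prio {prio}'
                        f'{f_src}{f_dst}{f_proto}{f_sport}{f_dport}'
                        f'{f_fwmk}{f_iif}{f_table}')
    return commands

# Example: a removed rule 100 with two source addresses and one table
# yields two delete commands.
print(build_del_commands('100', {
    'source': {'address': ['192.0.2.1/32', '192.0.2.2/32']},
    'table': ['10'],
}))
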
diff --git a/src/conf_mode/policy-route.py b/src/conf_mode/policy_route.py
index adad012de..6d7a06714 100755
--- a/src/conf_mode/policy-route.py
+++ b/src/conf_mode/policy_route.py
@@ -53,6 +53,10 @@ def get_config(config=None):
policy['firewall_group'] = conf.get_config_dict(['firewall', 'group'], key_mangling=('-', '_'), get_first_key=True,
no_tag_node_value_mangle=True)
+ # Remove dynamic firewall groups if present:
+ if 'dynamic_group' in policy['firewall_group']:
+ del policy['firewall_group']['dynamic_group']
+
return policy
def verify_rule(policy, name, rule_conf, ipv6, rule_id):
diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py
index dab784662..37421efb4 100755
--- a/src/conf_mode/protocols_bfd.py
+++ b/src/conf_mode/protocols_bfd.py
@@ -72,6 +72,9 @@ def verify(bfd):
if 'source' in peer_config and 'interface' in peer_config['source']:
raise ConfigError('BFD multihop and source interface cannot be used together')
+ if 'minimum_ttl' in peer_config and 'multihop' not in peer_config:
+ raise ConfigError('Minimum TTL is only available for multihop BFD sessions!')
+
if 'profile' in peer_config:
profile_name = peer_config['profile']
if 'profile' not in bfd or profile_name not in bfd['profile']:
diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py
index 00015023c..d90dfe45b 100755
--- a/src/conf_mode/protocols_bgp.py
+++ b/src/conf_mode/protocols_bgp.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2020-2023 VyOS maintainers and contributors
+# Copyright (C) 2020-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -30,6 +30,7 @@ from vyos.template import render_to_string
from vyos.utils.dict import dict_search
from vyos.utils.network import get_interface_vrf
from vyos.utils.network import is_addr_assigned
+from vyos.utils.process import process_named_running
from vyos import ConfigError
from vyos import frr
from vyos import airbag
@@ -49,8 +50,13 @@ def get_config(config=None):
# eqivalent of the C foo ? 'a' : 'b' statement
base = vrf and ['vrf', 'name', vrf, 'protocols', 'bgp'] or base_path
- bgp = conf.get_config_dict(base, key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
+ bgp = conf.get_config_dict(
+ base,
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True,
+ )
bgp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'],
key_mangling=('-', '_'),
@@ -93,6 +99,7 @@ def get_config(config=None):
tmp = conf.get_config_dict(['policy'])
# Merge policy dict into "regular" config dict
bgp = dict_merge(tmp, bgp)
+
return bgp
@@ -199,6 +206,10 @@ def verify_remote_as(peer_config, bgp_config):
if 'v6only' in peer_config['interface']:
if 'remote_as' in peer_config['interface']['v6only']:
return peer_config['interface']['v6only']['remote_as']
+ if 'peer_group' in peer_config['interface']['v6only']:
+ peer_group_name = peer_config['interface']['v6only']['peer_group']
+ tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config)
+ if tmp: return tmp
return None
@@ -209,9 +220,12 @@ def verify_afi(peer_config, bgp_config):
# If address_family configured under peer-group
# if neighbor interface configured
- peer_group_name = ''
+ peer_group_name = None
if dict_search('interface.peer_group', peer_config):
peer_group_name = peer_config['interface']['peer_group']
+ elif dict_search('interface.v6only.peer_group', peer_config):
+ peer_group_name = peer_config['interface']['v6only']['peer_group']
+
# if neighbor IP configured.
if 'peer_group' in peer_config:
peer_group_name = peer_config['peer_group']
@@ -246,6 +260,19 @@ def verify(bgp):
if 'system_as' not in bgp:
raise ConfigError('BGP system-as number must be defined!')
+ # Verify BMP
+ if 'bmp' in bgp:
+ # check bmp flag "bgpd -d -F traditional --daemon -A 127.0.0.1 -M rpki -M bmp"
+ if not process_named_running('bgpd', 'bmp'):
+ raise ConfigError(
+ f'"bmp" flag is not found in bgpd. Configure "set system frr bmp" and restart bgp process'
+ )
+ # check bmp target
+ if 'target' in bgp['bmp']:
+ for target, target_config in bgp['bmp']['target'].items():
+ if 'address' not in target_config:
+ raise ConfigError(f'BMP target "{target}" address must be defined!')
+
# Verify vrf on interface and bgp section
if 'interface' in bgp:
for interface in bgp['interface']:
@@ -482,6 +509,14 @@ def verify(bgp):
if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']):
raise ConfigError(
'Command "import vrf" conflicts with "route-target vpn both" command!')
+ if dict_search('route_target.vpn.export', afi_config):
+ raise ConfigError(
+ 'Command "route-target vpn export" conflicts '\
+ 'with "route-target vpn both" command!')
+ if dict_search('route_target.vpn.import', afi_config):
+ raise ConfigError(
+ 'Command "route-target vpn import" conflicts '\
+ 'with "route-target vpn both" command!')
if dict_search('route_target.vpn.import', afi_config):
if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']):
@@ -518,6 +553,10 @@ def verify(bgp):
tmp = dict_search(f'route_map.vpn.{export_import}', afi_config)
if tmp: verify_route_map(tmp, bgp)
+ # per-vrf sid and per-af sid are mutually exclusive
+ if 'sid' in afi_config and 'sid' in bgp:
+ raise ConfigError('SID per VRF and SID per address-family are mutually exclusive!')
+
# Checks only required for L2VPN EVPN
if afi in ['l2vpn_evpn']:
if 'vni' in afi_config:
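
# A small sketch of the remote-as resolution added above: when a neighbor
# interface is configured with "v6only" and only references a peer-group,
# the remote-as is looked up from that peer-group. A local helper stands in
# for vyos.utils.dict.dict_search; the sample data is made up.
def dict_search(path, data):
    # walk a dotted key path, returning None as soon as a key is missing
    for part in path.split('.'):
        if not isinstance(data, dict) or part not in data:
            return None
        data = data[part]
    return data

def resolve_remote_as(peer_config, bgp_config):
    v6only = dict_search('interface.v6only', peer_config) or {}
    if 'remote_as' in v6only:
        return v6only['remote_as']
    if 'peer_group' in v6only:
        return dict_search(f"peer_group.{v6only['peer_group']}.remote_as", bgp_config)
    return None

bgp = {'peer_group': {'FABRIC': {'remote_as': 'external'}}}
peer = {'interface': {'v6only': {'peer_group': 'FABRIC'}}}
assert resolve_remote_as(peer, bgp) == 'external'
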
diff --git a/src/conf_mode/igmp_proxy.py b/src/conf_mode/protocols_igmp-proxy.py
index 40db417dd..40db417dd 100755
--- a/src/conf_mode/igmp_proxy.py
+++ b/src/conf_mode/protocols_igmp-proxy.py
diff --git a/src/conf_mode/protocols_igmp.py b/src/conf_mode/protocols_igmp.py
deleted file mode 100755
index 435189025..000000000
--- a/src/conf_mode/protocols_igmp.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2020-2023 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ipaddress import IPv4Address
-from sys import exit
-
-from vyos import ConfigError
-from vyos.config import Config
-from vyos.utils.process import process_named_running
-from vyos.utils.process import call
-from vyos.template import render
-from signal import SIGTERM
-
-from vyos import airbag
-airbag.enable()
-
-# Required to use the full path to pimd, in another case daemon will not be started
-pimd_cmd = f'/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1'
-
-config_file = r'/tmp/igmp.frr'
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
- igmp_conf = {
- 'igmp_conf' : False,
- 'pim_conf' : False,
- 'igmp_proxy_conf' : False,
- 'old_ifaces' : {},
- 'ifaces' : {}
- }
- if not (conf.exists('protocols igmp') or conf.exists_effective('protocols igmp')):
- return None
-
- if conf.exists('protocols igmp-proxy'):
- igmp_conf['igmp_proxy_conf'] = True
-
- if conf.exists('protocols pim'):
- igmp_conf['pim_conf'] = True
-
- if conf.exists('protocols igmp'):
- igmp_conf['igmp_conf'] = True
-
- conf.set_level('protocols igmp')
-
- # # Get interfaces
- for iface in conf.list_effective_nodes('interface'):
- igmp_conf['old_ifaces'].update({
- iface : {
- 'version' : conf.return_effective_value('interface {0} version'.format(iface)),
- 'query_interval' : conf.return_effective_value('interface {0} query-interval'.format(iface)),
- 'query_max_resp_time' : conf.return_effective_value('interface {0} query-max-response-time'.format(iface)),
- 'gr_join' : {}
- }
- })
- for gr_join in conf.list_effective_nodes('interface {0} join'.format(iface)):
- igmp_conf['old_ifaces'][iface]['gr_join'][gr_join] = conf.return_effective_values('interface {0} join {1} source'.format(iface, gr_join))
-
- for iface in conf.list_nodes('interface'):
- igmp_conf['ifaces'].update({
- iface : {
- 'version' : conf.return_value('interface {0} version'.format(iface)),
- 'query_interval' : conf.return_value('interface {0} query-interval'.format(iface)),
- 'query_max_resp_time' : conf.return_value('interface {0} query-max-response-time'.format(iface)),
- 'gr_join' : {}
- }
- })
- for gr_join in conf.list_nodes('interface {0} join'.format(iface)):
- igmp_conf['ifaces'][iface]['gr_join'][gr_join] = conf.return_values('interface {0} join {1} source'.format(iface, gr_join))
-
- return igmp_conf
-
-def verify(igmp):
- if igmp is None:
- return None
-
- if igmp['igmp_conf']:
- # Check conflict with IGMP-Proxy
- if igmp['igmp_proxy_conf']:
- raise ConfigError(f"IGMP proxy and PIM cannot be both configured at the same time")
-
- # Check interfaces
- if not igmp['ifaces']:
- raise ConfigError(f"IGMP require defined interfaces!")
- # Check, is this multicast group
- for intfc in igmp['ifaces']:
- for gr_addr in igmp['ifaces'][intfc]['gr_join']:
- if not IPv4Address(gr_addr).is_multicast:
- raise ConfigError(gr_addr + " not a multicast group")
-
-def generate(igmp):
- if igmp is None:
- return None
-
- render(config_file, 'frr/igmp.frr.j2', igmp)
- return None
-
-def apply(igmp):
- if igmp is None:
- return None
-
- pim_pid = process_named_running('pimd')
- if igmp['igmp_conf'] or igmp['pim_conf']:
- if not pim_pid:
- call(pimd_cmd)
-
- if os.path.exists(config_file):
- call(f'vtysh -d pimd -f {config_file}')
- os.remove(config_file)
- elif pim_pid:
- os.kill(int(pim_pid), SIGTERM)
-
- return None
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py
index e00c58ee4..8d594bb68 100755
--- a/src/conf_mode/protocols_isis.py
+++ b/src/conf_mode/protocols_isis.py
@@ -48,7 +48,8 @@ def get_config(config=None):
# eqivalent of the C foo ? 'a' : 'b' statement
base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path
isis = conf.get_config_dict(base, key_mangling=('-', '_'),
- get_first_key=True)
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
# Assign the name of our VRF context. This MUST be done before the return
# statement below, else on deletion we will delete the default instance
@@ -219,6 +220,51 @@ def verify(isis):
if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']):
raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\
f'and no-php-flag configured at the same time.')
+
+ # Check for index ranges being larger than the segment routing global block
+ if dict_search('segment_routing.global_block', isis):
+ g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis)
+ g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis)
+ g_label_difference = int(g_high_label_value) - int(g_low_label_value)
+ if dict_search('segment_routing.prefix', isis):
+ for prefix, prefix_config in isis['segment_routing']['prefix'].items():
+ if 'index' in prefix_config:
+ index_size = isis['segment_routing']['prefix'][prefix]['index']['value']
+ if int(index_size) > int(g_label_difference):
+ raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\
+ f'index base size larger than the SRGB label base.')
+
+ # Check for LFA tiebreaker index duplication
+ if dict_search('fast_reroute.lfa.local.tiebreaker', isis):
+ comparison_dictionary = {}
+ for item, item_options in isis['fast_reroute']['lfa']['local']['tiebreaker'].items():
+ for index, index_options in item_options.items():
+ for index_value, index_value_options in index_options.items():
+ if index_value not in comparison_dictionary.keys():
+ comparison_dictionary[index_value] = [item]
+ else:
+ comparison_dictionary[index_value].append(item)
+ for index, index_length in comparison_dictionary.items():
+ if int(len(index_length)) > 1:
+ raise ConfigError(f'LFA index {index} cannot have more than one tiebreaker configured.')
+
+ # Check for LFA priority-limit configured multiple times per level
+ if dict_search('fast_reroute.lfa.local.priority_limit', isis):
+ comparison_dictionary = {}
+ for priority, priority_options in isis['fast_reroute']['lfa']['local']['priority_limit'].items():
+ for level, level_options in priority_options.items():
+ if level not in comparison_dictionary.keys():
+ comparison_dictionary[level] = [priority]
+ else:
+ comparison_dictionary[level].append(priority)
+ for level, level_length in comparison_dictionary.items():
+ if int(len(level_length)) > 1:
+ raise ConfigError(f'LFA priority-limit on {level.replace("_", "-")} cannot have more than one priority configured.')
+
+ # Check for LFA remote prefix list configured with more than one list
+ if dict_search('fast_reroute.lfa.remote.prefix_list', isis):
+ if int(len(isis['fast_reroute']['lfa']['remote']['prefix_list'].items())) > 1:
+ raise ConfigError(f'Only one LFA remote prefix-list can be configured.')
return None
@@ -265,4 +311,4 @@ if __name__ == '__main__':
apply(c)
except ConfigError as e:
print(e)
- exit(1)
+ exit(1)
\ No newline at end of file
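
# A standalone sketch of the new SRGB sanity check: a prefix SID index must
# fit inside the configured segment-routing global-block. The input values
# are hypothetical strings, shaped like the CLI dictionary used above.
def check_srgb_indexes(segment_routing):
    block = segment_routing.get('global_block', {})
    if not block:
        return
    label_range = int(block['high_label_value']) - int(block['low_label_value'])
    for prefix, prefix_config in segment_routing.get('prefix', {}).items():
        index = prefix_config.get('index', {}).get('value')
        if index is not None and int(index) > label_range:
            raise ValueError(f'prefix {prefix}: index {index} exceeds SRGB range {label_range}')

check_srgb_indexes({
    'global_block': {'low_label_value': '16000', 'high_label_value': '16999'},
    'prefix': {'192.0.2.0/32': {'index': {'value': '100'}}},  # OK: 100 <= 999
})
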
diff --git a/src/conf_mode/protocols_nhrp.py b/src/conf_mode/protocols_nhrp.py
index 5ec0bc9e5..c339c6391 100755
--- a/src/conf_mode/protocols_nhrp.py
+++ b/src/conf_mode/protocols_nhrp.py
@@ -37,7 +37,7 @@ def get_config(config=None):
nhrp = conf.get_config_dict(base, key_mangling=('-', '_'),
get_first_key=True, no_tag_node_value_mangle=True)
- nhrp['del_tunnels'] = node_changed(conf, base + ['tunnel'], key_mangling=('-', '_'))
+ nhrp['del_tunnels'] = node_changed(conf, base + ['tunnel'])
if not conf.exists(base):
return nhrp
diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py
index cddd3765e..34cf49286 100755
--- a/src/conf_mode/protocols_ospf.py
+++ b/src/conf_mode/protocols_ospf.py
@@ -14,8 +14,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
-
from sys import exit
from sys import argv
@@ -215,6 +213,19 @@ def verify(ospf):
raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\
f'and no-php-flag configured at the same time.')
+ # Check for index ranges being larger than the segment routing global block
+ if dict_search('segment_routing.global_block', ospf):
+ g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf)
+ g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf)
+ g_label_difference = int(g_high_label_value) - int(g_low_label_value)
+ if dict_search('segment_routing.prefix', ospf):
+ for prefix, prefix_config in ospf['segment_routing']['prefix'].items():
+ if 'index' in prefix_config:
+ index_size = ospf['segment_routing']['prefix'][prefix]['index']['value']
+ if int(index_size) > int(g_label_difference):
+ raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\
+ f'index base size larger than the SRGB label base.')
+
# Check route summarisation
if 'summary_address' in ospf:
for prefix, prefix_options in ospf['summary_address'].items():
diff --git a/src/conf_mode/protocols_pim.py b/src/conf_mode/protocols_pim.py
index 0aaa0d2c6..09c3be8df 100755
--- a/src/conf_mode/protocols_pim.py
+++ b/src/conf_mode/protocols_pim.py
@@ -16,144 +16,139 @@
import os
-from ipaddress import IPv4Address
+from ipaddress import IPv4Network
+from signal import SIGTERM
from sys import exit
from vyos.config import Config
-from vyos import ConfigError
+from vyos.config import config_dict_merge
+from vyos.configdict import node_changed
+from vyos.configverify import verify_interface_exists
from vyos.utils.process import process_named_running
from vyos.utils.process import call
-from vyos.template import render
-from signal import SIGTERM
-
+from vyos.template import render_to_string
+from vyos import ConfigError
+from vyos import frr
from vyos import airbag
airbag.enable()
-# Required to use the full path to pimd, in another case daemon will not be started
-pimd_cmd = f'/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1'
-
-config_file = r'/tmp/pimd.frr'
-
def get_config(config=None):
if config:
conf = config
else:
conf = Config()
- pim_conf = {
- 'pim_conf' : False,
- 'igmp_conf' : False,
- 'igmp_proxy_conf' : False,
- 'old_pim' : {
- 'ifaces' : {},
- 'rp' : {}
- },
- 'pim' : {
- 'ifaces' : {},
- 'rp' : {}
- }
- }
- if not (conf.exists('protocols pim') or conf.exists_effective('protocols pim')):
- return None
-
- if conf.exists('protocols igmp-proxy'):
- pim_conf['igmp_proxy_conf'] = True
-
- if conf.exists('protocols igmp'):
- pim_conf['igmp_conf'] = True
-
- if conf.exists('protocols pim'):
- pim_conf['pim_conf'] = True
-
- conf.set_level('protocols pim')
-
- # Get interfaces
- for iface in conf.list_effective_nodes('interface'):
- pim_conf['old_pim']['ifaces'].update({
- iface : {
- 'hello' : conf.return_effective_value('interface {0} hello'.format(iface)),
- 'dr_prio' : conf.return_effective_value('interface {0} dr-priority'.format(iface))
- }
- })
- for iface in conf.list_nodes('interface'):
- pim_conf['pim']['ifaces'].update({
- iface : {
- 'hello' : conf.return_value('interface {0} hello'.format(iface)),
- 'dr_prio' : conf.return_value('interface {0} dr-priority'.format(iface)),
- }
- })
-
- conf.set_level('protocols pim rp')
-
- # Get RPs addresses
- for rp_addr in conf.list_effective_nodes('address'):
- pim_conf['old_pim']['rp'][rp_addr] = conf.return_effective_values('address {0} group'.format(rp_addr))
-
- for rp_addr in conf.list_nodes('address'):
- pim_conf['pim']['rp'][rp_addr] = conf.return_values('address {0} group'.format(rp_addr))
-
- # Get RP keep-alive-timer
- if conf.exists_effective('rp keep-alive-timer'):
- pim_conf['old_pim']['rp_keep_alive'] = conf.return_effective_value('rp keep-alive-timer')
- if conf.exists('rp keep-alive-timer'):
- pim_conf['pim']['rp_keep_alive'] = conf.return_value('rp keep-alive-timer')
-
- return pim_conf
+ base = ['protocols', 'pim']
+
+ pim = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True, no_tag_node_value_mangle=True)
+
+ # We can not run both IGMP proxy and PIM at the same time - get IGMP
+ # proxy status
+ if conf.exists(['protocols', 'igmp-proxy']):
+ pim.update({'igmp_proxy_enabled' : {}})
+
+ # FRR has VRF support for different routing daemons. As interfaces belong
+ # to VRFs - or the global VRF, we need to check for changed interfaces so
+ # that they will be properly rendered for the FRR config. Also this eases
+ # removal of interfaces from the running configuration.
+ interfaces_removed = node_changed(conf, base + ['interface'])
+ if interfaces_removed:
+ pim['interface_removed'] = list(interfaces_removed)
+
+ # Bail out early if the configuration tree no longer exists. This must
+ # be done after retrieving the list of interfaces to be removed.
+ if not conf.exists(base):
+ pim.update({'deleted' : ''})
+ return pim
+
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to update into the dictionary retrieved.
+ default_values = conf.get_config_defaults(**pim.kwargs, recursive=True)
+
+ # We have to cleanup the default dict, as default values could enable features
+ # which are not explicitly enabled on the CLI. Example: default-information
+ # originate comes with a default metric-type of 2, which will enable the
+ # entire default-information originate tree, even when not set via CLI so we
+ # need to check this first and probably drop that key.
+ for interface in pim.get('interface', []):
+ # Only keep the IGMP defaults for an interface when IGMP has been
+ # explicitly enabled on that interface via the CLI - otherwise the
+ # defaults alone would activate IGMP unintentionally.
+ if 'igmp' not in pim['interface'][interface]:
+ del default_values['interface'][interface]['igmp']
+
+ pim = config_dict_merge(default_values, pim)
+ return pim
def verify(pim):
- if pim is None:
+ if not pim or 'deleted' in pim:
return None
- if pim['pim_conf']:
- # Check conflict with IGMP-Proxy
- if pim['igmp_proxy_conf']:
- raise ConfigError(f"IGMP proxy and PIM cannot be both configured at the same time")
-
- # Check interfaces
- if not pim['pim']['ifaces']:
- raise ConfigError(f"PIM require defined interfaces!")
+ if 'igmp_proxy_enabled' in pim:
+ raise ConfigError('IGMP proxy and PIM cannot be configured at the same time!')
- if not pim['pim']['rp']:
- raise ConfigError(f"RP address required")
+ if 'interface' not in pim:
+ raise ConfigError('PIM requires defined interfaces!')
- # Check unique multicast groups
- uniq_groups = []
- for rp_addr in pim['pim']['rp']:
- if not pim['pim']['rp'][rp_addr]:
- raise ConfigError(f"Group should be specified for RP " + rp_addr)
- for group in pim['pim']['rp'][rp_addr]:
- if (group in uniq_groups):
- raise ConfigError(f"Group range " + group + " specified cannot exact match another")
+ for interface in pim['interface']:
+ verify_interface_exists(interface)
- # Check, is this multicast group
- gr_addr = group.split('/')
- if IPv4Address(gr_addr[0]) < IPv4Address('224.0.0.0'):
- raise ConfigError(group + " not a multicast group")
+ if 'rp' in pim:
+ if 'address' not in pim['rp']:
+ raise ConfigError('PIM rendezvous point needs to be defined!')
- uniq_groups.extend(pim['pim']['rp'][rp_addr])
+ # Check unique multicast groups
+ unique = []
+ pim_base_error = 'PIM rendezvous point group'
+ for address, address_config in pim['rp']['address'].items():
+ if 'group' not in address_config:
+ raise ConfigError(f'{pim_base_error} should be defined for "{address}"!')
+
+ # Check if it is a multicast group
+ for gr_addr in address_config['group']:
+ if not IPv4Network(gr_addr).is_multicast:
+ raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!')
+ if gr_addr in unique:
+ raise ConfigError(f'{pim_base_error} must be unique!')
+ unique.append(gr_addr)
def generate(pim):
- if pim is None:
+ if not pim or 'deleted' in pim:
return None
-
- render(config_file, 'frr/pimd.frr.j2', pim)
+ pim['frr_pimd_config'] = render_to_string('frr/pimd.frr.j2', pim)
return None
def apply(pim):
- if pim is None:
+ pim_daemon = 'pimd'
+ pim_pid = process_named_running(pim_daemon)
+
+ if not pim or 'deleted' in pim:
+ if 'deleted' in pim:
+ os.kill(int(pim_pid), SIGTERM)
+
return None
- pim_pid = process_named_running('pimd')
- if pim['igmp_conf'] or pim['pim_conf']:
- if not pim_pid:
- call(pimd_cmd)
+ if not pim_pid:
+ call('/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1')
+
+ # Save original configuration prior to starting any commit actions
+ frr_cfg = frr.FRRConfig()
+
+ frr_cfg.load_configuration(pim_daemon)
+ frr_cfg.modify_section(f'^ip pim')
+ frr_cfg.modify_section(f'^ip igmp')
- if os.path.exists(config_file):
- call("vtysh -d pimd -f " + config_file)
- os.remove(config_file)
- elif pim_pid:
- os.kill(int(pim_pid), SIGTERM)
+ for key in ['interface', 'interface_removed']:
+ if key not in pim:
+ continue
+ for interface in pim[key]:
+ frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
+ if 'frr_pimd_config' in pim:
+ frr_cfg.add_before(frr.default_add_before, pim['frr_pimd_config'])
+ frr_cfg.commit_configuration(pim_daemon)
return None
if __name__ == '__main__':
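
# A minimal sketch of the rewritten RP verification: every rendezvous-point
# address needs at least one group, each group must be an IPv4 multicast
# network, and a group may not be claimed by more than one RP. The sample
# configuration is hypothetical.
from ipaddress import IPv4Network

def verify_rp(rp):
    unique = []
    for address, address_config in rp.get('address', {}).items():
        groups = address_config.get('group')
        if not groups:
            raise ValueError(f'RP "{address}" has no group defined')
        for group in groups:
            if not IPv4Network(group).is_multicast:
                raise ValueError(f'"{group}" is not a multicast group')
            if group in unique:
                raise ValueError(f'group "{group}" assigned to more than one RP')
            unique.append(group)

verify_rp({'address': {'203.0.113.1': {'group': ['224.1.0.0/24']}}})
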
diff --git a/src/conf_mode/protocols_pim6.py b/src/conf_mode/protocols_pim6.py
index 6a1235ba5..2003a1014 100755
--- a/src/conf_mode/protocols_pim6.py
+++ b/src/conf_mode/protocols_pim6.py
@@ -15,18 +15,19 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ipaddress import IPv6Address
+from ipaddress import IPv6Network
from sys import exit
-from typing import Optional
-from vyos import ConfigError, airbag, frr
-from vyos.config import Config, ConfigDict
+from vyos.config import Config
+from vyos.config import config_dict_merge
from vyos.configdict import node_changed
from vyos.configverify import verify_interface_exists
from vyos.template import render_to_string
-
+from vyos import ConfigError
+from vyos import frr
+from vyos import airbag
airbag.enable()
-
def get_config(config=None):
if config:
conf = config
@@ -44,11 +45,21 @@ def get_config(config=None):
if interfaces_removed:
pim6['interface_removed'] = list(interfaces_removed)
- return pim6
+ # Bail out early if the configuration tree no longer exists. This must
+ # be done after retrieving the list of interfaces to be removed.
+ if not conf.exists(base):
+ pim6.update({'deleted' : ''})
+ return pim6
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to update into the dictionary retrieved.
+ default_values = conf.get_config_defaults(**pim6.kwargs, recursive=True)
+
+ pim6 = config_dict_merge(default_values, pim6)
+ return pim6
def verify(pim6):
- if pim6 is None:
+ if not pim6 or 'deleted' in pim6:
return
for interface, interface_config in pim6.get('interface', {}).items():
@@ -60,13 +71,34 @@ def verify(pim6):
if not IPv6Address(group).is_multicast:
raise ConfigError(f"{group} is not a multicast group")
+ if 'rp' in pim6:
+ if 'address' not in pim6['rp']:
+ raise ConfigError('PIM6 rendezvous point needs to be defined!')
+
+ # Check unique multicast groups
+ unique = []
+ pim_base_error = 'PIM6 rendezvous point group'
+
+ if {'address', 'prefix-list6'} <= set(pim6['rp']):
+ raise ConfigError(f'{pim_base_error} supports either address or a prefix-list!')
+
+ for address, address_config in pim6['rp']['address'].items():
+ if 'group' not in address_config:
+ raise ConfigError(f'{pim_base_error} should be defined for "{address}"!')
+
+ # Check if it is a multicast group
+ for gr_addr in address_config['group']:
+ if not IPv6Network(gr_addr).is_multicast:
+ raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!')
+ if gr_addr in unique:
+ raise ConfigError(f'{pim_base_error} must be unique!')
+ unique.append(gr_addr)
def generate(pim6):
- if pim6 is None:
+ if not pim6 or 'deleted' in pim6:
return
-
pim6['new_frr_config'] = render_to_string('frr/pim6d.frr.j2', pim6)
-
+ return None
def apply(pim6):
if pim6 is None:
@@ -83,13 +115,12 @@ def apply(pim6):
if key not in pim6:
continue
for interface in pim6[key]:
- frr_cfg.modify_section(
- f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
+ frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
if 'new_frr_config' in pim6:
frr_cfg.add_before(frr.default_add_before, pim6['new_frr_config'])
frr_cfg.commit_configuration(pim6_daemon)
-
+ return None
if __name__ == '__main__':
try:
diff --git a/src/conf_mode/protocols_rpki.py b/src/conf_mode/protocols_rpki.py
index 05e876f3b..0fc14e868 100755
--- a/src/conf_mode/protocols_rpki.py
+++ b/src/conf_mode/protocols_rpki.py
@@ -63,11 +63,11 @@ def verify(rpki):
preferences.append(preference)
if 'ssh' in peer_config:
- files = ['private_key_file', 'public_key_file', 'known_hosts_file']
+ files = ['private_key_file', 'public_key_file']
for file in files:
if file not in peer_config['ssh']:
- raise ConfigError('RPKI+SSH requires username, public/private ' \
- 'keys and known-hosts file to be defined!')
+ raise ConfigError('RPKI+SSH requires username and public/private ' \
+ 'key file to be defined!')
filename = peer_config['ssh'][file]
if not os.path.exists(filename):
diff --git a/src/conf_mode/protocols_segment-routing.py b/src/conf_mode/protocols_segment-routing.py
new file mode 100755
index 000000000..d865c2ac0
--- /dev/null
+++ b/src/conf_mode/protocols_segment-routing.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+
+from vyos.config import Config
+from vyos.configdict import node_changed
+from vyos.template import render_to_string
+from vyos.utils.dict import dict_search
+from vyos.utils.system import sysctl_write
+from vyos import ConfigError
+from vyos import frr
+from vyos import airbag
+airbag.enable()
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['protocols', 'segment-routing']
+ sr = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True)
+
+ # FRR has VRF support for different routing daemons. As interfaces belong
+ # to VRFs - or the global VRF, we need to check for changed interfaces so
+ # that they will be properly rendered for the FRR config. Also this eases
+ # removal of interfaces from the running configuration.
+ interfaces_removed = node_changed(conf, base + ['interface'])
+ if interfaces_removed:
+ sr['interface_removed'] = list(interfaces_removed)
+
+ import pprint
+ pprint.pprint(sr)
+ return sr
+
+def verify(sr):
+ if 'srv6' in sr:
+ srv6_enable = False
+ if 'interface' in sr:
+ for interface, interface_config in sr['interface'].items():
+ if 'srv6' in interface_config:
+ srv6_enable = True
+ break
+ if not srv6_enable:
+ raise ConfigError('SRv6 should be enabled on at least one interface!')
+ return None
+
+def generate(sr):
+ if not sr:
+ return None
+
+ sr['new_frr_config'] = render_to_string('frr/zebra.segment_routing.frr.j2', sr)
+ return None
+
+def apply(sr):
+ zebra_daemon = 'zebra'
+
+ if 'interface_removed' in sr:
+ for interface in sr['interface_removed']:
+ # Disable processing of IPv6-SR packets
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0')
+
+ if 'interface' in sr:
+ for interface, interface_config in sr['interface'].items():
+ # Accept or drop SR-enabled IPv6 packets on this interface
+ if 'srv6' in interface_config:
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '1')
+ # Define HMAC policy for ingress SR-enabled packets on this interface
+ # It's a redundant check as HMAC has a default value - but better safe
+ # than sorry
+ tmp = dict_search('srv6.hmac', interface_config)
+ if tmp == 'accept':
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '0')
+ elif tmp == 'drop':
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '1')
+ elif tmp == 'ignore':
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '-1')
+ else:
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0')
+
+ # Save original configuration prior to starting any commit actions
+ frr_cfg = frr.FRRConfig()
+ frr_cfg.load_configuration(zebra_daemon)
+ frr_cfg.modify_section(r'^segment-routing')
+ if 'new_frr_config' in sr:
+ frr_cfg.add_before(frr.default_add_before, sr['new_frr_config'])
+ frr_cfg.commit_configuration(zebra_daemon)
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
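
# A sketch of the per-interface sysctl handling in apply() above: "srv6"
# toggles seg6_enabled, and the hmac policy maps accept/drop/ignore onto
# seg6_require_hmac values 0/1/-1. sysctl_write is replaced here by a stub
# that only records what would be written; the default hmac of 'accept' is
# an assumption standing in for the CLI defaults.
def plan_sysctls(interface, interface_config):
    writes = {}
    if 'srv6' in interface_config:
        writes[f'net.ipv6.conf.{interface}.seg6_enabled'] = '1'
        hmac = interface_config['srv6'].get('hmac', 'accept')
        writes[f'net.ipv6.conf.{interface}.seg6_require_hmac'] = \
            {'accept': '0', 'drop': '1', 'ignore': '-1'}[hmac]
    else:
        writes[f'net.ipv6.conf.{interface}.seg6_enabled'] = '0'
    return writes

print(plan_sysctls('eth0', {'srv6': {'hmac': 'drop'}}))
# {'net.ipv6.conf.eth0.seg6_enabled': '1',
#  'net.ipv6.conf.eth0.seg6_require_hmac': '1'}
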
diff --git a/src/conf_mode/arp.py b/src/conf_mode/protocols_static_arp.py
index b141f1141..b141f1141 100755
--- a/src/conf_mode/arp.py
+++ b/src/conf_mode/protocols_static_arp.py
diff --git a/src/conf_mode/protocols_static_neighbor-proxy.py b/src/conf_mode/protocols_static_neighbor-proxy.py
new file mode 100755
index 000000000..10cc1e748
--- /dev/null
+++ b/src/conf_mode/protocols_static_neighbor-proxy.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+
+from vyos.config import Config
+from vyos.configdict import node_changed
+from vyos.utils.process import call
+from vyos import ConfigError
+from vyos import airbag
+
+airbag.enable()
+
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['protocols', 'static', 'neighbor-proxy']
+ config = conf.get_config_dict(base, get_first_key=True)
+
+ return config
+
+
+def verify(config):
+
+ if 'arp' in config:
+ for neighbor, neighbor_conf in config['arp'].items():
+ if 'interface' not in neighbor_conf:
+ raise ConfigError(
+ f"ARP neighbor-proxy for '{neighbor}' requires an interface to be set!"
+ )
+
+ if 'nd' in config:
+ for neighbor, neighbor_conf in config['nd'].items():
+ if 'interface' not in neighbor_conf:
+ raise ConfigError(
+ f"ARP neighbor-proxy for '{neighbor}' requires an interface to be set!"
+ )
+
+
+def generate(config):
+ pass
+
+
+def apply(config):
+ if not config:
+ # Cleanup proxy
+ call('ip neighbor flush proxy')
+ call('ip -6 neighbor flush proxy')
+ return None
+
+ # Add proxy ARP
+ if 'arp' in config:
+ # Cleanup entries before config
+ call('ip neighbor flush proxy')
+ for neighbor, neighbor_conf in config['arp'].items():
+ for interface in neighbor_conf.get('interface'):
+ call(f'ip neighbor add proxy {neighbor} dev {interface}')
+
+ # Add proxy NDP
+ if 'nd' in config:
+ # Cleanup entries before config
+ call('ip -6 neighbor flush proxy')
+ for neighbor, neighbor_conf in config['nd'].items():
+ for interface in neighbor_conf['interface']:
+ call(f'ip -6 neighbor add proxy {neighbor} dev {interface}')
+
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
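
# A sketch of what apply() above ends up executing: one flush per address
# family followed by one "ip neighbor add proxy" per neighbor/interface
# pair. Commands are only collected here instead of being run via call().
def plan_commands(config):
    cmds = []
    if not config:
        return ['ip neighbor flush proxy', 'ip -6 neighbor flush proxy']
    if 'arp' in config:
        cmds.append('ip neighbor flush proxy')
        for neighbor, neighbor_conf in config['arp'].items():
            for interface in neighbor_conf.get('interface', []):
                cmds.append(f'ip neighbor add proxy {neighbor} dev {interface}')
    if 'nd' in config:
        cmds.append('ip -6 neighbor flush proxy')
        for neighbor, neighbor_conf in config['nd'].items():
            for interface in neighbor_conf.get('interface', []):
                cmds.append(f'ip -6 neighbor add proxy {neighbor} dev {interface}')
    return cmds

print(plan_commands({'arp': {'192.0.2.10': {'interface': ['eth0']}}}))
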
diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py
index ad4121a49..40d7a6c16 100755
--- a/src/conf_mode/qos.py
+++ b/src/conf_mode/qos.py
@@ -149,7 +149,7 @@ def verify(qos):
if 'class' in policy_config:
for cls, cls_config in policy_config['class'].items():
# bandwidth is not mandatory for priority-queue - that is why this is on the exception list
- if 'bandwidth' not in cls_config and policy_type not in ['priority_queue', 'round_robin']:
+ if 'bandwidth' not in cls_config and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']:
raise ConfigError(f'Bandwidth must be defined for policy "{policy}" class "{cls}"!')
if 'match' in cls_config:
for match, match_config in cls_config['match'].items():
@@ -173,7 +173,7 @@ def verify(qos):
if 'default' not in policy_config:
raise ConfigError(f'Policy {policy} misses "default" class!')
if 'default' in policy_config:
- if 'bandwidth' not in policy_config['default'] and policy_type not in ['priority_queue', 'round_robin']:
+ if 'bandwidth' not in policy_config['default'] and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']:
raise ConfigError('Bandwidth not defined for default traffic!')
# we should check interface ingress/egress configuration after verifying that
diff --git a/src/conf_mode/bcast_relay.py b/src/conf_mode/service_broadcast-relay.py
index 31c552f5a..31c552f5a 100755
--- a/src/conf_mode/bcast_relay.py
+++ b/src/conf_mode/service_broadcast-relay.py
diff --git a/src/conf_mode/service_config_sync.py b/src/conf_mode/service_config-sync.py
index 4b8a7f6ee..4b8a7f6ee 100755
--- a/src/conf_mode/service_config_sync.py
+++ b/src/conf_mode/service_config-sync.py
diff --git a/src/conf_mode/conntrack_sync.py b/src/conf_mode/service_conntrack-sync.py
index 4fb2ce27f..4fb2ce27f 100755
--- a/src/conf_mode/conntrack_sync.py
+++ b/src/conf_mode/service_conntrack-sync.py
diff --git a/src/conf_mode/dhcp_relay.py b/src/conf_mode/service_dhcp-relay.py
index 37d708847..37d708847 100755
--- a/src/conf_mode/dhcp_relay.py
+++ b/src/conf_mode/service_dhcp-relay.py
diff --git a/src/conf_mode/dhcp_server.py b/src/conf_mode/service_dhcp-server.py
index ac7d95632..91ea354b6 100755
--- a/src/conf_mode/dhcp_server.py
+++ b/src/conf_mode/service_dhcp-server.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2023 VyOS maintainers and contributors
+# Copyright (C) 2018-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -18,23 +18,36 @@ import os
from ipaddress import ip_address
from ipaddress import ip_network
-from netaddr import IPAddress
from netaddr import IPRange
from sys import exit
from vyos.config import Config
+from vyos.pki import wrap_certificate
+from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.utils.dict import dict_search
+from vyos.utils.dict import dict_search_args
+from vyos.utils.file import chmod_775
+from vyos.utils.file import makedir
+from vyos.utils.file import write_file
from vyos.utils.process import call
-from vyos.utils.process import run
+from vyos.utils.network import interface_exists
from vyos.utils.network import is_subnet_connected
from vyos.utils.network import is_addr_assigned
from vyos import ConfigError
from vyos import airbag
airbag.enable()
-config_file = '/run/dhcp-server/dhcpd.conf'
-systemd_override = r'/run/systemd/system/isc-dhcp-server.service.d/10-override.conf'
+ctrl_config_file = '/run/kea/kea-ctrl-agent.conf'
+ctrl_socket = '/run/kea/dhcp4-ctrl-socket'
+config_file = '/run/kea/kea-dhcp4.conf'
+lease_file = '/config/dhcp/dhcp4-leases.csv'
+systemd_override = r'/run/systemd/system/kea-ctrl-agent.service.d/10-override.conf'
+user_group = '_kea'
+
+ca_cert_file = '/run/kea/kea-failover-ca.pem'
+cert_file = '/run/kea/kea-failover.pem'
+cert_key_file = '/run/kea/kea-failover-key.pem'
def dhcp_slice_range(exclude_list, range_dict):
"""
@@ -130,6 +143,9 @@ def get_config(config=None):
dhcp['shared_network_name'][network]['subnet'][subnet].update(
{'range' : new_range_dict})
+ if dict_search('failover.certificate', dhcp):
+ dhcp['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True)
+
return dhcp
def verify(dhcp):
@@ -149,6 +165,7 @@ def verify(dhcp):
shared_networks = len(dhcp['shared_network_name'])
disabled_shared_networks = 0
+ subnet_ids = []
# A shared-network requires a subnet definition
for network, network_config in dhcp['shared_network_name'].items():
@@ -160,19 +177,20 @@ def verify(dhcp):
'lease subnet must be configured.')
for subnet, subnet_config in network_config['subnet'].items():
+ if 'subnet_id' not in subnet_config:
+ raise ConfigError(f'Unique subnet ID not specified for subnet "{subnet}"')
+
+ if subnet_config['subnet_id'] in subnet_ids:
+ raise ConfigError(f'Subnet ID for subnet "{subnet}" is not unique')
+
+ subnet_ids.append(subnet_config['subnet_id'])
+
# All delivered static routes require a next-hop to be set
if 'static_route' in subnet_config:
for route, route_option in subnet_config['static_route'].items():
if 'next_hop' not in route_option:
raise ConfigError(f'DHCP static-route "{route}" requires router to be defined!')
- # DHCP failover needs at least one subnet that uses it
- if 'enable_failover' in subnet_config:
- if 'failover' not in dhcp:
- raise ConfigError(f'Can not enable failover for "{subnet}" in "{network}".\n' \
- 'Failover is not configured globally!')
- failover_ok = True
-
# Check if DHCP address range is inside configured subnet declaration
if 'range' in subnet_config:
networks = []
@@ -214,15 +232,35 @@ def verify(dhcp):
if 'static_mapping' in subnet_config:
# Static mappings require just a MAC address (will use an IP from the dynamic pool if IP is not set)
+ used_ips = []
+ used_mac = []
+ used_duid = []
for mapping, mapping_config in subnet_config['static_mapping'].items():
if 'ip_address' in mapping_config:
if ip_address(mapping_config['ip_address']) not in ip_network(subnet):
raise ConfigError(f'Configured static lease address for mapping "{mapping}" is\n' \
f'not within shared-network "{network}, {subnet}"!')
- if 'mac_address' not in mapping_config:
- raise ConfigError(f'MAC address required for static mapping "{mapping}"\n' \
- f'within shared-network "{network}, {subnet}"!')
+ if ('mac' not in mapping_config and 'duid' not in mapping_config) or \
+ ('mac' in mapping_config and 'duid' in mapping_config):
+ raise ConfigError(f'Either MAC address or Client identifier (DUID) is required for '
+ f'static mapping "{mapping}" within shared-network "{network}, {subnet}"!')
+
+ if 'disable' not in mapping_config:
+ if mapping_config['ip_address'] in used_ips:
+ raise ConfigError(f'Configured IP address for static mapping "{mapping}" already exists on another static mapping')
+ used_ips.append(mapping_config['ip_address'])
+
+ if 'disable' not in mapping_config:
+ if 'mac' in mapping_config:
+ if mapping_config['mac'] in used_mac:
+ raise ConfigError(f'Configured MAC address for static mapping "{mapping}" already exists on another static mapping')
+ used_mac.append(mapping_config['mac'])
+
+ if 'duid' in mapping_config:
+ if mapping_config['duid'] in used_duid:
+ raise ConfigError(f'Configured DUID for static mapping "{mapping}" already exists on another static mapping')
+ used_duid.append(mapping_config['duid'])
# There must be one subnet connected to a listen interface.
# This only counts if the network itself is not disabled!
@@ -249,14 +287,34 @@ def verify(dhcp):
raise ConfigError(f'At least one shared network must be active!')
if 'failover' in dhcp:
- if not failover_ok:
- raise ConfigError('DHCP failover must be enabled for at least one subnet!')
-
for key in ['name', 'remote', 'source_address', 'status']:
if key not in dhcp['failover']:
tmp = key.replace('_', '-')
raise ConfigError(f'DHCP failover requires "{tmp}" to be specified!')
+ if len({'certificate', 'ca_certificate'} & set(dhcp['failover'])) == 1:
+ raise ConfigError(f'DHCP secured failover requires both certificate and CA certificate')
+
+ if 'certificate' in dhcp['failover']:
+ cert_name = dhcp['failover']['certificate']
+
+ if cert_name not in dhcp['pki']['certificate']:
+ raise ConfigError(f'Invalid certificate specified for DHCP failover')
+
+ if not dict_search_args(dhcp['pki']['certificate'], cert_name, 'certificate'):
+ raise ConfigError(f'Invalid certificate specified for DHCP failover')
+
+ if not dict_search_args(dhcp['pki']['certificate'], cert_name, 'private', 'key'):
+ raise ConfigError(f'Missing private key on certificate specified for DHCP failover')
+
+ if 'ca_certificate' in dhcp['failover']:
+ ca_cert_name = dhcp['failover']['ca_certificate']
+ if ca_cert_name not in dhcp['pki']['ca']:
+ raise ConfigError(f'Invalid CA certificate specified for DHCP failover')
+
+ if not dict_search_args(dhcp['pki']['ca'], ca_cert_name, 'certificate'):
+ raise ConfigError(f'Invalid CA certificate specified for DHCP failover')
+
for address in (dict_search('listen_address', dhcp) or []):
if is_addr_assigned(address):
listen_ok = True
@@ -265,12 +323,18 @@ def verify(dhcp):
else:
raise ConfigError(f'listen-address "{address}" not configured on any interface')
-
if not listen_ok:
raise ConfigError('None of the configured subnets have an appropriate primary IP address on any\n'
'broadcast interface configured, nor was there an explicit listen-address\n'
'configured for serving DHCP relay packets!')
+ if 'listen_address' in dhcp and 'listen_interface' in dhcp:
+ raise ConfigError(f'Cannot define listen-address and listen-interface at the same time')
+
+ for interface in (dict_search('listen_interface', dhcp) or []):
+ if not interface_exists(interface):
+ raise ConfigError(f'listen-interface "{interface}" does not exist')
+
return None
def generate(dhcp):
@@ -278,43 +342,71 @@ def generate(dhcp):
if not dhcp or 'disable' in dhcp:
return None
- # Please see: https://vyos.dev/T1129 for quoting of the raw
- # parameters we can pass to ISC DHCPd
- tmp_file = '/tmp/dhcpd.conf'
- render(tmp_file, 'dhcp-server/dhcpd.conf.j2', dhcp,
- formater=lambda _: _.replace("&quot;", '"'))
- # XXX: as we have the ability for a user to pass in "raw" options via VyOS
- # CLI (see T3544) we now ask ISC dhcpd to test the newly rendered
- # configuration
- tmp = run(f'/usr/sbin/dhcpd -4 -q -t -cf {tmp_file}')
- if tmp > 0:
- if os.path.exists(tmp_file):
- os.unlink(tmp_file)
- raise ConfigError('Configuration file errors encountered - check your options!')
-
- # Now that we know that the newly rendered configuration is "good" we can
- # render the "real" configuration
- render(config_file, 'dhcp-server/dhcpd.conf.j2', dhcp,
- formater=lambda _: _.replace("&quot;", '"'))
- render(systemd_override, 'dhcp-server/10-override.conf.j2', dhcp)
-
- # Clean up configuration test file
- if os.path.exists(tmp_file):
- os.unlink(tmp_file)
+ dhcp['lease_file'] = lease_file
+ dhcp['machine'] = os.uname().machine
+
+ # Create directory for lease file if necessary
+ lease_dir = os.path.dirname(lease_file)
+ if not os.path.isdir(lease_dir):
+ makedir(lease_dir, group='vyattacfg')
+ chmod_775(lease_dir)
+
+ # Create lease file if necessary and let kea own it - 'kea-lfc' expects it that way
+ if not os.path.exists(lease_file):
+ write_file(lease_file, '', user=user_group, group=user_group, mode=0o644)
+
+ for f in [cert_file, cert_key_file, ca_cert_file]:
+ if os.path.exists(f):
+ os.unlink(f)
+
+ if 'failover' in dhcp:
+ if 'certificate' in dhcp['failover']:
+ cert_name = dhcp['failover']['certificate']
+ cert_data = dhcp['pki']['certificate'][cert_name]['certificate']
+ key_data = dhcp['pki']['certificate'][cert_name]['private']['key']
+ write_file(cert_file, wrap_certificate(cert_data), user=user_group, mode=0o600)
+ write_file(cert_key_file, wrap_private_key(key_data), user=user_group, mode=0o600)
+
+ dhcp['failover']['cert_file'] = cert_file
+ dhcp['failover']['cert_key_file'] = cert_key_file
+
+ if 'ca_certificate' in dhcp['failover']:
+ ca_cert_name = dhcp['failover']['ca_certificate']
+ ca_cert_data = dhcp['pki']['ca'][ca_cert_name]['certificate']
+ write_file(ca_cert_file, wrap_certificate(ca_cert_data), user=user_group, mode=0o600)
+
+ dhcp['failover']['ca_cert_file'] = ca_cert_file
+
+ render(systemd_override, 'dhcp-server/10-override.conf.j2', dhcp)
+
+ render(ctrl_config_file, 'dhcp-server/kea-ctrl-agent.conf.j2', dhcp, user=user_group, group=user_group)
+ render(config_file, 'dhcp-server/kea-dhcp4.conf.j2', dhcp, user=user_group, group=user_group)
return None
def apply(dhcp):
- call('systemctl daemon-reload')
- # bail out early - looks like removal from running config
+ services = ['kea-ctrl-agent', 'kea-dhcp4-server', 'kea-dhcp-ddns-server']
+
if not dhcp or 'disable' in dhcp:
- call('systemctl stop isc-dhcp-server.service')
+ for service in services:
+ call(f'systemctl stop {service}.service')
+
if os.path.exists(config_file):
os.unlink(config_file)
return None
- call('systemctl restart isc-dhcp-server.service')
+ for service in services:
+ action = 'restart'
+
+ if service == 'kea-dhcp-ddns-server' and 'dynamic_dns_update' not in dhcp:
+ action = 'stop'
+
+ if service == 'kea-ctrl-agent' and 'failover' not in dhcp:
+ action = 'stop'
+
+ call(f'systemctl {action} {service}.service')
+
return None
if __name__ == '__main__':
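
# A condensed sketch of the new Kea-oriented checks above: every subnet
# needs a unique subnet-id, and (unless disabled) no two static mappings in
# a subnet may reuse the same IP address, MAC or DUID. The nested layout
# mirrors the CLI dictionary; the sample data is hypothetical.
def check_unique(shared_networks):
    subnet_ids = []
    for network, network_config in shared_networks.items():
        for subnet, subnet_config in network_config.get('subnet', {}).items():
            subnet_id = subnet_config.get('subnet_id')
            if subnet_id is None or subnet_id in subnet_ids:
                raise ValueError(f'subnet "{subnet}" needs a unique subnet-id')
            subnet_ids.append(subnet_id)
            used = {'ip_address': [], 'mac': [], 'duid': []}
            for mapping, mapping_config in subnet_config.get('static_mapping', {}).items():
                if 'disable' in mapping_config:
                    continue
                for key, seen in used.items():
                    if key in mapping_config:
                        if mapping_config[key] in seen:
                            raise ValueError(f'duplicate {key} in static mapping "{mapping}"')
                        seen.append(mapping_config[key])

check_unique({
    'LAN': {
        'subnet': {
            '10.0.0.0/24': {
                'subnet_id': '1',
                'static_mapping': {
                    'printer': {'ip_address': '10.0.0.10', 'mac': '00:53:00:00:00:01'},
                },
            },
        },
    },
})
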
diff --git a/src/conf_mode/dhcpv6_relay.py b/src/conf_mode/service_dhcpv6-relay.py
index 6537ca3c2..6537ca3c2 100755
--- a/src/conf_mode/dhcpv6_relay.py
+++ b/src/conf_mode/service_dhcpv6-relay.py
diff --git a/src/conf_mode/dhcpv6_server.py b/src/conf_mode/service_dhcpv6-server.py
index 427001609..add83eb0d 100755
--- a/src/conf_mode/dhcpv6_server.py
+++ b/src/conf_mode/service_dhcpv6-server.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2022 VyOS maintainers and contributors
+# Copyright (C) 2018-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -22,15 +22,20 @@ from sys import exit
from vyos.config import Config
from vyos.template import render
-from vyos.template import is_ipv6
from vyos.utils.process import call
+from vyos.utils.file import chmod_775
+from vyos.utils.file import makedir
+from vyos.utils.file import write_file
from vyos.utils.dict import dict_search
from vyos.utils.network import is_subnet_connected
from vyos import ConfigError
from vyos import airbag
airbag.enable()
-config_file = '/run/dhcp-server/dhcpdv6.conf'
+config_file = '/run/kea/kea-dhcp6.conf'
+ctrl_socket = '/run/kea/dhcp6-ctrl-socket'
+lease_file = '/config/dhcp/dhcp6-leases.csv'
+user_group = '_kea'
def get_config(config=None):
if config:
@@ -58,6 +63,7 @@ def verify(dhcpv6):
# Inspect shared-network/subnet
subnets = []
+ subnet_ids = []
listen_ok = False
for network, network_config in dhcpv6['shared_network_name'].items():
# A shared-network requires a subnet definition
@@ -67,26 +73,37 @@ def verify(dhcpv6):
'each shared network!')
for subnet, subnet_config in network_config['subnet'].items():
- if 'address_range' in subnet_config:
- if 'start' in subnet_config['address_range']:
- range6_start = []
- range6_stop = []
- for start, start_config in subnet_config['address_range']['start'].items():
- if 'stop' not in start_config:
- raise ConfigError(f'address-range stop address for start "{start}" is not defined!')
- stop = start_config['stop']
+ if 'subnet_id' not in subnet_config:
+ raise ConfigError(f'Unique subnet ID not specified for subnet "{subnet}"')
+
+ if subnet_config['subnet_id'] in subnet_ids:
+ raise ConfigError(f'Subnet ID for subnet "{subnet}" is not unique')
+
+ subnet_ids.append(subnet_config['subnet_id'])
+
+ if 'range' in subnet_config:
+ range6_start = []
+ range6_stop = []
+
+ for num, range_config in subnet_config['range'].items():
+ if 'start' in range_config:
+ start = range_config['start']
+
+ if 'stop' not in range_config:
+ raise ConfigError(f'Range stop address for start "{start}" is not defined!')
+ stop = range_config['stop']
# Start address must be inside network
if not ip_address(start) in ip_network(subnet):
- raise ConfigError(f'address-range start address "{start}" is not in subnet "{subnet}"!')
+ raise ConfigError(f'Range start address "{start}" is not in subnet "{subnet}"!')
# Stop address must be inside network
if not ip_address(stop) in ip_network(subnet):
- raise ConfigError(f'address-range stop address "{stop}" is not in subnet "{subnet}"!')
+ raise ConfigError(f'Range stop address "{stop}" is not in subnet "{subnet}"!')
# Stop address must be greater or equal to start address
if not ip_address(stop) >= ip_address(start):
- raise ConfigError(f'address-range stop address "{stop}" must be greater then or equal ' \
+ raise ConfigError(f'Range stop address "{stop}" must be greater than or equal ' \
f'to the range start address "{start}"!')
# DHCPv6 range start address must be unique - two ranges can't
@@ -94,6 +111,7 @@ def verify(dhcpv6):
if start in range6_start:
raise ConfigError(f'Conflicting DHCPv6 lease range: '\
f'Pool start address "{start}" defined multipe times!')
+
range6_start.append(start)
# DHCPv6 range stop address must be unique - two ranges can't
@@ -101,26 +119,48 @@ def verify(dhcpv6):
if stop in range6_stop:
raise ConfigError(f'Conflicting DHCPv6 lease range: '\
f'Pool stop address "{stop}" defined multipe times!')
+
range6_stop.append(stop)
- if 'prefix' in subnet_config:
- for prefix in subnet_config['prefix']:
- if ip_network(prefix) not in ip_network(subnet):
- raise ConfigError(f'address-range prefix "{prefix}" is not in subnet "{subnet}""')
+ if 'prefix' in range_config:
+ prefix = range_config['prefix']
+
+ if not ip_network(prefix).subnet_of(ip_network(subnet)):
+ raise ConfigError(f'Range prefix "{prefix}" is not in subnet "{subnet}"')
# Prefix delegation sanity checks
if 'prefix_delegation' in subnet_config:
- if 'start' not in subnet_config['prefix_delegation']:
- raise ConfigError('prefix-delegation start address not defined!')
+ if 'prefix' not in subnet_config['prefix_delegation']:
+ raise ConfigError('prefix-delegation prefix not defined!')
- for prefix, prefix_config in subnet_config['prefix_delegation']['start'].items():
- if 'stop' not in prefix_config:
- raise ConfigError(f'Stop address of delegated IPv6 prefix range "{prefix}" '\
+ for prefix, prefix_config in subnet_config['prefix_delegation']['prefix'].items():
+ if 'delegated_length' not in prefix_config:
+ raise ConfigError(f'Delegated IPv6 prefix length for "{prefix}" '\
f'must be configured')
if 'prefix_length' not in prefix_config:
raise ConfigError('Length of delegated IPv6 prefix must be configured')
+ if prefix_config['prefix_length'] > prefix_config['delegated_length']:
+ raise ConfigError('Length of delegated IPv6 prefix must be within parent prefix')
+
+ if 'excluded_prefix' in prefix_config:
+ if 'excluded_prefix_length' not in prefix_config:
+ raise ConfigError('Length of excluded IPv6 prefix must be configured')
+
+ prefix_len = prefix_config['prefix_length']
+ prefix_obj = ip_network(f'{prefix}/{prefix_len}')
+
+ excluded_prefix = prefix_config['excluded_prefix']
+ excluded_len = prefix_config['excluded_prefix_length']
+ excluded_obj = ip_network(f'{excluded_prefix}/{excluded_len}')
+
+ if excluded_len <= prefix_config['delegated_length']:
+ raise ConfigError('Excluded IPv6 prefix must be smaller than delegated prefix')
+
+ if not excluded_obj.subnet_of(prefix_obj):
+ raise ConfigError(f'Excluded prefix "{excluded_prefix}" does not exist in the prefix')
+
# Static mappings don't require anything (but check if IP is in subnet if it's set)
if 'static_mapping' in subnet_config:
for mapping, mapping_config in subnet_config['static_mapping'].items():
@@ -129,13 +169,20 @@ def verify(dhcpv6):
if ip_address(mapping_config['ipv6_address']) not in ip_network(subnet):
raise ConfigError(f'static-mapping address for mapping "{mapping}" is not in subnet "{subnet}"!')
- if 'vendor_option' in subnet_config:
- if len(dict_search('vendor_option.cisco.tftp_server', subnet_config)) > 2:
- raise ConfigError(f'No more then two Cisco tftp-servers should be defined for subnet "{subnet}"!')
+ if ('mac' not in mapping_config and 'duid' not in mapping_config) or \
+ ('mac' in mapping_config and 'duid' in mapping_config):
+ raise ConfigError(f'Either MAC address or Client identifier (DUID) is required for '
+ f'static mapping "{mapping}" within shared-network "{network}, {subnet}"!')
+
+ if 'option' in subnet_config:
+ if 'vendor_option' in subnet_config['option']:
+ if len(dict_search('option.vendor_option.cisco.tftp_server', subnet_config)) > 2:
+ raise ConfigError(f'No more than two Cisco tftp-servers should be defined for subnet "{subnet}"!')
# Subnets must be unique
if subnet in subnets:
raise ConfigError(f'DHCPv6 subnets must be unique! Subnet {subnet} defined multiple times!')
+
subnets.append(subnet)
# DHCPv6 requires at least one configured address range or one static mapping
@@ -168,12 +215,25 @@ def generate(dhcpv6):
if not dhcpv6 or 'disable' in dhcpv6:
return None
- render(config_file, 'dhcp-server/dhcpdv6.conf.j2', dhcpv6)
+ dhcpv6['lease_file'] = lease_file
+ dhcpv6['machine'] = os.uname().machine
+
+ # Create directory for lease file if necessary
+ lease_dir = os.path.dirname(lease_file)
+ if not os.path.isdir(lease_dir):
+ makedir(lease_dir, group='vyattacfg')
+ chmod_775(lease_dir)
+
+ # Create lease file if necessary and let kea own it - 'kea-lfc' expects it that way
+ if not os.path.exists(lease_file):
+ write_file(lease_file, '', user=user_group, group=user_group, mode=0o644)
+
+ render(config_file, 'dhcp-server/kea-dhcp6.conf.j2', dhcpv6, user=user_group, group=user_group)
return None
def apply(dhcpv6):
# bail out early - looks like removal from running config
- service_name = 'isc-dhcp-server6.service'
+ service_name = 'kea-dhcp6-server.service'
if not dhcpv6 or 'disable' in dhcpv6:
# DHCP server is removed in the commit
call(f'systemctl stop {service_name}')
@@ -182,6 +242,7 @@ def apply(dhcpv6):
return None
call(f'systemctl restart {service_name}')
+
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/service_dns_dynamic.py b/src/conf_mode/service_dns_dynamic.py
new file mode 100755
index 000000000..a551a9891
--- /dev/null
+++ b/src/conf_mode/service_dns_dynamic.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2018-2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from sys import exit
+
+from vyos.base import Warning
+from vyos.config import Config
+from vyos.configverify import verify_interface_exists
+from vyos.configverify import dynamic_interface_pattern
+from vyos.template import render
+from vyos.utils.process import call
+from vyos.utils.network import interface_exists
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+config_file = r'/run/ddclient/ddclient.conf'
+systemd_override = r'/run/systemd/system/ddclient.service.d/override.conf'
+
+# Protocols that require zone
+zone_necessary = ['cloudflare', 'digitalocean', 'godaddy', 'hetzner', 'gandi',
+ 'nfsn', 'nsupdate']
+zone_supported = zone_necessary + ['dnsexit2', 'zoneedit1']
+
+# Protocols that do not require username
+username_unnecessary = ['1984', 'cloudflare', 'cloudns', 'digitalocean', 'dnsexit2',
+ 'duckdns', 'freemyip', 'hetzner', 'keysystems', 'njalla',
+ 'nsupdate', 'regfishde']
+
+# Protocols that support TTL
+ttl_supported = ['cloudflare', 'dnsexit2', 'gandi', 'hetzner', 'godaddy', 'nfsn',
+ 'nsupdate']
+
+# Protocols that support both IPv4 and IPv6
+dualstack_supported = ['cloudflare', 'digitalocean', 'dnsexit2', 'duckdns',
+ 'dyndns2', 'easydns', 'freedns', 'hetzner', 'infomaniak',
+ 'njalla']
+
+# dyndns2 protocol in ddclient honors dual stack only for selected servers
+# because of the way it is implemented in ddclient
+dyndns_dualstack_servers = ['members.dyndns.org', 'dynv6.com']
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['service', 'dns', 'dynamic']
+ if not conf.exists(base):
+ return None
+
+ dyndns = conf.get_config_dict(base, key_mangling=('-', '_'),
+ no_tag_node_value_mangle=True,
+ get_first_key=True,
+ with_recursive_defaults=True)
+
+ dyndns['config_file'] = config_file
+ return dyndns
+
+def verify(dyndns):
+ # bail out early - looks like removal from running config
+ if not dyndns or 'name' not in dyndns:
+ return None
+
+ # Dynamic DNS service provider - configuration validation
+ for service, config in dyndns['name'].items():
+ error_msg_req = f'is required for Dynamic DNS service "{service}"'
+ error_msg_uns = f'is not supported for Dynamic DNS service "{service}"'
+
+ for field in ['protocol', 'address', 'host_name']:
+ if field not in config:
+ raise ConfigError(f'"{field.replace("_", "-")}" {error_msg_req}')
+
+ if not any(x in config['address'] for x in ['interface', 'web']):
+ raise ConfigError(f'Either "interface" or "web" {error_msg_req} '
+ f'with protocol "{config["protocol"]}"')
+ if all(x in config['address'] for x in ['interface', 'web']):
+ raise ConfigError(f'Both "interface" and "web" at the same time {error_msg_uns} '
+ f'with protocol "{config["protocol"]}"')
+
+ # If dyndns address is an interface, ensure that the interface exists
+ # and warn if a non-active dynamic interface is used
+ if 'interface' in config['address']:
+ tmp = re.compile(dynamic_interface_pattern)
+ # skip the existence check for dynamic interfaces
+ if tmp.match(config['address']['interface']):
+ if not interface_exists(config['address']['interface']):
+ Warning(f'Interface "{config["address"]["interface"]}" does not exist yet and '
+ f'cannot be used for Dynamic DNS service "{service}" until it is up!')
+ else:
+ verify_interface_exists(config['address']['interface'])
+
+ if 'web' in config['address']:
+ # If 'skip' is specified, 'url' is required as well
+ if 'skip' in config['address']['web'] and 'url' not in config['address']['web']:
+ raise ConfigError(f'"url" along with "skip" {error_msg_req} '
+ f'with protocol "{config["protocol"]}"')
+ if 'url' in config['address']['web']:
+ # Warn if using checkip.dyndns.org, as it does not support HTTPS
+ # See: https://github.com/ddclient/ddclient/issues/597
+ if re.search("^(https?://)?checkip\.dyndns\.org", config['address']['web']['url']):
+ Warning(f'"checkip.dyndns.org" does not support HTTPS requests for IP address '
+ f'lookup. Please use a different IP address lookup service.')
+
+ # RFC2136 uses 'key' instead of 'password'
+ if config['protocol'] != 'nsupdate' and 'password' not in config:
+ raise ConfigError(f'"password" {error_msg_req}')
+
+ # Other RFC2136 specific configuration validation
+ if config['protocol'] == 'nsupdate':
+ if 'password' in config:
+ raise ConfigError(f'"password" {error_msg_uns} with protocol "{config["protocol"]}"')
+ for field in ['server', 'key']:
+ if field not in config:
+ raise ConfigError(f'"{field}" {error_msg_req} with protocol "{config["protocol"]}"')
+
+ if config['protocol'] in zone_necessary and 'zone' not in config:
+ raise ConfigError(f'"zone" {error_msg_req} with protocol "{config["protocol"]}"')
+
+ if config['protocol'] not in zone_supported and 'zone' in config:
+ raise ConfigError(f'"zone" {error_msg_uns} with protocol "{config["protocol"]}"')
+
+ if config['protocol'] not in username_unnecessary and 'username' not in config:
+ raise ConfigError(f'"username" {error_msg_req} with protocol "{config["protocol"]}"')
+
+ if config['protocol'] not in ttl_supported and 'ttl' in config:
+ raise ConfigError(f'"ttl" {error_msg_uns} with protocol "{config["protocol"]}"')
+
+ if config['ip_version'] == 'both':
+ if config['protocol'] not in dualstack_supported:
+ raise ConfigError(f'Both IPv4 and IPv6 at the same time {error_msg_uns} '
+ f'with protocol "{config["protocol"]}"')
+ # dyndns2 protocol in ddclient honors dual stack only for dyn.com (dyndns.org)
+ if config['protocol'] == 'dyndns2' and 'server' in config and config['server'] not in dyndns_dualstack_servers:
+ raise ConfigError(f'Both IPv4 and IPv6 at the same time {error_msg_uns} '
+ f'for "{config["server"]}" with protocol "{config["protocol"]}"')
+
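+ # 'expiry-time' (forced update interval) must not be shorter than 'wait-time' (interval between update attempts)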
+ if {'wait_time', 'expiry_time'} <= config.keys() and int(config['expiry_time']) < int(config['wait_time']):
+ raise ConfigError(f'"expiry-time" must be greater than "wait-time" for '
+ f'Dynamic DNS service "{service}"')
+
+ return None
+
+def generate(dyndns):
+ # bail out early - looks like removal from running config
+ if not dyndns or 'name' not in dyndns:
+ return None
+
+ render(config_file, 'dns-dynamic/ddclient.conf.j2', dyndns, permission=0o600)
+ render(systemd_override, 'dns-dynamic/override.conf.j2', dyndns)
+ return None
+
+def apply(dyndns):
+ systemd_service = 'ddclient.service'
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+
+ # bail out early - looks like removal from running config
+ if not dyndns or 'name' not in dyndns:
+ call(f'systemctl stop {systemd_service}')
+ if os.path.exists(config_file):
+ os.unlink(config_file)
+ else:
+ call(f'systemctl reload-or-restart {systemd_service}')
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
diff --git a/src/conf_mode/dns_forwarding.py b/src/conf_mode/service_dns_forwarding.py
index c186f47af..ecad765f4 100755
--- a/src/conf_mode/dns_forwarding.py
+++ b/src/conf_mode/service_dns_forwarding.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2022 VyOS maintainers and contributors
+# Copyright (C) 2018-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -26,18 +26,18 @@ from vyos.template import render
from vyos.template import bracketize_ipv6
from vyos.utils.process import call
from vyos.utils.permission import chown
-from vyos.utils.dict import dict_search
from vyos import ConfigError
from vyos import airbag
airbag.enable()
-pdns_rec_user = pdns_rec_group = 'pdns'
-pdns_rec_run_dir = '/run/powerdns'
+pdns_rec_user_group = 'pdns'
+pdns_rec_run_dir = '/run/pdns-recursor'
pdns_rec_lua_conf_file = f'{pdns_rec_run_dir}/recursor.conf.lua'
pdns_rec_hostsd_lua_conf_file = f'{pdns_rec_run_dir}/recursor.vyos-hostsd.conf.lua'
pdns_rec_hostsd_zones_file = f'{pdns_rec_run_dir}/recursor.forward-zones.conf'
pdns_rec_config_file = f'{pdns_rec_run_dir}/recursor.conf'
+pdns_rec_systemd_override = '/run/systemd/system/pdns-recursor.service.d/override.conf'
hostsd_tag = 'static'
@@ -55,6 +55,9 @@ def get_config(config=None):
get_first_key=True,
with_recursive_defaults=True)
+ dns['config_file'] = pdns_rec_config_file
+ dns['config_dir'] = os.path.dirname(pdns_rec_config_file)
+
# some additions to the default dictionary
if 'system' in dns:
base_nameservers = ['system', 'name-server']
@@ -251,11 +254,16 @@ def generate(dns):
if not dns:
return None
- render(pdns_rec_config_file, 'dns-forwarding/recursor.conf.j2',
- dns, user=pdns_rec_user, group=pdns_rec_group)
+ render(pdns_rec_systemd_override, 'dns-forwarding/override.conf.j2', dns)
+
+ render(pdns_rec_config_file, 'dns-forwarding/recursor.conf.j2', dns,
+ user=pdns_rec_user_group, group=pdns_rec_user_group)
- render(pdns_rec_lua_conf_file, 'dns-forwarding/recursor.conf.lua.j2',
- dns, user=pdns_rec_user, group=pdns_rec_group)
+ render(pdns_rec_config_file, 'dns-forwarding/recursor.conf.j2', dns,
+ user=pdns_rec_user_group, group=pdns_rec_user_group)
+
+ render(pdns_rec_lua_conf_file, 'dns-forwarding/recursor.conf.lua.j2', dns,
+ user=pdns_rec_user_group, group=pdns_rec_user_group)
for zone_filename in glob(f'{pdns_rec_run_dir}/zone.*.conf'):
os.unlink(zone_filename)
@@ -263,21 +271,25 @@ def generate(dns):
if 'authoritative_zones' in dns:
for zone in dns['authoritative_zones']:
render(zone['file'], 'dns-forwarding/recursor.zone.conf.j2',
- zone, user=pdns_rec_user, group=pdns_rec_group)
+ zone, user=pdns_rec_user_group, group=pdns_rec_user_group)
# if vyos-hostsd didn't create its files yet, create them (empty)
for file in [pdns_rec_hostsd_lua_conf_file, pdns_rec_hostsd_zones_file]:
with open(file, 'a'):
pass
- chown(file, user=pdns_rec_user, group=pdns_rec_group)
+ chown(file, user=pdns_rec_user_group, group=pdns_rec_user_group)
return None
def apply(dns):
+ systemd_service = 'pdns-recursor.service'
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+
if not dns:
# DNS forwarding is removed in the commit
- call('systemctl stop pdns-recursor.service')
+ call(f'systemctl stop {systemd_service}')
if os.path.isfile(pdns_rec_config_file):
os.unlink(pdns_rec_config_file)
@@ -345,7 +357,7 @@ def apply(dns):
hc.apply()
### finally (re)start pdns-recursor
- call('systemctl restart pdns-recursor.service')
+ call(f'systemctl reload-or-restart {systemd_service}')
if __name__ == '__main__':
try:
diff --git a/src/conf_mode/service_event_handler.py b/src/conf_mode/service_event-handler.py
index 5028ef52f..5028ef52f 100755
--- a/src/conf_mode/service_event_handler.py
+++ b/src/conf_mode/service_event-handler.py
diff --git a/src/conf_mode/service_https.py b/src/conf_mode/service_https.py
new file mode 100755
index 000000000..46efc3c93
--- /dev/null
+++ b/src/conf_mode/service_https.py
@@ -0,0 +1,238 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2019-2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import socket
+import sys
+import json
+
+from time import sleep
+
+from vyos.base import Warning
+from vyos.config import Config
+from vyos.config import config_dict_merge
+from vyos.configdiff import get_config_diff
+from vyos.configverify import verify_vrf
+from vyos.defaults import api_config_state
+from vyos.pki import wrap_certificate
+from vyos.pki import wrap_private_key
+from vyos.pki import wrap_dh_parameters
+from vyos.pki import load_dh_parameters
+from vyos.template import render
+from vyos.utils.dict import dict_search
+from vyos.utils.process import call
+from vyos.utils.process import is_systemd_service_active
+from vyos.utils.network import check_port_availability
+from vyos.utils.network import is_listen_port_bind_service
+from vyos.utils.file import write_file
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+config_file = '/etc/nginx/sites-enabled/default'
+systemd_override = r'/run/systemd/system/nginx.service.d/override.conf'
+cert_dir = '/run/nginx/certs'
+
+user = 'www-data'
+group = 'www-data'
+
+systemd_service_api = '/run/systemd/system/vyos-http-api.service'
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['service', 'https']
+ if not conf.exists(base):
+ return None
+
+ https = conf.get_config_dict(base, get_first_key=True,
+ key_mangling=('-', '_'),
+ with_pki=True)
+
+ # store path to API config file for later use in templates
+ https['api_config_state'] = api_config_state
+ # get the fully qualified system hostname
+ https['hostname'] = socket.getfqdn()
+
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to update into the dictionary retrieved.
+ default_values = conf.get_config_defaults(**https.kwargs, recursive=True)
+ if 'api' not in https or 'graphql' not in https['api']:
+ del default_values['api']
+
+ # merge CLI and default dictionary
+ https = config_dict_merge(default_values, https)
+ return https
+
+def verify(https):
+ if https is None:
+ return None
+
+ if 'certificates' in https and 'certificate' in https['certificates']:
+ cert_name = https['certificates']['certificate']
+ if 'pki' not in https:
+ raise ConfigError('PKI is not configured!')
+
+ if cert_name not in https['pki']['certificate']:
+ raise ConfigError('Invalid certificate in configuration!')
+
+ pki_cert = https['pki']['certificate'][cert_name]
+
+ if 'certificate' not in pki_cert:
+ raise ConfigError('Missing certificate in configuration!')
+
+ if 'private' not in pki_cert or 'key' not in pki_cert['private']:
+ raise ConfigError('Missing certificate private key in configuration!')
+
+ if 'dh_params' in https['certificates']:
+ dh_name = https['certificates']['dh_params']
+ if dh_name not in https['pki']['dh']:
+ raise ConfigError('Invalid DH parameter in configuration!')
+
+ pki_dh = https['pki']['dh'][dh_name]
+ dh_params = load_dh_parameters(pki_dh['parameters'])
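+ # the DH key size equals the bit length of the prime modulus "p"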
+ dh_numbers = dh_params.parameter_numbers()
+ dh_bits = dh_numbers.p.bit_length()
+ if dh_bits < 2048:
+ raise ConfigError(f'Minimum DH key-size is 2048 bits')
+
+ else:
+ Warning('No certificate specified, using built-in self-signed certificates. '\
+ 'Do not use them in a production environment!')
+
+ # Check if server port is already in use by a different application
+ listen_address = ['0.0.0.0']
+ port = int(https['port'])
+ if 'listen_address' in https:
+ listen_address = https['listen_address']
+
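+ # a port that is already bound by nginx itself (e.g. during reconfiguration) is not treated as a conflict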
+ for address in listen_address:
+ if not check_port_availability(address, port, 'tcp') and not is_listen_port_bind_service(port, 'nginx'):
+ raise ConfigError(f'TCP port "{port}" is used by another service!')
+
+ verify_vrf(https)
+
+ # Verify API server settings, if present
+ if 'api' in https:
+ keys = dict_search('api.keys.id', https)
+ gql_auth_type = dict_search('api.graphql.authentication.type', https)
+
+ # If "api graphql" is not defined and `gql_auth_type` is None,
+ # there's certainly no JWT auth option, and keys are required
+ jwt_auth = (gql_auth_type == "token")
+
+ # Check for incomplete key configurations in every case
+ valid_keys_exist = False
+ if keys:
+ for k in keys:
+ if 'key' not in keys[k]:
+ raise ConfigError(f'Missing HTTPS API key string for key id "{k}"')
+ else:
+ valid_keys_exist = True
+
+ # If only key-based methods are enabled,
+ # fail the commit if no valid key configurations are found
+ if (not valid_keys_exist) and (not jwt_auth):
+ raise ConfigError('At least one HTTPS API key is required unless GraphQL token authentication is enabled!')
+
+ if (not valid_keys_exist) and jwt_auth:
+ Warning(f'API keys are not configured: classic (non-GraphQL) API will be unavailable!')
+
+ return None
+
+def generate(https):
+ if https is None:
+ for file in [systemd_service_api, config_file, systemd_override]:
+ if os.path.exists(file):
+ os.unlink(file)
+ return None
+
+ if 'api' in https:
+ render(systemd_service_api, 'https/vyos-http-api.service.j2', https)
+ with open(api_config_state, 'w') as f:
+ json.dump(https['api'], f, indent=2)
+ else:
+ if os.path.exists(systemd_service_api):
+ os.unlink(systemd_service_api)
+
+ # get certificate data
+ if 'certificates' in https and 'certificate' in https['certificates']:
+ cert_name = https['certificates']['certificate']
+ pki_cert = https['pki']['certificate'][cert_name]
+
+ cert_path = os.path.join(cert_dir, f'{cert_name}_cert.pem')
+ key_path = os.path.join(cert_dir, f'{cert_name}_key.pem')
+
+ server_cert = str(wrap_certificate(pki_cert['certificate']))
+
+ # Append CA certificate if specified to form a full chain
+ if 'ca_certificate' in https['certificates']:
+ ca_cert = https['certificates']['ca_certificate']
+ server_cert += '\n' + str(wrap_certificate(https['pki']['ca'][ca_cert]['certificate']))
+
+ write_file(cert_path, server_cert, user=user, group=group, mode=0o644)
+ write_file(key_path, wrap_private_key(pki_cert['private']['key']),
+ user=user, group=group, mode=0o600)
+
+ tmp_path = {'cert_path': cert_path, 'key_path': key_path}
+
+ if 'dh_params' in https['certificates']:
+ dh_name = https['certificates']['dh_params']
+ pki_dh = https['pki']['dh'][dh_name]
+ if 'parameters' in pki_dh:
+ dh_path = os.path.join(cert_dir, f'{dh_name}_dh.pem')
+ write_file(dh_path, wrap_dh_parameters(pki_dh['parameters']),
+ user=user, group=group, mode=0o600)
+ tmp_path.update({'dh_file' : dh_path})
+
+ https['certificates'].update(tmp_path)
+
+ render(config_file, 'https/nginx.default.j2', https)
+ render(systemd_override, 'https/override.conf.j2', https)
+ return None
+
+def apply(https):
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+ http_api_service_name = 'vyos-http-api.service'
+ https_service_name = 'nginx.service'
+
+ if https is None:
+ call(f'systemctl stop {http_api_service_name}')
+ call(f'systemctl stop {https_service_name}')
+ return
+
+ if 'api' in https:
+ call(f'systemctl reload-or-restart {http_api_service_name}')
+ # Let uvicorn settle before (possibly) restarting nginx
+ sleep(1)
+ elif is_systemd_service_active(http_api_service_name):
+ call(f'systemctl stop {http_api_service_name}')
+
+ call(f'systemctl reload-or-restart {https_service_name}')
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ sys.exit(1)
diff --git a/src/conf_mode/service_ids_fastnetmon.py b/src/conf_mode/service_ids_ddos-protection.py
index 276a71fcb..276a71fcb 100755
--- a/src/conf_mode/service_ids_fastnetmon.py
+++ b/src/conf_mode/service_ids_ddos-protection.py
diff --git a/src/conf_mode/service_ipoe-server.py b/src/conf_mode/service_ipoe-server.py
index b70e32373..6df6f3dc7 100755
--- a/src/conf_mode/service_ipoe-server.py
+++ b/src/conf_mode/service_ipoe-server.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2023 VyOS maintainers and contributors
+# Copyright (C) 2018-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -15,17 +15,17 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import jmespath
from sys import exit
from vyos.config import Config
from vyos.configdict import get_accel_dict
-from vyos.configverify import verify_accel_ppp_base_service
from vyos.configverify import verify_interface_exists
from vyos.template import render
from vyos.utils.process import call
from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import get_pools_in_order
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
from vyos import ConfigError
from vyos import airbag
airbag.enable()
@@ -35,87 +35,6 @@ ipoe_conf = '/run/accel-pppd/ipoe.conf'
ipoe_chap_secrets = '/run/accel-pppd/ipoe.chap-secrets'
-def get_pools_in_order(data: dict) -> list:
- """Return a list of dictionaries representing pool data in the order
- in which they should be allocated. Pool must be defined before we can
- use it with 'next-pool' option.
-
- Args:
- data: A dictionary of pool data, where the keys are pool names and the
- values are dictionaries containing the 'subnet' key and the optional
- 'next_pool' key.
-
- Returns:
- list: A list of dictionaries
-
- Raises:
- ValueError: If a 'next_pool' key references a pool name that
- has not been defined.
- ValueError: If a circular reference is found in the 'next_pool' keys.
-
- Example:
- config_data = {
- ... 'first-pool': {
- ... 'next_pool': 'second-pool',
- ... 'subnet': '192.0.2.0/25'
- ... },
- ... 'second-pool': {
- ... 'next_pool': 'third-pool',
- ... 'subnet': '203.0.113.0/25'
- ... },
- ... 'third-pool': {
- ... 'subnet': '198.51.100.0/24'
- ... },
- ... 'foo': {
- ... 'subnet': '100.64.0.0/24',
- ... 'next_pool': 'second-pool'
- ... }
- ... }
-
- % get_pools_in_order(config_data)
- [{'third-pool': {'subnet': '198.51.100.0/24'}},
- {'second-pool': {'next_pool': 'third-pool', 'subnet': '203.0.113.0/25'}},
- {'first-pool': {'next_pool': 'second-pool', 'subnet': '192.0.2.0/25'}},
- {'foo': {'next_pool': 'second-pool', 'subnet': '100.64.0.0/24'}}]
- """
- pools = []
- unresolved_pools = {}
-
- for pool, pool_config in data.items():
- if 'next_pool' not in pool_config:
- pools.insert(0, {pool: pool_config})
- else:
- unresolved_pools[pool] = pool_config
-
- while unresolved_pools:
- resolved_pools = []
-
- for pool, pool_config in unresolved_pools.items():
- next_pool_name = pool_config['next_pool']
-
- if any(p for p in pools if next_pool_name in p):
- index = next(
- (i for i, p in enumerate(pools) if next_pool_name in p),
- None)
- pools.insert(index + 1, {pool: pool_config})
- resolved_pools.append(pool)
- elif next_pool_name in unresolved_pools:
- # next pool not yet resolved
- pass
- else:
- raise ValueError(
- f"Pool '{next_pool_name}' not defined in configuration data"
- )
-
- if not resolved_pools:
- raise ValueError("Circular reference in configuration data")
-
- for pool in resolved_pools:
- unresolved_pools.pop(pool)
-
- return pools
-
-
def get_config(config=None):
if config:
conf = config
@@ -128,18 +47,11 @@ def get_config(config=None):
# retrieve common dictionary keys
ipoe = get_accel_dict(conf, base, ipoe_chap_secrets)
- if jmespath.search('client_ip_pool.name', ipoe):
- dict_named_pools = jmespath.search('client_ip_pool.name', ipoe)
+ if dict_search('client_ip_pool', ipoe):
# Multiple named pools require ordered values T5099
- ipoe['ordered_named_pools'] = get_pools_in_order(dict_named_pools)
- # T5099 'next-pool' option
- if jmespath.search('client_ip_pool.name.*.next_pool', ipoe):
- for pool, pool_config in ipoe['client_ip_pool']['name'].items():
- if 'next_pool' in pool_config:
- ipoe['first_named_pool'] = pool
- ipoe['first_named_pool_subnet'] = pool_config
- break
+ ipoe['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', ipoe))
+ ipoe['server_type'] = 'ipoe'
return ipoe
@@ -156,9 +68,7 @@ def verify(ipoe):
raise ConfigError('Option "client-subnet" incompatible with "vlan"!'
'Use "ipoe client-ip-pool" instead.')
- #verify_accel_ppp_base_service(ipoe, local_users=False)
- # IPoE server does not have 'gateway' option in the CLI
- # we cannot use configverify.py verify_accel_ppp_base_service for ipoe-server
+ verify_accel_ppp_ip_pool(ipoe)
if dict_search('authentication.mode', ipoe) == 'radius':
if not dict_search('authentication.radius.server', ipoe):
@@ -169,9 +79,6 @@ def verify(ipoe):
if 'key' not in radius_config:
raise ConfigError(f'Missing RADIUS secret key for server "{server}"')
- if 'client_ipv6_pool' in ipoe:
- if 'delegate' in ipoe['client_ipv6_pool'] and 'prefix' not in ipoe['client_ipv6_pool']:
- raise ConfigError('IPoE IPv6 deletate-prefix requires IPv6 prefix to be configured!')
return None
diff --git a/src/conf_mode/lldp.py b/src/conf_mode/service_lldp.py
index c2e87d171..3c647a0e8 100755
--- a/src/conf_mode/lldp.py
+++ b/src/conf_mode/service_lldp.py
@@ -86,9 +86,9 @@ def verify(lldp):
raise ConfigError(f'Must define both longitude and latitude for "{interface}" location!')
# check options
- if 'snmp' in lldp and 'enable' in lldp['snmp']:
+ if 'snmp' in lldp:
if 'system_snmp_enabled' not in lldp:
- raise ConfigError('SNMP must be configured to enable LLDP SNMP')
+ raise ConfigError('SNMP must be configured to enable LLDP SNMP!')
def generate(lldp):
@@ -121,4 +121,3 @@ if __name__ == '__main__':
except ConfigError as e:
print(e)
exit(1)
-
diff --git a/src/conf_mode/service_mdns-repeater.py b/src/conf_mode/service_mdns_repeater.py
index 6909731ff..6526c23d1 100755
--- a/src/conf_mode/service_mdns-repeater.py
+++ b/src/conf_mode/service_mdns_repeater.py
@@ -29,6 +29,7 @@ from vyos import airbag
airbag.enable()
config_file = '/run/avahi-daemon/avahi-daemon.conf'
+systemd_override = r'/run/systemd/system/avahi-daemon.service.d/override.conf'
vrrp_running_file = '/run/mdns_vrrp_active'
def get_config(config=None):
@@ -48,6 +49,8 @@ def get_config(config=None):
if mdns:
mdns['vrrp_exists'] = conf.exists('high-availability vrrp')
+ mdns['config_file'] = config_file
+
return mdns
def verify(mdns):
@@ -101,11 +104,16 @@ def generate(mdns):
return None
render(config_file, 'mdns-repeater/avahi-daemon.conf.j2', mdns)
+ render(systemd_override, 'mdns-repeater/override.conf.j2', mdns)
return None
def apply(mdns):
+ systemd_service = 'avahi-daemon.service'
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+
if not mdns or 'disable' in mdns:
- call('systemctl stop avahi-daemon.service')
+ call(f'systemctl stop {systemd_service}')
if os.path.exists(config_file):
os.unlink(config_file)
@@ -120,10 +128,10 @@ def apply(mdns):
os.mknod(vrrp_running_file) # vrrp script looks for this file to update mdns repeater
if len(mdns['interface']) < 2:
- call('systemctl stop avahi-daemon.service')
+ call(f'systemctl stop {systemd_service}')
return None
- call('systemctl restart avahi-daemon.service')
+ call(f'systemctl restart {systemd_service}')
return None
diff --git a/src/conf_mode/service_ndp-proxy.py b/src/conf_mode/service_ndp-proxy.py
new file mode 100755
index 000000000..aa2374f4c
--- /dev/null
+++ b/src/conf_mode/service_ndp-proxy.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+
+from vyos.config import Config
+from vyos.configverify import verify_interface_exists
+from vyos.utils.process import call
+from vyos.template import render
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+systemd_service = 'ndppd.service'
+ndppd_config = '/run/ndppd/ndppd.conf'
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['service', 'ndp-proxy']
+ if not conf.exists(base):
+ return None
+
+ ndpp = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ with_recursive_defaults=True)
+
+ return ndpp
+
+def verify(ndpp):
+ if not ndpp:
+ return None
+
+ if 'interface' in ndpp:
+ for interface, interface_config in ndpp['interface'].items():
+ verify_interface_exists(interface)
+
+ if 'rule' in interface_config:
+ for rule, rule_config in interface_config['rule'].items():
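+ # mode "interface" requires a target interface, all other modes must not have one set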
+ if rule_config['mode'] == 'interface' and 'interface' not in rule_config:
+ raise ConfigError(f'Rule "{rule}" uses interface mode but no interface defined!')
+
+ if rule_config['mode'] != 'interface' and 'interface' in rule_config:
+ raise ConfigError(f'Rule "{rule}" does not use interface mode, thus interface cannot be defined!')
+
+ return None
+
+def generate(ndpp):
+ if not ndpp:
+ return None
+
+ render(ndppd_config, 'ndppd/ndppd.conf.j2', ndpp)
+ return None
+
+def apply(ndpp):
+ if not ndpp:
+ call(f'systemctl stop {systemd_service}')
+ if os.path.isfile(ndppd_config):
+ os.unlink(ndppd_config)
+ return None
+
+ call(f'systemctl reload-or-restart {systemd_service}')
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
diff --git a/src/conf_mode/ntp.py b/src/conf_mode/service_ntp.py
index 1cc23a7df..f11690ee6 100755
--- a/src/conf_mode/ntp.py
+++ b/src/conf_mode/service_ntp.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2023 VyOS maintainers and contributors
+# Copyright (C) 2018-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -42,7 +42,7 @@ def get_config(config=None):
if not conf.exists(base):
return None
- ntp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+ ntp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, with_defaults=True)
ntp['config_file'] = config_file
ntp['user'] = user_group
diff --git a/src/conf_mode/service_pppoe-server.py b/src/conf_mode/service_pppoe-server.py
index aace267a7..31299a15c 100755
--- a/src/conf_mode/service_pppoe-server.py
+++ b/src/conf_mode/service_pppoe-server.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2023 VyOS maintainers and contributors
+# Copyright (C) 2018-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -21,13 +21,16 @@ from sys import exit
from vyos.config import Config
from vyos.configdict import get_accel_dict
from vyos.configdict import is_node_changed
-from vyos.configverify import verify_accel_ppp_base_service
from vyos.configverify import verify_interface_exists
from vyos.template import render
from vyos.utils.process import call
from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import verify_accel_ppp_base_service
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
+from vyos.accel_ppp_util import get_pools_in_order
from vyos import ConfigError
from vyos import airbag
+
airbag.enable()
pppoe_conf = r'/run/accel-pppd/pppoe.conf'
@@ -45,12 +48,19 @@ def get_config(config=None):
# retrieve common dictionary keys
pppoe = get_accel_dict(conf, base, pppoe_chap_secrets)
+ if dict_search('client_ip_pool', pppoe):
+ # Multiple named pools require ordered values T5099
+ pppoe['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', pppoe))
+
# reload-or-restart is not implemented in accel-ppp
# use this workaround until it is implemented
# https://phabricator.accel-ppp.org/T3
- if is_node_changed(conf, base + ['client-ip-pool']) or is_node_changed(
- conf, base + ['client-ipv6-pool']):
+ conditions = [is_node_changed(conf, base + ['client-ip-pool']),
+ is_node_changed(conf, base + ['client-ipv6-pool']),
+ is_node_changed(conf, base + ['interface'])]
+ if any(conditions):
pppoe.update({'restart_required': {}})
+ pppoe['server_type'] = 'pppoe'
return pppoe
def verify(pppoe):
@@ -69,17 +79,13 @@ def verify(pppoe):
for interface in pppoe['interface']:
verify_interface_exists(interface)
- # local ippool and gateway settings config checks
- if not (dict_search('client_ip_pool.subnet', pppoe) or
- (dict_search('client_ip_pool.name', pppoe) or
- (dict_search('client_ip_pool.start', pppoe) and
- dict_search('client_ip_pool.stop', pppoe)))):
- print('Warning: No PPPoE client pool defined')
+ verify_accel_ppp_ip_pool(pppoe)
if dict_search('authentication.radius.dynamic_author.server', pppoe):
if not dict_search('authentication.radius.dynamic_author.key', pppoe):
raise ConfigError('DA/CoE server key required!')
+
return None
diff --git a/src/conf_mode/salt-minion.py b/src/conf_mode/service_salt-minion.py
index a8fce8e01..a8fce8e01 100755
--- a/src/conf_mode/salt-minion.py
+++ b/src/conf_mode/service_salt-minion.py
diff --git a/src/conf_mode/snmp.py b/src/conf_mode/service_snmp.py
index d2ed5414f..6565ffd60 100755
--- a/src/conf_mode/snmp.py
+++ b/src/conf_mode/service_snmp.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2021 VyOS maintainers and contributors
+# Copyright (C) 2018-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -54,7 +54,7 @@ def get_config(config=None):
if not conf.exists(base):
snmp.update({'deleted' : ''})
- if conf.exists(['service', 'lldp', 'snmp', 'enable']):
+ if conf.exists(['service', 'lldp', 'snmp']):
snmp.update({'lldp_snmp' : ''})
if 'deleted' in snmp:
@@ -86,7 +86,7 @@ def get_config(config=None):
return snmp
def verify(snmp):
- if not snmp:
+ if 'deleted' in snmp:
return None
if {'deleted', 'lldp_snmp'} <= set(snmp):
@@ -178,8 +178,6 @@ def verify(snmp):
return None
def generate(snmp):
-
- #
# As we are manipulating the snmpd user database we have to stop it first!
# This is even safe if the service is going to be removed
call(f'systemctl stop {systemd_service}')
@@ -190,7 +188,7 @@ def generate(snmp):
if os.path.isfile(file):
os.unlink(file)
- if not snmp:
+ if 'deleted' in snmp:
return None
if 'v3' in snmp:
@@ -244,7 +242,7 @@ def apply(snmp):
# Always reload systemd manager configuration
call('systemctl daemon-reload')
- if not snmp:
+ if 'deleted' in snmp:
return None
# start SNMP daemon
@@ -256,9 +254,7 @@ def apply(snmp):
# Following daemons from FRR 9.0/stable have SNMP module compiled in VyOS
frr_daemons_list = ['zebra', 'bgpd', 'ospf6d', 'ospfd', 'ripd', 'isisd', 'ldpd']
for frr_daemon in frr_daemons_list:
- call(
- f'vtysh -c "configure terminal" -d {frr_daemon} -c "agentx" >/dev/null'
- )
+ call(f'vtysh -c "configure terminal" -d {frr_daemon} -c "agentx" >/dev/null')
return None
diff --git a/src/conf_mode/ssh.py b/src/conf_mode/service_ssh.py
index ee5e1eca2..ee5e1eca2 100755
--- a/src/conf_mode/ssh.py
+++ b/src/conf_mode/service_ssh.py
diff --git a/src/conf_mode/tftp_server.py b/src/conf_mode/service_tftp-server.py
index 3ad346e2e..3ad346e2e 100755
--- a/src/conf_mode/tftp_server.py
+++ b/src/conf_mode/service_tftp-server.py
diff --git a/src/conf_mode/intel_qat.py b/src/conf_mode/system_acceleration.py
index e4b248675..e4b248675 100755
--- a/src/conf_mode/intel_qat.py
+++ b/src/conf_mode/system_acceleration.py
diff --git a/src/conf_mode/config_mgmt.py b/src/conf_mode/system_config-management.py
index c681a8405..c681a8405 100755
--- a/src/conf_mode/config_mgmt.py
+++ b/src/conf_mode/system_config-management.py
diff --git a/src/conf_mode/conntrack.py b/src/conf_mode/system_conntrack.py
index 4cece6921..7f6c71440 100755
--- a/src/conf_mode/conntrack.py
+++ b/src/conf_mode/system_conntrack.py
@@ -159,6 +159,13 @@ def verify(conntrack):
if not group_obj:
Warning(f'{error_group} "{group_name}" has no members!')
+ if dict_search_args(conntrack, 'timeout', 'custom', inet, 'rule') != None:
+ for rule, rule_config in conntrack['timeout']['custom'][inet]['rule'].items():
+ if 'protocol' not in rule_config:
+ raise ConfigError(f'Conntrack custom timeout rule {rule} requires protocol tcp or udp')
+ else:
+ if 'tcp' in rule_config['protocol'] and 'udp' in rule_config['protocol']:
+ raise ConfigError(f'Conntrack custom timeout rule {rule} cannot use both tcp and udp protocols')
return None
def generate(conntrack):
diff --git a/src/conf_mode/system_console.py b/src/conf_mode/system_console.py
index ebf9a113b..a888b125e 100755
--- a/src/conf_mode/system_console.py
+++ b/src/conf_mode/system_console.py
@@ -22,6 +22,7 @@ from vyos.config import Config
from vyos.utils.process import call
from vyos.utils.file import read_file
from vyos.utils.file import write_file
+from vyos.system import grub_util
from vyos.template import render
from vyos import ConfigError
from vyos import airbag
@@ -114,30 +115,7 @@ def generate(console):
return None
speed = console['device']['ttyS0']['speed']
- grub_config = '/boot/grub/grub.cfg'
- if not os.path.isfile(grub_config):
- return None
-
- lines = read_file(grub_config).split('\n')
- p = re.compile(r'^(.* console=ttyS0),[0-9]+(.*)$')
- write = False
- newlines = []
- for line in lines:
- if line.startswith('serial --unit'):
- newline = f'serial --unit=0 --speed={speed}'
- elif p.match(line):
- newline = '{},{}{}'.format(p.search(line)[1], speed, p.search(line)[2])
- else:
- newline = line
-
- if newline != line:
- write = True
-
- newlines.append(newline)
- newlines.append('')
-
- if write:
- write_file(grub_config, '\n'.join(newlines))
+ grub_util.update_console_speed(speed)
return None
diff --git a/src/conf_mode/flow_accounting_conf.py b/src/conf_mode/system_flow-accounting.py
index 81ee39df1..206f513c8 100755
--- a/src/conf_mode/flow_accounting_conf.py
+++ b/src/conf_mode/system_flow-accounting.py
@@ -28,6 +28,7 @@ from vyos.ifconfig import Section
from vyos.template import render
from vyos.utils.process import call
from vyos.utils.process import cmd
+from vyos.utils.process import run
from vyos.utils.network import is_addr_assigned
from vyos import ConfigError
from vyos import airbag
@@ -116,6 +117,30 @@ def _nftables_config(configured_ifaces, direction, length=None):
cmd(command, raising=ConfigError)
+def _nftables_trigger_setup(operation: str) -> None:
+ """Add a dummy rule to unlock the main pmacct loop with a packet-trigger
+
+ Args:
+ operation (str): 'add' or 'delete' a trigger
+ """
+ # check if a chain exists
+ table_exists = False
+ if run('nft -snj list table ip pmacct') == 0:
+ table_exists = True
+
+ if operation == 'delete' and table_exists:
+ nft_cmd: str = 'nft delete table ip pmacct'
+ cmd(nft_cmd, raising=ConfigError)
+ if operation == 'add' and not table_exists:
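+ # the trigger rule matches packets to 127.0.254.0 leaving via lo and logs them to NFLOG group 2 (snaplen 1) to wake up the pmacct loop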
+ nft_cmds: list[str] = [
+ 'nft add table ip pmacct',
+ 'nft add chain ip pmacct pmacct_out { type filter hook output priority raw - 50 \\; policy accept \\; }',
+ 'nft add rule ip pmacct pmacct_out oif lo ip daddr 127.0.254.0 counter log group 2 snaplen 1 queue-threshold 0 comment NFLOG_TRIGGER'
+ ]
+ for nft_cmd in nft_cmds:
+ cmd(nft_cmd, raising=ConfigError)
+
+
def get_config(config=None):
if config:
conf = config
@@ -252,7 +277,6 @@ def generate(flow_config):
call('systemctl daemon-reload')
def apply(flow_config):
- action = 'restart'
# Check if flow-accounting was removed and define command
if not flow_config:
_nftables_config([], 'ingress')
@@ -262,6 +286,10 @@ def apply(flow_config):
call(f'systemctl stop {systemd_service}')
if os.path.exists(uacctd_conf_path):
os.unlink(uacctd_conf_path)
+
+ # must be done after systemctl
+ _nftables_trigger_setup('delete')
+
return
# Start/reload flow-accounting daemon
@@ -277,6 +305,10 @@ def apply(flow_config):
else:
_nftables_config([], 'egress')
+ # add a trigger for signal processing
+ _nftables_trigger_setup('add')
+
+
if __name__ == '__main__':
try:
config = get_config()
diff --git a/src/conf_mode/system_frr.py b/src/conf_mode/system_frr.py
index 6727b63c2..07f291000 100755
--- a/src/conf_mode/system_frr.py
+++ b/src/conf_mode/system_frr.py
@@ -40,7 +40,9 @@ def get_config(config=None):
conf = Config()
base = ['system', 'frr']
- frr_config = conf.get_config_dict(base, get_first_key=True)
+ frr_config = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ with_recursive_defaults=True)
return frr_config
diff --git a/src/conf_mode/host_name.py b/src/conf_mode/system_host-name.py
index 36d1f6493..6204cf247 100755
--- a/src/conf_mode/host_name.py
+++ b/src/conf_mode/system_host-name.py
@@ -61,8 +61,9 @@ def get_config(config=None):
hosts['domain_name'] = conf.return_value(['system', 'domain-name'])
hosts['domain_search'].append(hosts['domain_name'])
- for search in conf.return_values(['system', 'domain-search', 'domain']):
- hosts['domain_search'].append(search)
+ if conf.exists(['system', 'domain-search']):
+ for search in conf.return_values(['system', 'domain-search']):
+ hosts['domain_search'].append(search)
if conf.exists(['system', 'name-server']):
for ns in conf.return_values(['system', 'name-server']):
diff --git a/src/conf_mode/system-ip.py b/src/conf_mode/system_ip.py
index 7612e2c0d..7612e2c0d 100755
--- a/src/conf_mode/system-ip.py
+++ b/src/conf_mode/system_ip.py
diff --git a/src/conf_mode/system-ipv6.py b/src/conf_mode/system_ipv6.py
index 90a1a8087..90a1a8087 100755
--- a/src/conf_mode/system-ipv6.py
+++ b/src/conf_mode/system_ipv6.py
diff --git a/src/conf_mode/system-login.py b/src/conf_mode/system_login.py
index 87a269499..3d16bdb4a 100755
--- a/src/conf_mode/system-login.py
+++ b/src/conf_mode/system_login.py
@@ -20,6 +20,7 @@ from passlib.hosts import linux_context
from psutil import users
from pwd import getpwall
from pwd import getpwnam
+from pwd import getpwuid
from sys import exit
from time import sleep
@@ -29,6 +30,7 @@ from vyos.defaults import directories
from vyos.template import render
from vyos.template import is_ipv4
from vyos.utils.dict import dict_search
+from vyos.utils.file import chown
from vyos.utils.process import cmd
from vyos.utils.process import call
from vyos.utils.process import rc_cmd
@@ -306,6 +308,7 @@ def generate(login):
def apply(login):
+ enable_otp = False
if 'user' in login:
for user, user_config in login['user'].items():
# make new user using vyatta shell and make home directory (-m),
@@ -330,16 +333,22 @@ def apply(login):
if tmp: command += f" --home '{tmp}'"
else: command += f" --home '/home/{user}'"
- command += f' --groups frr,frrvty,vyattacfg,sudo,adm,dip,disk {user}'
+ command += f' --groups frr,frrvty,vyattacfg,sudo,adm,dip,disk,_kea {user}'
try:
cmd(command)
-
# we should not rely on the value stored in
# user_config['home_directory'], as a crazy user will choose
# username root or any other system user which will fail.
#
# XXX: Should we deny using root at all?
home_dir = getpwnam(user).pw_dir
+ # T5875: ensure UID is properly set on home directory if user is re-added
+ # the home directory will always exist, as it's created above by --create-home,
+ # retrieve current owner of home directory and adjust it on demand
+ dir_owner = getpwuid(os.stat(home_dir).st_uid).pw_name
+ if dir_owner != user:
+ chown(home_dir, user=user, recursive=True)
+
render(f'{home_dir}/.ssh/authorized_keys', 'login/authorized_keys.j2',
user_config, permission=0o600,
formater=lambda _: _.replace("&quot;", '"'),
@@ -350,6 +359,7 @@ def apply(login):
# Generate 2FA/MFA One-Time-Pad configuration
if dict_search('authentication.otp.key', user_config):
+ enable_otp = True
render(f'{home_dir}/.google_authenticator', 'login/pam_otp_ga.conf.j2',
user_config, permission=0o400, user=user, group='users')
else:
@@ -398,6 +408,11 @@ def apply(login):
pam_profile = 'tacplus-optional'
cmd(f'pam-auth-update --enable {pam_profile}')
+ # Enable/disable Google authenticator
+ cmd('pam-auth-update --disable mfa-google-authenticator')
+ if enable_otp:
+ cmd(f'pam-auth-update --enable mfa-google-authenticator')
+
return None
diff --git a/src/conf_mode/system-login-banner.py b/src/conf_mode/system_login_banner.py
index 65fa04417..65fa04417 100755
--- a/src/conf_mode/system-login-banner.py
+++ b/src/conf_mode/system_login_banner.py
diff --git a/src/conf_mode/system-logs.py b/src/conf_mode/system_logs.py
index 8ad4875d4..8ad4875d4 100755
--- a/src/conf_mode/system-logs.py
+++ b/src/conf_mode/system_logs.py
diff --git a/src/conf_mode/system-option.py b/src/conf_mode/system_option.py
index d92121b3d..3b5b67437 100755
--- a/src/conf_mode/system-option.py
+++ b/src/conf_mode/system_option.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2023 VyOS maintainers and contributors
+# Copyright (C) 2019-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -22,6 +22,7 @@ from time import sleep
from vyos.config import Config
from vyos.configverify import verify_source_interface
+from vyos.system import grub_util
from vyos.template import render
from vyos.utils.process import cmd
from vyos.utils.process import is_systemd_service_running
@@ -39,7 +40,6 @@ time_format_to_locale = {
'24-hour': 'en_GB.UTF-8'
}
-
def get_config(config=None):
if config:
conf = config
@@ -87,6 +87,13 @@ def verify(options):
def generate(options):
render(curlrc_config, 'system/curlrc.j2', options)
render(ssh_config, 'system/ssh_config.j2', options)
+
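+ # collect additional kernel boot parameters and persist them in the GRUB configuration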
+ cmdline_options = []
+ if 'kernel' in options:
+ if 'disable_mitigations' in options['kernel']:
+ cmdline_options.append('mitigations=off')
+ grub_util.update_kernel_cmdline_options(' '.join(cmdline_options))
+
return None
def apply(options):
diff --git a/src/conf_mode/system-proxy.py b/src/conf_mode/system_proxy.py
index 079c43e7e..079c43e7e 100755
--- a/src/conf_mode/system-proxy.py
+++ b/src/conf_mode/system_proxy.py
diff --git a/src/conf_mode/system_sflow.py b/src/conf_mode/system_sflow.py
index 2df1bbb7a..41119b494 100755
--- a/src/conf_mode/system_sflow.py
+++ b/src/conf_mode/system_sflow.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2023 VyOS maintainers and contributors
+# Copyright (C) 2023-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -19,6 +19,7 @@ import os
from sys import exit
from vyos.config import Config
+from vyos.configverify import verify_vrf
from vyos.template import render
from vyos.utils.process import call
from vyos.utils.network import is_addr_assigned
@@ -46,7 +47,6 @@ def get_config(config=None):
return sflow
-
def verify(sflow):
if not sflow:
return None
@@ -68,9 +68,8 @@ def verify(sflow):
if 'server' not in sflow:
raise ConfigError('You need to configure at least one sFlow server!')
- # return True if all checks were passed
- return True
-
+ verify_vrf(sflow)
+ return None
def generate(sflow):
if not sflow:
@@ -81,7 +80,6 @@ def generate(sflow):
# Reload systemd manager configuration
call('systemctl daemon-reload')
-
def apply(sflow):
if not sflow:
# Stop flow-accounting daemon and remove configuration file
@@ -93,7 +91,6 @@ def apply(sflow):
# Start/reload flow-accounting daemon
call(f'systemctl restart {systemd_service}')
-
if __name__ == '__main__':
try:
config = get_config()
diff --git a/src/conf_mode/system-syslog.py b/src/conf_mode/system_syslog.py
index 07fbb0734..07fbb0734 100755
--- a/src/conf_mode/system-syslog.py
+++ b/src/conf_mode/system_syslog.py
diff --git a/src/conf_mode/task_scheduler.py b/src/conf_mode/system_task-scheduler.py
index 129be5d3c..129be5d3c 100755
--- a/src/conf_mode/task_scheduler.py
+++ b/src/conf_mode/system_task-scheduler.py
diff --git a/src/conf_mode/system-timezone.py b/src/conf_mode/system_timezone.py
index cd3d4b229..cd3d4b229 100755
--- a/src/conf_mode/system-timezone.py
+++ b/src/conf_mode/system_timezone.py
diff --git a/src/conf_mode/system_update_check.py b/src/conf_mode/system_update-check.py
index 8d641a97d..8d641a97d 100755
--- a/src/conf_mode/system_update_check.py
+++ b/src/conf_mode/system_update-check.py
diff --git a/src/conf_mode/vpn_ipsec.py b/src/conf_mode/vpn_ipsec.py
index 9e9385ddb..d074ed159 100755
--- a/src/conf_mode/vpn_ipsec.py
+++ b/src/conf_mode/vpn_ipsec.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2021-2023 VyOS maintainers and contributors
+# Copyright (C) 2021-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -27,6 +27,7 @@ from vyos.base import Warning
from vyos.config import Config
from vyos.configdict import leaf_node_changed
from vyos.configverify import verify_interface_exists
+from vyos.configverify import dynamic_interface_pattern
from vyos.defaults import directories
from vyos.ifconfig import Interface
from vyos.pki import encode_certificate
@@ -43,6 +44,7 @@ from vyos.template import is_ipv4
from vyos.template import is_ipv6
from vyos.template import render
from vyos.utils.network import is_ipv6_link_local
+from vyos.utils.network import interface_exists
from vyos.utils.dict import dict_search
from vyos.utils.dict import dict_search_args
from vyos.utils.process import call
@@ -65,11 +67,11 @@ default_install_routes = 'yes'
vici_socket = '/var/run/charon.vici'
-CERT_PATH = f'{swanctl_dir}/x509/'
+CERT_PATH = f'{swanctl_dir}/x509/'
PUBKEY_PATH = f'{swanctl_dir}/pubkey/'
-KEY_PATH = f'{swanctl_dir}/private/'
-CA_PATH = f'{swanctl_dir}/x509ca/'
-CRL_PATH = f'{swanctl_dir}/x509crl/'
+KEY_PATH = f'{swanctl_dir}/private/'
+CA_PATH = f'{swanctl_dir}/x509ca/'
+CRL_PATH = f'{swanctl_dir}/x509crl/'
DHCP_HOOK_IFLIST = '/tmp/ipsec_dhcp_waiting'
@@ -87,15 +89,13 @@ def get_config(config=None):
ipsec = conf.get_config_dict(base, key_mangling=('-', '_'),
no_tag_node_value_mangle=True,
get_first_key=True,
- with_recursive_defaults=True)
+ with_recursive_defaults=True,
+ with_pki=True)
ipsec['dhcp_no_address'] = {}
ipsec['install_routes'] = 'no' if conf.exists(base + ["options", "disable-route-autoinstall"]) else default_install_routes
ipsec['interface_change'] = leaf_node_changed(conf, base + ['interface'])
ipsec['nhrp_exists'] = conf.exists(['protocols', 'nhrp', 'tunnel'])
- ipsec['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True)
tmp = conf.get_config_dict(l2tp_base, key_mangling=('-', '_'),
no_tag_node_value_mangle=True,
@@ -160,9 +160,16 @@ def verify(ipsec):
if 'id' not in psk_config or 'secret' not in psk_config:
raise ConfigError(f'Authentication psk "{psk}" missing "id" or "secret"')
- if 'interfaces' in ipsec :
- for ifname in ipsec['interface']:
- verify_interface_exists(ifname)
+ if 'interface' in ipsec:
+ tmp = re.compile(dynamic_interface_pattern)
+ for interface in ipsec['interface']:
+ # skip the existence check for dynamic interfaces
+ if tmp.match(interface):
+ if not interface_exists(interface):
+ Warning(f'Interface "{interface}" does not exist yet and cannot be used '
+ f'for IPsec until it is up!')
+ else:
+ verify_interface_exists(interface)
if 'l2tp' in ipsec:
if 'esp_group' in ipsec['l2tp']:
@@ -396,7 +403,7 @@ def verify(ipsec):
if 'bind' in peer_conf['vti']:
vti_interface = peer_conf['vti']['bind']
- if not os.path.exists(f'/sys/class/net/{vti_interface}'):
+ if not interface_exists(vti_interface):
raise ConfigError(f'VTI interface {vti_interface} for site-to-site peer {peer} does not exist!')
if 'vti' not in peer_conf and 'tunnel' not in peer_conf:
diff --git a/src/conf_mode/vpn_l2tp.py b/src/conf_mode/vpn_l2tp.py
index 6232ce64a..4ca717814 100755
--- a/src/conf_mode/vpn_l2tp.py
+++ b/src/conf_mode/vpn_l2tp.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2023 VyOS maintainers and contributors
+# Copyright (C) 2019-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -15,321 +15,47 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import re
-from copy import deepcopy
-from stat import S_IRUSR, S_IWUSR, S_IRGRP
from sys import exit
-from ipaddress import ip_network
-
from vyos.config import Config
-from vyos.template import is_ipv4
+from vyos.configdep import call_dependents, set_dependents
+from vyos.configdict import get_accel_dict
from vyos.template import render
from vyos.utils.process import call
-from vyos.utils.system import get_half_cpus
-from vyos.utils.network import check_port_availability
-from vyos.utils.network import is_listen_port_bind_service
+from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import verify_accel_ppp_base_service
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
+from vyos.accel_ppp_util import get_pools_in_order
+from vyos.base import Warning
from vyos import ConfigError
from vyos import airbag
airbag.enable()
+
l2tp_conf = '/run/accel-pppd/l2tp.conf'
l2tp_chap_secrets = '/run/accel-pppd/l2tp.chap-secrets'
-default_config_data = {
- 'auth_mode': 'local',
- 'auth_ppp_mppe': 'prefer',
- 'auth_proto': ['auth_mschap_v2'],
- 'chap_secrets_file': l2tp_chap_secrets, # used in Jinja2 template
- 'client_ip_pool': None,
- 'client_ip_subnets': [],
- 'client_ipv6_pool': [],
- 'client_ipv6_pool_configured': False,
- 'client_ipv6_delegate_prefix': [],
- 'dnsv4': [],
- 'dnsv6': [],
- 'gateway_address': '10.255.255.0',
- 'local_users' : [],
- 'mtu': '1436',
- 'outside_addr': '',
- 'ppp_mppe': 'prefer',
- 'ppp_echo_failure' : '3',
- 'ppp_echo_interval' : '30',
- 'ppp_echo_timeout': '0',
- 'ppp_ipv6_accept_peer_intf_id': False,
- 'ppp_ipv6_intf_id': None,
- 'ppp_ipv6_peer_intf_id': None,
- 'radius_server': [],
- 'radius_acct_inter_jitter': '',
- 'radius_acct_interim_interval': None,
- 'radius_acct_tmo': '3',
- 'radius_max_try': '3',
- 'radius_timeout': '3',
- 'radius_nas_id': '',
- 'radius_nas_ip': '',
- 'radius_source_address': '',
- 'radius_shaper_attr': '',
- 'radius_shaper_vendor': '',
- 'radius_dynamic_author': {},
- 'wins': [],
- 'ip6_column': [],
- 'thread_cnt': get_half_cpus()
-}
-
def get_config(config=None):
if config:
conf = config
else:
conf = Config()
- base_path = ['vpn', 'l2tp', 'remote-access']
- if not conf.exists(base_path):
- return None
-
- conf.set_level(base_path)
- l2tp = deepcopy(default_config_data)
-
- ### general options ###
- if conf.exists(['name-server']):
- for name_server in conf.return_values(['name-server']):
- if is_ipv4(name_server):
- l2tp['dnsv4'].append(name_server)
- else:
- l2tp['dnsv6'].append(name_server)
-
- if conf.exists(['wins-server']):
- l2tp['wins'] = conf.return_values(['wins-server'])
-
- if conf.exists('outside-address'):
- l2tp['outside_addr'] = conf.return_value('outside-address')
-
- if conf.exists(['authentication', 'mode']):
- l2tp['auth_mode'] = conf.return_value(['authentication', 'mode'])
-
- if conf.exists(['authentication', 'require']):
- l2tp['auth_proto'] = []
- auth_mods = {
- 'pap': 'auth_pap',
- 'chap': 'auth_chap_md5',
- 'mschap': 'auth_mschap_v1',
- 'mschap-v2': 'auth_mschap_v2'
- }
-
- for proto in conf.return_values(['authentication', 'require']):
- l2tp['auth_proto'].append(auth_mods[proto])
-
- if conf.exists(['authentication', 'mppe']):
- l2tp['auth_ppp_mppe'] = conf.return_value(['authentication', 'mppe'])
-
- #
- # local auth
- if conf.exists(['authentication', 'local-users']):
- for username in conf.list_nodes(['authentication', 'local-users', 'username']):
- user = {
- 'name' : username,
- 'password' : '',
- 'state' : 'enabled',
- 'ip' : '*',
- 'upload' : None,
- 'download' : None
- }
-
- conf.set_level(base_path + ['authentication', 'local-users', 'username', username])
-
- if conf.exists(['password']):
- user['password'] = conf.return_value(['password'])
-
- if conf.exists(['disable']):
- user['state'] = 'disable'
-
- if conf.exists(['static-ip']):
- user['ip'] = conf.return_value(['static-ip'])
-
- if conf.exists(['rate-limit', 'download']):
- user['download'] = conf.return_value(['rate-limit', 'download'])
-
- if conf.exists(['rate-limit', 'upload']):
- user['upload'] = conf.return_value(['rate-limit', 'upload'])
-
- l2tp['local_users'].append(user)
-
- #
- # RADIUS auth and settings
- conf.set_level(base_path + ['authentication', 'radius'])
- if conf.exists(['server']):
- for server in conf.list_nodes(['server']):
- radius = {
- 'server' : server,
- 'key' : '',
- 'fail_time' : 0,
- 'port' : '1812',
- 'acct_port' : '1813'
- }
-
- conf.set_level(base_path + ['authentication', 'radius', 'server', server])
-
- if conf.exists(['disable-accounting']):
- radius['acct_port'] = '0'
-
- if conf.exists(['fail-time']):
- radius['fail_time'] = conf.return_value(['fail-time'])
-
- if conf.exists(['port']):
- radius['port'] = conf.return_value(['port'])
-
- if conf.exists(['acct-port']):
- radius['acct_port'] = conf.return_value(['acct-port'])
-
- if conf.exists(['key']):
- radius['key'] = conf.return_value(['key'])
-
- if not conf.exists(['disable']):
- l2tp['radius_server'].append(radius)
-
- #
- # advanced radius-setting
- conf.set_level(base_path + ['authentication', 'radius'])
-
- if conf.exists(['accounting-interim-interval']):
- l2tp['radius_acct_interim_interval'] = conf.return_value(['accounting-interim-interval'])
-
- if conf.exists(['acct-interim-jitter']):
- l2tp['radius_acct_inter_jitter'] = conf.return_value(['acct-interim-jitter'])
-
- if conf.exists(['acct-timeout']):
- l2tp['radius_acct_tmo'] = conf.return_value(['acct-timeout'])
-
- if conf.exists(['max-try']):
- l2tp['radius_max_try'] = conf.return_value(['max-try'])
-
- if conf.exists(['timeout']):
- l2tp['radius_timeout'] = conf.return_value(['timeout'])
-
- if conf.exists(['nas-identifier']):
- l2tp['radius_nas_id'] = conf.return_value(['nas-identifier'])
-
- if conf.exists(['nas-ip-address']):
- l2tp['radius_nas_ip'] = conf.return_value(['nas-ip-address'])
-
- if conf.exists(['source-address']):
- l2tp['radius_source_address'] = conf.return_value(['source-address'])
-
- # Dynamic Authorization Extensions (DOA)/Change Of Authentication (COA)
- if conf.exists(['dae-server']):
- dae = {
- 'port' : '',
- 'server' : '',
- 'key' : ''
- }
+ base = ['vpn', 'l2tp', 'remote-access']
- if conf.exists(['dae-server', 'ip-address']):
- dae['server'] = conf.return_value(['dae-server', 'ip-address'])
+ set_dependents('ipsec', conf)
- if conf.exists(['dae-server', 'port']):
- dae['port'] = conf.return_value(['dae-server', 'port'])
-
- if conf.exists(['dae-server', 'secret']):
- dae['key'] = conf.return_value(['dae-server', 'secret'])
-
- l2tp['radius_dynamic_author'] = dae
-
- if conf.exists(['rate-limit', 'enable']):
- l2tp['radius_shaper_attr'] = 'Filter-Id'
- c_attr = ['rate-limit', 'enable', 'attribute']
- if conf.exists(c_attr):
- l2tp['radius_shaper_attr'] = conf.return_value(c_attr)
-
- c_vendor = ['rate-limit', 'enable', 'vendor']
- if conf.exists(c_vendor):
- l2tp['radius_shaper_vendor'] = conf.return_value(c_vendor)
-
- conf.set_level(base_path)
- if conf.exists(['client-ip-pool']):
- if conf.exists(['client-ip-pool', 'start']) and conf.exists(['client-ip-pool', 'stop']):
- start = conf.return_value(['client-ip-pool', 'start'])
- stop = conf.return_value(['client-ip-pool', 'stop'])
- l2tp['client_ip_pool'] = start + '-' + re.search('[0-9]+$', stop).group(0)
-
- if conf.exists(['client-ip-pool', 'subnet']):
- l2tp['client_ip_subnets'] = conf.return_values(['client-ip-pool', 'subnet'])
-
- if conf.exists(['client-ipv6-pool', 'prefix']):
- l2tp['client_ipv6_pool_configured'] = True
- l2tp['ip6_column'].append('ip6')
- for prefix in conf.list_nodes(['client-ipv6-pool', 'prefix']):
- tmp = {
- 'prefix': prefix,
- 'mask': '64'
- }
-
- if conf.exists(['client-ipv6-pool', 'prefix', prefix, 'mask']):
- tmp['mask'] = conf.return_value(['client-ipv6-pool', 'prefix', prefix, 'mask'])
-
- l2tp['client_ipv6_pool'].append(tmp)
-
- if conf.exists(['client-ipv6-pool', 'delegate']):
- l2tp['ip6_column'].append('ip6-db')
- for prefix in conf.list_nodes(['client-ipv6-pool', 'delegate']):
- tmp = {
- 'prefix': prefix,
- 'mask': ''
- }
-
- if conf.exists(['client-ipv6-pool', 'delegate', prefix, 'delegation-prefix']):
- tmp['mask'] = conf.return_value(['client-ipv6-pool', 'delegate', prefix, 'delegation-prefix'])
-
- l2tp['client_ipv6_delegate_prefix'].append(tmp)
-
- if conf.exists(['mtu']):
- l2tp['mtu'] = conf.return_value(['mtu'])
-
- # gateway address
- if conf.exists(['gateway-address']):
- l2tp['gateway_address'] = conf.return_value(['gateway-address'])
- else:
- # calculate gw-ip-address
- if conf.exists(['client-ip-pool', 'start']):
- # use start ip as gw-ip-address
- l2tp['gateway_address'] = conf.return_value(['client-ip-pool', 'start'])
-
- elif conf.exists(['client-ip-pool', 'subnet']):
- # use first ip address from first defined pool
- subnet = conf.return_values(['client-ip-pool', 'subnet'])[0]
- subnet = ip_network(subnet)
- l2tp['gateway_address'] = str(list(subnet.hosts())[0])
-
- # LNS secret
- if conf.exists(['lns', 'shared-secret']):
- l2tp['lns_shared_secret'] = conf.return_value(['lns', 'shared-secret'])
- if conf.exists(['lns', 'host-name']):
- l2tp['lns_host_name'] = conf.return_value(['lns', 'host-name'])
-
- if conf.exists(['ccp-disable']):
- l2tp['ccp_disable'] = True
-
- # PPP options
- if conf.exists(['idle']):
- l2tp['ppp_echo_timeout'] = conf.return_value(['idle'])
-
- if conf.exists(['ppp-options', 'lcp-echo-failure']):
- l2tp['ppp_echo_failure'] = conf.return_value(['ppp-options', 'lcp-echo-failure'])
-
- if conf.exists(['ppp-options', 'lcp-echo-interval']):
- l2tp['ppp_echo_interval'] = conf.return_value(['ppp-options', 'lcp-echo-interval'])
-
- if conf.exists(['ppp-options', 'ipv6']):
- l2tp['ppp_ipv6'] = conf.return_value(['ppp-options', 'ipv6'])
-
- if conf.exists(['ppp-options', 'ipv6-accept-peer-intf-id']):
- l2tp['ppp_ipv6_accept_peer_intf_id'] = True
-
- if conf.exists(['ppp-options', 'ipv6-intf-id']):
- l2tp['ppp_ipv6_intf_id'] = conf.return_value(['ppp-options', 'ipv6-intf-id'])
-
- if conf.exists(['ppp-options', 'ipv6-peer-intf-id']):
- l2tp['ppp_ipv6_peer_intf_id'] = conf.return_value(['ppp-options', 'ipv6-peer-intf-id'])
+ if not conf.exists(base):
+ return None
+ # retrieve common dictionary keys
+ l2tp = get_accel_dict(conf, base, l2tp_chap_secrets)
+ if dict_search('client_ip_pool', l2tp):
+ # Multiple named pools require ordered values T5099
+ l2tp['ordered_named_pools'] = get_pools_in_order(
+ dict_search('client_ip_pool', l2tp))
+ l2tp['server_type'] = 'l2tp'
return l2tp
@@ -337,56 +63,18 @@ def verify(l2tp):
if not l2tp:
return None
- if l2tp['auth_mode'] == 'local':
- if not l2tp['local_users']:
- raise ConfigError('L2TP local auth mode requires local users to be configured!')
+ verify_accel_ppp_base_service(l2tp)
- for user in l2tp['local_users']:
- if not user['password']:
- raise ConfigError(f"Password required for user {user['name']}")
+ if dict_search('authentication.radius.dynamic_author.server', l2tp):
+ if not dict_search('authentication.radius.dynamic_author.key', l2tp):
+ raise ConfigError('DAE/CoA server key required!')
- elif l2tp['auth_mode'] == 'radius':
- if len(l2tp['radius_server']) == 0:
- raise ConfigError("RADIUS authentication requires at least one server")
+ verify_accel_ppp_ip_pool(l2tp)
- for radius in l2tp['radius_server']:
- if not radius['key']:
- raise ConfigError(f"Missing RADIUS secret for server { radius['key'] }")
- if l2tp['radius_dynamic_author']:
- if not l2tp['radius_dynamic_author']['server']:
- raise ConfigError("Missing ip-address for dae-server")
- if not l2tp['radius_dynamic_author']['key']:
- raise ConfigError("Missing secret for dae-server")
- address = l2tp['radius_dynamic_author']['server']
- port = l2tp['radius_dynamic_author']['port']
- proto = 'tcp'
- # check if dae listen port is not used by another service
- if check_port_availability(address, int(port), proto) is not True and \
- not is_listen_port_bind_service(int(port), 'accel-pppd'):
- raise ConfigError(f'"{proto}" port "{port}" is used by another service')
-
- # check for the existence of a client ip pool
- if not (l2tp['client_ip_pool'] or l2tp['client_ip_subnets']):
+ if 'wins_server' in l2tp and len(l2tp['wins_server']) > 2:
raise ConfigError(
- "set vpn l2tp remote-access client-ip-pool requires subnet or start/stop IP pool")
-
- # check ipv6
- if l2tp['client_ipv6_delegate_prefix'] and not l2tp['client_ipv6_pool']:
- raise ConfigError('IPv6 prefix delegation requires client-ipv6-pool prefix')
-
- for prefix in l2tp['client_ipv6_delegate_prefix']:
- if not prefix['mask']:
- raise ConfigError('Delegation-prefix required for individual delegated networks')
-
- if len(l2tp['wins']) > 2:
- raise ConfigError('Not more then two IPv4 WINS name-servers can be configured')
-
- if len(l2tp['dnsv4']) > 2:
- raise ConfigError('Not more then two IPv4 DNS name-servers can be configured')
-
- if len(l2tp['dnsv6']) > 3:
- raise ConfigError('Not more then three IPv6 DNS name-servers can be configured')
+ 'Not more than two WINS name-servers can be configured')
return None
@@ -397,13 +85,9 @@ def generate(l2tp):
render(l2tp_conf, 'accel-ppp/l2tp.config.j2', l2tp)
- if l2tp['auth_mode'] == 'local':
- render(l2tp_chap_secrets, 'accel-ppp/chap-secrets.j2', l2tp)
- os.chmod(l2tp_chap_secrets, S_IRUSR | S_IWUSR | S_IRGRP)
-
- else:
- if os.path.exists(l2tp_chap_secrets):
- os.unlink(l2tp_chap_secrets)
+ if dict_search('authentication.mode', l2tp) == 'local':
+ render(l2tp_chap_secrets, 'accel-ppp/chap-secrets.config_dict.j2',
+ l2tp, permission=0o640)
return None
@@ -414,10 +98,11 @@ def apply(l2tp):
for file in [l2tp_chap_secrets, l2tp_conf]:
if os.path.exists(file):
os.unlink(file)
+ else:
+ call('systemctl restart accel-ppp@l2tp.service')
- return None
+ call_dependents()
- call('systemctl restart accel-ppp@l2tp.service')
if __name__ == '__main__':
try:
@@ -425,6 +110,7 @@ if __name__ == '__main__':
verify(c)
generate(c)
apply(c)
+
except ConfigError as e:
print(e)
exit(1)
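The rewritten vpn_l2tp.py reads nested keys through dict_search('authentication.mode', l2tp) and similar calls instead of chained indexing. A rough, self-contained sketch of dotted-path lookup with the behaviour the call sites rely on - None for any missing hop; the in-tree vyos.utils.dict.dict_search may differ in detail:

def dict_search(path, data):
    # Walk 'a.b.c' through nested dicts; return None as soon as a key is missing.
    node = data
    for key in path.split('.'):
        if not isinstance(node, dict) or key not in node:
            return None
        node = node[key]
    return node

# dict_search('authentication.mode', {'authentication': {'mode': 'local'}}) -> 'local'
# dict_search('authentication.radius.dynamic_author.key', {}) -> None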
diff --git a/src/conf_mode/vpn_openconnect.py b/src/conf_mode/vpn_openconnect.py
index a039172c4..08e4fc6db 100755
--- a/src/conf_mode/vpn_openconnect.py
+++ b/src/conf_mode/vpn_openconnect.py
@@ -56,12 +56,8 @@ def get_config(config=None):
ocserv = conf.get_config_dict(base, key_mangling=('-', '_'),
get_first_key=True,
- with_recursive_defaults=True)
-
- if ocserv:
- ocserv['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True)
+ with_recursive_defaults=True,
+ with_pki=True)
return ocserv
@@ -95,7 +91,7 @@ def verify(ocserv):
if not ocserv["authentication"]['radius']['server']:
raise ConfigError('Openconnect authentication mode radius requires at least one RADIUS server')
if "local" in ocserv["authentication"]["mode"]:
- if not ocserv["authentication"]["local_users"]:
+ if not ocserv.get("authentication", {}).get("local_users"):
raise ConfigError('openconnect mode local requires at least one user')
if not ocserv["authentication"]["local_users"]["username"]:
raise ConfigError('openconnect mode local requires at least one user')
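The vpn_openconnect.py change above swaps direct indexing for chained .get() calls so a missing local_users node triggers the intended ConfigError rather than a KeyError. A small illustration with a hypothetical config dict:

ocserv = {'authentication': {'mode': {'local': {}}}}  # no local_users configured

# Old style: ocserv['authentication']['local_users'] raises KeyError.
# New style: evaluates to None, so the 'at least one user' check fires cleanly.
local_users = ocserv.get('authentication', {}).get('local_users')
assert local_users is None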
diff --git a/src/conf_mode/vpn_pptp.py b/src/conf_mode/vpn_pptp.py
index d542f57fe..b1d5067d5 100755
--- a/src/conf_mode/vpn_pptp.py
+++ b/src/conf_mode/vpn_pptp.py
@@ -15,17 +15,18 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import re
-
-from copy import deepcopy
-from stat import S_IRUSR, S_IWUSR, S_IRGRP
from sys import exit
+
from vyos.config import Config
from vyos.template import render
-from vyos.utils.system import get_half_cpus
from vyos.utils.process import call
+from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import verify_accel_ppp_base_service
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
+from vyos.accel_ppp_util import get_pools_in_order
from vyos import ConfigError
+from vyos.configdict import get_accel_dict
from vyos import airbag
airbag.enable()
@@ -33,211 +34,25 @@ airbag.enable()
pptp_conf = '/run/accel-pppd/pptp.conf'
pptp_chap_secrets = '/run/accel-pppd/pptp.chap-secrets'
-default_pptp = {
- 'auth_mode' : 'local',
- 'local_users' : [],
- 'radius_server' : [],
- 'radius_acct_inter_jitter': '',
- 'radius_acct_interim_interval': None,
- 'radius_acct_tmo' : '30',
- 'radius_max_try' : '3',
- 'radius_timeout' : '30',
- 'radius_nas_id' : '',
- 'radius_nas_ip' : '',
- 'radius_source_address' : '',
- 'radius_shaper_attr' : '',
- 'radius_shaper_enable': False,
- 'radius_shaper_multiplier': '',
- 'radius_shaper_vendor': '',
- 'radius_dynamic_author' : '',
- 'chap_secrets_file': pptp_chap_secrets, # used in Jinja2 template
- 'outside_addr': '',
- 'dnsv4': [],
- 'wins': [],
- 'client_ip_pool': '',
- 'mtu': '1436',
- 'auth_proto' : ['auth_mschap_v2'],
- 'ppp_mppe' : 'prefer',
- 'thread_cnt': get_half_cpus()
-}
def get_config(config=None):
if config:
conf = config
else:
conf = Config()
- base_path = ['vpn', 'pptp', 'remote-access']
- if not conf.exists(base_path):
+ base = ['vpn', 'pptp', 'remote-access']
+ if not conf.exists(base):
return None
- pptp = deepcopy(default_pptp)
- conf.set_level(base_path)
-
- if conf.exists(['name-server']):
- pptp['dnsv4'] = conf.return_values(['name-server'])
-
- if conf.exists(['wins-server']):
- pptp['wins'] = conf.return_values(['wins-server'])
-
- if conf.exists(['outside-address']):
- pptp['outside_addr'] = conf.return_value(['outside-address'])
-
- if conf.exists(['authentication', 'mode']):
- pptp['auth_mode'] = conf.return_value(['authentication', 'mode'])
-
- #
- # local auth
- if conf.exists(['authentication', 'local-users']):
- for username in conf.list_nodes(['authentication', 'local-users', 'username']):
- user = {
- 'name': username,
- 'password' : '',
- 'state' : 'enabled',
- 'ip' : '*',
- }
-
- conf.set_level(base_path + ['authentication', 'local-users', 'username', username])
-
- if conf.exists(['password']):
- user['password'] = conf.return_value(['password'])
-
- if conf.exists(['disable']):
- user['state'] = 'disable'
-
- if conf.exists(['static-ip']):
- user['ip'] = conf.return_value(['static-ip'])
-
- if not conf.exists(['disable']):
- pptp['local_users'].append(user)
-
- #
- # RADIUS auth and settings
- conf.set_level(base_path + ['authentication', 'radius'])
- if conf.exists(['server']):
- for server in conf.list_nodes(['server']):
- radius = {
- 'server' : server,
- 'key' : '',
- 'fail_time' : 0,
- 'port' : '1812',
- 'acct_port' : '1813'
- }
-
- conf.set_level(base_path + ['authentication', 'radius', 'server', server])
-
- if conf.exists(['disable-accounting']):
- radius['acct_port'] = '0'
-
- if conf.exists(['fail-time']):
- radius['fail_time'] = conf.return_value(['fail-time'])
-
- if conf.exists(['port']):
- radius['port'] = conf.return_value(['port'])
-
- if conf.exists(['acct-port']):
- radius['acct_port'] = conf.return_value(['acct-port'])
-
- if conf.exists(['key']):
- radius['key'] = conf.return_value(['key'])
-
- if not conf.exists(['disable']):
- pptp['radius_server'].append(radius)
-
- #
- # advanced radius-setting
- conf.set_level(base_path + ['authentication', 'radius'])
-
- if conf.exists(['accounting-interim-interval']):
- pptp['radius_acct_interim_interval'] = conf.return_value(['accounting-interim-interval'])
-
- if conf.exists(['acct-interim-jitter']):
- pptp['radius_acct_inter_jitter'] = conf.return_value(['acct-interim-jitter'])
-
- if conf.exists(['acct-timeout']):
- pptp['radius_acct_tmo'] = conf.return_value(['acct-timeout'])
-
- if conf.exists(['max-try']):
- pptp['radius_max_try'] = conf.return_value(['max-try'])
-
- if conf.exists(['timeout']):
- pptp['radius_timeout'] = conf.return_value(['timeout'])
-
- if conf.exists(['nas-identifier']):
- pptp['radius_nas_id'] = conf.return_value(['nas-identifier'])
-
- if conf.exists(['nas-ip-address']):
- pptp['radius_nas_ip'] = conf.return_value(['nas-ip-address'])
-
- if conf.exists(['source-address']):
- pptp['radius_source_address'] = conf.return_value(['source-address'])
-
- # Dynamic Authorization Extensions (DOA)/Change Of Authentication (COA)
- if conf.exists(['dae-server']):
- dae = {
- 'port' : '',
- 'server' : '',
- 'key' : ''
- }
-
- if conf.exists(['dynamic-author', 'ip-address']):
- dae['server'] = conf.return_value(['dynamic-author', 'ip-address'])
-
- if conf.exists(['dynamic-author', 'port']):
- dae['port'] = conf.return_value(['dynamic-author', 'port'])
-
- if conf.exists(['dynamic-author', 'key']):
- dae['key'] = conf.return_value(['dynamic-author', 'key'])
-
- pptp['radius_dynamic_author'] = dae
-
- # Rate limit
- if conf.exists(['rate-limit', 'attribute']):
- pptp['radius_shaper_attr'] = conf.return_value(['rate-limit', 'attribute'])
-
- if conf.exists(['rate-limit', 'enable']):
- pptp['radius_shaper_enable'] = True
-
- if conf.exists(['rate-limit', 'multiplier']):
- pptp['radius_shaper_multiplier'] = conf.return_value(['rate-limit', 'multiplier'])
-
- if conf.exists(['rate-limit', 'vendor']):
- pptp['radius_shaper_vendor'] = conf.return_value(['rate-limit', 'vendor'])
-
- conf.set_level(base_path)
- if conf.exists(['client-ip-pool']):
- if conf.exists(['client-ip-pool', 'start']) and conf.exists(['client-ip-pool', 'stop']):
- start = conf.return_value(['client-ip-pool', 'start'])
- stop = conf.return_value(['client-ip-pool', 'stop'])
- pptp['client_ip_pool'] = start + '-' + re.search('[0-9]+$', stop).group(0)
-
- if conf.exists(['mtu']):
- pptp['mtu'] = conf.return_value(['mtu'])
-
- # gateway address
- if conf.exists(['gateway-address']):
- pptp['gw_ip'] = conf.return_value(['gateway-address'])
- else:
- # calculate gw-ip-address
- if conf.exists(['client-ip-pool', 'start']):
- # use start ip as gw-ip-address
- pptp['gateway_address'] = conf.return_value(['client-ip-pool', 'start'])
-
- if conf.exists(['authentication', 'require']):
- # clear default list content, now populate with actual CLI values
- pptp['auth_proto'] = []
- auth_mods = {
- 'pap': 'auth_pap',
- 'chap': 'auth_chap_md5',
- 'mschap': 'auth_mschap_v1',
- 'mschap-v2': 'auth_mschap_v2'
- }
-
- for proto in conf.return_values(['authentication', 'require']):
- pptp['auth_proto'].append(auth_mods[proto])
-
- if conf.exists(['authentication', 'mppe']):
- pptp['ppp_mppe'] = conf.return_value(['authentication', 'mppe'])
+ # retrieve common dictionary keys
+ pptp = get_accel_dict(conf, base, pptp_chap_secrets)
+ if dict_search('client_ip_pool', pptp):
+ # Multiple named pools require ordered values T5099
+ pptp['ordered_named_pools'] = get_pools_in_order(
+ dict_search('client_ip_pool', pptp))
+ pptp['chap_secrets_file'] = pptp_chap_secrets
+ pptp['server_type'] = 'pptp'
return pptp
@@ -245,29 +60,12 @@ def verify(pptp):
if not pptp:
return None
- if pptp['auth_mode'] == 'local':
- if not pptp['local_users']:
- raise ConfigError('PPTP local auth mode requires local users to be configured!')
-
- for user in pptp['local_users']:
- username = user['name']
- if not user['password']:
- raise ConfigError(f'Password required for local user "{username}"')
+ verify_accel_ppp_base_service(pptp)
+ verify_accel_ppp_ip_pool(pptp)
- elif pptp['auth_mode'] == 'radius':
- if len(pptp['radius_server']) == 0:
- raise ConfigError('RADIUS authentication requires at least one server')
-
- for radius in pptp['radius_server']:
- if not radius['key']:
- server = radius['server']
- raise ConfigError(f'Missing RADIUS secret key for server "{ server }"')
-
- if len(pptp['dnsv4']) > 2:
- raise ConfigError('Not more then two IPv4 DNS name-servers can be configured')
-
- if len(pptp['wins']) > 2:
- raise ConfigError('Not more then two IPv4 WINS name-servers can be configured')
+ if 'wins_server' in pptp and len(pptp['wins_server']) > 2:
+ raise ConfigError(
+ 'Not more than two WINS name-servers can be configured')
def generate(pptp):
@@ -276,12 +74,11 @@ def generate(pptp):
render(pptp_conf, 'accel-ppp/pptp.config.j2', pptp)
- if pptp['local_users']:
- render(pptp_chap_secrets, 'accel-ppp/chap-secrets.j2', pptp)
- os.chmod(pptp_chap_secrets, S_IRUSR | S_IWUSR | S_IRGRP)
- else:
- if os.path.exists(pptp_chap_secrets):
- os.unlink(pptp_chap_secrets)
+ if dict_search('authentication.mode', pptp) == 'local':
+ render(pptp_chap_secrets, 'accel-ppp/chap-secrets.config_dict.j2',
+ pptp, permission=0o640)
+
+ return None
def apply(pptp):
@@ -295,6 +92,7 @@ def apply(pptp):
call('systemctl restart accel-ppp@pptp.service')
+
if __name__ == '__main__':
try:
c = get_config()
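Both vpn_l2tp.py and vpn_pptp.py drop the explicit os.chmod() on the chap-secrets file and pass permission=0o640 to render() instead. The stat constants removed above spell out the same mode; a small, self-contained check (the path below is illustrative only, not the real secrets file):

import os
from stat import S_IRUSR, S_IWUSR, S_IRGRP

# S_IRUSR | S_IWUSR | S_IRGRP is rw-r-----, i.e. the 0o640 now passed to render().
assert S_IRUSR | S_IWUSR | S_IRGRP == 0o640

path = '/tmp/chap-secrets.demo'  # illustrative path
with open(path, 'w') as f:
    f.write('# secrets go here\n')
os.chmod(path, 0o640)
print(oct(os.stat(path).st_mode & 0o777))  # '0o640'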
diff --git a/src/conf_mode/vpn_sstp.py b/src/conf_mode/vpn_sstp.py
index e98d8385b..5c229fe62 100755
--- a/src/conf_mode/vpn_sstp.py
+++ b/src/conf_mode/vpn_sstp.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2022 VyOS maintainers and contributors
+# Copyright (C) 2018-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -20,14 +20,15 @@ from sys import exit
from vyos.config import Config
from vyos.configdict import get_accel_dict
-from vyos.configdict import dict_merge
-from vyos.configverify import verify_accel_ppp_base_service
from vyos.pki import wrap_certificate
from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.utils.process import call
from vyos.utils.network import check_port_availability
from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import verify_accel_ppp_base_service
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
+from vyos.accel_ppp_util import get_pools_in_order
from vyos.utils.network import is_listen_port_bind_service
from vyos.utils.file import write_file
from vyos import ConfigError
@@ -52,14 +53,15 @@ def get_config(config=None):
return None
# retrieve common dictionary keys
- sstp = get_accel_dict(conf, base, sstp_chap_secrets)
- if sstp:
- sstp['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True,
- no_tag_node_value_mangle=True)
+ sstp = get_accel_dict(conf, base, sstp_chap_secrets, with_pki=True)
+ if dict_search('client_ip_pool', sstp):
+ # Multiple named pools require ordered values T5099
+ sstp['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', sstp))
+ sstp['server_type'] = 'sstp'
return sstp
+
def verify(sstp):
if not sstp:
return None
@@ -71,9 +73,7 @@ def verify(sstp):
raise ConfigError(f'"{proto}" port "{port}" is used by another service')
verify_accel_ppp_base_service(sstp)
-
- if 'client_ip_pool' not in sstp and 'client_ipv6_pool' not in sstp:
- raise ConfigError('Client IP subnet required')
+ verify_accel_ppp_ip_pool(sstp)
#
# SSL certificate checks
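vpn_sstp.py, like the openconnect and ipsec changes above, replaces the hand-rolled pki lookup with a with_pki=True flag on the config helpers. Judging by the removed lines, the flag folds in the same lookup; a sketch of what it appears to replace (not the in-tree implementation):

def get_config_dict_with_pki(conf, base, **kwargs):
    # Same two lookups the removed lines performed, merged behind one flag.
    node = conf.get_config_dict(base, key_mangling=('-', '_'),
                                get_first_key=True, **kwargs)
    if node:
        node['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
                                           no_tag_node_value_mangle=True,
                                           get_first_key=True)
    return node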
diff --git a/src/conf_mode/vpp.py b/src/conf_mode/vpp.py
deleted file mode 100755
index 82c2f236e..000000000
--- a/src/conf_mode/vpp.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2023 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from psutil import virtual_memory
-
-from pathlib import Path
-from re import search as re_search, MULTILINE as re_M
-
-from vyos.config import Config
-from vyos.configdep import set_dependents, call_dependents
-from vyos.configdict import node_changed
-from vyos.ifconfig import Section
-from vyos.utils.boot import boot_configuration_complete
-from vyos.utils.process import call
-from vyos.utils.process import rc_cmd
-from vyos.utils.system import sysctl_read
-from vyos.utils.system import sysctl_apply
-from vyos.template import render
-
-from vyos import ConfigError
-from vyos import airbag
-from vyos.vpp import VPPControl
-from vyos.vpp import HostControl
-
-airbag.enable()
-
-service_name = 'vpp'
-service_conf = Path(f'/run/vpp/{service_name}.conf')
-systemd_override = '/run/systemd/system/vpp.service.d/10-override.conf'
-
-# Free memory required for VPP
-# 2 GB for hugepages + 1 GB for other services
-MIN_AVAILABLE_MEMORY: int = 3 * 1024**3
-
-
-def _get_pci_address_by_interface(iface) -> str:
- rc, out = rc_cmd(f'ethtool -i {iface}')
- # if ethtool command was successful
- if rc == 0 and out:
- regex_filter = r'^bus-info: (?P<address>\w+:\w+:\w+\.\w+)$'
- re_obj = re_search(regex_filter, out, re_M)
- # if bus-info with PCI address found
- if re_obj:
- address = re_obj.groupdict().get('address', '')
- return address
- # use VPP - maybe interface already attached to it
- vpp_control = VPPControl(attempts=20, interval=500)
- pci_addr = vpp_control.get_pci_addr(iface)
- if pci_addr:
- return pci_addr
- # raise error if PCI address was not found
- raise ConfigError(f'Cannot find PCI address for interface {iface}')
-
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
-
- base = ['vpp']
- base_ethernet = ['interfaces', 'ethernet']
-
- # find interfaces removed from VPP
- removed_ifaces = []
- tmp = node_changed(conf, base + ['interface'])
- if tmp:
- for removed_iface in tmp:
- pci_address: str = _get_pci_address_by_interface(removed_iface)
- removed_ifaces.append({
- 'iface_name': removed_iface,
- 'iface_pci_addr': pci_address
- })
- # add an interface to a list of interfaces that need
- # to be reinitialized after the commit
- set_dependents('ethernet', conf, removed_iface)
-
- if not conf.exists(base):
- return {'removed_ifaces': removed_ifaces}
-
- config = conf.get_config_dict(base, key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True,
- with_recursive_defaults=True)
-
- if 'interface' in config:
- for iface, iface_config in config['interface'].items():
- # add an interface to a list of interfaces that need
- # to be reinitialized after the commit
- set_dependents('ethernet', conf, iface)
-
- # Get PCI address auto
- if iface_config['pci'] == 'auto':
- config['interface'][iface]['pci'] = _get_pci_address_by_interface(iface)
-
- config['other_interfaces'] = conf.get_config_dict(base_ethernet, key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
-
- if removed_ifaces:
- config['removed_ifaces'] = removed_ifaces
-
- return config
-
-
-def verify(config):
- # bail out early - looks like removal from running config
- if not config or (len(config) == 1 and 'removed_ifaces' in config):
- return None
-
- if 'interface' not in config:
- raise ConfigError('"interface" is required but not set!')
-
- if 'cpu' in config:
- if 'corelist_workers' in config['cpu'] and 'main_core' not in config[
- 'cpu']:
- raise ConfigError('"cpu main-core" is required but not set!')
-
- memory_available: int = virtual_memory().available
- if memory_available < MIN_AVAILABLE_MEMORY:
- raise ConfigError(
- 'Not enough free memory to start VPP:\n'
- f'available: {round(memory_available / 1024**3, 1)}GB\n'
- f'required: {round(MIN_AVAILABLE_MEMORY / 1024**3, 1)}GB')
-
-
-def generate(config):
- if not config or (len(config) == 1 and 'removed_ifaces' in config):
- # Remove old config and return
- service_conf.unlink(missing_ok=True)
- return None
-
- render(service_conf, 'vpp/startup.conf.j2', config)
- render(systemd_override, 'vpp/override.conf.j2', config)
-
- # apply default sysctl values from
- # https://github.com/FDio/vpp/blob/v23.06/src/vpp/conf/80-vpp.conf
- sysctl_config: dict[str, str] = {
- 'vm.nr_hugepages': '1024',
- 'vm.max_map_count': '3096',
- 'vm.hugetlb_shm_group': '0',
- 'kernel.shmmax': '2147483648'
- }
- # we do not want to reduce `kernel.shmmax`
- kernel_shmnax_current: str = sysctl_read('kernel.shmmax')
- if int(kernel_shmnax_current) > int(sysctl_config['kernel.shmmax']):
- sysctl_config['kernel.shmmax'] = kernel_shmnax_current
-
- if not sysctl_apply(sysctl_config):
- raise ConfigError('Cannot configure sysctl parameters for VPP')
-
- return None
-
-
-def apply(config):
- if not config or (len(config) == 1 and 'removed_ifaces' in config):
- call(f'systemctl stop {service_name}.service')
- else:
- call('systemctl daemon-reload')
- call(f'systemctl restart {service_name}.service')
-
- # Initialize interfaces removed from VPP
- for iface in config.get('removed_ifaces', []):
- host_control = HostControl()
- # rescan PCI to use a proper driver
- host_control.pci_rescan(iface['iface_pci_addr'])
- # rename to the proper name
- iface_new_name: str = host_control.get_eth_name(iface['iface_pci_addr'])
- host_control.rename_iface(iface_new_name, iface['iface_name'])
-
- if 'interface' in config:
- # connect to VPP
- # must be performed multiple attempts because API is not available
- # immediately after the service restart
- vpp_control = VPPControl(attempts=20, interval=500)
- for iface, _ in config['interface'].items():
- # Create lcp
- if iface not in Section.interfaces():
- vpp_control.lcp_pair_add(iface, iface)
-
- # reinitialize interfaces, but not during the first boot
- if boot_configuration_complete():
- call_dependents()
-
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
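The deleted vpp.py located an interface's PCI address by grepping 'bus-info:' out of 'ethtool -i'. For reference, the same extraction run against a canned sample (the output text below is illustrative, not captured from a device):

import re

sample_output = '''driver: ixgbe
version: 5.1.0-k
bus-info: 0000:3b:00.0'''  # illustrative 'ethtool -i <iface>' output

match = re.search(r'^bus-info: (?P<address>\w+:\w+:\w+\.\w+)$',
                  sample_output, re.MULTILINE)
print(match.groupdict()['address'] if match else 'no PCI address found')
# -> 0000:3b:00.0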
diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py
index 37625142c..a2f4956be 100755
--- a/src/conf_mode/vrf.py
+++ b/src/conf_mode/vrf.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2020-2023 VyOS maintainers and contributors
+# Copyright (C) 2020-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -32,8 +32,6 @@ from vyos.utils.network import get_vrf_members
from vyos.utils.network import interface_exists
from vyos.utils.process import call
from vyos.utils.process import cmd
-from vyos.utils.process import popen
-from vyos.utils.process import run
from vyos.utils.system import sysctl_write
from vyos import ConfigError
from vyos import frr
@@ -41,17 +39,29 @@ from vyos import airbag
airbag.enable()
config_file = '/etc/iproute2/rt_tables.d/vyos-vrf.conf'
-nft_vrf_config = '/tmp/nftables-vrf-zones'
-
-def has_rule(af : str, priority : int, table : str):
- """ Check if a given ip rule exists """
+k_mod = ['vrf']
+
+def has_rule(af : str, priority : int, table : str=None):
+ """
+ Check if a given ip rule exists
+ $ ip --json -4 rule show
+ [{'l3mdev': None, 'priority': 1000, 'src': 'all'},
+ {'action': 'unreachable', 'l3mdev': None, 'priority': 2000, 'src': 'all'},
+ {'priority': 32765, 'src': 'all', 'table': 'local'},
+ {'priority': 32766, 'src': 'all', 'table': 'main'},
+ {'priority': 32767, 'src': 'all', 'table': 'default'}]
+ """
if af not in ['-4', '-6']:
raise ValueError()
- command = f'ip -j {af} rule show'
+ command = f'ip --detail --json {af} rule show'
for tmp in loads(cmd(command)):
- if {'priority', 'table'} <= set(tmp):
+ if 'priority' in tmp and 'table' in tmp:
if tmp['priority'] == priority and tmp['table'] == table:
return True
+ elif 'priority' in tmp and table in tmp:
+ # l3mdev table has a different layout
+ if tmp['priority'] == priority:
+ return True
return False
def vrf_interfaces(c, match):
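The reworked has_rule() now also matches l3mdev rules, whose entries in 'ip --json rule show' expose the table name as a key ('l3mdev': None) rather than under 'table'. A standalone rerun of the new matching logic against the sample output quoted in the docstring:

import json

sample = json.loads('''[
  {"l3mdev": null, "priority": 1000, "src": "all"},
  {"action": "unreachable", "l3mdev": null, "priority": 2000, "src": "all"},
  {"priority": 32766, "src": "all", "table": "main"}
]''')

def matches(rules, priority, table=None):
    for rule in rules:
        if 'priority' in rule and 'table' in rule:
            if rule['priority'] == priority and rule['table'] == table:
                return True
        elif 'priority' in rule and table in rule:
            # l3mdev rules carry the table as a key, not as a 'table' value
            if rule['priority'] == priority:
                return True
    return False

assert matches(sample, 1000, 'l3mdev')
assert matches(sample, 32766, 'main')
assert not matches(sample, 123, 'main')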
@@ -173,8 +183,6 @@ def verify(vrf):
def generate(vrf):
# Render iproute2 VR helper names
render(config_file, 'iproute2/vrf.conf.j2', vrf)
- # Render nftables zones config
- render(nft_vrf_config, 'firewall/nftables-vrf-zones.j2', vrf)
# Render VRF Kernel/Zebra route-map filters
vrf['frr_zebra_config'] = render_to_string('frr/zebra.vrf.route-map.frr.j2', vrf)
@@ -215,14 +223,6 @@ def apply(vrf):
call(f'ip link delete dev {tmp}')
if 'name' in vrf:
- # Separate VRFs in conntrack table
- # check if table already exists
- _, err = popen('nft list table inet vrf_zones')
- # If not, create a table
- if err and os.path.exists(nft_vrf_config):
- cmd(f'nft -f {nft_vrf_config}')
- os.unlink(nft_vrf_config)
-
# Linux routing uses rules to find tables - routing targets are then
# looked up in those tables. If the lookup got a matching route, the
# process ends.
@@ -306,13 +306,6 @@ def apply(vrf):
frr_cfg.add_before(frr.default_add_before, vrf['frr_zebra_config'])
frr_cfg.commit_configuration(zebra_daemon)
- # return to default lookup preference when no VRF is configured
- if 'name' not in vrf:
- # Remove VRF zones table from nftables
- tmp = run('nft list table inet vrf_zones')
- if tmp == 0:
- cmd('nft delete table inet vrf_zones')
-
return None
if __name__ == '__main__':