Diffstat (limited to 'src/conf_mode')
-rwxr-xr-x src/conf_mode/container.py | 28
-rwxr-xr-x src/conf_mode/dns_dynamic.py | 134
-rwxr-xr-x src/conf_mode/firewall.py | 119
-rwxr-xr-x src/conf_mode/high-availability.py | 20
-rwxr-xr-x src/conf_mode/http-api.py | 150
-rwxr-xr-x src/conf_mode/https.py | 266
-rwxr-xr-x src/conf_mode/interfaces-ethernet.py | 225
-rwxr-xr-x src/conf_mode/interfaces_bonding.py (renamed from src/conf_mode/interfaces-bonding.py) | 59
-rwxr-xr-x src/conf_mode/interfaces_bridge.py (renamed from src/conf_mode/interfaces-bridge.py) | 20
-rwxr-xr-x src/conf_mode/interfaces_dummy.py (renamed from src/conf_mode/interfaces-dummy.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_ethernet.py | 391
-rwxr-xr-x src/conf_mode/interfaces_geneve.py (renamed from src/conf_mode/interfaces-geneve.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_input.py (renamed from src/conf_mode/interfaces-input.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_l2tpv3.py (renamed from src/conf_mode/interfaces-l2tpv3.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_loopback.py (renamed from src/conf_mode/interfaces-loopback.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_macsec.py (renamed from src/conf_mode/interfaces-macsec.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_openvpn.py (renamed from src/conf_mode/interfaces-openvpn.py) | 23
-rwxr-xr-x src/conf_mode/interfaces_pppoe.py (renamed from src/conf_mode/interfaces-pppoe.py) | 11
-rwxr-xr-x src/conf_mode/interfaces_pseudo-ethernet.py (renamed from src/conf_mode/interfaces-pseudo-ethernet.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_sstpc.py (renamed from src/conf_mode/interfaces-sstpc.py) | 6
-rwxr-xr-x src/conf_mode/interfaces_tunnel.py (renamed from src/conf_mode/interfaces-tunnel.py) | 4
-rwxr-xr-x src/conf_mode/interfaces_virtual-ethernet.py (renamed from src/conf_mode/interfaces-virtual-ethernet.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_vti.py (renamed from src/conf_mode/interfaces-vti.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_vxlan.py (renamed from src/conf_mode/interfaces-vxlan.py) | 80
-rwxr-xr-x src/conf_mode/interfaces_wireguard.py (renamed from src/conf_mode/interfaces-wireguard.py) | 33
-rwxr-xr-x src/conf_mode/interfaces_wireless.py (renamed from src/conf_mode/interfaces-wireless.py) | 0
-rwxr-xr-x src/conf_mode/interfaces_wwan.py (renamed from src/conf_mode/interfaces-wwan.py) | 0
-rwxr-xr-x src/conf_mode/le_cert.py | 115
-rwxr-xr-x src/conf_mode/load-balancing_reverse-proxy.py (renamed from src/conf_mode/load-balancing-haproxy.py) | 45
-rwxr-xr-x src/conf_mode/load-balancing_wan.py (renamed from src/conf_mode/load-balancing-wan.py) | 0
-rwxr-xr-x src/conf_mode/nat.py | 35
-rwxr-xr-x src/conf_mode/nat64.py | 216
-rwxr-xr-x src/conf_mode/nat66.py | 33
-rwxr-xr-x src/conf_mode/netns.py | 5
-rwxr-xr-x src/conf_mode/pki.py | 235
-rwxr-xr-x src/conf_mode/policy-local-route.py | 222
-rwxr-xr-x src/conf_mode/policy_local-route.py | 315
-rwxr-xr-x src/conf_mode/policy_route.py (renamed from src/conf_mode/policy-route.py) | 0
-rwxr-xr-x src/conf_mode/protocols_bgp.py | 24
-rwxr-xr-x src/conf_mode/protocols_igmp-proxy.py (renamed from src/conf_mode/igmp_proxy.py) | 0
-rwxr-xr-x src/conf_mode/protocols_igmp.py | 140
-rwxr-xr-x src/conf_mode/protocols_isis.py | 50
-rwxr-xr-x src/conf_mode/protocols_nhrp.py | 2
-rwxr-xr-x src/conf_mode/protocols_ospf.py | 13
-rwxr-xr-x src/conf_mode/protocols_pim.py | 207
-rwxr-xr-x src/conf_mode/protocols_pim6.py | 133
-rwxr-xr-x src/conf_mode/protocols_segment-routing.py | 118
-rwxr-xr-x src/conf_mode/protocols_static_arp.py (renamed from src/conf_mode/arp.py) | 0
-rwxr-xr-x src/conf_mode/protocols_static_neighbor-proxy.py | 95
-rwxr-xr-x src/conf_mode/service_broadcast-relay.py (renamed from src/conf_mode/bcast_relay.py) | 0
-rwxr-xr-x src/conf_mode/service_config-sync.py (renamed from src/conf_mode/service_config_sync.py) | 0
-rwxr-xr-x src/conf_mode/service_conntrack-sync.py (renamed from src/conf_mode/conntrack_sync.py) | 0
-rwxr-xr-x src/conf_mode/service_dhcp-relay.py (renamed from src/conf_mode/dhcp_relay.py) | 0
-rwxr-xr-x src/conf_mode/service_dhcp-server.py (renamed from src/conf_mode/dhcp_server.py) | 0
-rwxr-xr-x src/conf_mode/service_dhcpv6-relay.py (renamed from src/conf_mode/dhcpv6_relay.py) | 0
-rwxr-xr-x src/conf_mode/service_dhcpv6-server.py (renamed from src/conf_mode/dhcpv6_server.py) | 0
-rwxr-xr-x src/conf_mode/service_dns_dynamic.py | 187
-rwxr-xr-x src/conf_mode/service_dns_forwarding.py (renamed from src/conf_mode/dns_forwarding.py) | 0
-rwxr-xr-x src/conf_mode/service_event-handler.py (renamed from src/conf_mode/service_event_handler.py) | 0
-rwxr-xr-x src/conf_mode/service_https.py | 238
-rwxr-xr-x src/conf_mode/service_ids_ddos-protection.py (renamed from src/conf_mode/service_ids_fastnetmon.py) | 0
-rwxr-xr-x src/conf_mode/service_ipoe-server.py | 102
-rwxr-xr-x src/conf_mode/service_lldp.py (renamed from src/conf_mode/lldp.py) | 5
-rwxr-xr-x src/conf_mode/service_mdns_repeater.py (renamed from src/conf_mode/service_mdns-repeater.py) | 38
-rwxr-xr-x src/conf_mode/service_ndp-proxy.py | 91
-rwxr-xr-x src/conf_mode/service_ntp.py (renamed from src/conf_mode/ntp.py) | 0
-rwxr-xr-x src/conf_mode/service_pppoe-server.py | 23
-rwxr-xr-x src/conf_mode/service_salt-minion.py (renamed from src/conf_mode/salt-minion.py) | 0
-rwxr-xr-x src/conf_mode/service_snmp.py (renamed from src/conf_mode/snmp.py) | 16
-rwxr-xr-x src/conf_mode/service_ssh.py (renamed from src/conf_mode/ssh.py) | 0
-rwxr-xr-x src/conf_mode/service_tftp-server.py (renamed from src/conf_mode/tftp_server.py) | 0
-rwxr-xr-x src/conf_mode/system_acceleration.py (renamed from src/conf_mode/intel_qat.py) | 0
-rwxr-xr-x src/conf_mode/system_config-management.py (renamed from src/conf_mode/config_mgmt.py) | 0
-rwxr-xr-x src/conf_mode/system_conntrack.py (renamed from src/conf_mode/conntrack.py) | 0
-rwxr-xr-x src/conf_mode/system_flow-accounting.py (renamed from src/conf_mode/flow_accounting_conf.py) | 34
-rwxr-xr-x src/conf_mode/system_frr.py | 29
-rwxr-xr-x src/conf_mode/system_host-name.py (renamed from src/conf_mode/host_name.py) | 5
-rwxr-xr-x src/conf_mode/system_ip.py (renamed from src/conf_mode/system-ip.py) | 28
-rwxr-xr-x src/conf_mode/system_ipv6.py (renamed from src/conf_mode/system-ipv6.py) | 25
-rwxr-xr-x src/conf_mode/system_login.py (renamed from src/conf_mode/system-login.py) | 44
-rwxr-xr-x src/conf_mode/system_login_banner.py (renamed from src/conf_mode/system-login-banner.py) | 0
-rwxr-xr-x src/conf_mode/system_logs.py (renamed from src/conf_mode/system-logs.py) | 0
-rwxr-xr-x src/conf_mode/system_option.py (renamed from src/conf_mode/system-option.py) | 0
-rwxr-xr-x src/conf_mode/system_proxy.py (renamed from src/conf_mode/system-proxy.py) | 0
-rwxr-xr-x src/conf_mode/system_syslog.py (renamed from src/conf_mode/system-syslog.py) | 0
-rwxr-xr-x src/conf_mode/system_task-scheduler.py (renamed from src/conf_mode/task_scheduler.py) | 0
-rwxr-xr-x src/conf_mode/system_timezone.py (renamed from src/conf_mode/system-timezone.py) | 0
-rwxr-xr-x src/conf_mode/system_update-check.py (renamed from src/conf_mode/system_update_check.py) | 0
-rwxr-xr-x src/conf_mode/vpn_ipsec.py | 36
-rwxr-xr-x src/conf_mode/vpn_l2tp.py | 383
-rwxr-xr-x src/conf_mode/vpn_openconnect.py | 8
-rwxr-xr-x src/conf_mode/vpn_pptp.py | 290
-rwxr-xr-x src/conf_mode/vpn_sstp.py | 18
-rwxr-xr-x src/conf_mode/vpp.py | 207
-rwxr-xr-x src/conf_mode/vrf.py | 12
95 files changed, 2840 insertions, 2551 deletions
diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py
index 46eb10714..59d11c5a3 100755
--- a/src/conf_mode/container.py
+++ b/src/conf_mode/container.py
@@ -142,11 +142,17 @@ def verify(container):
for address in container_config['network'][network_name]['address']:
network = None
if is_ipv4(address):
- network = [x for x in container['network'][network_name]['prefix'] if is_ipv4(x)][0]
- cnt_ipv4 += 1
+ try:
+ network = [x for x in container['network'][network_name]['prefix'] if is_ipv4(x)][0]
+ cnt_ipv4 += 1
+ except:
+ raise ConfigError(f'Network "{network_name}" does not contain an IPv4 prefix!')
elif is_ipv6(address):
- network = [x for x in container['network'][network_name]['prefix'] if is_ipv6(x)][0]
- cnt_ipv6 += 1
+ try:
+ network = [x for x in container['network'][network_name]['prefix'] if is_ipv6(x)][0]
+ cnt_ipv6 += 1
+ except:
+ raise ConfigError(f'Network "{network_name}" does not contain an IPv6 prefix!')
# Specified container IP address must belong to network prefix
if ip_address(address) not in ip_network(network):
@@ -232,9 +238,9 @@ def verify(container):
# A network attached to a container can not be deleted
if {'network_remove', 'name'} <= set(container):
for network in container['network_remove']:
- for container, container_config in container['name'].items():
- if 'network' in container_config and network in container_config['network']:
- raise ConfigError(f'Can not remove network "{network}", used by container "{container}"!')
+ for c, c_config in container['name'].items():
+ if 'network' in c_config and network in c_config['network']:
+ raise ConfigError(f'Can not remove network "{network}", used by container "{c}"!')
if 'registry' in container:
for registry, registry_config in container['registry'].items():
@@ -274,10 +280,10 @@ def generate_run_arguments(name, container_config):
env_opt += f" --env \"{k}={v['value']}\""
# Check/set label options "--label foo=bar"
- env_opt = ''
+ label = ''
if 'label' in container_config:
for k, v in container_config['label'].items():
- env_opt += f" --label \"{k}={v['value']}\""
+ label += f" --label \"{k}={v['value']}\""
hostname = ''
if 'host_name' in container_config:
@@ -314,7 +320,7 @@ def generate_run_arguments(name, container_config):
container_base_cmd = f'--detach --interactive --tty --replace {cap_add} ' \
f'--memory {memory}m --shm-size {shared_memory}m --memory-swap 0 --restart {restart} ' \
- f'--name {name} {hostname} {device} {port} {volume} {env_opt}'
+ f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label}'
entrypoint = ''
if 'entrypoint' in container_config:
@@ -349,7 +355,7 @@ def generate_run_arguments(name, container_config):
else:
ip_param += f' --ip {address}'
- return f'{container_base_cmd} --net {networks} {ip_param} {entrypoint} {image} {command} {command_arguments}'.strip()
+ return f'{container_base_cmd} --no-healthcheck --net {networks} {ip_param} {entrypoint} {image} {command} {command_arguments}'.strip()
def generate(container):
# bail out early - looks like removal from running config
diff --git a/src/conf_mode/dns_dynamic.py b/src/conf_mode/dns_dynamic.py
deleted file mode 100755
index ab80defe8..000000000
--- a/src/conf_mode/dns_dynamic.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2018-2023 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from sys import exit
-
-from vyos.config import Config
-from vyos.template import render
-from vyos.utils.process import call
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-config_file = r'/run/ddclient/ddclient.conf'
-systemd_override = r'/run/systemd/system/ddclient.service.d/override.conf'
-
-# Protocols that require zone
-zone_allowed = ['cloudflare', 'godaddy', 'hetzner', 'gandi', 'nfsn']
-
-# Protocols that do not require username
-username_unnecessary = ['1984', 'cloudflare', 'cloudns', 'duckdns', 'freemyip', 'hetzner', 'keysystems', 'njalla']
-
-# Protocols that support both IPv4 and IPv6
-dualstack_supported = ['cloudflare', 'dyndns2', 'freedns', 'njalla']
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
-
- base_level = ['service', 'dns', 'dynamic']
- if not conf.exists(base_level):
- return None
-
- dyndns = conf.get_config_dict(base_level, key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True,
- with_recursive_defaults=True)
-
- dyndns['config_file'] = config_file
- return dyndns
-
-def verify(dyndns):
- # bail out early - looks like removal from running config
- if not dyndns or 'address' not in dyndns:
- return None
-
- for address in dyndns['address']:
- # RFC2136 - configuration validation
- if 'rfc2136' in dyndns['address'][address]:
- for config in dyndns['address'][address]['rfc2136'].values():
- for field in ['host_name', 'zone', 'server', 'key']:
- if field not in config:
- raise ConfigError(f'"{field.replace("_", "-")}" is required for RFC2136 '
- f'based Dynamic DNS service on "{address}"')
-
- # Dynamic DNS service provider - configuration validation
- if 'service' in dyndns['address'][address]:
- for service, config in dyndns['address'][address]['service'].items():
- error_msg = f'is required for Dynamic DNS service "{service}" on "{address}"'
-
- for field in ['host_name', 'password', 'protocol']:
- if field not in config:
- raise ConfigError(f'"{field.replace("_", "-")}" {error_msg}')
-
- if config['protocol'] in zone_allowed and 'zone' not in config:
- raise ConfigError(f'"zone" {error_msg}')
-
- if config['protocol'] not in zone_allowed and 'zone' in config:
- raise ConfigError(f'"{config["protocol"]}" does not support "zone"')
-
- if config['protocol'] not in username_unnecessary:
- if 'username' not in config:
- raise ConfigError(f'"username" {error_msg}')
-
- if config['ip_version'] == 'both':
- if config['protocol'] not in dualstack_supported:
- raise ConfigError(f'"{config["protocol"]}" does not support '
- f'both IPv4 and IPv6 at the same time')
- # dyndns2 protocol in ddclient honors dual stack only for dyn.com (dyndns.org)
- if config['protocol'] == 'dyndns2' and 'server' in config and config['server'] != 'members.dyndns.org':
- raise ConfigError(f'"{config["protocol"]}" does not support '
- f'both IPv4 and IPv6 at the same time for "{config["server"]}"')
-
- return None
-
-def generate(dyndns):
- # bail out early - looks like removal from running config
- if not dyndns or 'address' not in dyndns:
- return None
-
- render(config_file, 'dns-dynamic/ddclient.conf.j2', dyndns)
- render(systemd_override, 'dns-dynamic/override.conf.j2', dyndns)
- return None
-
-def apply(dyndns):
- systemd_service = 'ddclient.service'
- # Reload systemd manager configuration
- call('systemctl daemon-reload')
-
- # bail out early - looks like removal from running config
- if not dyndns or 'address' not in dyndns:
- call(f'systemctl stop {systemd_service}')
- if os.path.exists(config_file):
- os.unlink(config_file)
- else:
- call(f'systemctl reload-or-restart {systemd_service}')
-
- return None
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py
index c3b1ee015..ee19555c4 100755
--- a/src/conf_mode/firewall.py
+++ b/src/conf_mode/firewall.py
@@ -23,10 +23,11 @@ from sys import exit
from vyos.base import Warning
from vyos.config import Config
-from vyos.configdict import node_changed
+from vyos.configdict import is_node_changed
from vyos.configdiff import get_config_diff, Diff
from vyos.configdep import set_dependents, call_dependents
-# from vyos.configverify import verify_interface_exists
+from vyos.configverify import verify_interface_exists
+from vyos.ethtool import Ethtool
from vyos.firewall import fqdn_config_parse
from vyos.firewall import geoip_update
from vyos.template import render
@@ -40,9 +41,6 @@ from vyos import ConfigError
from vyos import airbag
airbag.enable()
-nat_conf_script = 'nat.py'
-policy_route_conf_script = 'policy-route.py'
-
nftables_conf = '/run/nftables.conf'
sysfs_config = {
@@ -131,7 +129,7 @@ def get_config(config=None):
with_recursive_defaults=True)
- firewall['group_resync'] = bool('group' in firewall or node_changed(conf, base + ['group']))
+ firewall['group_resync'] = bool('group' in firewall or is_node_changed(conf, base + ['group']))
if firewall['group_resync']:
# Update nat and policy-route as firewall groups were updated
set_dependents('group_resync', conf)
@@ -160,6 +158,15 @@ def verify_rule(firewall, rule_conf, ipv6):
if target not in dict_search_args(firewall, 'ipv6', 'name'):
raise ConfigError(f'Invalid jump-target. Firewall ipv6 name {target} does not exist on the system')
+ if rule_conf['action'] == 'offload':
+ if 'offload_target' not in rule_conf:
+ raise ConfigError('Action set to offload, but no offload-target specified')
+
+ offload_target = rule_conf['offload_target']
+
+ if not dict_search_args(firewall, 'flowtable', offload_target):
+ raise ConfigError(f'Invalid offload-target. Flowtable "{offload_target}" does not exist on the system')
+
if 'queue_options' in rule_conf:
if 'queue' not in rule_conf['action']:
raise ConfigError('queue-options defined, but action queue needed and it is not defined')
@@ -249,7 +256,7 @@ def verify_rule(firewall, rule_conf, ipv6):
raise ConfigError(f'{side} port-group and port cannot both be defined')
if 'log_options' in rule_conf:
- if 'log' not in rule_conf or 'enable' not in rule_conf['log']:
+ if 'log' not in rule_conf:
raise ConfigError('log-options defined, but log is not enable')
if 'snapshot_length' in rule_conf['log_options'] and 'group' not in rule_conf['log_options']:
@@ -260,8 +267,8 @@ def verify_rule(firewall, rule_conf, ipv6):
for direction in ['inbound_interface','outbound_interface']:
if direction in rule_conf:
- if 'interface_name' in rule_conf[direction] and 'interface_group' in rule_conf[direction]:
- raise ConfigError(f'Cannot specify both interface-group and interface-name for {direction}')
+ if 'name' in rule_conf[direction] and 'group' in rule_conf[direction]:
+ raise ConfigError(f'Cannot specify both interface group and interface name for {direction}')
def verify_nested_group(group_name, group, groups, seen):
if 'include' not in group:
@@ -279,7 +286,31 @@ def verify_nested_group(group_name, group, groups, seen):
if 'include' in groups[g]:
verify_nested_group(g, groups[g], groups, seen)
+def verify_hardware_offload(ifname):
+ ethtool = Ethtool(ifname)
+ enabled, fixed = ethtool.get_hw_tc_offload()
+
+ if not enabled and fixed:
+ raise ConfigError(f'Interface "{ifname}" does not support hardware offload')
+
+ if not enabled:
+ raise ConfigError(f'Interface "{ifname}" requires "offload hw-tc-offload"')
+
def verify(firewall):
+ if 'flowtable' in firewall:
+ for flowtable, flowtable_conf in firewall['flowtable'].items():
+ if 'interface' not in flowtable_conf:
+ raise ConfigError(f'Flowtable "{flowtable}" requires at least one interface')
+
+ for ifname in flowtable_conf['interface']:
+ verify_interface_exists(ifname)
+
+ if dict_search_args(flowtable_conf, 'offload') == 'hardware':
+ interfaces = flowtable_conf['interface']
+
+ for ifname in interfaces:
+ verify_hardware_offload(ifname)
+
if 'group' in firewall:
for group_type in nested_group_types:
if group_type in firewall['group']:
@@ -327,12 +358,82 @@ def verify(firewall):
for rule_id, rule_conf in name_conf['rule'].items():
verify_rule(firewall, rule_conf, True)
+ #### ZONESSSS
+ local_zone = False
+ zone_interfaces = []
+
+ if 'zone' in firewall:
+ for zone, zone_conf in firewall['zone'].items():
+ if 'local_zone' not in zone_conf and 'interface' not in zone_conf:
+ raise ConfigError(f'Zone "{zone}" has no interfaces and is not the local zone')
+
+ if 'local_zone' in zone_conf:
+ if local_zone:
+ raise ConfigError('There cannot be multiple local zones')
+ if 'interface' in zone_conf:
+ raise ConfigError('Local zone cannot have interfaces assigned')
+ if 'intra_zone_filtering' in zone_conf:
+ raise ConfigError('Local zone cannot use intra-zone-filtering')
+ local_zone = True
+
+ if 'interface' in zone_conf:
+ found_duplicates = [intf for intf in zone_conf['interface'] if intf in zone_interfaces]
+
+ if found_duplicates:
+ raise ConfigError(f'Interfaces cannot be assigned to multiple zones')
+
+ zone_interfaces += zone_conf['interface']
+
+ if 'intra_zone_filtering' in zone_conf:
+ intra_zone = zone_conf['intra_zone_filtering']
+
+ if len(intra_zone) > 1:
+ raise ConfigError('Only one intra-zone-filtering action must be specified')
+
+ if 'firewall' in intra_zone:
+ v4_name = dict_search_args(intra_zone, 'firewall', 'name')
+ if v4_name and not dict_search_args(firewall, 'ipv4', 'name', v4_name):
+ raise ConfigError(f'Firewall name "{v4_name}" does not exist')
+
+ v6_name = dict_search_args(intra_zone, 'firewall', 'ipv6_name')
+ if v6_name and not dict_search_args(firewall, 'ipv6', 'name', v6_name):
+ raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist')
+
+ if not v4_name and not v6_name:
+ raise ConfigError('No firewall names specified for intra-zone-filtering')
+
+ if 'from' in zone_conf:
+ for from_zone, from_conf in zone_conf['from'].items():
+ if from_zone not in firewall['zone']:
+ raise ConfigError(f'Zone "{zone}" refers to a non-existent or deleted zone "{from_zone}"')
+
+ v4_name = dict_search_args(from_conf, 'firewall', 'name')
+ if v4_name and not dict_search_args(firewall, 'ipv4', 'name', v4_name):
+ raise ConfigError(f'Firewall name "{v4_name}" does not exist')
+
+ v6_name = dict_search_args(from_conf, 'firewall', 'ipv6_name')
+ if v6_name and not dict_search_args(firewall, 'ipv6', 'name', v6_name):
+ raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist')
+
return None
def generate(firewall):
if not os.path.exists(nftables_conf):
firewall['first_install'] = True
+ if 'zone' in firewall:
+ for local_zone, local_zone_conf in firewall['zone'].items():
+ if 'local_zone' not in local_zone_conf:
+ continue
+
+ local_zone_conf['from_local'] = {}
+
+ for zone, zone_conf in firewall['zone'].items():
+ if zone == local_zone or 'from' not in zone_conf:
+ continue
+ if local_zone in zone_conf['from']:
+ local_zone_conf['from_local'][zone] = zone_conf['from'][local_zone]
+
# Determine if conntrack is needed
firewall['ipv4_conntrack_action'] = 'return'
firewall['ipv6_conntrack_action'] = 'return'
diff --git a/src/conf_mode/high-availability.py b/src/conf_mode/high-availability.py
index 0121df11c..b3b27b14e 100755
--- a/src/conf_mode/high-availability.py
+++ b/src/conf_mode/high-availability.py
@@ -15,6 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import os
import time
from sys import exit
@@ -24,6 +25,7 @@ from ipaddress import IPv6Interface
from vyos.base import Warning
from vyos.config import Config
+from vyos.configdict import leaf_node_changed
from vyos.ifconfig.vrrp import VRRP
from vyos.template import render
from vyos.template import is_ipv4
@@ -35,6 +37,9 @@ from vyos import airbag
airbag.enable()
+systemd_override = r'/run/systemd/system/keepalived.service.d/10-override.conf'
+
+
def get_config(config=None):
if config:
conf = config
@@ -54,6 +59,9 @@ def get_config(config=None):
if conf.exists(conntrack_path):
ha['conntrack_sync_group'] = conf.return_value(conntrack_path)
+ if leaf_node_changed(conf, base + ['vrrp', 'snmp']):
+ ha.update({'restart_required': {}})
+
return ha
def verify(ha):
@@ -164,19 +172,23 @@ def verify(ha):
def generate(ha):
if not ha or 'disable' in ha:
+ if os.path.isfile(systemd_override):
+ os.unlink(systemd_override)
return None
render(VRRP.location['config'], 'high-availability/keepalived.conf.j2', ha)
+ render(systemd_override, 'high-availability/10-override.conf.j2', ha)
return None
def apply(ha):
service_name = 'keepalived.service'
+ call('systemctl daemon-reload')
if not ha or 'disable' in ha:
call(f'systemctl stop {service_name}')
return None
# Check if IPv6 address is tentative T5533
- for group, group_config in ha['vrrp']['group'].items():
+ for group, group_config in ha.get('vrrp', {}).get('group', {}).items():
if 'hello_source_address' in group_config:
if is_ipv6(group_config['hello_source_address']):
ipv6_address = group_config['hello_source_address']
@@ -187,7 +199,11 @@ def apply(ha):
if is_ipv6_tentative(interface, ipv6_address):
time.sleep(interval)
- call(f'systemctl reload-or-restart {service_name}')
+ systemd_action = 'reload-or-restart'
+ if 'restart_required' in ha:
+ systemd_action = 'restart'
+
+ call(f'systemctl {systemd_action} {service_name}')
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/http-api.py b/src/conf_mode/http-api.py
deleted file mode 100755
index 793a90d88..000000000
--- a/src/conf_mode/http-api.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2019-2021 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import os
-import json
-
-from time import sleep
-from copy import deepcopy
-
-import vyos.defaults
-
-from vyos.config import Config
-from vyos.configdep import set_dependents, call_dependents
-from vyos.template import render
-from vyos.utils.process import call
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-api_conf_file = '/etc/vyos/http-api.conf'
-systemd_service = '/run/systemd/system/vyos-http-api.service'
-
-vyos_conf_scripts_dir=vyos.defaults.directories['conf_mode']
-
-def _translate_values_to_boolean(d: dict) -> dict:
- for k in list(d):
- if d[k] == {}:
- d[k] = True
- elif isinstance(d[k], dict):
- _translate_values_to_boolean(d[k])
- else:
- pass
-
-def get_config(config=None):
- http_api = deepcopy(vyos.defaults.api_data)
- x = http_api.get('api_keys')
- if x is None:
- default_key = None
- else:
- default_key = x[0]
- keys_added = False
-
- if config:
- conf = config
- else:
- conf = Config()
-
- # reset on creation/deletion of 'api' node
- https_base = ['service', 'https']
- if conf.exists(https_base):
- set_dependents("https", conf)
-
- base = ['service', 'https', 'api']
- if not conf.exists(base):
- return None
-
- api_dict = conf.get_config_dict(base, key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True,
- with_recursive_defaults=True)
-
- # One needs to 'flatten' the keys dict from the config into the
- # http-api.conf format for api_keys:
- if 'keys' in api_dict:
- api_dict['api_keys'] = []
- for el in list(api_dict['keys'].get('id', {})):
- key = api_dict['keys']['id'][el].get('key', '')
- if key:
- api_dict['api_keys'].append({'id': el, 'key': key})
- del api_dict['keys']
-
- # Do we run inside a VRF context?
- vrf_path = ['service', 'https', 'vrf']
- if conf.exists(vrf_path):
- http_api['vrf'] = conf.return_value(vrf_path)
-
- if 'api_keys' in api_dict:
- keys_added = True
-
- if api_dict.from_defaults(['graphql']):
- del api_dict['graphql']
-
- http_api.update(api_dict)
-
- if keys_added and default_key:
- if default_key in http_api['api_keys']:
- http_api['api_keys'].remove(default_key)
-
- # Finally, translate entries in http_api into boolean settings for
- # backwards compatability of JSON http-api.conf file
- _translate_values_to_boolean(http_api)
-
- return http_api
-
-def verify(http_api):
- return None
-
-def generate(http_api):
- if http_api is None:
- if os.path.exists(systemd_service):
- os.unlink(systemd_service)
- return None
-
- if not os.path.exists('/etc/vyos'):
- os.mkdir('/etc/vyos')
-
- with open(api_conf_file, 'w') as f:
- json.dump(http_api, f, indent=2)
-
- render(systemd_service, 'https/vyos-http-api.service.j2', http_api)
- return None
-
-def apply(http_api):
- # Reload systemd manager configuration
- call('systemctl daemon-reload')
- service_name = 'vyos-http-api.service'
-
- if http_api is not None:
- call(f'systemctl restart {service_name}')
- else:
- call(f'systemctl stop {service_name}')
-
- # Let uvicorn settle before restarting Nginx
- sleep(1)
-
- call_dependents()
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- sys.exit(1)
diff --git a/src/conf_mode/https.py b/src/conf_mode/https.py
deleted file mode 100755
index 010490c7e..000000000
--- a/src/conf_mode/https.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2019-2022 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import sys
-
-from copy import deepcopy
-
-import vyos.defaults
-import vyos.certbot_util
-
-from vyos.config import Config
-from vyos.configverify import verify_vrf
-from vyos import ConfigError
-from vyos.pki import wrap_certificate
-from vyos.pki import wrap_private_key
-from vyos.template import render
-from vyos.utils.process import call
-from vyos.utils.network import check_port_availability
-from vyos.utils.network import is_listen_port_bind_service
-from vyos.utils.file import write_file
-
-from vyos import airbag
-airbag.enable()
-
-config_file = '/etc/nginx/sites-available/default'
-systemd_override = r'/run/systemd/system/nginx.service.d/override.conf'
-cert_dir = '/etc/ssl/certs'
-key_dir = '/etc/ssl/private'
-certbot_dir = vyos.defaults.directories['certbot']
-
-# https config needs to coordinate several subsystems: api, certbot,
-# self-signed certificate, as well as the virtual hosts defined within the
-# https config definition itself. Consequently, one needs a general dict,
-# encompassing the https and other configs, and a list of such virtual hosts
-# (server blocks in nginx terminology) to pass to the jinja2 template.
-default_server_block = {
- 'id' : '',
- 'address' : '*',
- 'port' : '443',
- 'name' : ['_'],
- 'api' : {},
- 'vyos_cert' : {},
- 'certbot' : False
-}
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
-
- base = ['service', 'https']
- if not conf.exists(base):
- return None
-
- https = conf.get_config_dict(base, get_first_key=True)
-
- if https:
- https['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
-
- return https
-
-def verify(https):
- if https is None:
- return None
-
- if 'certificates' in https:
- certificates = https['certificates']
-
- if 'certificate' in certificates:
- if not https['pki']:
- raise ConfigError("PKI is not configured")
-
- cert_name = certificates['certificate']
-
- if cert_name not in https['pki']['certificate']:
- raise ConfigError("Invalid certificate on https configuration")
-
- pki_cert = https['pki']['certificate'][cert_name]
-
- if 'certificate' not in pki_cert:
- raise ConfigError("Missing certificate on https configuration")
-
- if 'private' not in pki_cert or 'key' not in pki_cert['private']:
- raise ConfigError("Missing certificate private key on https configuration")
-
- if 'certbot' in https['certificates']:
- vhost_names = []
- for vh, vh_conf in https.get('virtual-host', {}).items():
- vhost_names += vh_conf.get('server-name', [])
- domains = https['certificates']['certbot'].get('domain-name', [])
- domains_found = [domain for domain in domains if domain in vhost_names]
- if not domains_found:
- raise ConfigError("At least one 'virtual-host <id> server-name' "
- "matching the 'certbot domain-name' is required.")
-
- server_block_list = []
-
- # organize by vhosts
- vhost_dict = https.get('virtual-host', {})
-
- if not vhost_dict:
- # no specified virtual hosts (server blocks); use default
- server_block_list.append(default_server_block)
- else:
- for vhost in list(vhost_dict):
- server_block = deepcopy(default_server_block)
- data = vhost_dict.get(vhost, {})
- server_block['address'] = data.get('listen-address', '*')
- server_block['port'] = data.get('listen-port', '443')
- server_block_list.append(server_block)
-
- for entry in server_block_list:
- _address = entry.get('address')
- _address = '0.0.0.0' if _address == '*' else _address
- _port = entry.get('port')
- proto = 'tcp'
- if check_port_availability(_address, int(_port), proto) is not True and \
- not is_listen_port_bind_service(int(_port), 'nginx'):
- raise ConfigError(f'"{proto}" port "{_port}" is used by another service')
-
- verify_vrf(https)
- return None
-
-def generate(https):
- if https is None:
- return None
-
- server_block_list = []
-
- # organize by vhosts
-
- vhost_dict = https.get('virtual-host', {})
-
- if not vhost_dict:
- # no specified virtual hosts (server blocks); use default
- server_block_list.append(default_server_block)
- else:
- for vhost in list(vhost_dict):
- server_block = deepcopy(default_server_block)
- server_block['id'] = vhost
- data = vhost_dict.get(vhost, {})
- server_block['address'] = data.get('listen-address', '*')
- server_block['port'] = data.get('listen-port', '443')
- name = data.get('server-name', ['_'])
- server_block['name'] = name
- allow_client = data.get('allow-client', {})
- server_block['allow_client'] = allow_client.get('address', [])
- server_block_list.append(server_block)
-
- # get certificate data
-
- cert_dict = https.get('certificates', {})
-
- if 'certificate' in cert_dict:
- cert_name = cert_dict['certificate']
- pki_cert = https['pki']['certificate'][cert_name]
-
- cert_path = os.path.join(cert_dir, f'{cert_name}.pem')
- key_path = os.path.join(key_dir, f'{cert_name}.pem')
-
- server_cert = str(wrap_certificate(pki_cert['certificate']))
- if 'ca-certificate' in cert_dict:
- ca_cert = cert_dict['ca-certificate']
- server_cert += '\n' + str(wrap_certificate(https['pki']['ca'][ca_cert]['certificate']))
-
- write_file(cert_path, server_cert)
- write_file(key_path, wrap_private_key(pki_cert['private']['key']))
-
- vyos_cert_data = {
- 'crt': cert_path,
- 'key': key_path
- }
-
- for block in server_block_list:
- block['vyos_cert'] = vyos_cert_data
-
- # letsencrypt certificate using certbot
-
- certbot = False
- cert_domains = cert_dict.get('certbot', {}).get('domain-name', [])
- if cert_domains:
- certbot = True
- for domain in cert_domains:
- sub_list = vyos.certbot_util.choose_server_block(server_block_list,
- domain)
- if sub_list:
- for sb in sub_list:
- sb['certbot'] = True
- sb['certbot_dir'] = certbot_dir
- # certbot organizes certificates by first domain
- sb['certbot_domain_dir'] = cert_domains[0]
-
- # get api data
-
- api_set = False
- api_data = {}
- if 'api' in list(https):
- api_set = True
- api_data = vyos.defaults.api_data
- api_settings = https.get('api', {})
- if api_settings:
- port = api_settings.get('port', '')
- if port:
- api_data['port'] = port
- vhosts = https.get('api-restrict', {}).get('virtual-host', [])
- if vhosts:
- api_data['vhost'] = vhosts[:]
- if 'socket' in list(api_settings):
- api_data['socket'] = True
-
- if api_data:
- vhost_list = api_data.get('vhost', [])
- if not vhost_list:
- for block in server_block_list:
- block['api'] = api_data
- else:
- for block in server_block_list:
- if block['id'] in vhost_list:
- block['api'] = api_data
-
- if 'server_block_list' not in https or not https['server_block_list']:
- https['server_block_list'] = [default_server_block]
-
- data = {
- 'server_block_list': server_block_list,
- 'api_set': api_set,
- 'certbot': certbot
- }
-
- render(config_file, 'https/nginx.default.j2', data)
- render(systemd_override, 'https/override.conf.j2', https)
- return None
-
-def apply(https):
- # Reload systemd manager configuration
- call('systemctl daemon-reload')
- if https is not None:
- call('systemctl restart nginx.service')
- else:
- call('systemctl stop nginx.service')
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- sys.exit(1)
diff --git a/src/conf_mode/interfaces-ethernet.py b/src/conf_mode/interfaces-ethernet.py
deleted file mode 100755
index f3e65ad5e..000000000
--- a/src/conf_mode/interfaces-ethernet.py
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2019-2021 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from glob import glob
-from sys import exit
-
-from vyos.base import Warning
-from vyos.config import Config
-from vyos.configdict import get_interface_dict
-from vyos.configdict import is_node_changed
-from vyos.configverify import verify_address
-from vyos.configverify import verify_dhcpv6
-from vyos.configverify import verify_eapol
-from vyos.configverify import verify_interface_exists
-from vyos.configverify import verify_mirror_redirect
-from vyos.configverify import verify_mtu
-from vyos.configverify import verify_mtu_ipv6
-from vyos.configverify import verify_vlan_config
-from vyos.configverify import verify_vrf
-from vyos.configverify import verify_bond_bridge_member
-from vyos.ethtool import Ethtool
-from vyos.ifconfig import EthernetIf
-from vyos.pki import find_chain
-from vyos.pki import encode_certificate
-from vyos.pki import load_certificate
-from vyos.pki import wrap_private_key
-from vyos.template import render
-from vyos.utils.process import call
-from vyos.utils.dict import dict_search
-from vyos.utils.file import write_file
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-# XXX: wpa_supplicant works on the source interface
-cfg_dir = '/run/wpa_supplicant'
-wpa_suppl_conf = '/run/wpa_supplicant/{ifname}.conf'
-
-def get_config(config=None):
- """
- Retrive CLI config as dictionary. Dictionary can never be empty, as at least the
- interface name will be added or a deleted flag
- """
- if config:
- conf = config
- else:
- conf = Config()
-
- # This must be called prior to get_interface_dict(), as this function will
- # alter the config level (config.set_level())
- pki = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
-
- base = ['interfaces', 'ethernet']
- ifname, ethernet = get_interface_dict(conf, base)
-
- if 'deleted' not in ethernet:
- if pki: ethernet['pki'] = pki
-
- tmp = is_node_changed(conf, base + [ifname, 'speed'])
- if tmp: ethernet.update({'speed_duplex_changed': {}})
-
- tmp = is_node_changed(conf, base + [ifname, 'duplex'])
- if tmp: ethernet.update({'speed_duplex_changed': {}})
-
- return ethernet
-
-def verify(ethernet):
- if 'deleted' in ethernet:
- return None
-
- ifname = ethernet['ifname']
- verify_interface_exists(ifname)
- verify_mtu(ethernet)
- verify_mtu_ipv6(ethernet)
- verify_dhcpv6(ethernet)
- verify_address(ethernet)
- verify_vrf(ethernet)
- verify_bond_bridge_member(ethernet)
- verify_eapol(ethernet)
- verify_mirror_redirect(ethernet)
-
- ethtool = Ethtool(ifname)
- # No need to check speed and duplex keys as both have default values.
- if ((ethernet['speed'] == 'auto' and ethernet['duplex'] != 'auto') or
- (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')):
- raise ConfigError('Speed/Duplex missmatch. Must be both auto or manually configured')
-
- if ethernet['speed'] != 'auto' and ethernet['duplex'] != 'auto':
- # We need to verify if the requested speed and duplex setting is
- # supported by the underlaying NIC.
- speed = ethernet['speed']
- duplex = ethernet['duplex']
- if not ethtool.check_speed_duplex(speed, duplex):
- raise ConfigError(f'Adapter does not support changing speed and duplex '\
- f'settings to: {speed}/{duplex}!')
-
- if 'disable_flow_control' in ethernet:
- if not ethtool.check_flow_control():
- raise ConfigError('Adapter does not support changing flow-control settings!')
-
- if 'ring_buffer' in ethernet:
- max_rx = ethtool.get_ring_buffer_max('rx')
- if not max_rx:
- raise ConfigError('Driver does not support RX ring-buffer configuration!')
-
- max_tx = ethtool.get_ring_buffer_max('tx')
- if not max_tx:
- raise ConfigError('Driver does not support TX ring-buffer configuration!')
-
- rx = dict_search('ring_buffer.rx', ethernet)
- if rx and int(rx) > int(max_rx):
- raise ConfigError(f'Driver only supports a maximum RX ring-buffer '\
- f'size of "{max_rx}" bytes!')
-
- tx = dict_search('ring_buffer.tx', ethernet)
- if tx and int(tx) > int(max_tx):
- raise ConfigError(f'Driver only supports a maximum TX ring-buffer '\
- f'size of "{max_tx}" bytes!')
-
- # verify offloading capabilities
- if dict_search('offload.rps', ethernet) != None:
- if not os.path.exists(f'/sys/class/net/{ifname}/queues/rx-0/rps_cpus'):
- raise ConfigError('Interface does not suport RPS!')
-
- driver = ethtool.get_driver_name()
- # T3342 - Xen driver requires special treatment
- if driver == 'vif':
- if int(ethernet['mtu']) > 1500 and dict_search('offload.sg', ethernet) == None:
- raise ConfigError('Xen netback drivers requires scatter-gatter offloading '\
- 'for MTU size larger then 1500 bytes')
-
- if {'is_bond_member', 'mac'} <= set(ethernet):
- Warning(f'changing mac address "{mac}" will be ignored as "{ifname}" ' \
- f'is a member of bond "{is_bond_member}"'.format(**ethernet))
-
- # use common function to verify VLAN configuration
- verify_vlan_config(ethernet)
- return None
-
-def generate(ethernet):
- # render real configuration file once
- wpa_supplicant_conf = wpa_suppl_conf.format(**ethernet)
-
- if 'deleted' in ethernet:
- # delete configuration on interface removal
- if os.path.isfile(wpa_supplicant_conf):
- os.unlink(wpa_supplicant_conf)
- return None
-
- if 'eapol' in ethernet:
- ifname = ethernet['ifname']
-
- render(wpa_supplicant_conf, 'ethernet/wpa_supplicant.conf.j2', ethernet)
-
- cert_file_path = os.path.join(cfg_dir, f'{ifname}_cert.pem')
- cert_key_path = os.path.join(cfg_dir, f'{ifname}_cert.key')
-
- cert_name = ethernet['eapol']['certificate']
- pki_cert = ethernet['pki']['certificate'][cert_name]
-
- loaded_pki_cert = load_certificate(pki_cert['certificate'])
- loaded_ca_certs = {load_certificate(c['certificate'])
- for c in ethernet['pki']['ca'].values()} if 'ca' in ethernet['pki'] else {}
-
- cert_full_chain = find_chain(loaded_pki_cert, loaded_ca_certs)
-
- write_file(cert_file_path,
- '\n'.join(encode_certificate(c) for c in cert_full_chain))
- write_file(cert_key_path, wrap_private_key(pki_cert['private']['key']))
-
- if 'ca_certificate' in ethernet['eapol']:
- ca_cert_file_path = os.path.join(cfg_dir, f'{ifname}_ca.pem')
- ca_chains = []
-
- for ca_cert_name in ethernet['eapol']['ca_certificate']:
- pki_ca_cert = ethernet['pki']['ca'][ca_cert_name]
- loaded_ca_cert = load_certificate(pki_ca_cert['certificate'])
- ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs)
- ca_chains.append('\n'.join(encode_certificate(c) for c in ca_full_chain))
-
- write_file(ca_cert_file_path, '\n'.join(ca_chains))
-
- return None
-
-def apply(ethernet):
- ifname = ethernet['ifname']
- # take care about EAPoL supplicant daemon
- eapol_action='stop'
-
- e = EthernetIf(ifname)
- if 'deleted' in ethernet:
- # delete interface
- e.remove()
- else:
- e.update(ethernet)
- if 'eapol' in ethernet:
- eapol_action='reload-or-restart'
-
- call(f'systemctl {eapol_action} wpa_supplicant-wired@{ifname}')
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/conf_mode/interfaces-bonding.py b/src/conf_mode/interfaces_bonding.py
index 0bd306ed0..8184d8415 100755
--- a/src/conf_mode/interfaces-bonding.py
+++ b/src/conf_mode/interfaces_bonding.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2022 VyOS maintainers and contributors
+# Copyright (C) 2019-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -18,7 +18,6 @@ import os
from sys import exit
from netifaces import interfaces
-
from vyos.config import Config
from vyos.configdict import get_interface_dict
from vyos.configdict import is_node_changed
@@ -34,11 +33,16 @@ from vyos.configverify import verify_source_interface
from vyos.configverify import verify_vlan_config
from vyos.configverify import verify_vrf
from vyos.ifconfig import BondIf
+from vyos.ifconfig.ethernet import EthernetIf
from vyos.ifconfig import Section
+from vyos.template import render_to_string
from vyos.utils.dict import dict_search
+from vyos.utils.dict import dict_to_paths_values
from vyos.configdict import has_address_configured
from vyos.configdict import has_vrf_configured
+from vyos.configdep import set_dependents, call_dependents
from vyos import ConfigError
+from vyos import frr
from vyos import airbag
airbag.enable()
@@ -90,7 +94,6 @@ def get_config(config=None):
# determine which members have been removed
interfaces_removed = leaf_node_changed(conf, base + [ifname, 'member', 'interface'])
-
# Reset config level to interfaces
old_level = conf.get_level()
conf.set_level(['interfaces'])
@@ -102,6 +105,10 @@ def get_config(config=None):
tmp = {}
for interface in interfaces_removed:
+ # if member is deleted from bond, add dependencies to call
+ # ethernet commit again in apply function
+ # to apply options under ethernet section
+ set_dependents('ethernet', conf, interface)
section = Section.section(interface) # this will be 'ethernet' for 'eth0'
if conf.exists([section, interface, 'disable']):
tmp[interface] = {'disable': ''}
@@ -116,9 +123,21 @@ def get_config(config=None):
if dict_search('member.interface', bond):
for interface, interface_config in bond['member']['interface'].items():
+
+ interface_ethernet_config = conf.get_config_dict(
+ ['interfaces', 'ethernet', interface],
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_defaults=False,
+ with_recursive_defaults=False)
+
+ interface_config['config_paths'] = dict_to_paths_values(interface_ethernet_config)
+
# Check if member interface is a new member
if not conf.exists_effective(base + [ifname, 'member', 'interface', interface]):
bond['shutdown_required'] = {}
+ interface_config['new_added'] = {}
# Check if member interface is disabled
conf.set_level(['interfaces'])
@@ -151,7 +170,6 @@ def get_config(config=None):
# bond members must not have a VRF attached
tmp = has_vrf_configured(conf, interface)
if tmp: interface_config['has_vrf'] = {}
-
return bond
@@ -212,6 +230,14 @@ def verify(bond):
if 'has_vrf' in interface_config:
raise ConfigError(error_msg + 'it has a VRF assigned!')
+ if 'new_added' in interface_config and 'config_paths' in interface_config:
+ for option_path, option_value in interface_config['config_paths'].items():
+ if option_path in EthernetIf.get_bond_member_allowed_options() :
+ continue
+ if option_path in BondIf.get_inherit_bond_options():
+ continue
+ raise ConfigError(error_msg + f'it has a "{option_path.replace(".", " ")}" assigned!')
+
if 'primary' in bond:
if bond['primary'] not in bond['member']['interface']:
raise ConfigError(f'Primary interface of bond "{bond_name}" must be a member interface')
@@ -223,17 +249,38 @@ def verify(bond):
return None
def generate(bond):
+ bond['frr_zebra_config'] = ''
+ if 'deleted' not in bond:
+ bond['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', bond)
return None
def apply(bond):
- b = BondIf(bond['ifname'])
-
+ ifname = bond['ifname']
+ b = BondIf(ifname)
if 'deleted' in bond:
# delete interface
b.remove()
else:
b.update(bond)
+ if dict_search('member.interface_remove', bond):
+ try:
+ call_dependents()
+ except ConfigError:
+ raise ConfigError('Error in updating ethernet interface '
+ 'after deleting it from bond')
+
+ zebra_daemon = 'zebra'
+ # Save original configuration prior to starting any commit actions
+ frr_cfg = frr.FRRConfig()
+
+ # The route-map used for the FIB (zebra) is part of the zebra daemon
+ frr_cfg.load_configuration(zebra_daemon)
+ frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True)
+ if 'frr_zebra_config' in bond:
+ frr_cfg.add_before(frr.default_add_before, bond['frr_zebra_config'])
+ frr_cfg.commit_configuration(zebra_daemon)
+
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/interfaces-bridge.py b/src/conf_mode/interfaces_bridge.py
index c82f01e53..29991e2da 100755
--- a/src/conf_mode/interfaces-bridge.py
+++ b/src/conf_mode/interfaces_bridge.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2020 VyOS maintainers and contributors
+# Copyright (C) 2019-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -28,7 +28,8 @@ from vyos.configverify import verify_vrf
from vyos.ifconfig import BridgeIf
from vyos.configdict import has_address_configured
from vyos.configdict import has_vrf_configured
-
+from vyos.configdep import set_dependents
+from vyos.configdep import call_dependents
from vyos.utils.dict import dict_search
from vyos import ConfigError
@@ -48,7 +49,7 @@ def get_config(config=None):
ifname, bridge = get_interface_dict(conf, base)
# determine which members have been removed
- tmp = node_changed(conf, base + [ifname, 'member', 'interface'], key_mangling=('-', '_'))
+ tmp = node_changed(conf, base + [ifname, 'member', 'interface'])
if tmp:
if 'member' in bridge:
bridge['member'].update({'interface_remove' : tmp })
@@ -83,6 +84,12 @@ def get_config(config=None):
if 'enable_vlan' in bridge and tmp:
bridge['member']['interface'][interface].update({'has_vlan' : ''})
+ # When using VXLAN member interfaces that are configured for Single
+ # VXLAN Device (SVD) we need to call the VXLAN conf-mode script to re-create
+ # VLAN to VNI mappings if required
+ if interface.startswith('vxlan'):
+ set_dependents('vxlan', conf, interface)
+
# delete empty dictionary keys - no need to run code paths if nothing is there to do
if 'member' in bridge:
if 'interface' in bridge['member'] and len(bridge['member']['interface']) == 0:
@@ -159,6 +166,13 @@ def apply(bridge):
else:
br.update(bridge)
+ for interface in dict_search('member.interface', bridge) or []:
+ if interface.startswith('vxlan'):
+ try:
+ call_dependents()
+ except ConfigError:
+ raise ConfigError('Error in updating VXLAN interface after changing bridge!')
+
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/interfaces-dummy.py b/src/conf_mode/interfaces_dummy.py
index e771581e1..e771581e1 100755
--- a/src/conf_mode/interfaces-dummy.py
+++ b/src/conf_mode/interfaces_dummy.py
diff --git a/src/conf_mode/interfaces_ethernet.py b/src/conf_mode/interfaces_ethernet.py
new file mode 100755
index 000000000..2c0f846c3
--- /dev/null
+++ b/src/conf_mode/interfaces_ethernet.py
@@ -0,0 +1,391 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2019-2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+
+from glob import glob
+from sys import exit
+
+from vyos.base import Warning
+from vyos.config import Config
+from vyos.configdict import get_interface_dict
+from vyos.configdict import is_node_changed
+from vyos.configverify import verify_address
+from vyos.configverify import verify_dhcpv6
+from vyos.configverify import verify_eapol
+from vyos.configverify import verify_interface_exists
+from vyos.configverify import verify_mirror_redirect
+from vyos.configverify import verify_mtu
+from vyos.configverify import verify_mtu_ipv6
+from vyos.configverify import verify_vlan_config
+from vyos.configverify import verify_vrf
+from vyos.configverify import verify_bond_bridge_member
+from vyos.ethtool import Ethtool
+from vyos.ifconfig import EthernetIf
+from vyos.ifconfig import BondIf
+from vyos.pki import find_chain
+from vyos.pki import encode_certificate
+from vyos.pki import load_certificate
+from vyos.pki import wrap_private_key
+from vyos.template import render
+from vyos.utils.process import call
+from vyos.utils.dict import dict_search
+from vyos.utils.dict import dict_to_paths_values
+from vyos.utils.dict import dict_set
+from vyos.utils.dict import dict_delete
+from vyos.utils.file import write_file
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+# XXX: wpa_supplicant works on the source interface
+cfg_dir = '/run/wpa_supplicant'
+wpa_suppl_conf = '/run/wpa_supplicant/{ifname}.conf'
+
+def update_bond_options(conf: Config, eth_conf: dict) -> list:
+ """
+ Return list of blocked options if interface is a bond member
+ :param conf: Config object
+ :type conf: Config
+ :param eth_conf: Ethernet config dictionary
+ :type eth_conf: dict
+ :return: List of blocked options
+ :rtype: list
+ """
+ blocked_list = []
+ bond_name = list(eth_conf['is_bond_member'].keys())[0]
+ config_without_defaults = conf.get_config_dict(
+ ['interfaces', 'ethernet', eth_conf['ifname']],
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_defaults=False,
+ with_recursive_defaults=False)
+ config_with_defaults = conf.get_config_dict(
+ ['interfaces', 'ethernet', eth_conf['ifname']],
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_defaults=True,
+ with_recursive_defaults=True)
+ bond_config_with_defaults = conf.get_config_dict(
+ ['interfaces', 'bonding', bond_name],
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_defaults=True,
+ with_recursive_defaults=True)
+ eth_dict_paths = dict_to_paths_values(config_without_defaults)
+ eth_path_base = ['interfaces', 'ethernet', eth_conf['ifname']]
+
+ #if option is configured under ethernet section
+ for option_path, option_value in eth_dict_paths.items():
+ bond_option_value = dict_search(option_path, bond_config_with_defaults)
+
+ #If option is allowed for changing then continue
+ if option_path in EthernetIf.get_bond_member_allowed_options():
+ continue
+ # if option is inherited from bond then set valued from bond interface
+ if option_path in BondIf.get_inherit_bond_options():
+ # If option equals to bond option then do nothing
+ if option_value == bond_option_value:
+ continue
+ else:
+ # if ethernet has option and bond interface has
+ # then copy it from bond
+ if bond_option_value is not None:
+ if is_node_changed(conf, eth_path_base + option_path.split('.')):
+ Warning(
+ f'Cannot apply "{option_path.replace(".", " ")}" to "{option_value}".' \
+ f' Interface "{eth_conf["ifname"]}" is a bond member.' \
+ f' Option is inherited from bond "{bond_name}"')
+ dict_set(option_path, bond_option_value, eth_conf)
+ continue
+ # if ethernet has option and bond interface does not have
+ # then delete it form dict and do not apply it
+ else:
+ if is_node_changed(conf, eth_path_base + option_path.split('.')):
+ Warning(
+ f'Cannot apply "{option_path.replace(".", " ")}".' \
+ f' Interface "{eth_conf["ifname"]}" is a bond member.' \
+ f' Option is inherited from bond "{bond_name}"')
+ dict_delete(option_path, eth_conf)
+ blocked_list.append(option_path)
+
+ # if inherited option is not configured under ethernet section but configured under bond section
+ for option_path in BondIf.get_inherit_bond_options():
+ bond_option_value = dict_search(option_path, bond_config_with_defaults)
+ if bond_option_value is not None:
+ if option_path not in eth_dict_paths:
+ if is_node_changed(conf, eth_path_base + option_path.split('.')):
+ Warning(
+ f'Cannot apply "{option_path.replace(".", " ")}" to "{dict_search(option_path, config_with_defaults)}".' \
+ f' Interface "{eth_conf["ifname"]}" is a bond member. ' \
+ f'Option is inherited from bond "{bond_name}"')
+ dict_set(option_path, bond_option_value, eth_conf)
+ eth_conf['bond_blocked_changes'] = blocked_list
+ return None
+
+def get_config(config=None):
+ """
+ Retrieve CLI config as a dictionary. The dictionary can never be empty: at least
+ the interface name or a deleted flag will always be present
+ """
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['interfaces', 'ethernet']
+ ifname, ethernet = get_interface_dict(conf, base, with_pki=True)
+
+ if 'is_bond_member' in ethernet:
+ update_bond_options(conf, ethernet)
+
+ tmp = is_node_changed(conf, base + [ifname, 'speed'])
+ if tmp: ethernet.update({'speed_duplex_changed': {}})
+
+ tmp = is_node_changed(conf, base + [ifname, 'duplex'])
+ if tmp: ethernet.update({'speed_duplex_changed': {}})
+
+ return ethernet
+
+def verify_speed_duplex(ethernet: dict, ethtool: Ethtool):
+ """
+ Verify speed and duplex
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ :param ethtool: Ethtool object
+ :type ethtool: Ethtool
+ """
+ if ((ethernet['speed'] == 'auto' and ethernet['duplex'] != 'auto') or
+ (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')):
+ raise ConfigError(
+ 'Speed/Duplex mismatch. Both must be auto or both must be manually configured')
+
+ if ethernet['speed'] != 'auto' and ethernet['duplex'] != 'auto':
+ # We need to verify if the requested speed and duplex setting is
+ # supported by the underlying NIC.
+ speed = ethernet['speed']
+ duplex = ethernet['duplex']
+ if not ethtool.check_speed_duplex(speed, duplex):
+ raise ConfigError(
+ f'Adapter does not support changing speed ' \
+ f'and duplex settings to: {speed}/{duplex}!')
+
+
+def verify_flow_control(ethernet: dict, ethtool: Ethtool):
+ """
+ Verify flow control
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ :param ethtool: Ethtool object
+ :type ethtool: Ethtool
+ """
+ if 'disable_flow_control' in ethernet:
+ if not ethtool.check_flow_control():
+ raise ConfigError(
+ 'Adapter does not support changing flow-control settings!')
+
+
+def verify_ring_buffer(ethernet: dict, ethtool: Ethtool):
+ """
+ Verify ring buffer
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ :param ethtool: Ethtool object
+ :type ethtool: Ethtool
+ """
+ if 'ring_buffer' in ethernet:
+ max_rx = ethtool.get_ring_buffer_max('rx')
+ if not max_rx:
+ raise ConfigError(
+ 'Driver does not support RX ring-buffer configuration!')
+
+ max_tx = ethtool.get_ring_buffer_max('tx')
+ if not max_tx:
+ raise ConfigError(
+ 'Driver does not support TX ring-buffer configuration!')
+
+ rx = dict_search('ring_buffer.rx', ethernet)
+ if rx and int(rx) > int(max_rx):
+ raise ConfigError(f'Driver only supports a maximum RX ring-buffer ' \
+ f'size of "{max_rx}" bytes!')
+
+ tx = dict_search('ring_buffer.tx', ethernet)
+ if tx and int(tx) > int(max_tx):
+ raise ConfigError(f'Driver only supports a maximum TX ring-buffer ' \
+ f'size of "{max_tx}" bytes!')
+
+
+def verify_offload(ethernet: dict, ethtool: Ethtool):
+ """
+ Verify offloading capabilities
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ :param ethtool: Ethtool object
+ :type ethtool: Ethtool
+ """
+ if dict_search('offload.rps', ethernet) != None:
+ if not os.path.exists(f'/sys/class/net/{ethernet["ifname"]}/queues/rx-0/rps_cpus'):
+ raise ConfigError('Interface does not support RPS!')
+ driver = ethtool.get_driver_name()
+ # T3342 - Xen driver requires special treatment
+ if driver == 'vif':
+ if int(ethernet['mtu']) > 1500 and dict_search('offload.sg', ethernet) == None:
+ raise ConfigError('Xen netback driver requires scatter-gather offloading '\
+ 'for MTU sizes larger than 1500 bytes')
+
+
+def verify_allowedbond_changes(ethernet: dict):
+ """
+ Verify changed options if interface is in bonding
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ """
+ if 'bond_blocked_changes' in ethernet:
+ for option in ethernet['bond_blocked_changes']:
+ raise ConfigError(f'Cannot configure "{option.replace(".", " ")}"' \
+ f' on interface "{ethernet["ifname"]}".' \
+ f' Interface is a bond member')
+
+
+def verify(ethernet):
+ if 'deleted' in ethernet:
+ return None
+ if 'is_bond_member' in ethernet:
+ verify_bond_member(ethernet)
+ else:
+ verify_ethernet(ethernet)
+
+
+def verify_bond_member(ethernet):
+ """
+ Verification function for ethernet interface which is in bonding
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ """
+ ifname = ethernet['ifname']
+ verify_interface_exists(ifname)
+ verify_eapol(ethernet)
+ verify_mirror_redirect(ethernet)
+ ethtool = Ethtool(ifname)
+ verify_speed_duplex(ethernet, ethtool)
+ verify_flow_control(ethernet, ethtool)
+ verify_ring_buffer(ethernet, ethtool)
+ verify_offload(ethernet, ethtool)
+ verify_allowedbond_changes(ethernet)
+
+def verify_ethernet(ethernet):
+ """
+ Verification function for simple ethernet interface
+ :param ethernet: dictionary which is received from get_interface_dict
+ :type ethernet: dict
+ """
+ ifname = ethernet['ifname']
+ verify_interface_exists(ifname)
+ verify_mtu(ethernet)
+ verify_mtu_ipv6(ethernet)
+ verify_dhcpv6(ethernet)
+ verify_address(ethernet)
+ verify_vrf(ethernet)
+ verify_bond_bridge_member(ethernet)
+ verify_eapol(ethernet)
+ verify_mirror_redirect(ethernet)
+ ethtool = Ethtool(ifname)
+ # No need to check speed and duplex keys as both have default values.
+ verify_speed_duplex(ethernet, ethtool)
+ verify_flow_control(ethernet, ethtool)
+ verify_ring_buffer(ethernet, ethtool)
+ verify_offload(ethernet, ethtool)
+ # use common function to verify VLAN configuration
+ verify_vlan_config(ethernet)
+ return None
+
+
+def generate(ethernet):
+ # render real configuration file once
+ wpa_supplicant_conf = wpa_suppl_conf.format(**ethernet)
+
+ if 'deleted' in ethernet:
+ # delete configuration on interface removal
+ if os.path.isfile(wpa_supplicant_conf):
+ os.unlink(wpa_supplicant_conf)
+ return None
+
+ if 'eapol' in ethernet:
+ ifname = ethernet['ifname']
+
+ render(wpa_supplicant_conf, 'ethernet/wpa_supplicant.conf.j2', ethernet)
+
+ cert_file_path = os.path.join(cfg_dir, f'{ifname}_cert.pem')
+ cert_key_path = os.path.join(cfg_dir, f'{ifname}_cert.key')
+
+ cert_name = ethernet['eapol']['certificate']
+ pki_cert = ethernet['pki']['certificate'][cert_name]
+
+ loaded_pki_cert = load_certificate(pki_cert['certificate'])
+ loaded_ca_certs = {load_certificate(c['certificate'])
+ for c in ethernet['pki']['ca'].values()} if 'ca' in ethernet['pki'] else {}
+
+ cert_full_chain = find_chain(loaded_pki_cert, loaded_ca_certs)
+
+ write_file(cert_file_path,
+ '\n'.join(encode_certificate(c) for c in cert_full_chain))
+ write_file(cert_key_path, wrap_private_key(pki_cert['private']['key']))
+
+ if 'ca_certificate' in ethernet['eapol']:
+ ca_cert_file_path = os.path.join(cfg_dir, f'{ifname}_ca.pem')
+ ca_chains = []
+
+ for ca_cert_name in ethernet['eapol']['ca_certificate']:
+ pki_ca_cert = ethernet['pki']['ca'][ca_cert_name]
+ loaded_ca_cert = load_certificate(pki_ca_cert['certificate'])
+ ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs)
+ ca_chains.append(
+ '\n'.join(encode_certificate(c) for c in ca_full_chain))
+
+ write_file(ca_cert_file_path, '\n'.join(ca_chains))
+
+ return None
+
+def apply(ethernet):
+ ifname = ethernet['ifname']
+ # take care about EAPoL supplicant daemon
+ eapol_action='stop'
+
+ e = EthernetIf(ifname)
+ if 'deleted' in ethernet:
+ # delete interface
+ e.remove()
+ else:
+ e.update(ethernet)
+ if 'eapol' in ethernet:
+ eapol_action='reload-or-restart'
+
+ call(f'systemctl {eapol_action} wpa_supplicant-wired@{ifname}')
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
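
The bond-member handling above works on dotted option paths (dict_to_paths_values) and reconciles them against the bond's own configuration. The stand-alone sketch below illustrates that idea only; flatten_paths() and the two option sets are simplified assumptions, not the real EthernetIf/BondIf lists.

# Minimal sketch of the bond-member reconciliation above, not the VyOS code.
# flatten_paths() approximates dict_to_paths_values(); the option sets are
# assumptions standing in for the EthernetIf/BondIf allowed/inherited options.

def flatten_paths(d, prefix=''):
    """Return {'a.b.c': value} for every leaf of a nested dict."""
    out = {}
    for key, value in d.items():
        path = f'{prefix}.{key}' if prefix else key
        if isinstance(value, dict) and value:
            out.update(flatten_paths(value, path))
        else:
            out[path] = value
    return out

ALLOWED_ON_MEMBER = {'description', 'disable'}   # assumed subset
INHERITED_FROM_BOND = {'mtu'}                    # assumed subset

def blocked_options(eth_cfg: dict, bond_cfg: dict) -> list:
    """Options configured on the member that cannot be applied to it."""
    bond_paths = flatten_paths(bond_cfg)
    blocked = []
    for path in flatten_paths(eth_cfg):
        if path in ALLOWED_ON_MEMBER:
            continue                      # member may set this freely
        if path in INHERITED_FROM_BOND and path in bond_paths:
            continue                      # value is replaced by the bond's value
        blocked.append(path)
    return blocked

print(blocked_options({'description': 'uplink', 'mtu': '1500', 'offload': {'gro': {}}},
                      {'mtu': '9000'}))   # ['offload.gro']
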
diff --git a/src/conf_mode/interfaces-geneve.py b/src/conf_mode/interfaces_geneve.py
index f6694ddde..f6694ddde 100755
--- a/src/conf_mode/interfaces-geneve.py
+++ b/src/conf_mode/interfaces_geneve.py
diff --git a/src/conf_mode/interfaces-input.py b/src/conf_mode/interfaces_input.py
index ad248843d..ad248843d 100755
--- a/src/conf_mode/interfaces-input.py
+++ b/src/conf_mode/interfaces_input.py
diff --git a/src/conf_mode/interfaces-l2tpv3.py b/src/conf_mode/interfaces_l2tpv3.py
index e1db3206e..e1db3206e 100755
--- a/src/conf_mode/interfaces-l2tpv3.py
+++ b/src/conf_mode/interfaces_l2tpv3.py
diff --git a/src/conf_mode/interfaces-loopback.py b/src/conf_mode/interfaces_loopback.py
index 08d34477a..08d34477a 100755
--- a/src/conf_mode/interfaces-loopback.py
+++ b/src/conf_mode/interfaces_loopback.py
diff --git a/src/conf_mode/interfaces-macsec.py b/src/conf_mode/interfaces_macsec.py
index 0a927ac88..0a927ac88 100755
--- a/src/conf_mode/interfaces-macsec.py
+++ b/src/conf_mode/interfaces_macsec.py
diff --git a/src/conf_mode/interfaces-openvpn.py b/src/conf_mode/interfaces_openvpn.py
index 1d0feb56f..45569dd21 100755
--- a/src/conf_mode/interfaces-openvpn.py
+++ b/src/conf_mode/interfaces_openvpn.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2023 VyOS maintainers and contributors
+# Copyright (C) 2019-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -30,6 +30,7 @@ from netifaces import interfaces
from secrets import SystemRandom
from shutil import rmtree
+from vyos.base import DeprecationWarning
from vyos.config import Config
from vyos.configdict import get_interface_dict
from vyos.configdict import is_node_changed
@@ -88,16 +89,12 @@ def get_config(config=None):
conf = Config()
base = ['interfaces', 'openvpn']
- ifname, openvpn = get_interface_dict(conf, base)
+ ifname, openvpn = get_interface_dict(conf, base, with_pki=True)
openvpn['auth_user_pass_file'] = '/run/openvpn/{ifname}.pw'.format(**openvpn)
if 'deleted' in openvpn:
return openvpn
- openvpn['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True,
- no_tag_node_value_mangle=True)
-
if is_node_changed(conf, base + [ifname, 'openvpn-option']):
openvpn.update({'restart_required': {}})
if is_node_changed(conf, base + [ifname, 'enable-dco']):
@@ -165,6 +162,12 @@ def verify_pki(openvpn):
if shared_secret_key not in pki['openvpn']['shared_secret']:
raise ConfigError(f'Invalid shared-secret on openvpn interface {interface}')
+ # If PSK settings are correct, warn about its deprecation
+ DeprecationWarning('OpenVPN shared-secret support will be removed in future '\
+ 'VyOS versions. Please migrate your site-to-site tunnels to '\
+ 'TLS. You can use self-signed certificates with peer fingerprint '\
+ 'verification, consult the documentation for details.')
+
if tls:
if (mode in ['server', 'client']) and ('ca_certificate' not in tls):
raise ConfigError(f'Must specify "tls ca-certificate" on openvpn interface {interface},\
@@ -344,9 +347,6 @@ def verify(openvpn):
if v6_subnets > 1:
raise ConfigError('Cannot specify more than 1 IPv6 server subnet')
- if v6_subnets > 0 and v4_subnets == 0:
- raise ConfigError('IPv6 server requires an IPv4 server subnet')
-
for subnet in tmp:
if is_ipv4(subnet):
subnet = IPv4Network(subnet)
@@ -388,6 +388,10 @@ def verify(openvpn):
for v4PoolNet in v4PoolNets:
if IPv4Address(client['ip'][0]) in v4PoolNet:
print(f'Warning: Client "{client["name"]}" IP {client["ip"][0]} is in server IP pool, it is not reserved for this client.')
+ # configuring a client_ip_pool will set 'server ... nopool' which is currently incompatible with 'server-ipv6' (probably to be fixed upstream)
+ for subnet in (dict_search('server.subnet', openvpn) or []):
+ if is_ipv6(subnet):
+ raise ConfigError(f'Setting client-ip-pool is incompatible with an IPv6 server subnet.')
for subnet in (dict_search('server.subnet', openvpn) or []):
if is_ipv6(subnet):
@@ -722,4 +726,3 @@ if __name__ == '__main__':
except ConfigError as e:
print(e)
exit(1)
-
diff --git a/src/conf_mode/interfaces-pppoe.py b/src/conf_mode/interfaces_pppoe.py
index fca91253c..42f084309 100755
--- a/src/conf_mode/interfaces-pppoe.py
+++ b/src/conf_mode/interfaces_pppoe.py
@@ -61,6 +61,12 @@ def get_config(config=None):
# bail out early - no need to further process other nodes
break
+ if 'deleted' not in pppoe:
+ # We always set the MRU value to the MTU size. This code path only re-creates
+ # the old behavior if MRU is not set on the CLI.
+ if 'mru' not in pppoe:
+ pppoe['mru'] = pppoe['mtu']
+
return pppoe
def verify(pppoe):
@@ -77,6 +83,11 @@ def verify(pppoe):
if {'connect_on_demand', 'vrf'} <= set(pppoe):
raise ConfigError('On-demand dialing and VRF can not be used at the same time')
+ # both MTU and MRU have default values, thus we do not need to check
+ # if the key exists
+ if int(pppoe['mru']) > int(pppoe['mtu']):
+ raise ConfigError('PPPoE MRU must not be greater than the MTU!')
+
return None
def generate(pppoe):
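
A compact illustration of the MRU handling added above: when no MRU is configured it falls back to the MTU, and an MRU larger than the MTU is rejected. The default value used here is an assumption for the sketch, not the actual CLI default.

# Sketch of the PPPoE MRU handling above; the MTU default is an assumption.

def normalize_pppoe(cfg: dict) -> dict:
    cfg = dict(cfg)
    cfg.setdefault('mtu', '1492')        # assumed CLI default
    cfg.setdefault('mru', cfg['mtu'])    # MRU falls back to the MTU
    if int(cfg['mru']) > int(cfg['mtu']):
        raise ValueError('PPPoE MRU must not be greater than the MTU!')
    return cfg

print(normalize_pppoe({'mtu': '1492'}))                  # MRU inherits 1492
print(normalize_pppoe({'mtu': '1492', 'mru': '1400'}))   # explicit MRU kept
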
diff --git a/src/conf_mode/interfaces-pseudo-ethernet.py b/src/conf_mode/interfaces_pseudo-ethernet.py
index dce5c2358..dce5c2358 100755
--- a/src/conf_mode/interfaces-pseudo-ethernet.py
+++ b/src/conf_mode/interfaces_pseudo-ethernet.py
diff --git a/src/conf_mode/interfaces-sstpc.py b/src/conf_mode/interfaces_sstpc.py
index b588910dc..b9d7a74fb 100755
--- a/src/conf_mode/interfaces-sstpc.py
+++ b/src/conf_mode/interfaces_sstpc.py
@@ -45,7 +45,7 @@ def get_config(config=None):
else:
conf = Config()
base = ['interfaces', 'sstpc']
- ifname, sstpc = get_interface_dict(conf, base)
+ ifname, sstpc = get_interface_dict(conf, base, with_pki=True)
# We should only terminate the SSTP client session if critical parameters
# change. All parameters that can be changed on-the-fly (like interface
@@ -57,10 +57,6 @@ def get_config(config=None):
# bail out early - no need to further process other nodes
break
- # Load PKI certificates for later processing
- sstpc['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True,
- no_tag_node_value_mangle=True)
return sstpc
def verify(sstpc):
diff --git a/src/conf_mode/interfaces-tunnel.py b/src/conf_mode/interfaces_tunnel.py
index 91aed9cc3..efa5ebc64 100755
--- a/src/conf_mode/interfaces-tunnel.py
+++ b/src/conf_mode/interfaces_tunnel.py
@@ -24,7 +24,7 @@ from vyos.configdict import get_interface_dict
from vyos.configdict import is_node_changed
from vyos.configverify import verify_address
from vyos.configverify import verify_bridge_delete
-from vyos.configverify import verify_interface_exists
+from vyos.configverify import verify_source_interface
from vyos.configverify import verify_mtu_ipv6
from vyos.configverify import verify_mirror_redirect
from vyos.configverify import verify_vrf
@@ -166,7 +166,7 @@ def verify(tunnel):
verify_mirror_redirect(tunnel)
if 'source_interface' in tunnel:
- verify_interface_exists(tunnel['source_interface'])
+ verify_source_interface(tunnel)
# TTL != 0 and nopmtudisc are incompatible, parameters and ip use default
# values, thus the keys are always present.
diff --git a/src/conf_mode/interfaces-virtual-ethernet.py b/src/conf_mode/interfaces_virtual-ethernet.py
index 8efe89c41..8efe89c41 100755
--- a/src/conf_mode/interfaces-virtual-ethernet.py
+++ b/src/conf_mode/interfaces_virtual-ethernet.py
diff --git a/src/conf_mode/interfaces-vti.py b/src/conf_mode/interfaces_vti.py
index 9871810ae..9871810ae 100755
--- a/src/conf_mode/interfaces-vti.py
+++ b/src/conf_mode/interfaces_vti.py
diff --git a/src/conf_mode/interfaces-vxlan.py b/src/conf_mode/interfaces_vxlan.py
index a3b0867e0..4251e611b 100755
--- a/src/conf_mode/interfaces-vxlan.py
+++ b/src/conf_mode/interfaces_vxlan.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2022 VyOS maintainers and contributors
+# Copyright (C) 2019-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -24,6 +24,7 @@ from vyos.config import Config
from vyos.configdict import get_interface_dict
from vyos.configdict import leaf_node_changed
from vyos.configdict import is_node_changed
+from vyos.configdict import node_changed
from vyos.configverify import verify_address
from vyos.configverify import verify_bridge_delete
from vyos.configverify import verify_mtu_ipv6
@@ -33,6 +34,7 @@ from vyos.configverify import verify_bond_bridge_member
from vyos.ifconfig import Interface
from vyos.ifconfig import VXLANIf
from vyos.template import is_ipv6
+from vyos.utils.dict import dict_search
from vyos import ConfigError
from vyos import airbag
airbag.enable()
@@ -52,12 +54,21 @@ def get_config(config=None):
# VXLAN interfaces are picky and require recreation if certain parameters
# change. But a VXLAN interface should - of course - not be re-created if
# its description or IP address is adjusted. Feels somehow logical, doesn't it?
- for cli_option in ['parameters', 'external', 'gpe', 'group', 'port', 'remote',
+ for cli_option in ['parameters', 'gpe', 'group', 'port', 'remote',
'source-address', 'source-interface', 'vni']:
if is_node_changed(conf, base + [ifname, cli_option]):
vxlan.update({'rebuild_required': {}})
break
+ # When dealing with VNI filtering we need to know what VNI was actually removed,
+ # so build up a dict matching the vlan_to_vni structure but with removed values.
+ tmp = node_changed(conf, base + [ifname, 'vlan-to-vni'], recursive=True)
+ if tmp:
+ vxlan.update({'vlan_to_vni_removed': {}})
+ for vlan in tmp:
+ vni = leaf_node_changed(conf, base + [ifname, 'vlan-to-vni', vlan, 'vni'])
+ vxlan['vlan_to_vni_removed'].update({vlan : {'vni' : vni[0]}})
+
# We need to verify that no other VXLAN tunnel is configured when external
# mode is in use - Linux Kernel limitation
conf.set_level(base)
@@ -90,17 +101,34 @@ def verify(vxlan):
if not any(tmp in ['group', 'remote', 'source_address', 'source_interface'] for tmp in vxlan):
raise ConfigError('Group, remote, source-address or source-interface must be configured')
- if 'vni' not in vxlan and 'external' not in vxlan:
- raise ConfigError(
- 'Must either configure VXLAN "vni" or use "external" CLI option!')
-
- if {'external', 'vni'} <= set(vxlan):
- raise ConfigError('Can not specify both "external" and "VNI"!')
-
- if {'external', 'other_tunnels'} <= set(vxlan):
- other_tunnels = ', '.join(vxlan['other_tunnels'])
- raise ConfigError(f'Only one VXLAN tunnel is supported when "external" '\
- f'CLI option is used. Additional tunnels: {other_tunnels}')
+ if 'vni' not in vxlan and dict_search('parameters.external', vxlan) == None:
+ raise ConfigError('Must either configure VXLAN "vni" or use "external" CLI option!')
+
+ if dict_search('parameters.external', vxlan) != None:
+ if 'vni' in vxlan:
+ raise ConfigError('Can not specify both "external" and "VNI"!')
+
+ if 'other_tunnels' in vxlan:
+ # When multiple VXLAN interfaces are defined and "external" is used,
+ # all VXLAN interfaces need to have vni-filter enabled!
+ # See Linux Kernel commit f9c4bb0b245cee35ef66f75bf409c9573d934cf9
+ other_vni_filter = False
+ for tunnel, tunnel_config in vxlan['other_tunnels'].items():
+ if dict_search('parameters.vni_filter', tunnel_config) != None:
+ other_vni_filter = True
+ break
+ vni_filter = dict_search('parameters.vni_filter', vxlan) != None
+ # If either one is enabled, so must be the other. Both can be off and both can be on
+ if (vni_filter and not other_vni_filter) or (not vni_filter and other_vni_filter):
+ raise ConfigError(f'Using multiple VXLAN interfaces with "external" '\
+ 'requires all VXLAN interfaces to have "vni-filter" configured!')
+
+ if not vni_filter and not other_vni_filter:
+ other_tunnels = ', '.join(vxlan['other_tunnels'])
+ raise ConfigError(f'Only one VXLAN tunnel is supported when "external" '\
+ f'CLI option is used and "vni-filter" is unset. '\
+ f'Additional tunnels: {other_tunnels}')
if 'gpe' in vxlan and 'external' not in vxlan:
raise ConfigError(f'VXLAN-GPE is only supported when "external" '\
@@ -146,10 +174,36 @@ def verify(vxlan):
raise ConfigError(error_msg)
protocol = 'ipv4'
+ if 'vlan_to_vni' in vxlan:
+ if 'is_bridge_member' not in vxlan:
+ raise ConfigError('VLAN to VNI mapping requires that VXLAN interface '\
+ 'is a member of a bridge interface!')
+
+ vnis_used = []
+ for vif, vif_config in vxlan['vlan_to_vni'].items():
+ if 'vni' not in vif_config:
+ raise ConfigError(f'Must define VNI for VLAN "{vif}"!')
+ vni = vif_config['vni']
+ if vni in vnis_used:
+ raise ConfigError(f'VNI "{vni}" is already assigned to a different VLAN!')
+ vnis_used.append(vni)
+
+ if dict_search('parameters.neighbor_suppress', vxlan) != None:
+ if 'is_bridge_member' not in vxlan:
+ raise ConfigError('Neighbor suppression requires that VXLAN interface '\
+ 'is a member of a bridge interface!')
+
verify_mtu_ipv6(vxlan)
verify_address(vxlan)
verify_bond_bridge_member(vxlan)
verify_mirror_redirect(vxlan)
+
+ # We use a defaultValue for port, thus it's always safe to use
+ if vxlan['port'] == '8472':
+ Warning('Starting from VyOS 1.4, the default port for VXLAN '\
+ 'has been changed to 4789. This matches the IANA assigned '\
+ 'standard port number!')
+
return None
def generate(vxlan):
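
The "external" handling above enforces an all-or-nothing rule for vni-filter across VXLAN interfaces. The sketch below restates that check in isolation; the tunnel dictionaries passed in are invented for illustration.

# Stand-alone restatement of the vni-filter consistency check above;
# the tunnel data is made up for illustration.

def check_external_vni_filter(this_cfg: dict, other_tunnels: dict) -> None:
    this_filter = 'vni_filter' in this_cfg.get('parameters', {})
    others_filter = any('vni_filter' in cfg.get('parameters', {})
                        for cfg in other_tunnels.values())
    if not other_tunnels:
        return                            # a single external tunnel is always fine
    if this_filter != others_filter:
        raise ValueError('All VXLAN interfaces using "external" must have '
                         '"vni-filter" configured!')
    if not this_filter:
        raise ValueError('Only one VXLAN tunnel is supported when "external" '
                         'is used and "vni-filter" is unset!')

# Two external tunnels, both with vni-filter: accepted.
check_external_vni_filter({'parameters': {'vni_filter': {}}},
                          {'vxlan1': {'parameters': {'vni_filter': {}}}})
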
diff --git a/src/conf_mode/interfaces-wireguard.py b/src/conf_mode/interfaces_wireguard.py
index 122d9589a..79e5d3f44 100755
--- a/src/conf_mode/interfaces-wireguard.py
+++ b/src/conf_mode/interfaces_wireguard.py
@@ -51,17 +51,9 @@ def get_config(config=None):
tmp = is_node_changed(conf, base + [ifname, 'port'])
if tmp: wireguard['port_changed'] = {}
- # Determine which Wireguard peer has been removed.
- # Peers can only be removed with their public key!
- if 'peer' in wireguard:
- peer_remove = {}
- for peer, peer_config in wireguard['peer'].items():
- # T4702: If anything on a peer changes we remove the peer first and re-add it
- if is_node_changed(conf, base + [ifname, 'peer', peer]):
- if 'public_key' in peer_config:
- peer_remove = dict_merge({'peer_remove' : {peer : peer_config['public_key']}}, peer_remove)
- if peer_remove:
- wireguard.update(peer_remove)
+ # T4702: If anything on a peer changes we remove the peer first and re-add it
+ if is_node_changed(conf, base + [ifname, 'peer']):
+ wireguard.update({'rebuild_required': {}})
return wireguard
@@ -113,12 +105,21 @@ def verify(wireguard):
public_keys.append(peer['public_key'])
def apply(wireguard):
- tmp = WireGuardIf(wireguard['ifname'])
- if 'deleted' in wireguard:
- tmp.remove()
- return None
+ if 'rebuild_required' in wireguard or 'deleted' in wireguard:
+ wg = WireGuardIf(**wireguard)
+ # WireGuard only supports peer removal based on the configured public key;
+ # deleting the entire interface is a shortcut that avoids parsing out all
+ # peers and removing them one by one.
+ #
+ # Peer reconfiguration will always come with a short downtime while the
+ # WireGuard interface is recreated (see below)
+ wg.remove()
+
+ # Create the new interface if required
+ if 'deleted' not in wireguard:
+ wg = WireGuardIf(**wireguard)
+ wg.update(wireguard)
- tmp.update(wireguard)
return None
if __name__ == '__main__':
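
The reworked apply() above removes and recreates the interface whenever any peer changed, instead of tracking removed peers by public key. Below is a control-flow sketch of that strategy; Device is a placeholder class, not the VyOS WireGuardIf API.

# Control-flow sketch of the apply() strategy above; Device is a placeholder.

class Device:
    def __init__(self, ifname: str):
        self.ifname = ifname
    def remove(self):
        print(f'removing {self.ifname}')
    def update(self, cfg: dict):
        print(f'configuring {self.ifname} with {len(cfg.get("peer", {}))} peer(s)')

def apply_wireguard(cfg: dict) -> None:
    # Tear the interface down if a peer changed or the interface was deleted;
    # this implicitly drops all previously configured peers.
    if 'rebuild_required' in cfg or 'deleted' in cfg:
        Device(cfg['ifname']).remove()
    # Recreate and (re)configure it unless it was deleted from the CLI.
    if 'deleted' not in cfg:
        Device(cfg['ifname']).update(cfg)

apply_wireguard({'ifname': 'wg0', 'rebuild_required': {}, 'peer': {'to-hq': {}}})
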
diff --git a/src/conf_mode/interfaces-wireless.py b/src/conf_mode/interfaces_wireless.py
index 02b4a2500..02b4a2500 100755
--- a/src/conf_mode/interfaces-wireless.py
+++ b/src/conf_mode/interfaces_wireless.py
diff --git a/src/conf_mode/interfaces-wwan.py b/src/conf_mode/interfaces_wwan.py
index 2515dc838..2515dc838 100755
--- a/src/conf_mode/interfaces-wwan.py
+++ b/src/conf_mode/interfaces_wwan.py
diff --git a/src/conf_mode/le_cert.py b/src/conf_mode/le_cert.py
deleted file mode 100755
index 06c7e7b72..000000000
--- a/src/conf_mode/le_cert.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2019-2020 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import os
-
-import vyos.defaults
-from vyos.config import Config
-from vyos import ConfigError
-from vyos.utils.process import cmd
-from vyos.utils.process import call
-from vyos.utils.process import is_systemd_service_running
-
-from vyos import airbag
-airbag.enable()
-
-vyos_conf_scripts_dir = vyos.defaults.directories['conf_mode']
-vyos_certbot_dir = vyos.defaults.directories['certbot']
-
-dependencies = [
- 'https.py',
-]
-
-def request_certbot(cert):
- email = cert.get('email')
- if email is not None:
- email_flag = '-m {0}'.format(email)
- else:
- email_flag = ''
-
- domains = cert.get('domains')
- if domains is not None:
- domain_flag = '-d ' + ' -d '.join(domains)
- else:
- domain_flag = ''
-
- certbot_cmd = f'certbot certonly --config-dir {vyos_certbot_dir} -n --nginx --agree-tos --no-eff-email --expand {email_flag} {domain_flag}'
-
- cmd(certbot_cmd,
- raising=ConfigError,
- message="The certbot request failed for the specified domains.")
-
-def get_config():
- conf = Config()
- if not conf.exists('service https certificates certbot'):
- return None
- else:
- conf.set_level('service https certificates certbot')
-
- cert = {}
-
- if conf.exists('domain-name'):
- cert['domains'] = conf.return_values('domain-name')
-
- if conf.exists('email'):
- cert['email'] = conf.return_value('email')
-
- return cert
-
-def verify(cert):
- if cert is None:
- return None
-
- if 'domains' not in cert:
- raise ConfigError("At least one domain name is required to"
- " request a letsencrypt certificate.")
-
- if 'email' not in cert:
- raise ConfigError("An email address is required to request"
- " a letsencrypt certificate.")
-
-def generate(cert):
- if cert is None:
- return None
-
- # certbot will attempt to reload nginx, even with 'certonly';
- # start nginx if not active
- if not is_systemd_service_running('nginx.service'):
- call('systemctl start nginx.service')
-
- request_certbot(cert)
-
-def apply(cert):
- if cert is not None:
- call('systemctl restart certbot.timer')
- else:
- call('systemctl stop certbot.timer')
- return None
-
- for dep in dependencies:
- cmd(f'{vyos_conf_scripts_dir}/{dep}', raising=ConfigError)
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- sys.exit(1)
-
diff --git a/src/conf_mode/load-balancing-haproxy.py b/src/conf_mode/load-balancing_reverse-proxy.py
index 8fe429653..7338fe573 100755
--- a/src/conf_mode/load-balancing-haproxy.py
+++ b/src/conf_mode/load-balancing_reverse-proxy.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2023 VyOS maintainers and contributors
+# Copyright (C) 2023-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -43,17 +43,14 @@ def get_config(config=None):
conf = Config()
base = ['load-balancing', 'reverse-proxy']
+ if not conf.exists(base):
+ return None
lb = conf.get_config_dict(base,
get_first_key=True,
key_mangling=('-', '_'),
- no_tag_node_value_mangle=True)
-
- if lb:
- lb['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
-
- if lb:
- lb = conf.merge_defaults(lb, recursive=True)
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True,
+ with_pki=True)
return lb
@@ -94,8 +91,8 @@ def generate(lb):
if os.path.isfile(file):
os.unlink(file)
# Delete old directories
- #if os.path.isdir(load_balancing_dir):
- # rmtree(load_balancing_dir, ignore_errors=True)
+ if os.path.isdir(load_balancing_dir):
+ rmtree(load_balancing_dir, ignore_errors=True)
return None
@@ -106,26 +103,26 @@ def generate(lb):
# SSL Certificates for frontend
for front, front_config in lb['service'].items():
if 'ssl' in front_config:
- cert_file_path = os.path.join(load_balancing_dir, 'cert.pem')
- cert_key_path = os.path.join(load_balancing_dir, 'cert.pem.key')
- ca_cert_file_path = os.path.join(load_balancing_dir, 'ca.pem')
if 'certificate' in front_config['ssl']:
- #cert_file_path = os.path.join(load_balancing_dir, 'cert.pem')
- #cert_key_path = os.path.join(load_balancing_dir, 'cert.key')
- cert_name = front_config['ssl']['certificate']
- pki_cert = lb['pki']['certificate'][cert_name]
+ cert_names = front_config['ssl']['certificate']
+
+ for cert_name in cert_names:
+ pki_cert = lb['pki']['certificate'][cert_name]
+ cert_file_path = os.path.join(load_balancing_dir, f'{cert_name}.pem')
+ cert_key_path = os.path.join(load_balancing_dir, f'{cert_name}.pem.key')
- with open(cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_cert['certificate']))
+ with open(cert_file_path, 'w') as f:
+ f.write(wrap_certificate(pki_cert['certificate']))
- if 'private' in pki_cert and 'key' in pki_cert['private']:
- with open(cert_key_path, 'w') as f:
- f.write(wrap_private_key(pki_cert['private']['key']))
+ if 'private' in pki_cert and 'key' in pki_cert['private']:
+ with open(cert_key_path, 'w') as f:
+ f.write(wrap_private_key(pki_cert['private']['key']))
if 'ca_certificate' in front_config['ssl']:
ca_name = front_config['ssl']['ca_certificate']
pki_ca_cert = lb['pki']['ca'][ca_name]
+ ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
with open(ca_cert_file_path, 'w') as f:
f.write(wrap_certificate(pki_ca_cert['certificate']))
@@ -133,11 +130,11 @@ def generate(lb):
# SSL Certificates for backend
for back, back_config in lb['backend'].items():
if 'ssl' in back_config:
- ca_cert_file_path = os.path.join(load_balancing_dir, 'ca.pem')
if 'ca_certificate' in back_config['ssl']:
ca_name = back_config['ssl']['ca_certificate']
pki_ca_cert = lb['pki']['ca'][ca_name]
+ ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
with open(ca_cert_file_path, 'w') as f:
f.write(wrap_certificate(pki_ca_cert['certificate']))
diff --git a/src/conf_mode/load-balancing-wan.py b/src/conf_mode/load-balancing_wan.py
index ad9c80d72..ad9c80d72 100755
--- a/src/conf_mode/load-balancing-wan.py
+++ b/src/conf_mode/load-balancing_wan.py
diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py
index 9da7fbe80..ffd4a33e7 100755
--- a/src/conf_mode/nat.py
+++ b/src/conf_mode/nat.py
@@ -75,15 +75,8 @@ def verify_rule(config, err_msg, groups_dict):
dict_search('source.port', config)):
if config['protocol'] not in ['tcp', 'udp', 'tcp_udp']:
- raise ConfigError(f'{err_msg}\n' \
- 'ports can only be specified when protocol is '\
- 'either tcp, udp or tcp_udp!')
-
- if is_ip_network(dict_search('translation.address', config)):
- raise ConfigError(f'{err_msg}\n' \
- 'Cannot use ports with an IPv4 network as translation address as it\n' \
- 'statically maps a whole network of addresses onto another\n' \
- 'network of addresses')
+ raise ConfigError(f'{err_msg} ports can only be specified when '\
+ 'protocol is either tcp, udp or tcp_udp!')
for side in ['destination', 'source']:
if side in config:
@@ -195,11 +188,13 @@ def verify(nat):
if dict_search('source.rule', nat):
for rule, config in dict_search('source.rule', nat).items():
err_msg = f'Source NAT configuration error in rule {rule}:'
- if 'outbound_interface' not in config:
- raise ConfigError(f'{err_msg} outbound-interface not specified')
- if config['outbound_interface'] not in 'any' and config['outbound_interface'] not in interfaces():
- Warning(f'rule "{rule}" interface "{config["outbound_interface"]}" does not exist on this system')
+ if 'outbound_interface' in config:
+ if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']:
+ raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"')
+ elif 'name' in config['outbound_interface']:
+ if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces():
+ Warning(f'NAT interface "{config["outbound_interface"]["name"]}" for source NAT rule "{rule}" does not exist!')
if not dict_search('translation.address', config) and not dict_search('translation.port', config):
if 'exclude' not in config and 'backend' not in config['load_balance']:
@@ -218,11 +213,12 @@ def verify(nat):
for rule, config in dict_search('destination.rule', nat).items():
err_msg = f'Destination NAT configuration error in rule {rule}:'
- if 'inbound_interface' not in config:
- raise ConfigError(f'{err_msg}\n' \
- 'inbound-interface not specified')
- elif config['inbound_interface'] not in 'any' and config['inbound_interface'] not in interfaces():
- Warning(f'rule "{rule}" interface "{config["inbound_interface"]}" does not exist on this system')
+ if 'inbound_interface' in config:
+ if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']:
+ raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"')
+ elif 'name' in config['inbound_interface']:
+ if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces():
+ Warning(f'NAT interface "{config["inbound_interface"]["name"]}" for destination NAT rule "{rule}" does not exist!')
if not dict_search('translation.address', config) and not dict_search('translation.port', config) and 'redirect' not in config['translation']:
if 'exclude' not in config and 'backend' not in config['load_balance']:
@@ -236,8 +232,7 @@ def verify(nat):
err_msg = f'Static NAT configuration error in rule {rule}:'
if 'inbound_interface' not in config:
- raise ConfigError(f'{err_msg}\n' \
- 'inbound-interface not specified')
+ raise ConfigError(f'{err_msg} inbound-interface not specified')
# common rule verification
verify_rule(config, err_msg, nat['firewall_group'])
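
The NAT rule changes above move the interface match into name/group sub-nodes and make them mutually exclusive, warning (rather than failing) when a named interface does not exist. A small sketch of that validation; the interface list is hard-coded for the example instead of being read from the system.

# Sketch of the interface-match validation above; the list of system
# interfaces is hard-coded here instead of being queried at runtime.

def check_interface_match(rule: str, match: dict,
                          system_ifaces=('lo', 'eth0', 'eth1')) -> None:
    if 'name' in match and 'group' in match:
        raise ValueError(f'rule {rule}: cannot specify both interface group '
                         f'and interface name')
    name = match.get('name')
    if name and name != 'any' and name not in system_ifaces:
        print(f'Warning: NAT interface "{name}" for rule {rule} does not exist!')

check_interface_match('100', {'name': 'eth7'})   # prints a warning only
check_interface_match('110', {'group': 'WAN'})   # accepted silently
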
diff --git a/src/conf_mode/nat64.py b/src/conf_mode/nat64.py
new file mode 100755
index 000000000..6026c61d0
--- /dev/null
+++ b/src/conf_mode/nat64.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# pylint: disable=empty-docstring,missing-module-docstring
+
+import csv
+import os
+import re
+
+from ipaddress import IPv6Network
+from json import dumps as json_write
+
+from vyos import ConfigError
+from vyos import airbag
+from vyos.config import Config
+from vyos.configdict import dict_merge
+from vyos.configdict import is_node_changed
+from vyos.utils.dict import dict_search
+from vyos.utils.file import write_file
+from vyos.utils.kernel import check_kmod
+from vyos.utils.process import cmd
+from vyos.utils.process import run
+
+airbag.enable()
+
+INSTANCE_REGEX = re.compile(r"instance-(\d+)")
+JOOL_CONFIG_DIR = "/run/jool"
+
+
+def get_config(config: Config | None = None) -> None:
+ if config is None:
+ config = Config()
+
+ base = ["nat64"]
+ nat64 = config.get_config_dict(base, key_mangling=("-", "_"), get_first_key=True)
+
+ base_src = base + ["source", "rule"]
+
+ # Load in existing instances so we can destroy any unknown
+ lines = cmd("jool instance display --csv").splitlines()
+ for _, instance, _ in csv.reader(lines):
+ match = INSTANCE_REGEX.fullmatch(instance)
+ if not match:
+ # FIXME: Instances that don't match should be ignored but WARN'ed to the user
+ continue
+ num = match.group(1)
+
+ rules = nat64.setdefault("source", {}).setdefault("rule", {})
+ # Mark it for deletion
+ if num not in rules:
+ rules[num] = {"deleted": True}
+ continue
+
+ # If the user changes the mode, recreate the instance else Jool fails with:
+ # Jool error: Sorry; you can't change an instance's framework for now.
+ if is_node_changed(config, base_src + [f"instance-{num}", "mode"]):
+ rules[num]["recreate"] = True
+
+ # If the user changes the pool6, recreate the instance else Jool fails with:
+ # Jool error: Sorry; you can't change a NAT64 instance's pool6 for now.
+ if dict_search("source.prefix", rules[num]) and is_node_changed(
+ config,
+ base_src + [num, "source", "prefix"],
+ ):
+ rules[num]["recreate"] = True
+
+ return nat64
+
+
+def verify(nat64) -> None:
+ if not nat64:
+ # no need to verify the CLI as nat64 is going to be deactivated
+ return
+
+ if dict_search("source.rule", nat64):
+ # Ensure only 1 netfilter instance per namespace
+ nf_rules = filter(
+ lambda i: "deleted" not in i and i.get('mode') == "netfilter",
+ nat64["source"]["rule"].values(),
+ )
+ next(nf_rules, None) # Discard the first element
+ if next(nf_rules, None) is not None:
+ raise ConfigError(
+ "Jool permits only 1 NAT64 netfilter instance (per network namespace)"
+ )
+
+ for rule, instance in nat64["source"]["rule"].items():
+ if "deleted" in instance:
+ continue
+
+ # Verify that source.prefix is set and is a /96
+ if not dict_search("source.prefix", instance):
+ raise ConfigError(f"Source NAT64 rule {rule} missing source prefix")
+ if IPv6Network(instance["source"]["prefix"]).prefixlen != 96:
+ raise ConfigError(f"Source NAT64 rule {rule} source prefix must be /96")
+
+ pools = dict_search("translation.pool", instance)
+ if pools:
+ for num, pool in pools.items():
+ if "address" not in pool:
+ raise ConfigError(
+ f"Source NAT64 rule {rule} translation pool "
+ f"{num} missing address/prefix"
+ )
+ if "port" not in pool:
+ raise ConfigError(
+ f"Source NAT64 rule {rule} translation pool "
+ f"{num} missing port(-range)"
+ )
+
+
+def generate(nat64) -> None:
+ os.makedirs(JOOL_CONFIG_DIR, exist_ok=True)
+
+ if dict_search("source.rule", nat64):
+ for rule, instance in nat64["source"]["rule"].items():
+ if "deleted" in instance:
+ # Delete the unused instance file
+ os.unlink(os.path.join(JOOL_CONFIG_DIR, f"instance-{rule}.json"))
+ continue
+
+ name = f"instance-{rule}"
+ config = {
+ "instance": name,
+ "framework": "netfilter",
+ "global": {
+ "pool6": instance["source"]["prefix"],
+ "manually-enabled": "disable" not in instance,
+ },
+ # "bib": [],
+ }
+
+ if "description" in instance:
+ config["comment"] = instance["description"]
+
+ if dict_search("translation.pool", instance):
+ pool4 = []
+ # mark
+ mark = ''
+ if dict_search("match.mark", instance):
+ mark = instance["match"]["mark"]
+
+ for pool in instance["translation"]["pool"].values():
+ if "disable" in pool:
+ continue
+
+ protos = pool.get("protocol", {}).keys() or ("tcp", "udp", "icmp")
+ for proto in protos:
+ obj = {
+ "protocol": proto.upper(),
+ "prefix": pool["address"],
+ "port range": pool["port"],
+ }
+ if mark:
+ obj["mark"] = int(mark)
+ if "description" in pool:
+ obj["comment"] = pool["description"]
+
+ pool4.append(obj)
+
+ if pool4:
+ config["pool4"] = pool4
+
+ write_file(f'{JOOL_CONFIG_DIR}/{name}.json', json_write(config, indent=2))
+
+
+def apply(nat64) -> None:
+ if not nat64:
+ return
+
+ if dict_search("source.rule", nat64):
+ # Deletions first to avoid conflicts
+ for rule, instance in nat64["source"]["rule"].items():
+ if not any(k in instance for k in ("deleted", "recreate")):
+ continue
+
+ ret = run(f"jool instance remove instance-{rule}")
+ if ret != 0:
+ raise ConfigError(
+ f"Failed to remove nat64 source rule {rule} (jool instance instance-{rule})"
+ )
+
+ # Now creations
+ for rule, instance in nat64["source"]["rule"].items():
+ if "deleted" in instance:
+ continue
+
+ name = f"instance-{rule}"
+ ret = run(f"jool -i {name} file handle {JOOL_CONFIG_DIR}/{name}.json")
+ if ret != 0:
+ raise ConfigError(f"Failed to set jool instance {name}")
+
+
+if __name__ == "__main__":
+ try:
+ check_kmod(["jool"])
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
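
generate() above renders one JSON document per NAT64 instance for Jool's file handler. The snippet below builds an equivalent structure for a single, made-up rule so the resulting layout is easier to picture; the prefix, pool address and port range are invented.

# Shape of the per-instance document written by generate() above, built for a
# single made-up rule; addresses and ports are invented.
import json

rule = '10'
instance = {
    'source': {'prefix': '64:ff9b::/96'},
    'translation': {'pool': {'1': {'address': '192.0.2.1/32',
                                   'port': '1-65535'}}},
}

config = {
    'instance': f'instance-{rule}',
    'framework': 'netfilter',
    'global': {'pool6': instance['source']['prefix'],
               'manually-enabled': True},
    'pool4': [
        {'protocol': proto, 'prefix': pool['address'], 'port range': pool['port']}
        for pool in instance['translation']['pool'].values()
        for proto in ('TCP', 'UDP', 'ICMP')
    ],
}
print(json.dumps(config, indent=2))
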
diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py
index 4c12618bc..ed716b2a2 100755
--- a/src/conf_mode/nat66.py
+++ b/src/conf_mode/nat66.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2020-2021 VyOS maintainers and contributors
+# Copyright (C) 2020-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -35,7 +35,6 @@ airbag.enable()
k_mod = ['nft_nat', 'nft_chain_nat']
nftables_nat66_config = '/run/nftables_nat66.nft'
-ndppd_config = '/run/ndppd/ndppd.conf'
def get_handler(json, chain, target):
""" Get nftable rule handler number of given chain/target combination.
@@ -102,11 +101,13 @@ def verify(nat):
if dict_search('source.rule', nat):
for rule, config in dict_search('source.rule', nat).items():
err_msg = f'Source NAT66 configuration error in rule {rule}:'
- if 'outbound_interface' not in config:
- raise ConfigError(f'{err_msg} outbound-interface not specified')
- if config['outbound_interface'] not in interfaces():
- raise ConfigError(f'rule "{rule}" interface "{config["outbound_interface"]}" does not exist on this system')
+ if 'outbound_interface' in config:
+ if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']:
+ raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"')
+ elif 'name' in config['outbound_interface']:
+ if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces():
+ Warning(f'NAT66 interface "{config["outbound_interface"]["name"]}" for source NAT66 rule "{rule}" does not exist!')
addr = dict_search('translation.address', config)
if addr != None:
@@ -125,12 +126,12 @@ def verify(nat):
for rule, config in dict_search('destination.rule', nat).items():
err_msg = f'Destination NAT66 configuration error in rule {rule}:'
- if 'inbound_interface' not in config:
- raise ConfigError(f'{err_msg}\n' \
- 'inbound-interface not specified')
- else:
- if config['inbound_interface'] not in 'any' and config['inbound_interface'] not in interfaces():
- Warning(f'rule "{rule}" interface "{config["inbound_interface"]}" does not exist on this system')
+ if 'inbound_interface' in config:
+ if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']:
+ raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"')
+ elif 'name' in config['inbound_interface']:
+ if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces():
+ Warning(f'NAT66 interface "{config["inbound_interface"]["name"]}" for destination NAT66 rule "{rule}" does not exist!')
return None
@@ -139,7 +140,6 @@ def generate(nat):
nat['first_install'] = True
render(nftables_nat66_config, 'firewall/nftables-nat66.j2', nat, permission=0o755)
- render(ndppd_config, 'ndppd/ndppd.conf.j2', nat, permission=0o755)
return None
def apply(nat):
@@ -148,13 +148,6 @@ def apply(nat):
cmd(f'nft -f {nftables_nat66_config}')
- if 'deleted' in nat or not dict_search('source.rule', nat):
- cmd('systemctl stop ndppd')
- if os.path.isfile(ndppd_config):
- os.unlink(ndppd_config)
- else:
- cmd('systemctl restart ndppd')
-
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/netns.py b/src/conf_mode/netns.py
index 95ab83dbc..7cee33bc6 100755
--- a/src/conf_mode/netns.py
+++ b/src/conf_mode/netns.py
@@ -77,8 +77,8 @@ def verify(netns):
if 'netns_remove' in netns:
for name, config in netns['netns_remove'].items():
if 'interface' in config:
- raise ConfigError(f'Can not remove NETNS "{name}", it still has '\
- f'member interfaces!')
+ raise ConfigError(f'Can not remove network namespace "{name}", it '\
+ f'still has member interfaces!')
if 'name' in netns:
for name, config in netns['name'].items():
@@ -87,7 +87,6 @@ def verify(netns):
return None
-
def generate(netns):
if not netns:
return None
diff --git a/src/conf_mode/pki.py b/src/conf_mode/pki.py
index 34ba2fe69..4be40e99e 100755
--- a/src/conf_mode/pki.py
+++ b/src/conf_mode/pki.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2021 VyOS maintainers and contributors
+# Copyright (C) 2021-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -14,59 +14,66 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import os
+
+from sys import argv
from sys import exit
from vyos.config import Config
-from vyos.configdep import set_dependents, call_dependents
+from vyos.config import config_dict_merge
+from vyos.configdep import set_dependents
+from vyos.configdep import call_dependents
from vyos.configdict import node_changed
+from vyos.configdiff import Diff
+from vyos.defaults import directories
from vyos.pki import is_ca_certificate
from vyos.pki import load_certificate
from vyos.pki import load_public_key
from vyos.pki import load_private_key
from vyos.pki import load_crl
from vyos.pki import load_dh_parameters
+from vyos.utils.boot import boot_configuration_complete
+from vyos.utils.dict import dict_search
from vyos.utils.dict import dict_search_args
from vyos.utils.dict import dict_search_recursive
+from vyos.utils.process import call
+from vyos.utils.process import cmd
+from vyos.utils.process import is_systemd_service_active
from vyos import ConfigError
from vyos import airbag
airbag.enable()
-# keys to recursively search for under specified path, script to call if update required
+vyos_certbot_dir = directories['certbot']
+
+# keys to recursively search for under specified path
sync_search = [
{
'keys': ['certificate'],
'path': ['service', 'https'],
- 'script': '/usr/libexec/vyos/conf_mode/https.py'
},
{
'keys': ['certificate', 'ca_certificate'],
'path': ['interfaces', 'ethernet'],
- 'script': '/usr/libexec/vyos/conf_mode/interfaces-ethernet.py'
},
{
'keys': ['certificate', 'ca_certificate', 'dh_params', 'shared_secret_key', 'auth_key', 'crypt_key'],
'path': ['interfaces', 'openvpn'],
- 'script': '/usr/libexec/vyos/conf_mode/interfaces-openvpn.py'
},
{
'keys': ['ca_certificate'],
'path': ['interfaces', 'sstpc'],
- 'script': '/usr/libexec/vyos/conf_mode/interfaces-sstpc.py'
},
{
'keys': ['certificate', 'ca_certificate', 'local_key', 'remote_key'],
'path': ['vpn', 'ipsec'],
- 'script': '/usr/libexec/vyos/conf_mode/vpn_ipsec.py'
},
{
'keys': ['certificate', 'ca_certificate'],
'path': ['vpn', 'openconnect'],
- 'script': '/usr/libexec/vyos/conf_mode/vpn_openconnect.py'
},
{
'keys': ['certificate', 'ca_certificate'],
'path': ['vpn', 'sstp'],
- 'script': '/usr/libexec/vyos/conf_mode/vpn_sstp.py'
}
]
@@ -82,6 +89,33 @@ sync_translate = {
'crypt_key': 'openvpn'
}
+def certbot_delete(certificate):
+ if not boot_configuration_complete():
+ return
+ if os.path.exists(f'{vyos_certbot_dir}/renewal/{certificate}.conf'):
+ cmd(f'certbot delete --non-interactive --config-dir {vyos_certbot_dir} --cert-name {certificate}')
+
+def certbot_request(name: str, config: dict, dry_run: bool=True):
+ # We do not call certbot when booting the system - there is no need to do so and
+ # request new certificates during boot/image upgrade as the certbot configuration
+ # is stored persistently under /config - thus we do not open the door to transient
+ # errors
+ if not boot_configuration_complete():
+ return
+
+ domains = '--domains ' + ' --domains '.join(config['domain_name'])
+ tmp = f'certbot certonly --non-interactive --config-dir {vyos_certbot_dir} --cert-name {name} '\
+ f'--standalone --agree-tos --no-eff-email --expand --server {config["url"]} '\
+ f'--email {config["email"]} --key-type rsa --rsa-key-size {config["rsa_key_size"]} '\
+ f'{domains}'
+ if 'listen_address' in config:
+ tmp += f' --http-01-address {config["listen_address"]}'
+ # verify() does not need to actually request a cert but only test for plausibility
+ if dry_run:
+ tmp += ' --dry-run'
+
+ cmd(tmp, raising=ConfigError, message=f'ACME certbot request failed for "{name}"!')
+
def get_config(config=None):
if config:
conf = config
@@ -93,25 +127,60 @@ def get_config(config=None):
get_first_key=True,
no_tag_node_value_mangle=True)
- pki['changed'] = {}
- tmp = node_changed(conf, base + ['ca'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'ca' : tmp})
+ if len(argv) > 1 and argv[1] == 'certbot_renew':
+ pki['certbot_renew'] = {}
- tmp = node_changed(conf, base + ['certificate'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'certificate' : tmp})
+ tmp = node_changed(conf, base + ['ca'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'ca' : tmp})
- tmp = node_changed(conf, base + ['dh'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'dh' : tmp})
+ tmp = node_changed(conf, base + ['certificate'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'certificate' : tmp})
- tmp = node_changed(conf, base + ['key-pair'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'key_pair' : tmp})
+ tmp = node_changed(conf, base + ['dh'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'dh' : tmp})
- tmp = node_changed(conf, base + ['openvpn', 'shared-secret'], key_mangling=('-', '_'), recursive=True)
- if tmp: pki['changed'].update({'openvpn' : tmp})
+ tmp = node_changed(conf, base + ['key-pair'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'key_pair' : tmp})
+
+ tmp = node_changed(conf, base + ['openvpn', 'shared-secret'], recursive=True)
+ if tmp:
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'openvpn' : tmp})
# We only merge in the defaults if there is a configuration at all
if conf.exists(base):
- pki = conf.merge_defaults(pki, recursive=True)
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to merge into the retrieved dictionary.
+ default_values = conf.get_config_defaults(**pki.kwargs, recursive=True)
+ # remove ACME default configuration if unused by CLI
+ if 'certificate' in pki:
+ for name, cert_config in pki['certificate'].items():
+ if 'acme' not in cert_config:
+ # Remove ACME default values
+ del default_values['certificate'][name]['acme']
+
+ # merge CLI and default dictionary
+ pki = config_dict_merge(default_values, pki)
+
+ # Certbot triggered an external renew of the certificates.
+ # Mark all ACME based certificates as "changed" to trigger
+ # update of dependent services
+ if 'certificate' in pki and 'certbot_renew' in pki:
+ renew = []
+ for name, cert_config in pki['certificate'].items():
+ if 'acme' in cert_config:
+ renew.append(name)
+ # If triggered externally by certbot, certificate key is not present in changed
+ if 'changed' not in pki: pki.update({'changed':{}})
+ pki['changed'].update({'certificate' : renew})
# We need to get the entire system configuration to verify that we are not
# deleting a certificate that is still referenced somewhere!
@@ -119,38 +188,34 @@ def get_config(config=None):
get_first_key=True,
no_tag_node_value_mangle=True)
- if 'changed' in pki:
- for search in sync_search:
- for key in search['keys']:
- changed_key = sync_translate[key]
-
- if changed_key not in pki['changed']:
- continue
-
- for item_name in pki['changed'][changed_key]:
- node_present = False
- if changed_key == 'openvpn':
- node_present = dict_search_args(pki, 'openvpn', 'shared_secret', item_name)
- else:
- node_present = dict_search_args(pki, changed_key, item_name)
-
- if node_present:
- search_dict = dict_search_args(pki['system'], *search['path'])
-
- if not search_dict:
- continue
-
- for found_name, found_path in dict_search_recursive(search_dict, key):
- if found_name == item_name:
- path = search['path']
- path_str = ' '.join(path + found_path)
- print(f'pki: Updating config: {path_str} {found_name}')
-
- if path[0] == 'interfaces':
- ifname = found_path[0]
- set_dependents(path[1], conf, ifname)
- else:
- set_dependents(path[1], conf)
+ for search in sync_search:
+ for key in search['keys']:
+ changed_key = sync_translate[key]
+ if 'changed' not in pki or changed_key not in pki['changed']:
+ continue
+
+ for item_name in pki['changed'][changed_key]:
+ node_present = False
+ if changed_key == 'openvpn':
+ node_present = dict_search_args(pki, 'openvpn', 'shared_secret', item_name)
+ else:
+ node_present = dict_search_args(pki, changed_key, item_name)
+
+ if node_present:
+ search_dict = dict_search_args(pki['system'], *search['path'])
+ if not search_dict:
+ continue
+ for found_name, found_path in dict_search_recursive(search_dict, key):
+ if found_name == item_name:
+ path = search['path']
+ path_str = ' '.join(path + found_path)
+ print(f'PKI: Updating config: {path_str} {found_name}')
+
+ if path[0] == 'interfaces':
+ ifname = found_path[0]
+ set_dependents(path[1], conf, ifname)
+ else:
+ set_dependents(path[1], conf)
return pki
@@ -223,6 +288,22 @@ def verify(pki):
if not is_valid_private_key(private['key'], protected):
raise ConfigError(f'Invalid private key on certificate "{name}"')
+ if 'acme' in cert_conf:
+ if 'domain_name' not in cert_conf['acme']:
+ raise ConfigError(f'At least one domain-name is required to request '\
+ f'certificate for "{name}" via ACME!')
+
+ if 'email' not in cert_conf['acme']:
+ raise ConfigError(f'An email address is required to request '\
+ f'certificate for "{name}" via ACME!')
+
+ if 'certbot_renew' not in pki:
+ # Only run the ACME command if something on this entity changed,
+ # as this is time intensive
+ tmp = dict_search('changed.certificate', pki)
+ if tmp != None and name in tmp:
+ certbot_request(name, cert_conf['acme'])
+
if 'dh' in pki:
for name, dh_conf in pki['dh'].items():
if 'parameters' in dh_conf:
@@ -283,12 +364,58 @@ def generate(pki):
if not pki:
return None
+ # Certbot renewal only needs to re-trigger the services to load up the
+ # new PEM file
+ if 'certbot_renew' in pki:
+ return None
+
+ certbot_list = []
+ certbot_list_on_disk = []
+ if os.path.exists(f'{vyos_certbot_dir}/live'):
+ certbot_list_on_disk = [f.path.split('/')[-1] for f in os.scandir(f'{vyos_certbot_dir}/live') if f.is_dir()]
+
+ if 'certificate' in pki:
+ changed_certificates = dict_search('changed.certificate', pki)
+ for name, cert_conf in pki['certificate'].items():
+ if 'acme' in cert_conf:
+ certbot_list.append(name)
+ # generate certificate if not found on disk
+ if name not in certbot_list_on_disk:
+ certbot_request(name, cert_conf['acme'], dry_run=False)
+ elif changed_certificates != None and name in changed_certificates:
+ # when something for the certificate changed, we should delete it
+ if name in certbot_list_on_disk:
+ certbot_delete(name)
+ certbot_request(name, cert_conf['acme'], dry_run=False)
+
+ # Cleanup certbot configuration and certificates if no longer in use by CLI
+ # Get foldernames under vyos_certbot_dir which each represent a certbot cert
+ if os.path.exists(f'{vyos_certbot_dir}/live'):
+ for cert in certbot_list_on_disk:
+ if cert not in certbot_list:
+ # certificate is no longer active on the CLI - remove it
+ certbot_delete(cert)
+
return None
def apply(pki):
+ systemd_certbot_name = 'certbot.timer'
if not pki:
+ call(f'systemctl stop {systemd_certbot_name}')
return None
+ has_certbot = False
+ if 'certificate' in pki:
+ for name, cert_conf in pki['certificate'].items():
+ if 'acme' in cert_conf:
+ has_certbot = True
+ break
+
+ if not has_certbot:
+ call(f'systemctl stop {systemd_certbot_name}')
+ elif has_certbot and not is_systemd_service_active(systemd_certbot_name):
+ call(f'systemctl restart {systemd_certbot_name}')
+
if 'changed' in pki:
call_dependents()
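Note on the new ACME handling: apply() only keeps the certbot systemd timer running while at least one certificate is requested via ACME, otherwise the timer is stopped. A minimal standalone sketch of that decision, with purely illustrative input dictionaries (not taken from a real configuration):

    def wants_certbot_timer(pki: dict) -> bool:
        """Return True when at least one certificate is requested via ACME."""
        if not pki:
            return False
        return any('acme' in cert for cert in pki.get('certificate', {}).values())

    # Illustrative inputs only
    assert wants_certbot_timer({}) is False
    assert wants_certbot_timer({'certificate': {'web': {'acme': {'domain_name': ['vyos.example']}}}}) is True
    assert wants_certbot_timer({'certificate': {'web': {'certificate': 'base64...'}}}) is False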
diff --git a/src/conf_mode/policy-local-route.py b/src/conf_mode/policy-local-route.py
deleted file mode 100755
index 79526f82a..000000000
--- a/src/conf_mode/policy-local-route.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2020-2021 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from sys import exit
-
-from netifaces import interfaces
-from vyos.config import Config
-from vyos.configdict import dict_merge
-from vyos.configdict import node_changed
-from vyos.configdict import leaf_node_changed
-from vyos.template import render
-from vyos.utils.process import call
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-
-def get_config(config=None):
-
- if config:
- conf = config
- else:
- conf = Config()
- base = ['policy']
-
- pbr = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
-
- for route in ['local_route', 'local_route6']:
- dict_id = 'rule_remove' if route == 'local_route' else 'rule6_remove'
- route_key = 'local-route' if route == 'local_route' else 'local-route6'
- base_rule = base + [route_key, 'rule']
-
- # delete policy local-route
- dict = {}
- tmp = node_changed(conf, base_rule, key_mangling=('-', '_'))
- if tmp:
- for rule in (tmp or []):
- src = leaf_node_changed(conf, base_rule + [rule, 'source'])
- fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark'])
- iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface'])
- dst = leaf_node_changed(conf, base_rule + [rule, 'destination'])
- rule_def = {}
- if src:
- rule_def = dict_merge({'source' : src}, rule_def)
- if fwmk:
- rule_def = dict_merge({'fwmark' : fwmk}, rule_def)
- if iif:
- rule_def = dict_merge({'inbound_interface' : iif}, rule_def)
- if dst:
- rule_def = dict_merge({'destination' : dst}, rule_def)
- dict = dict_merge({dict_id : {rule : rule_def}}, dict)
- pbr.update(dict)
-
- if not route in pbr:
- continue
-
- # delete policy local-route rule x source x.x.x.x
- # delete policy local-route rule x fwmark x
- # delete policy local-route rule x destination x.x.x.x
- if 'rule' in pbr[route]:
- for rule, rule_config in pbr[route]['rule'].items():
- src = leaf_node_changed(conf, base_rule + [rule, 'source'])
- fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark'])
- iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface'])
- dst = leaf_node_changed(conf, base_rule + [rule, 'destination'])
- # keep track of changes in configuration
- # otherwise we might remove an existing node although nothing else has changed
- changed = False
-
- rule_def = {}
- # src is None if there are no changes to src
- if src is None:
- # if src hasn't changed, include it in the removal selector
- # if a new selector is added, we have to remove all previous rules without this selector
- # to make sure we remove all previous rules with this source(s), it will be included
- if 'source' in rule_config:
- rule_def = dict_merge({'source': rule_config['source']}, rule_def)
- else:
- # if src is not None, it's previous content will be returned
- # this can be an empty array if it's just being set, or the previous value
- # either way, something has to be changed and we only want to remove previous values
- changed = True
- # set the old value for removal if it's not empty
- if len(src) > 0:
- rule_def = dict_merge({'source' : src}, rule_def)
- if fwmk is None:
- if 'fwmark' in rule_config:
- rule_def = dict_merge({'fwmark': rule_config['fwmark']}, rule_def)
- else:
- changed = True
- if len(fwmk) > 0:
- rule_def = dict_merge({'fwmark' : fwmk}, rule_def)
- if iif is None:
- if 'inbound_interface' in rule_config:
- rule_def = dict_merge({'inbound_interface': rule_config['inbound_interface']}, rule_def)
- else:
- changed = True
- if len(iif) > 0:
- rule_def = dict_merge({'inbound_interface' : iif}, rule_def)
- if dst is None:
- if 'destination' in rule_config:
- rule_def = dict_merge({'destination': rule_config['destination']}, rule_def)
- else:
- changed = True
- if len(dst) > 0:
- rule_def = dict_merge({'destination' : dst}, rule_def)
- if changed:
- dict = dict_merge({dict_id : {rule : rule_def}}, dict)
- pbr.update(dict)
-
- return pbr
-
-def verify(pbr):
- # bail out early - looks like removal from running config
- if not pbr:
- return None
-
- for route in ['local_route', 'local_route6']:
- if not route in pbr:
- continue
-
- pbr_route = pbr[route]
- if 'rule' in pbr_route:
- for rule in pbr_route['rule']:
- if 'source' not in pbr_route['rule'][rule] \
- and 'destination' not in pbr_route['rule'][rule] \
- and 'fwmark' not in pbr_route['rule'][rule] \
- and 'inbound_interface' not in pbr_route['rule'][rule]:
- raise ConfigError('Source or destination address or fwmark or inbound-interface is required!')
- else:
- if 'set' not in pbr_route['rule'][rule] or 'table' not in pbr_route['rule'][rule]['set']:
- raise ConfigError('Table set is required!')
- if 'inbound_interface' in pbr_route['rule'][rule]:
- interface = pbr_route['rule'][rule]['inbound_interface']
- if interface not in interfaces():
- raise ConfigError(f'Interface "{interface}" does not exist')
-
- return None
-
-def generate(pbr):
- if not pbr:
- return None
-
- return None
-
-def apply(pbr):
- if not pbr:
- return None
-
- # Delete old rule if needed
- for rule_rm in ['rule_remove', 'rule6_remove']:
- if rule_rm in pbr:
- v6 = " -6" if rule_rm == 'rule6_remove' else ""
- for rule, rule_config in pbr[rule_rm].items():
- rule_config['source'] = rule_config['source'] if 'source' in rule_config else ['']
- for src in rule_config['source']:
- f_src = '' if src == '' else f' from {src} '
- rule_config['destination'] = rule_config['destination'] if 'destination' in rule_config else ['']
- for dst in rule_config['destination']:
- f_dst = '' if dst == '' else f' to {dst} '
- rule_config['fwmark'] = rule_config['fwmark'] if 'fwmark' in rule_config else ['']
- for fwmk in rule_config['fwmark']:
- f_fwmk = '' if fwmk == '' else f' fwmark {fwmk} '
- rule_config['inbound_interface'] = rule_config['inbound_interface'] if 'inbound_interface' in rule_config else ['']
- for iif in rule_config['inbound_interface']:
- f_iif = '' if iif == '' else f' iif {iif} '
- call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_fwmk}{f_iif}')
-
- # Generate new config
- for route in ['local_route', 'local_route6']:
- if not route in pbr:
- continue
-
- v6 = " -6" if route == 'local_route6' else ""
-
- pbr_route = pbr[route]
- if 'rule' in pbr_route:
- for rule, rule_config in pbr_route['rule'].items():
- table = rule_config['set']['table']
-
- rule_config['source'] = rule_config['source'] if 'source' in rule_config else ['all']
- for src in rule_config['source'] or ['all']:
- f_src = '' if src == '' else f' from {src} '
- rule_config['destination'] = rule_config['destination'] if 'destination' in rule_config else ['all']
- for dst in rule_config['destination']:
- f_dst = '' if dst == '' else f' to {dst} '
- f_fwmk = ''
- if 'fwmark' in rule_config:
- fwmk = rule_config['fwmark']
- f_fwmk = f' fwmark {fwmk} '
- f_iif = ''
- if 'inbound_interface' in rule_config:
- iif = rule_config['inbound_interface']
- f_iif = f' iif {iif} '
- call(f'ip{v6} rule add prio {rule} {f_src}{f_dst}{f_fwmk}{f_iif} lookup {table}')
-
- return None
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/conf_mode/policy_local-route.py b/src/conf_mode/policy_local-route.py
new file mode 100755
index 000000000..91e4fce2c
--- /dev/null
+++ b/src/conf_mode/policy_local-route.py
@@ -0,0 +1,315 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2020-2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from itertools import product
+from sys import exit
+
+from netifaces import interfaces
+from vyos.config import Config
+from vyos.configdict import dict_merge
+from vyos.configdict import node_changed
+from vyos.configdict import leaf_node_changed
+from vyos.template import render
+from vyos.utils.process import call
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+
+def get_config(config=None):
+
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['policy']
+
+ pbr = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+
+ for route in ['local_route', 'local_route6']:
+ dict_id = 'rule_remove' if route == 'local_route' else 'rule6_remove'
+ route_key = 'local-route' if route == 'local_route' else 'local-route6'
+ base_rule = base + [route_key, 'rule']
+
+ # delete policy local-route
+ dict = {}
+ tmp = node_changed(conf, base_rule, key_mangling=('-', '_'))
+ if tmp:
+ for rule in (tmp or []):
+ src = leaf_node_changed(conf, base_rule + [rule, 'source', 'address'])
+ src_port = leaf_node_changed(conf, base_rule + [rule, 'source', 'port'])
+ fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark'])
+ iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface'])
+ dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address'])
+ dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port'])
+ table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table'])
+ proto = leaf_node_changed(conf, base_rule + [rule, 'protocol'])
+ rule_def = {}
+ if src:
+ rule_def = dict_merge({'source': {'address': src}}, rule_def)
+ if src_port:
+ rule_def = dict_merge({'source': {'port': src_port}}, rule_def)
+ if fwmk:
+ rule_def = dict_merge({'fwmark' : fwmk}, rule_def)
+ if iif:
+ rule_def = dict_merge({'inbound_interface' : iif}, rule_def)
+ if dst:
+ rule_def = dict_merge({'destination': {'address': dst}}, rule_def)
+ if dst_port:
+ rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def)
+ if table:
+ rule_def = dict_merge({'table' : table}, rule_def)
+ if proto:
+ rule_def = dict_merge({'protocol' : proto}, rule_def)
+ dict = dict_merge({dict_id : {rule : rule_def}}, dict)
+ pbr.update(dict)
+
+ if not route in pbr:
+ continue
+
+ # delete policy local-route rule x source x.x.x.x
+ # delete policy local-route rule x fwmark x
+ # delete policy local-route rule x destination x.x.x.x
+ if 'rule' in pbr[route]:
+ for rule, rule_config in pbr[route]['rule'].items():
+ src = leaf_node_changed(conf, base_rule + [rule, 'source', 'address'])
+ src_port = leaf_node_changed(conf, base_rule + [rule, 'source', 'port'])
+ fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark'])
+ iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface'])
+ dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address'])
+ dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port'])
+ table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table'])
+ proto = leaf_node_changed(conf, base_rule + [rule, 'protocol'])
+ # keep track of changes in configuration
+ # otherwise we might remove an existing node although nothing else has changed
+ changed = False
+
+ rule_def = {}
+ # src is None if there are no changes to src
+ if src is None:
+ # if src hasn't changed, include it in the removal selector
+ # if a new selector is added, we have to remove all previous rules without this selector
+ # to make sure we remove all previous rules with this source(s), it will be included
+ if 'source' in rule_config:
+ if 'address' in rule_config['source']:
+ rule_def = dict_merge({'source': {'address': rule_config['source']['address']}}, rule_def)
+ else:
+ # if src is not None, it's previous content will be returned
+ # this can be an empty array if it's just being set, or the previous value
+ # either way, something has to be changed and we only want to remove previous values
+ changed = True
+ # set the old value for removal if it's not empty
+ if len(src) > 0:
+ rule_def = dict_merge({'source': {'address': src}}, rule_def)
+
+ # source port
+ if src_port is None:
+ if 'source' in rule_config:
+ if 'port' in rule_config['source']:
+ tmp = rule_config['source']['port']
+ if isinstance(tmp, str):
+ tmp = [tmp]
+ rule_def = dict_merge({'source': {'port': tmp}}, rule_def)
+ else:
+ changed = True
+ if len(src_port) > 0:
+ rule_def = dict_merge({'source': {'port': src_port}}, rule_def)
+
+ # fwmark
+ if fwmk is None:
+ if 'fwmark' in rule_config:
+ tmp = rule_config['fwmark']
+ if isinstance(tmp, str):
+ tmp = [tmp]
+ rule_def = dict_merge({'fwmark': tmp}, rule_def)
+ else:
+ changed = True
+ if len(fwmk) > 0:
+ rule_def = dict_merge({'fwmark' : fwmk}, rule_def)
+
+ # inbound-interface
+ if iif is None:
+ if 'inbound_interface' in rule_config:
+ rule_def = dict_merge({'inbound_interface': rule_config['inbound_interface']}, rule_def)
+ else:
+ changed = True
+ if len(iif) > 0:
+ rule_def = dict_merge({'inbound_interface' : iif}, rule_def)
+
+ # destination address
+ if dst is None:
+ if 'destination' in rule_config:
+ if 'address' in rule_config['destination']:
+ rule_def = dict_merge({'destination': {'address': rule_config['destination']['address']}}, rule_def)
+ else:
+ changed = True
+ if len(dst) > 0:
+ rule_def = dict_merge({'destination': {'address': dst}}, rule_def)
+
+ # destination port
+ if dst_port is None:
+ if 'destination' in rule_config:
+ if 'port' in rule_config['destination']:
+ tmp = rule_config['destination']['port']
+ if isinstance(tmp, str):
+ tmp = [tmp]
+ rule_def = dict_merge({'destination': {'port': tmp}}, rule_def)
+ else:
+ changed = True
+ if len(dst_port) > 0:
+ rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def)
+
+ # table
+ if table is None:
+ if 'set' in rule_config and 'table' in rule_config['set']:
+ rule_def = dict_merge({'table': [rule_config['set']['table']]}, rule_def)
+ else:
+ changed = True
+ if len(table) > 0:
+ rule_def = dict_merge({'table' : table}, rule_def)
+
+ # protocol
+ if proto is None:
+ if 'protocol' in rule_config:
+ tmp = rule_config['protocol']
+ if isinstance(tmp, str):
+ tmp = [tmp]
+ rule_def = dict_merge({'protocol': tmp}, rule_def)
+ else:
+ changed = True
+ if len(proto) > 0:
+ rule_def = dict_merge({'protocol' : proto}, rule_def)
+
+ if changed:
+ dict = dict_merge({dict_id : {rule : rule_def}}, dict)
+ pbr.update(dict)
+
+ return pbr
+
+def verify(pbr):
+ # bail out early - looks like removal from running config
+ if not pbr:
+ return None
+
+ for route in ['local_route', 'local_route6']:
+ if not route in pbr:
+ continue
+
+ pbr_route = pbr[route]
+ if 'rule' in pbr_route:
+ for rule in pbr_route['rule']:
+ if (
+ 'source' not in pbr_route['rule'][rule] and
+ 'destination' not in pbr_route['rule'][rule] and
+ 'fwmark' not in pbr_route['rule'][rule] and
+ 'inbound_interface' not in pbr_route['rule'][rule] and
+ 'protocol' not in pbr_route['rule'][rule]
+ ):
+ raise ConfigError('Source or destination address or fwmark or inbound-interface or protocol is required!')
+
+ if 'set' not in pbr_route['rule'][rule] or 'table' not in pbr_route['rule'][rule]['set']:
+ raise ConfigError('Table set is required!')
+
+ if 'inbound_interface' in pbr_route['rule'][rule]:
+ interface = pbr_route['rule'][rule]['inbound_interface']
+ if interface not in interfaces():
+ raise ConfigError(f'Interface "{interface}" does not exist')
+
+ return None
+
+def generate(pbr):
+ if not pbr:
+ return None
+
+ return None
+
+def apply(pbr):
+ if not pbr:
+ return None
+
+ # Delete old rule if needed
+ for rule_rm in ['rule_remove', 'rule6_remove']:
+ if rule_rm in pbr:
+ v6 = " -6" if rule_rm == 'rule6_remove' else ""
+
+ for rule, rule_config in pbr[rule_rm].items():
+ source = rule_config.get('source', {}).get('address', [''])
+ source_port = rule_config.get('source', {}).get('port', [''])
+ destination = rule_config.get('destination', {}).get('address', [''])
+ destination_port = rule_config.get('destination', {}).get('port', [''])
+ fwmark = rule_config.get('fwmark', [''])
+ inbound_interface = rule_config.get('inbound_interface', [''])
+ protocol = rule_config.get('protocol', [''])
+ table = rule_config.get('table', [''])
+
+ for src, dst, src_port, dst_port, fwmk, iif, proto, table in product(
+ source, destination, source_port, destination_port,
+ fwmark, inbound_interface, protocol, table):
+ f_src = '' if src == '' else f' from {src} '
+ f_src_port = '' if src_port == '' else f' sport {src_port} '
+ f_dst = '' if dst == '' else f' to {dst} '
+ f_dst_port = '' if dst_port == '' else f' dport {dst_port} '
+ f_fwmk = '' if fwmk == '' else f' fwmark {fwmk} '
+ f_iif = '' if iif == '' else f' iif {iif} '
+ f_proto = '' if proto == '' else f' ipproto {proto} '
+ f_table = '' if table == '' else f' lookup {table} '
+
+ call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif}{f_table}')
+
+ # Generate new config
+ for route in ['local_route', 'local_route6']:
+ if not route in pbr:
+ continue
+
+ v6 = " -6" if route == 'local_route6' else ""
+ pbr_route = pbr[route]
+
+ if 'rule' in pbr_route:
+ for rule, rule_config in pbr_route['rule'].items():
+ table = rule_config['set'].get('table', '')
+ source = rule_config.get('source', {}).get('address', ['all'])
+ source_port = rule_config.get('source', {}).get('port', '')
+ destination = rule_config.get('destination', {}).get('address', ['all'])
+ destination_port = rule_config.get('destination', {}).get('port', '')
+ fwmark = rule_config.get('fwmark', '')
+ inbound_interface = rule_config.get('inbound_interface', '')
+ protocol = rule_config.get('protocol', '')
+
+ for src in source:
+ f_src = f' from {src} ' if src else ''
+ for dst in destination:
+ f_dst = f' to {dst} ' if dst else ''
+ f_src_port = f' sport {source_port} ' if source_port else ''
+ f_dst_port = f' dport {destination_port} ' if destination_port else ''
+ f_fwmk = f' fwmark {fwmark} ' if fwmark else ''
+ f_iif = f' iif {inbound_interface} ' if inbound_interface else ''
+ f_proto = f' ipproto {protocol} ' if protocol else ''
+
+ call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif} lookup {table}')
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
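Note on the rewritten removal logic: every selector list of a rule_remove entry is expanded with itertools.product, so a single changed rule can translate into several "ip rule del" invocations. A minimal sketch of that expansion with made-up selector values (it only prints the commands instead of calling ip):

    from itertools import product

    rule = '100'
    source = ['203.0.113.0/24', '']   # illustrative selectors
    destination = ['']
    fwmark = ['23']

    for src, dst, fwmk in product(source, destination, fwmark):
        f_src = f' from {src} ' if src else ''
        f_dst = f' to {dst} ' if dst else ''
        f_fwmk = f' fwmark {fwmk} ' if fwmk else ''
        print(f'ip rule del prio {rule} {f_src}{f_dst}{f_fwmk}')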
diff --git a/src/conf_mode/policy-route.py b/src/conf_mode/policy_route.py
index adad012de..adad012de 100755
--- a/src/conf_mode/policy-route.py
+++ b/src/conf_mode/policy_route.py
diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py
index 00015023c..bf807fa5f 100755
--- a/src/conf_mode/protocols_bgp.py
+++ b/src/conf_mode/protocols_bgp.py
@@ -30,6 +30,7 @@ from vyos.template import render_to_string
from vyos.utils.dict import dict_search
from vyos.utils.network import get_interface_vrf
from vyos.utils.network import is_addr_assigned
+from vyos.utils.process import process_named_running
from vyos import ConfigError
from vyos import frr
from vyos import airbag
@@ -49,8 +50,13 @@ def get_config(config=None):
# eqivalent of the C foo ? 'a' : 'b' statement
base = vrf and ['vrf', 'name', vrf, 'protocols', 'bgp'] or base_path
- bgp = conf.get_config_dict(base, key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
+ bgp = conf.get_config_dict(
+ base,
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True,
+ )
bgp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'],
key_mangling=('-', '_'),
@@ -93,6 +99,7 @@ def get_config(config=None):
tmp = conf.get_config_dict(['policy'])
# Merge policy dict into "regular" config dict
bgp = dict_merge(tmp, bgp)
+
return bgp
@@ -246,6 +253,19 @@ def verify(bgp):
if 'system_as' not in bgp:
raise ConfigError('BGP system-as number must be defined!')
+ # Verify BMP
+ if 'bmp' in bgp:
+ # check bmp flag "bgpd -d -F traditional --daemon -A 127.0.0.1 -M rpki -M bmp"
+ if not process_named_running('bgpd', 'bmp'):
+ raise ConfigError(
+ f'"bmp" flag is not found in bgpd. Configure "set system frr bmp" and restart bgp process'
+ )
+ # check bmp target
+ if 'target' in bgp['bmp']:
+ for target, target_config in bgp['bmp']['target'].items():
+ if 'address' not in target_config:
+ raise ConfigError(f'BMP target "{target}" address must be defined!')
+
# Verify vrf on interface and bgp section
if 'interface' in bgp:
for interface in bgp['interface']:
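Note on the BMP check above: it relies on process_named_running('bgpd', 'bmp') to report whether bgpd was started with the bmp module ("-M bmp"). Assuming that helper matches a token of the daemon command line - an assumption for illustration only, the real helper lives in vyos.utils.process - the check is roughly equivalent to this standalone sketch using psutil:

    import psutil  # third-party; used here purely for illustration

    def bgpd_has_bmp_module() -> bool:
        """Return True if a running bgpd was started with '-M bmp'."""
        for proc in psutil.process_iter(['name', 'cmdline']):
            cmdline = proc.info.get('cmdline') or []
            if proc.info.get('name') == 'bgpd' and 'bmp' in ' '.join(cmdline):
                return True
        return False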
diff --git a/src/conf_mode/igmp_proxy.py b/src/conf_mode/protocols_igmp-proxy.py
index 40db417dd..40db417dd 100755
--- a/src/conf_mode/igmp_proxy.py
+++ b/src/conf_mode/protocols_igmp-proxy.py
diff --git a/src/conf_mode/protocols_igmp.py b/src/conf_mode/protocols_igmp.py
deleted file mode 100755
index 435189025..000000000
--- a/src/conf_mode/protocols_igmp.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2020-2023 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ipaddress import IPv4Address
-from sys import exit
-
-from vyos import ConfigError
-from vyos.config import Config
-from vyos.utils.process import process_named_running
-from vyos.utils.process import call
-from vyos.template import render
-from signal import SIGTERM
-
-from vyos import airbag
-airbag.enable()
-
-# Required to use the full path to pimd, in another case daemon will not be started
-pimd_cmd = f'/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1'
-
-config_file = r'/tmp/igmp.frr'
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
- igmp_conf = {
- 'igmp_conf' : False,
- 'pim_conf' : False,
- 'igmp_proxy_conf' : False,
- 'old_ifaces' : {},
- 'ifaces' : {}
- }
- if not (conf.exists('protocols igmp') or conf.exists_effective('protocols igmp')):
- return None
-
- if conf.exists('protocols igmp-proxy'):
- igmp_conf['igmp_proxy_conf'] = True
-
- if conf.exists('protocols pim'):
- igmp_conf['pim_conf'] = True
-
- if conf.exists('protocols igmp'):
- igmp_conf['igmp_conf'] = True
-
- conf.set_level('protocols igmp')
-
- # # Get interfaces
- for iface in conf.list_effective_nodes('interface'):
- igmp_conf['old_ifaces'].update({
- iface : {
- 'version' : conf.return_effective_value('interface {0} version'.format(iface)),
- 'query_interval' : conf.return_effective_value('interface {0} query-interval'.format(iface)),
- 'query_max_resp_time' : conf.return_effective_value('interface {0} query-max-response-time'.format(iface)),
- 'gr_join' : {}
- }
- })
- for gr_join in conf.list_effective_nodes('interface {0} join'.format(iface)):
- igmp_conf['old_ifaces'][iface]['gr_join'][gr_join] = conf.return_effective_values('interface {0} join {1} source'.format(iface, gr_join))
-
- for iface in conf.list_nodes('interface'):
- igmp_conf['ifaces'].update({
- iface : {
- 'version' : conf.return_value('interface {0} version'.format(iface)),
- 'query_interval' : conf.return_value('interface {0} query-interval'.format(iface)),
- 'query_max_resp_time' : conf.return_value('interface {0} query-max-response-time'.format(iface)),
- 'gr_join' : {}
- }
- })
- for gr_join in conf.list_nodes('interface {0} join'.format(iface)):
- igmp_conf['ifaces'][iface]['gr_join'][gr_join] = conf.return_values('interface {0} join {1} source'.format(iface, gr_join))
-
- return igmp_conf
-
-def verify(igmp):
- if igmp is None:
- return None
-
- if igmp['igmp_conf']:
- # Check conflict with IGMP-Proxy
- if igmp['igmp_proxy_conf']:
- raise ConfigError(f"IGMP proxy and PIM cannot be both configured at the same time")
-
- # Check interfaces
- if not igmp['ifaces']:
- raise ConfigError(f"IGMP require defined interfaces!")
- # Check, is this multicast group
- for intfc in igmp['ifaces']:
- for gr_addr in igmp['ifaces'][intfc]['gr_join']:
- if not IPv4Address(gr_addr).is_multicast:
- raise ConfigError(gr_addr + " not a multicast group")
-
-def generate(igmp):
- if igmp is None:
- return None
-
- render(config_file, 'frr/igmp.frr.j2', igmp)
- return None
-
-def apply(igmp):
- if igmp is None:
- return None
-
- pim_pid = process_named_running('pimd')
- if igmp['igmp_conf'] or igmp['pim_conf']:
- if not pim_pid:
- call(pimd_cmd)
-
- if os.path.exists(config_file):
- call(f'vtysh -d pimd -f {config_file}')
- os.remove(config_file)
- elif pim_pid:
- os.kill(int(pim_pid), SIGTERM)
-
- return None
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py
index e00c58ee4..8d594bb68 100755
--- a/src/conf_mode/protocols_isis.py
+++ b/src/conf_mode/protocols_isis.py
@@ -48,7 +48,8 @@ def get_config(config=None):
# eqivalent of the C foo ? 'a' : 'b' statement
base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path
isis = conf.get_config_dict(base, key_mangling=('-', '_'),
- get_first_key=True)
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
# Assign the name of our VRF context. This MUST be done before the return
# statement below, else on deletion we will delete the default instance
@@ -219,6 +220,51 @@ def verify(isis):
if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']):
raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\
f'and no-php-flag configured at the same time.')
+
+ # Check for index ranges being larger than the segment routing global block
+ if dict_search('segment_routing.global_block', isis):
+ g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis)
+ g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis)
+ g_label_difference = int(g_high_label_value) - int(g_low_label_value)
+ if dict_search('segment_routing.prefix', isis):
+ for prefix, prefix_config in isis['segment_routing']['prefix'].items():
+ if 'index' in prefix_config:
+ index_size = isis['segment_routing']['prefix'][prefix]['index']['value']
+ if int(index_size) > int(g_label_difference):
+ raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\
+ f'index base size larger than the SRGB label base.')
+
+ # Check for LFA tiebreaker index duplication
+ if dict_search('fast_reroute.lfa.local.tiebreaker', isis):
+ comparison_dictionary = {}
+ for item, item_options in isis['fast_reroute']['lfa']['local']['tiebreaker'].items():
+ for index, index_options in item_options.items():
+ for index_value, index_value_options in index_options.items():
+ if index_value not in comparison_dictionary.keys():
+ comparison_dictionary[index_value] = [item]
+ else:
+ comparison_dictionary[index_value].append(item)
+ for index, index_length in comparison_dictionary.items():
+ if int(len(index_length)) > 1:
+ raise ConfigError(f'LFA index {index} cannot have more than one tiebreaker configured.')
+
+ # Check for LFA priority-limit configured multiple times per level
+ if dict_search('fast_reroute.lfa.local.priority_limit', isis):
+ comparison_dictionary = {}
+ for priority, priority_options in isis['fast_reroute']['lfa']['local']['priority_limit'].items():
+ for level, level_options in priority_options.items():
+ if level not in comparison_dictionary.keys():
+ comparison_dictionary[level] = [priority]
+ else:
+ comparison_dictionary[level].append(priority)
+ for level, level_length in comparison_dictionary.items():
+ if int(len(level_length)) > 1:
+ raise ConfigError(f'LFA priority-limit on {level.replace("_", "-")} cannot have more than one priority configured.')
+
+ # Check for LFA remote prefix list configured with more than one list
+ if dict_search('fast_reroute.lfa.remote.prefix_list', isis):
+ if int(len(isis['fast_reroute']['lfa']['remote']['prefix_list'].items())) > 1:
+ raise ConfigError('LFA remote prefix-list cannot have more than one prefix-list configured.')
return None
@@ -265,4 +311,4 @@ if __name__ == '__main__':
apply(c)
except ConfigError as e:
print(e)
- exit(1)
+ exit(1)
\ No newline at end of file
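Note on the new SRGB sanity check: every prefix index is compared against the size of the configured global block (high-label-value minus low-label-value). A small worked example of that arithmetic with illustrative values:

    low_label, high_label = 16000, 23999        # illustrative SRGB bounds
    label_difference = high_label - low_label   # 7999 usable index positions

    for prefix, index in {'192.0.2.1/32': 100, '192.0.2.2/32': 8000}.items():
        if index > label_difference:
            print(f'prefix {prefix}: index {index} exceeds the SRGB size of {label_difference}')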
diff --git a/src/conf_mode/protocols_nhrp.py b/src/conf_mode/protocols_nhrp.py
index 5ec0bc9e5..c339c6391 100755
--- a/src/conf_mode/protocols_nhrp.py
+++ b/src/conf_mode/protocols_nhrp.py
@@ -37,7 +37,7 @@ def get_config(config=None):
nhrp = conf.get_config_dict(base, key_mangling=('-', '_'),
get_first_key=True, no_tag_node_value_mangle=True)
- nhrp['del_tunnels'] = node_changed(conf, base + ['tunnel'], key_mangling=('-', '_'))
+ nhrp['del_tunnels'] = node_changed(conf, base + ['tunnel'])
if not conf.exists(base):
return nhrp
diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py
index cddd3765e..198d78ee0 100755
--- a/src/conf_mode/protocols_ospf.py
+++ b/src/conf_mode/protocols_ospf.py
@@ -215,6 +215,19 @@ def verify(ospf):
raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\
f'and no-php-flag configured at the same time.')
+ # Check for index ranges being larger than the segment routing global block
+ if dict_search('segment_routing.global_block', ospf):
+ g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf)
+ g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf)
+ g_label_difference = int(g_high_label_value) - int(g_low_label_value)
+ if dict_search('segment_routing.prefix', ospf):
+ for prefix, prefix_config in ospf['segment_routing']['prefix'].items():
+ if 'index' in prefix_config:
+ index_size = ospf['segment_routing']['prefix'][prefix]['index']['value']
+ if int(index_size) > int(g_label_difference):
+ raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\
+ f'index base size larger than the SRGB label base.')
+
# Check route summarisation
if 'summary_address' in ospf:
for prefix, prefix_options in ospf['summary_address'].items():
diff --git a/src/conf_mode/protocols_pim.py b/src/conf_mode/protocols_pim.py
index 0aaa0d2c6..09c3be8df 100755
--- a/src/conf_mode/protocols_pim.py
+++ b/src/conf_mode/protocols_pim.py
@@ -16,144 +16,139 @@
import os
-from ipaddress import IPv4Address
+from ipaddress import IPv4Network
+from signal import SIGTERM
from sys import exit
from vyos.config import Config
-from vyos import ConfigError
+from vyos.config import config_dict_merge
+from vyos.configdict import node_changed
+from vyos.configverify import verify_interface_exists
from vyos.utils.process import process_named_running
from vyos.utils.process import call
-from vyos.template import render
-from signal import SIGTERM
-
+from vyos.template import render_to_string
+from vyos import ConfigError
+from vyos import frr
from vyos import airbag
airbag.enable()
-# Required to use the full path to pimd, in another case daemon will not be started
-pimd_cmd = f'/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1'
-
-config_file = r'/tmp/pimd.frr'
-
def get_config(config=None):
if config:
conf = config
else:
conf = Config()
- pim_conf = {
- 'pim_conf' : False,
- 'igmp_conf' : False,
- 'igmp_proxy_conf' : False,
- 'old_pim' : {
- 'ifaces' : {},
- 'rp' : {}
- },
- 'pim' : {
- 'ifaces' : {},
- 'rp' : {}
- }
- }
- if not (conf.exists('protocols pim') or conf.exists_effective('protocols pim')):
- return None
-
- if conf.exists('protocols igmp-proxy'):
- pim_conf['igmp_proxy_conf'] = True
-
- if conf.exists('protocols igmp'):
- pim_conf['igmp_conf'] = True
-
- if conf.exists('protocols pim'):
- pim_conf['pim_conf'] = True
-
- conf.set_level('protocols pim')
-
- # Get interfaces
- for iface in conf.list_effective_nodes('interface'):
- pim_conf['old_pim']['ifaces'].update({
- iface : {
- 'hello' : conf.return_effective_value('interface {0} hello'.format(iface)),
- 'dr_prio' : conf.return_effective_value('interface {0} dr-priority'.format(iface))
- }
- })
- for iface in conf.list_nodes('interface'):
- pim_conf['pim']['ifaces'].update({
- iface : {
- 'hello' : conf.return_value('interface {0} hello'.format(iface)),
- 'dr_prio' : conf.return_value('interface {0} dr-priority'.format(iface)),
- }
- })
-
- conf.set_level('protocols pim rp')
-
- # Get RPs addresses
- for rp_addr in conf.list_effective_nodes('address'):
- pim_conf['old_pim']['rp'][rp_addr] = conf.return_effective_values('address {0} group'.format(rp_addr))
-
- for rp_addr in conf.list_nodes('address'):
- pim_conf['pim']['rp'][rp_addr] = conf.return_values('address {0} group'.format(rp_addr))
-
- # Get RP keep-alive-timer
- if conf.exists_effective('rp keep-alive-timer'):
- pim_conf['old_pim']['rp_keep_alive'] = conf.return_effective_value('rp keep-alive-timer')
- if conf.exists('rp keep-alive-timer'):
- pim_conf['pim']['rp_keep_alive'] = conf.return_value('rp keep-alive-timer')
-
- return pim_conf
+ base = ['protocols', 'pim']
+
+ pim = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True, no_tag_node_value_mangle=True)
+
+ # We can not run both IGMP proxy and PIM at the same time - get IGMP
+ # proxy status
+ if conf.exists(['protocols', 'igmp-proxy']):
+ pim.update({'igmp_proxy_enabled' : {}})
+
+ # FRR has VRF support for different routing daemons. As interfaces belong
+ # to VRFs - or the global VRF, we need to check for changed interfaces so
+ # that they will be properly rendered for the FRR config. Also this eases
+ # removal of interfaces from the running configuration.
+ interfaces_removed = node_changed(conf, base + ['interface'])
+ if interfaces_removed:
+ pim['interface_removed'] = list(interfaces_removed)
+
+ # Bail out early if the configuration tree no longer exists. This must
+ # be done after retrieving the list of interfaces to be removed.
+ if not conf.exists(base):
+ pim.update({'deleted' : ''})
+ return pim
+
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to merge into the dictionary retrieved.
+ default_values = conf.get_config_defaults(**pim.kwargs, recursive=True)
+
+ # We have to clean up the default dict, as default values could enable features
+ # which are not explicitly enabled on the CLI. Example: every interface comes
+ # with a full set of IGMP defaults which would enable IGMP on that interface
+ # even when it was never set via CLI, so we need to check this first and
+ # drop that subtree if necessary.
+ for interface in pim.get('interface', []):
+ # Only keep the IGMP defaults for interfaces where IGMP is explicitly
+ # configured on the CLI, otherwise the merged defaults would silently
+ # enable IGMP on the interface.
+ if 'igmp' not in pim['interface'][interface]:
+ del default_values['interface'][interface]['igmp']
+
+ pim = config_dict_merge(default_values, pim)
+ return pim
def verify(pim):
- if pim is None:
+ if not pim or 'deleted' in pim:
return None
- if pim['pim_conf']:
- # Check conflict with IGMP-Proxy
- if pim['igmp_proxy_conf']:
- raise ConfigError(f"IGMP proxy and PIM cannot be both configured at the same time")
-
- # Check interfaces
- if not pim['pim']['ifaces']:
- raise ConfigError(f"PIM require defined interfaces!")
+ if 'igmp_proxy_enabled' in pim:
+ raise ConfigError('IGMP proxy and PIM cannot be configured at the same time!')
- if not pim['pim']['rp']:
- raise ConfigError(f"RP address required")
+ if 'interface' not in pim:
+ raise ConfigError('PIM requires defined interfaces!')
- # Check unique multicast groups
- uniq_groups = []
- for rp_addr in pim['pim']['rp']:
- if not pim['pim']['rp'][rp_addr]:
- raise ConfigError(f"Group should be specified for RP " + rp_addr)
- for group in pim['pim']['rp'][rp_addr]:
- if (group in uniq_groups):
- raise ConfigError(f"Group range " + group + " specified cannot exact match another")
+ for interface in pim['interface']:
+ verify_interface_exists(interface)
- # Check, is this multicast group
- gr_addr = group.split('/')
- if IPv4Address(gr_addr[0]) < IPv4Address('224.0.0.0'):
- raise ConfigError(group + " not a multicast group")
+ if 'rp' in pim:
+ if 'address' not in pim['rp']:
+ raise ConfigError('PIM rendezvous point needs to be defined!')
- uniq_groups.extend(pim['pim']['rp'][rp_addr])
+ # Check unique multicast groups
+ unique = []
+ pim_base_error = 'PIM rendezvous point group'
+ for address, address_config in pim['rp']['address'].items():
+ if 'group' not in address_config:
+ raise ConfigError(f'{pim_base_error} should be defined for "{address}"!')
+
+ # Check if it is a multicast group
+ for gr_addr in address_config['group']:
+ if not IPv4Network(gr_addr).is_multicast:
+ raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!')
+ if gr_addr in unique:
+ raise ConfigError(f'{pim_base_error} must be unique!')
+ unique.append(gr_addr)
def generate(pim):
- if pim is None:
+ if not pim or 'deleted' in pim:
return None
-
- render(config_file, 'frr/pimd.frr.j2', pim)
+ pim['frr_pimd_config'] = render_to_string('frr/pimd.frr.j2', pim)
return None
def apply(pim):
- if pim is None:
+ pim_daemon = 'pimd'
+ pim_pid = process_named_running(pim_daemon)
+
+ if not pim or 'deleted' in pim:
+ if 'deleted' in pim:
+ os.kill(int(pim_pid), SIGTERM)
+
return None
- pim_pid = process_named_running('pimd')
- if pim['igmp_conf'] or pim['pim_conf']:
- if not pim_pid:
- call(pimd_cmd)
+ if not pim_pid:
+ call('/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1')
+
+ # Save original configuration prior to starting any commit actions
+ frr_cfg = frr.FRRConfig()
+
+ frr_cfg.load_configuration(pim_daemon)
+ frr_cfg.modify_section(f'^ip pim')
+ frr_cfg.modify_section(f'^ip igmp')
- if os.path.exists(config_file):
- call("vtysh -d pimd -f " + config_file)
- os.remove(config_file)
- elif pim_pid:
- os.kill(int(pim_pid), SIGTERM)
+ for key in ['interface', 'interface_removed']:
+ if key not in pim:
+ continue
+ for interface in pim[key]:
+ frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
+ if 'frr_pimd_config' in pim:
+ frr_cfg.add_before(frr.default_add_before, pim['frr_pimd_config'])
+ frr_cfg.commit_configuration(pim_daemon)
return None
if __name__ == '__main__':
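Note on the rewritten verify(): rendezvous-point groups are now validated with ipaddress.IPv4Network instead of manual address comparisons, and group ranges must be unique across all RPs. A minimal sketch of that validation against made-up RP data:

    from ipaddress import IPv4Network

    # Illustrative RP data, not a real configuration
    rp_address = {'192.0.2.1': {'group': ['224.1.0.0/16']},
                  '192.0.2.2': {'group': ['239.1.1.0/24', '10.0.0.0/8']}}

    seen = []
    for address, config in rp_address.items():
        for group in config['group']:
            if not IPv4Network(group).is_multicast:
                print(f'{group} is not a multicast group')   # 10.0.0.0/8 is rejected here
            elif group in seen:
                print(f'{group} is configured for more than one RP')
            else:
                seen.append(group)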
diff --git a/src/conf_mode/protocols_pim6.py b/src/conf_mode/protocols_pim6.py
new file mode 100755
index 000000000..2003a1014
--- /dev/null
+++ b/src/conf_mode/protocols_pim6.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from ipaddress import IPv6Address
+from ipaddress import IPv6Network
+from sys import exit
+
+from vyos.config import Config
+from vyos.config import config_dict_merge
+from vyos.configdict import node_changed
+from vyos.configverify import verify_interface_exists
+from vyos.template import render_to_string
+from vyos import ConfigError
+from vyos import frr
+from vyos import airbag
+airbag.enable()
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['protocols', 'pim6']
+ pim6 = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True, with_recursive_defaults=True)
+
+ # FRR has VRF support for different routing daemons. As interfaces belong
+ # to VRFs - or the global VRF, we need to check for changed interfaces so
+ # that they will be properly rendered for the FRR config. Also this eases
+ # removal of interfaces from the running configuration.
+ interfaces_removed = node_changed(conf, base + ['interface'])
+ if interfaces_removed:
+ pim6['interface_removed'] = list(interfaces_removed)
+
+ # Bail out early if the configuration tree no longer exists. This must
+ # be done after retrieving the list of interfaces to be removed.
+ if not conf.exists(base):
+ pim6.update({'deleted' : ''})
+ return pim6
+
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to merge into the dictionary retrieved.
+ default_values = conf.get_config_defaults(**pim6.kwargs, recursive=True)
+
+ pim6 = config_dict_merge(default_values, pim6)
+ return pim6
+
+def verify(pim6):
+ if not pim6 or 'deleted' in pim6:
+ return
+
+ for interface, interface_config in pim6.get('interface', {}).items():
+ verify_interface_exists(interface)
+ if 'mld' in interface_config:
+ mld = interface_config['mld']
+ for group in mld.get('join', {}).keys():
+ # Validate multicast group address
+ if not IPv6Address(group).is_multicast:
+ raise ConfigError(f"{group} is not a multicast group")
+
+ if 'rp' in pim6:
+ if 'address' not in pim6['rp']:
+ raise ConfigError('PIM6 rendezvous point needs to be defined!')
+
+ # Check unique multicast groups
+ unique = []
+ pim_base_error = 'PIM6 rendezvous point group'
+
+ if {'address', 'prefix-list6'} <= set(pim6['rp']):
+ raise ConfigError(f'{pim_base_error} supports either address or a prefix-list!')
+
+ for address, address_config in pim6['rp']['address'].items():
+ if 'group' not in address_config:
+ raise ConfigError(f'{pim_base_error} should be defined for "{address}"!')
+
+ # Check if it is a multicast group
+ for gr_addr in address_config['group']:
+ if not IPv6Network(gr_addr).is_multicast:
+ raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!')
+ if gr_addr in unique:
+ raise ConfigError(f'{pim_base_error} must be unique!')
+ unique.append(gr_addr)
+
+def generate(pim6):
+ if not pim6 or 'deleted' in pim6:
+ return
+ pim6['new_frr_config'] = render_to_string('frr/pim6d.frr.j2', pim6)
+ return None
+
+def apply(pim6):
+ if pim6 is None:
+ return
+
+ pim6_daemon = 'pim6d'
+
+ # Save original configuration prior to starting any commit actions
+ frr_cfg = frr.FRRConfig()
+
+ frr_cfg.load_configuration(pim6_daemon)
+
+ for key in ['interface', 'interface_removed']:
+ if key not in pim6:
+ continue
+ for interface in pim6[key]:
+ frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
+
+ if 'new_frr_config' in pim6:
+ frr_cfg.add_before(frr.default_add_before, pim6['new_frr_config'])
+ frr_cfg.commit_configuration(pim6_daemon)
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
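Note on the MLD join validation above: joined groups are checked with ipaddress.IPv6Address to confirm they are really multicast addresses. A standalone sketch with illustrative group addresses:

    from ipaddress import IPv6Address

    # Illustrative join groups - only the first one is a valid multicast address
    for group in ['ff15::1234', '2001:db8::1']:
        if not IPv6Address(group).is_multicast:
            print(f'{group} is not a multicast group')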
diff --git a/src/conf_mode/protocols_segment-routing.py b/src/conf_mode/protocols_segment-routing.py
new file mode 100755
index 000000000..d865c2ac0
--- /dev/null
+++ b/src/conf_mode/protocols_segment-routing.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+
+from vyos.config import Config
+from vyos.configdict import node_changed
+from vyos.template import render_to_string
+from vyos.utils.dict import dict_search
+from vyos.utils.system import sysctl_write
+from vyos import ConfigError
+from vyos import frr
+from vyos import airbag
+airbag.enable()
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['protocols', 'segment-routing']
+ sr = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True)
+
+ # FRR has VRF support for different routing daemons. As interfaces belong
+ # to VRFs - or the global VRF, we need to check for changed interfaces so
+ # that they will be properly rendered for the FRR config. Also this eases
+ # removal of interfaces from the running configuration.
+ interfaces_removed = node_changed(conf, base + ['interface'])
+ if interfaces_removed:
+ sr['interface_removed'] = list(interfaces_removed)
+
+ return sr
+
+def verify(sr):
+ if 'srv6' in sr:
+ srv6_enable = False
+ if 'interface' in sr:
+ for interface, interface_config in sr['interface'].items():
+ if 'srv6' in interface_config:
+ srv6_enable = True
+ break
+ if not srv6_enable:
+ raise ConfigError('SRv6 should be enabled on at least one interface!')
+ return None
+
+def generate(sr):
+ if not sr:
+ return None
+
+ sr['new_frr_config'] = render_to_string('frr/zebra.segment_routing.frr.j2', sr)
+ return None
+
+def apply(sr):
+ zebra_daemon = 'zebra'
+
+ if 'interface_removed' in sr:
+ for interface in sr['interface_removed']:
+ # Disable processing of IPv6-SR packets
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0')
+
+ if 'interface' in sr:
+ for interface, interface_config in sr['interface'].items():
+ # Accept or drop SR-enabled IPv6 packets on this interface
+ if 'srv6' in interface_config:
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '1')
+ # Define HMAC policy for ingress SR-enabled packets on this interface
+ # It's a redundant check as HMAC has a default value - but better safe
+ # than sorry
+ tmp = dict_search('srv6.hmac', interface_config)
+ if tmp == 'accept':
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '0')
+ elif tmp == 'drop':
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '1')
+ elif tmp == 'ignore':
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '-1')
+ else:
+ sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0')
+
+ # Save original configuration prior to starting any commit actions
+ frr_cfg = frr.FRRConfig()
+ frr_cfg.load_configuration(zebra_daemon)
+ frr_cfg.modify_section(r'^segment-routing')
+ if 'new_frr_config' in sr:
+ frr_cfg.add_before(frr.default_add_before, sr['new_frr_config'])
+ frr_cfg.commit_configuration(zebra_daemon)
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
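Note on the SRv6 HMAC handling above: the CLI policy maps directly onto the kernel's seg6_require_hmac sysctl (-1 ignore, 0 accept, 1 drop). A minimal sketch of that mapping; the real script writes the value with vyos.utils.system.sysctl_write, here the value is only computed and printed:

    def seg6_hmac_value(policy: str) -> str:
        """Translate the CLI HMAC policy into the seg6_require_hmac sysctl value."""
        return {'accept': '0', 'drop': '1', 'ignore': '-1'}[policy]

    # Illustrative interfaces and policies
    for interface, policy in {'eth0': 'accept', 'eth1': 'ignore'}.items():
        print(f'net.ipv6.conf.{interface}.seg6_require_hmac = {seg6_hmac_value(policy)}')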
diff --git a/src/conf_mode/arp.py b/src/conf_mode/protocols_static_arp.py
index b141f1141..b141f1141 100755
--- a/src/conf_mode/arp.py
+++ b/src/conf_mode/protocols_static_arp.py
diff --git a/src/conf_mode/protocols_static_neighbor-proxy.py b/src/conf_mode/protocols_static_neighbor-proxy.py
new file mode 100755
index 000000000..10cc1e748
--- /dev/null
+++ b/src/conf_mode/protocols_static_neighbor-proxy.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+
+from vyos.config import Config
+from vyos.configdict import node_changed
+from vyos.utils.process import call
+from vyos import ConfigError
+from vyos import airbag
+
+airbag.enable()
+
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['protocols', 'static', 'neighbor-proxy']
+ config = conf.get_config_dict(base, get_first_key=True)
+
+ return config
+
+
+def verify(config):
+
+ if 'arp' in config:
+ for neighbor, neighbor_conf in config['arp'].items():
+ if 'interface' not in neighbor_conf:
+ raise ConfigError(
+ f"ARP neighbor-proxy for '{neighbor}' requires an interface to be set!"
+ )
+
+ if 'nd' in config:
+ for neighbor, neighbor_conf in config['nd'].items():
+ if 'interface' not in neighbor_conf:
+ raise ConfigError(
+ f"ARP neighbor-proxy for '{neighbor}' requires an interface to be set!"
+ )
+
+
+def generate(config):
+ pass
+
+
+def apply(config):
+ if not config:
+ # Cleanup proxy
+ call('ip neighbor flush proxy')
+ call('ip -6 neighbor flush proxy')
+ return None
+
+ # Add proxy ARP
+ if 'arp' in config:
+ # Cleanup entries before config
+ call('ip neighbor flush proxy')
+ for neighbor, neighbor_conf in config['arp'].items():
+ for interface in neighbor_conf.get('interface'):
+ call(f'ip neighbor add proxy {neighbor} dev {interface}')
+
+ # Add proxy NDP
+ if 'nd' in config:
+ # Cleanup entries before config
+ call('ip -6 neighbor flush proxy')
+ for neighbor, neighbor_conf in config['nd'].items():
+ for interface in neighbor_conf['interface']:
+ call(f'ip -6 neighbor add proxy {neighbor} dev {interface}')
+
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
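Note on apply() above: existing proxy entries are flushed and then one proxy neighbour is re-added per address/interface pair. A standalone sketch of that expansion with made-up addresses (it prints the commands instead of invoking ip):

    # Illustrative configuration dictionary
    config = {'arp': {'192.0.2.10': {'interface': ['eth0', 'eth1']}},
              'nd':  {'2001:db8::10': {'interface': ['eth0']}}}

    for neighbor, neighbor_conf in config.get('arp', {}).items():
        for interface in neighbor_conf['interface']:
            print(f'ip neighbor add proxy {neighbor} dev {interface}')

    for neighbor, neighbor_conf in config.get('nd', {}).items():
        for interface in neighbor_conf['interface']:
            print(f'ip -6 neighbor add proxy {neighbor} dev {interface}')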
diff --git a/src/conf_mode/bcast_relay.py b/src/conf_mode/service_broadcast-relay.py
index 31c552f5a..31c552f5a 100755
--- a/src/conf_mode/bcast_relay.py
+++ b/src/conf_mode/service_broadcast-relay.py
diff --git a/src/conf_mode/service_config_sync.py b/src/conf_mode/service_config-sync.py
index 4b8a7f6ee..4b8a7f6ee 100755
--- a/src/conf_mode/service_config_sync.py
+++ b/src/conf_mode/service_config-sync.py
diff --git a/src/conf_mode/conntrack_sync.py b/src/conf_mode/service_conntrack-sync.py
index 4fb2ce27f..4fb2ce27f 100755
--- a/src/conf_mode/conntrack_sync.py
+++ b/src/conf_mode/service_conntrack-sync.py
diff --git a/src/conf_mode/dhcp_relay.py b/src/conf_mode/service_dhcp-relay.py
index 37d708847..37d708847 100755
--- a/src/conf_mode/dhcp_relay.py
+++ b/src/conf_mode/service_dhcp-relay.py
diff --git a/src/conf_mode/dhcp_server.py b/src/conf_mode/service_dhcp-server.py
index ac7d95632..ac7d95632 100755
--- a/src/conf_mode/dhcp_server.py
+++ b/src/conf_mode/service_dhcp-server.py
diff --git a/src/conf_mode/dhcpv6_relay.py b/src/conf_mode/service_dhcpv6-relay.py
index 6537ca3c2..6537ca3c2 100755
--- a/src/conf_mode/dhcpv6_relay.py
+++ b/src/conf_mode/service_dhcpv6-relay.py
diff --git a/src/conf_mode/dhcpv6_server.py b/src/conf_mode/service_dhcpv6-server.py
index 427001609..427001609 100755
--- a/src/conf_mode/dhcpv6_server.py
+++ b/src/conf_mode/service_dhcpv6-server.py
diff --git a/src/conf_mode/service_dns_dynamic.py b/src/conf_mode/service_dns_dynamic.py
new file mode 100755
index 000000000..99fa8feee
--- /dev/null
+++ b/src/conf_mode/service_dns_dynamic.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2018-2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from sys import exit
+
+from vyos.base import Warning
+from vyos.config import Config
+from vyos.configverify import verify_interface_exists
+from vyos.template import render
+from vyos.utils.process import call
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+config_file = r'/run/ddclient/ddclient.conf'
+systemd_override = r'/run/systemd/system/ddclient.service.d/override.conf'
+
+# Dynamic interfaces that might not exist when the configuration is loaded
+dynamic_interfaces = ('pppoe', 'sstpc')
+
+# Protocols that require zone
+zone_necessary = ['cloudflare', 'digitalocean', 'godaddy', 'hetzner', 'gandi',
+ 'nfsn', 'nsupdate']
+zone_supported = zone_necessary + ['dnsexit2', 'zoneedit1']
+
+# Protocols that do not require username
+username_unnecessary = ['1984', 'cloudflare', 'cloudns', 'digitalocean', 'dnsexit2',
+ 'duckdns', 'freemyip', 'hetzner', 'keysystems', 'njalla',
+ 'nsupdate', 'regfishde']
+
+# Protocols that support TTL
+ttl_supported = ['cloudflare', 'dnsexit2', 'gandi', 'hetzner', 'godaddy', 'nfsn',
+ 'nsupdate']
+
+# Protocols that support both IPv4 and IPv6
+dualstack_supported = ['cloudflare', 'digitalocean', 'dnsexit2', 'duckdns',
+ 'dyndns2', 'easydns', 'freedns', 'hetzner', 'infomaniak',
+ 'njalla']
+
+# dyndns2 protocol in ddclient honors dual stack for selective servers
+# because of the way it is implemented in ddclient
+dyndns_dualstack_servers = ['members.dyndns.org', 'dynv6.com']
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['service', 'dns', 'dynamic']
+ if not conf.exists(base):
+ return None
+
+ dyndns = conf.get_config_dict(base, key_mangling=('-', '_'),
+ no_tag_node_value_mangle=True,
+ get_first_key=True,
+ with_recursive_defaults=True)
+
+ dyndns['config_file'] = config_file
+ return dyndns
+
+def verify(dyndns):
+ # bail out early - looks like removal from running config
+ if not dyndns or 'name' not in dyndns:
+ return None
+
+ # Dynamic DNS service provider - configuration validation
+ for service, config in dyndns['name'].items():
+
+ error_msg_req = f'is required for Dynamic DNS service "{service}"'
+ error_msg_uns = f'is not supported for Dynamic DNS service "{service}"'
+
+ for field in ['protocol', 'address', 'host_name']:
+ if field not in config:
+ raise ConfigError(f'"{field.replace("_", "-")}" {error_msg_req}')
+
+ # If dyndns address is an interface, ensure
+ # that the interface exists (or just warn if dynamic interface)
+ # and that web-options are not set
+ if config['address'] != 'web':
+ # exclude check interface for dynamic interfaces
+ if config['address'].startswith(dynamic_interfaces):
+ Warning(f'Interface "{config["address"]}" does not exist yet and cannot '
+ f'be used for Dynamic DNS service "{service}" until it is up!')
+ else:
+ verify_interface_exists(config['address'])
+ if 'web_options' in config:
+ raise ConfigError(f'"web-options" is applicable only when using HTTP(S) '
+ f'web request to obtain the IP address')
+
+ # Warn if using checkip.dyndns.org, as it does not support HTTPS
+ # See: https://github.com/ddclient/ddclient/issues/597
+ if 'web_options' in config:
+ if 'url' not in config['web_options']:
+ raise ConfigError(f'"url" in "web-options" {error_msg_req} '
+ f'with protocol "{config["protocol"]}"')
+ elif re.search("^(https?://)?checkip\.dyndns\.org", config['web_options']['url']):
+ Warning(f'"checkip.dyndns.org" does not support HTTPS requests for IP address '
+ f'lookup. Please use a different IP address lookup service.')
+
+ # RFC2136 uses 'key' instead of 'password'
+ if config['protocol'] != 'nsupdate' and 'password' not in config:
+ raise ConfigError(f'"password" {error_msg_req}')
+
+ # Other RFC2136 specific configuration validation
+ if config['protocol'] == 'nsupdate':
+ if 'password' in config:
+ raise ConfigError(f'"password" {error_msg_uns} with protocol "{config["protocol"]}"')
+ for field in ['server', 'key']:
+ if field not in config:
+ raise ConfigError(f'"{field}" {error_msg_req} with protocol "{config["protocol"]}"')
+
+ if config['protocol'] in zone_necessary and 'zone' not in config:
+ raise ConfigError(f'"zone" {error_msg_req} with protocol "{config["protocol"]}"')
+
+ if config['protocol'] not in zone_supported and 'zone' in config:
+ raise ConfigError(f'"zone" {error_msg_uns} with protocol "{config["protocol"]}"')
+
+ if config['protocol'] not in username_unnecessary and 'username' not in config:
+ raise ConfigError(f'"username" {error_msg_req} with protocol "{config["protocol"]}"')
+
+ if config['protocol'] not in ttl_supported and 'ttl' in config:
+ raise ConfigError(f'"ttl" {error_msg_uns} with protocol "{config["protocol"]}"')
+
+ if config['ip_version'] == 'both':
+ if config['protocol'] not in dualstack_supported:
+ raise ConfigError(f'Both IPv4 and IPv6 at the same time {error_msg_uns} '
+ f'with protocol "{config["protocol"]}"')
+ # dyndns2 protocol in ddclient honors dual stack only for selected servers (see dyndns_dualstack_servers above)
+ if config['protocol'] == 'dyndns2' and 'server' in config and config['server'] not in dyndns_dualstack_servers:
+ raise ConfigError(f'Both IPv4 and IPv6 at the same time {error_msg_uns} '
+ f'for "{config["server"]}" with protocol "{config["protocol"]}"')
+
+ if {'wait_time', 'expiry_time'} <= config.keys() and int(config['expiry_time']) < int(config['wait_time']):
+ raise ConfigError(f'"expiry-time" must be greater than "wait-time" for '
+ f'Dynamic DNS service "{service}"')
+
+ return None
+
+def generate(dyndns):
+ # bail out early - looks like removal from running config
+ if not dyndns or 'name' not in dyndns:
+ return None
+
+ render(config_file, 'dns-dynamic/ddclient.conf.j2', dyndns, permission=0o600)
+ render(systemd_override, 'dns-dynamic/override.conf.j2', dyndns)
+ return None
+
+def apply(dyndns):
+ systemd_service = 'ddclient.service'
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+
+ # bail out early - looks like removal from running config
+ if not dyndns or 'name' not in dyndns:
+ call(f'systemctl stop {systemd_service}')
+ if os.path.exists(config_file):
+ os.unlink(config_file)
+ else:
+ call(f'systemctl reload-or-restart {systemd_service}')
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
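
As an illustration of the capability checks above: they reduce to simple membership tests against the protocol lists defined at the top of the file. A minimal, self-contained sketch follows; the service name and settings are made up and not taken from a real configuration.

#!/usr/bin/env python3
# Minimal sketch of the per-protocol capability checks used in verify() above.
# The lists mirror the module-level constants; the sample service is illustrative.

zone_necessary = ['cloudflare', 'digitalocean', 'godaddy', 'hetzner', 'gandi',
                  'nfsn', 'nsupdate']
dualstack_supported = ['cloudflare', 'digitalocean', 'dnsexit2', 'duckdns',
                       'dyndns2', 'easydns', 'freedns', 'hetzner', 'infomaniak',
                       'njalla']

def check_service(name: str, config: dict) -> list:
    """Return a list of human-readable validation problems for one service."""
    errors = []
    for field in ['protocol', 'address', 'host_name']:
        if field not in config:
            errors.append(f'"{field}" is required for service "{name}"')
    protocol = config.get('protocol', '')
    if protocol in zone_necessary and 'zone' not in config:
        errors.append(f'"zone" is required with protocol "{protocol}"')
    if config.get('ip_version') == 'both' and protocol not in dualstack_supported:
        errors.append(f'protocol "{protocol}" cannot update IPv4 and IPv6 at once')
    return errors

# Illustrative service definition: cloudflare without the mandatory "zone" node
sample = {'protocol': 'cloudflare', 'address': 'eth0',
          'host_name': 'r1.example.com', 'ip_version': 'both'}
for problem in check_service('example', sample):
    print(problem)
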
diff --git a/src/conf_mode/dns_forwarding.py b/src/conf_mode/service_dns_forwarding.py
index c186f47af..c186f47af 100755
--- a/src/conf_mode/dns_forwarding.py
+++ b/src/conf_mode/service_dns_forwarding.py
diff --git a/src/conf_mode/service_event_handler.py b/src/conf_mode/service_event-handler.py
index 5028ef52f..5028ef52f 100755
--- a/src/conf_mode/service_event_handler.py
+++ b/src/conf_mode/service_event-handler.py
diff --git a/src/conf_mode/service_https.py b/src/conf_mode/service_https.py
new file mode 100755
index 000000000..46efc3c93
--- /dev/null
+++ b/src/conf_mode/service_https.py
@@ -0,0 +1,238 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2019-2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import socket
+import sys
+import json
+
+from time import sleep
+
+from vyos.base import Warning
+from vyos.config import Config
+from vyos.config import config_dict_merge
+from vyos.configdiff import get_config_diff
+from vyos.configverify import verify_vrf
+from vyos.defaults import api_config_state
+from vyos.pki import wrap_certificate
+from vyos.pki import wrap_private_key
+from vyos.pki import wrap_dh_parameters
+from vyos.pki import load_dh_parameters
+from vyos.template import render
+from vyos.utils.dict import dict_search
+from vyos.utils.process import call
+from vyos.utils.process import is_systemd_service_active
+from vyos.utils.network import check_port_availability
+from vyos.utils.network import is_listen_port_bind_service
+from vyos.utils.file import write_file
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+config_file = '/etc/nginx/sites-enabled/default'
+systemd_override = r'/run/systemd/system/nginx.service.d/override.conf'
+cert_dir = '/run/nginx/certs'
+
+user = 'www-data'
+group = 'www-data'
+
+systemd_service_api = '/run/systemd/system/vyos-http-api.service'
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['service', 'https']
+ if not conf.exists(base):
+ return None
+
+ https = conf.get_config_dict(base, get_first_key=True,
+ key_mangling=('-', '_'),
+ with_pki=True)
+
+ # store path to API config file for later use in templates
+ https['api_config_state'] = api_config_state
+ # get fully qualified system hostname
+ https['hostname'] = socket.getfqdn()
+
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to merge into the retrieved dictionary.
+ default_values = conf.get_config_defaults(**https.kwargs, recursive=True)
+ if 'api' not in https or 'graphql' not in https['api']:
+ del default_values['api']
+
+ # merge CLI and default dictionary
+ https = config_dict_merge(default_values, https)
+ return https
+
+def verify(https):
+ if https is None:
+ return None
+
+ if 'certificates' in https and 'certificate' in https['certificates']:
+ cert_name = https['certificates']['certificate']
+ if 'pki' not in https:
+ raise ConfigError('PKI is not configured!')
+
+ if cert_name not in https['pki']['certificate']:
+ raise ConfigError('Invalid certificate in configuration!')
+
+ pki_cert = https['pki']['certificate'][cert_name]
+
+ if 'certificate' not in pki_cert:
+ raise ConfigError('Missing certificate in configuration!')
+
+ if 'private' not in pki_cert or 'key' not in pki_cert['private']:
+ raise ConfigError('Missing certificate private key in configuration!')
+
+ if 'dh_params' in https['certificates']:
+ dh_name = https['certificates']['dh_params']
+ if dh_name not in https['pki']['dh']:
+ raise ConfigError('Invalid DH parameter in configuration!')
+
+ pki_dh = https['pki']['dh'][dh_name]
+ dh_params = load_dh_parameters(pki_dh['parameters'])
+ dh_numbers = dh_params.parameter_numbers()
+ dh_bits = dh_numbers.p.bit_length()
+ if dh_bits < 2048:
+ raise ConfigError(f'Minimum DH key-size is 2048 bits')
+
+ else:
+ Warning('No certificate specified, using built-in self-signed certificates. '\
+ 'Do not use them in a production environment!')
+
+ # Check if the server port is already in use by a different application
+ listen_address = ['0.0.0.0']
+ port = int(https['port'])
+ if 'listen_address' in https:
+ listen_address = https['listen_address']
+
+ for address in listen_address:
+ if not check_port_availability(address, port, 'tcp') and not is_listen_port_bind_service(port, 'nginx'):
+ raise ConfigError(f'TCP port "{port}" is used by another service!')
+
+ verify_vrf(https)
+
+ # Verify API server settings, if present
+ if 'api' in https:
+ keys = dict_search('api.keys.id', https)
+ gql_auth_type = dict_search('api.graphql.authentication.type', https)
+
+ # If "api graphql" is not defined and `gql_auth_type` is None,
+ # there's certainly no JWT auth option, and keys are required
+ jwt_auth = (gql_auth_type == "token")
+
+ # Check for incomplete key configurations in every case
+ valid_keys_exist = False
+ if keys:
+ for k in keys:
+ if 'key' not in keys[k]:
+ raise ConfigError(f'Missing HTTPS API key string for key id "{k}"')
+ else:
+ valid_keys_exist = True
+
+ # If only key-based methods are enabled,
+ # fail the commit if no valid key configurations are found
+ if (not valid_keys_exist) and (not jwt_auth):
+ raise ConfigError('At least one HTTPS API key is required unless GraphQL token authentication is enabled!')
+
+ if (not valid_keys_exist) and jwt_auth:
+ Warning(f'API keys are not configured: classic (non-GraphQL) API will be unavailable!')
+
+ return None
+
+def generate(https):
+ if https is None:
+ for file in [systemd_service_api, config_file, systemd_override]:
+ if os.path.exists(file):
+ os.unlink(file)
+ return None
+
+ if 'api' in https:
+ render(systemd_service_api, 'https/vyos-http-api.service.j2', https)
+ with open(api_config_state, 'w') as f:
+ json.dump(https['api'], f, indent=2)
+ else:
+ if os.path.exists(systemd_service_api):
+ os.unlink(systemd_service_api)
+
+ # get certificate data
+ if 'certificates' in https and 'certificate' in https['certificates']:
+ cert_name = https['certificates']['certificate']
+ pki_cert = https['pki']['certificate'][cert_name]
+
+ cert_path = os.path.join(cert_dir, f'{cert_name}_cert.pem')
+ key_path = os.path.join(cert_dir, f'{cert_name}_key.pem')
+
+ server_cert = str(wrap_certificate(pki_cert['certificate']))
+
+ # Append CA certificate if specified to form a full chain
+ if 'ca_certificate' in https['certificates']:
+ ca_cert = https['certificates']['ca_certificate']
+ server_cert += '\n' + str(wrap_certificate(https['pki']['ca'][ca_cert]['certificate']))
+
+ write_file(cert_path, server_cert, user=user, group=group, mode=0o644)
+ write_file(key_path, wrap_private_key(pki_cert['private']['key']),
+ user=user, group=group, mode=0o600)
+
+ tmp_path = {'cert_path': cert_path, 'key_path': key_path}
+
+ if 'dh_params' in https['certificates']:
+ dh_name = https['certificates']['dh_params']
+ pki_dh = https['pki']['dh'][dh_name]
+ if 'parameters' in pki_dh:
+ dh_path = os.path.join(cert_dir, f'{dh_name}_dh.pem')
+ write_file(dh_path, wrap_dh_parameters(pki_dh['parameters']),
+ user=user, group=group, mode=0o600)
+ tmp_path.update({'dh_file' : dh_path})
+
+ https['certificates'].update(tmp_path)
+
+ render(config_file, 'https/nginx.default.j2', https)
+ render(systemd_override, 'https/override.conf.j2', https)
+ return None
+
+def apply(https):
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+ http_api_service_name = 'vyos-http-api.service'
+ https_service_name = 'nginx.service'
+
+ if https is None:
+ call(f'systemctl stop {http_api_service_name}')
+ call(f'systemctl stop {https_service_name}')
+ return
+
+ if 'api' in https:
+ call(f'systemctl reload-or-restart {http_api_service_name}')
+ # Let uvicorn settle before (possibly) restarting nginx
+ sleep(1)
+ elif is_systemd_service_active(http_api_service_name):
+ call(f'systemctl stop {http_api_service_name}')
+
+ call(f'systemctl reload-or-restart {https_service_name}')
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ sys.exit(1)
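
The API authentication rule in verify() can be summarised as: every configured key must carry a key string, and a commit with no usable keys only passes when GraphQL token authentication is enabled. A self-contained sketch of that rule follows; the key ids and values are illustrative.

# Sketch of the HTTPS API key/JWT validation rule from verify() above.
# Key ids and values are illustrative only.

def validate_api_auth(keys, gql_auth_type):
    """Return validation findings as (severity, message) tuples."""
    issues = []
    jwt_auth = (gql_auth_type == 'token')   # GraphQL JWT authentication enabled?

    valid_keys_exist = False
    for key_id, key_config in (keys or {}).items():
        if 'key' not in key_config:
            issues.append(('error', f'Missing HTTPS API key string for key id "{key_id}"'))
        else:
            valid_keys_exist = True

    if not valid_keys_exist and not jwt_auth:
        issues.append(('error', 'At least one HTTPS API key is required '
                                'unless GraphQL token authentication is enabled!'))
    if not valid_keys_exist and jwt_auth:
        issues.append(('warning', 'API keys are not configured: '
                                  'classic (non-GraphQL) API will be unavailable!'))
    return issues

print(validate_api_auth({}, 'token'))        # token auth only: just a warning
print(validate_api_auth({'k1': {}}, None))   # incomplete key and no usable keys
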
diff --git a/src/conf_mode/service_ids_fastnetmon.py b/src/conf_mode/service_ids_ddos-protection.py
index 276a71fcb..276a71fcb 100755
--- a/src/conf_mode/service_ids_fastnetmon.py
+++ b/src/conf_mode/service_ids_ddos-protection.py
diff --git a/src/conf_mode/service_ipoe-server.py b/src/conf_mode/service_ipoe-server.py
index b70e32373..36f00dec5 100755
--- a/src/conf_mode/service_ipoe-server.py
+++ b/src/conf_mode/service_ipoe-server.py
@@ -15,17 +15,17 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import jmespath
from sys import exit
from vyos.config import Config
from vyos.configdict import get_accel_dict
-from vyos.configverify import verify_accel_ppp_base_service
from vyos.configverify import verify_interface_exists
from vyos.template import render
from vyos.utils.process import call
from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import get_pools_in_order
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
from vyos import ConfigError
from vyos import airbag
airbag.enable()
@@ -35,87 +35,6 @@ ipoe_conf = '/run/accel-pppd/ipoe.conf'
ipoe_chap_secrets = '/run/accel-pppd/ipoe.chap-secrets'
-def get_pools_in_order(data: dict) -> list:
- """Return a list of dictionaries representing pool data in the order
- in which they should be allocated. Pool must be defined before we can
- use it with 'next-pool' option.
-
- Args:
- data: A dictionary of pool data, where the keys are pool names and the
- values are dictionaries containing the 'subnet' key and the optional
- 'next_pool' key.
-
- Returns:
- list: A list of dictionaries
-
- Raises:
- ValueError: If a 'next_pool' key references a pool name that
- has not been defined.
- ValueError: If a circular reference is found in the 'next_pool' keys.
-
- Example:
- config_data = {
- ... 'first-pool': {
- ... 'next_pool': 'second-pool',
- ... 'subnet': '192.0.2.0/25'
- ... },
- ... 'second-pool': {
- ... 'next_pool': 'third-pool',
- ... 'subnet': '203.0.113.0/25'
- ... },
- ... 'third-pool': {
- ... 'subnet': '198.51.100.0/24'
- ... },
- ... 'foo': {
- ... 'subnet': '100.64.0.0/24',
- ... 'next_pool': 'second-pool'
- ... }
- ... }
-
- % get_pools_in_order(config_data)
- [{'third-pool': {'subnet': '198.51.100.0/24'}},
- {'second-pool': {'next_pool': 'third-pool', 'subnet': '203.0.113.0/25'}},
- {'first-pool': {'next_pool': 'second-pool', 'subnet': '192.0.2.0/25'}},
- {'foo': {'next_pool': 'second-pool', 'subnet': '100.64.0.0/24'}}]
- """
- pools = []
- unresolved_pools = {}
-
- for pool, pool_config in data.items():
- if 'next_pool' not in pool_config:
- pools.insert(0, {pool: pool_config})
- else:
- unresolved_pools[pool] = pool_config
-
- while unresolved_pools:
- resolved_pools = []
-
- for pool, pool_config in unresolved_pools.items():
- next_pool_name = pool_config['next_pool']
-
- if any(p for p in pools if next_pool_name in p):
- index = next(
- (i for i, p in enumerate(pools) if next_pool_name in p),
- None)
- pools.insert(index + 1, {pool: pool_config})
- resolved_pools.append(pool)
- elif next_pool_name in unresolved_pools:
- # next pool not yet resolved
- pass
- else:
- raise ValueError(
- f"Pool '{next_pool_name}' not defined in configuration data"
- )
-
- if not resolved_pools:
- raise ValueError("Circular reference in configuration data")
-
- for pool in resolved_pools:
- unresolved_pools.pop(pool)
-
- return pools
-
-
def get_config(config=None):
if config:
conf = config
@@ -128,18 +47,11 @@ def get_config(config=None):
# retrieve common dictionary keys
ipoe = get_accel_dict(conf, base, ipoe_chap_secrets)
- if jmespath.search('client_ip_pool.name', ipoe):
- dict_named_pools = jmespath.search('client_ip_pool.name', ipoe)
+ if dict_search('client_ip_pool', ipoe):
# Multiple named pools require ordered values T5099
- ipoe['ordered_named_pools'] = get_pools_in_order(dict_named_pools)
- # T5099 'next-pool' option
- if jmespath.search('client_ip_pool.name.*.next_pool', ipoe):
- for pool, pool_config in ipoe['client_ip_pool']['name'].items():
- if 'next_pool' in pool_config:
- ipoe['first_named_pool'] = pool
- ipoe['first_named_pool_subnet'] = pool_config
- break
+ ipoe['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', ipoe))
+ ipoe['server_type'] = 'ipoe'
return ipoe
@@ -156,9 +68,7 @@ def verify(ipoe):
raise ConfigError('Option "client-subnet" incompatible with "vlan"!'
'Use "ipoe client-ip-pool" instead.')
- #verify_accel_ppp_base_service(ipoe, local_users=False)
- # IPoE server does not have 'gateway' option in the CLI
- # we cannot use configverify.py verify_accel_ppp_base_service for ipoe-server
+ verify_accel_ppp_ip_pool(ipoe)
if dict_search('authentication.mode', ipoe) == 'radius':
if not dict_search('authentication.radius.server', ipoe):
diff --git a/src/conf_mode/lldp.py b/src/conf_mode/service_lldp.py
index c2e87d171..3c647a0e8 100755
--- a/src/conf_mode/lldp.py
+++ b/src/conf_mode/service_lldp.py
@@ -86,9 +86,9 @@ def verify(lldp):
raise ConfigError(f'Must define both longitude and latitude for "{interface}" location!')
# check options
- if 'snmp' in lldp and 'enable' in lldp['snmp']:
+ if 'snmp' in lldp:
if 'system_snmp_enabled' not in lldp:
- raise ConfigError('SNMP must be configured to enable LLDP SNMP')
+ raise ConfigError('SNMP must be configured to enable LLDP SNMP!')
def generate(lldp):
@@ -121,4 +121,3 @@ if __name__ == '__main__':
except ConfigError as e:
print(e)
exit(1)
-
diff --git a/src/conf_mode/service_mdns-repeater.py b/src/conf_mode/service_mdns_repeater.py
index a2c90b537..6526c23d1 100755
--- a/src/conf_mode/service_mdns-repeater.py
+++ b/src/conf_mode/service_mdns_repeater.py
@@ -18,7 +18,7 @@ import os
from json import loads
from sys import exit
-from netifaces import ifaddresses, interfaces, AF_INET
+from netifaces import ifaddresses, interfaces, AF_INET, AF_INET6
from vyos.config import Config
from vyos.ifconfig.vrrp import VRRP
@@ -29,6 +29,7 @@ from vyos import airbag
airbag.enable()
config_file = '/run/avahi-daemon/avahi-daemon.conf'
+systemd_override = r'/run/systemd/system/avahi-daemon.service.d/override.conf'
vrrp_running_file = '/run/mdns_vrrp_active'
def get_config(config=None):
@@ -36,18 +37,24 @@ def get_config(config=None):
conf = config
else:
conf = Config()
+
base = ['service', 'mdns', 'repeater']
- mdns = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+ if not conf.exists(base):
+ return None
+
+ mdns = conf.get_config_dict(base, key_mangling=('-', '_'),
+ no_tag_node_value_mangle=True,
+ get_first_key=True,
+ with_recursive_defaults=True)
if mdns:
mdns['vrrp_exists'] = conf.exists('high-availability vrrp')
+ mdns['config_file'] = config_file
+
return mdns
def verify(mdns):
- if not mdns:
- return None
-
- if 'disable' in mdns:
+ if not mdns or 'disable' in mdns:
return None
# We need at least two interfaces to repeat mDNS advertisements
@@ -60,10 +67,14 @@ def verify(mdns):
if interface not in interfaces():
raise ConfigError(f'Interface "{interface}" does not exist!')
- if AF_INET not in ifaddresses(interface):
+ if mdns['ip_version'] in ['ipv4', 'both'] and AF_INET not in ifaddresses(interface):
raise ConfigError('mDNS repeater requires an IPv4 address to be '
f'configured on interface "{interface}"')
+ if mdns['ip_version'] in ['ipv6', 'both'] and AF_INET6 not in ifaddresses(interface):
+ raise ConfigError('mDNS repeater requires an IPv6 address to be '
+ f'configured on interface "{interface}"')
+
return None
# Get VRRP states from interfaces, returns only interfaces where state is MASTER
@@ -92,12 +103,17 @@ def generate(mdns):
if len(mdns['interface']) < 2:
return None
- render(config_file, 'mdns-repeater/avahi-daemon.j2', mdns)
+ render(config_file, 'mdns-repeater/avahi-daemon.conf.j2', mdns)
+ render(systemd_override, 'mdns-repeater/override.conf.j2', mdns)
return None
def apply(mdns):
+ systemd_service = 'avahi-daemon.service'
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+
if not mdns or 'disable' in mdns:
- call('systemctl stop avahi-daemon.service')
+ call(f'systemctl stop {systemd_service}')
if os.path.exists(config_file):
os.unlink(config_file)
@@ -112,10 +128,10 @@ def apply(mdns):
os.mknod(vrrp_running_file) # vrrp script looks for this file to update mdns repeater
if len(mdns['interface']) < 2:
- call('systemctl stop avahi-daemon.service')
+ call(f'systemctl stop {systemd_service}')
return None
- call('systemctl restart avahi-daemon.service')
+ call(f'systemctl restart {systemd_service}')
return None
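
The new ip-version handling relies on per-address-family lookups via netifaces, as imported above. A standalone sketch of the same check follows; the interface name 'eth0' is only an example and must exist on the host running the snippet.

# Standalone sketch of the per-interface address-family check added above.
# 'eth0' is only an example interface name.
from netifaces import ifaddresses, interfaces, AF_INET, AF_INET6

def has_required_addresses(interface: str, ip_version: str) -> bool:
    """Mirror the mDNS repeater requirement: IPv4 and/or IPv6 must be configured."""
    if interface not in interfaces():
        return False
    families = ifaddresses(interface)
    if ip_version in ('ipv4', 'both') and AF_INET not in families:
        return False
    if ip_version in ('ipv6', 'both') and AF_INET6 not in families:
        return False
    return True

print(has_required_addresses('eth0', 'both'))
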
diff --git a/src/conf_mode/service_ndp-proxy.py b/src/conf_mode/service_ndp-proxy.py
new file mode 100755
index 000000000..aa2374f4c
--- /dev/null
+++ b/src/conf_mode/service_ndp-proxy.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+
+from vyos.config import Config
+from vyos.configverify import verify_interface_exists
+from vyos.utils.process import call
+from vyos.template import render
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+systemd_service = 'ndppd.service'
+ndppd_config = '/run/ndppd/ndppd.conf'
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['service', 'ndp-proxy']
+ if not conf.exists(base):
+ return None
+
+ ndpp = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ with_recursive_defaults=True)
+
+ return ndpp
+
+def verify(ndpp):
+ if not ndpp:
+ return None
+
+ if 'interface' in ndpp:
+ for interface, interface_config in ndpp['interface'].items():
+ verify_interface_exists(interface)
+
+ if 'rule' in interface_config:
+ for rule, rule_config in interface_config['rule'].items():
+ if rule_config['mode'] == 'interface' and 'interface' not in rule_config:
+ raise ConfigError(f'Rule "{rule}" uses interface mode but no interface defined!')
+
+ if rule_config['mode'] != 'interface' and 'interface' in rule_config:
+ if interface_config['mode'] != 'interface' and 'interface' in interface_config:
+ raise ConfigError(f'Rule "{rule}" does not use interface mode, thus interface can not be defined!')
+
+ return None
+
+def generate(ndpp):
+ if not ndpp:
+ return None
+
+ render(ndppd_config, 'ndppd/ndppd.conf.j2', ndpp)
+ return None
+
+def apply(ndpp):
+ if not ndpp:
+ call(f'systemctl stop {systemd_service}')
+ if os.path.isfile(ndppd_config):
+ os.unlink(ndppd_config)
+ return None
+
+ call(f'systemctl reload-or-restart {systemd_service}')
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
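
A short sketch of the rule/mode consistency requirement enforced in verify() above. The dictionary is a hand-written example of the shape produced by get_config_dict, and the 'static' mode value is assumed purely for illustration.

# Hand-written example of the ndp-proxy config dict shape checked in verify().
ndpp = {
    'interface': {
        'eth0': {
            'rule': {
                '2001:db8::/64': {'mode': 'interface'},                    # missing 'interface'
                '2001:db8:1::/64': {'mode': 'static', 'interface': 'eth1'} # 'static' assumed
            }
        }
    }
}

for interface, interface_config in ndpp.get('interface', {}).items():
    for rule, rule_config in interface_config.get('rule', {}).items():
        if rule_config['mode'] == 'interface' and 'interface' not in rule_config:
            print(f'Rule "{rule}" uses interface mode but no interface defined!')
        if rule_config['mode'] != 'interface' and 'interface' in rule_config:
            print(f'Rule "{rule}" does not use interface mode, '
                  f'thus interface can not be defined!')
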
diff --git a/src/conf_mode/ntp.py b/src/conf_mode/service_ntp.py
index 1cc23a7df..1cc23a7df 100755
--- a/src/conf_mode/ntp.py
+++ b/src/conf_mode/service_ntp.py
diff --git a/src/conf_mode/service_pppoe-server.py b/src/conf_mode/service_pppoe-server.py
index aace267a7..7c624f034 100755
--- a/src/conf_mode/service_pppoe-server.py
+++ b/src/conf_mode/service_pppoe-server.py
@@ -21,13 +21,16 @@ from sys import exit
from vyos.config import Config
from vyos.configdict import get_accel_dict
from vyos.configdict import is_node_changed
-from vyos.configverify import verify_accel_ppp_base_service
from vyos.configverify import verify_interface_exists
from vyos.template import render
from vyos.utils.process import call
from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import verify_accel_ppp_base_service
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
+from vyos.accel_ppp_util import get_pools_in_order
from vyos import ConfigError
from vyos import airbag
+
airbag.enable()
pppoe_conf = r'/run/accel-pppd/pppoe.conf'
@@ -45,12 +48,19 @@ def get_config(config=None):
# retrieve common dictionary keys
pppoe = get_accel_dict(conf, base, pppoe_chap_secrets)
+ if dict_search('client_ip_pool', pppoe):
+ # Multiple named pools require ordered values T5099
+ pppoe['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', pppoe))
+
# reload-or-restart is not implemented in accel-ppp
# use this workaround until it is implemented
# https://phabricator.accel-ppp.org/T3
- if is_node_changed(conf, base + ['client-ip-pool']) or is_node_changed(
- conf, base + ['client-ipv6-pool']):
+ conditions = [is_node_changed(conf, base + ['client-ip-pool']),
+ is_node_changed(conf, base + ['client-ipv6-pool']),
+ is_node_changed(conf, base + ['interface'])]
+ if any(conditions):
pppoe.update({'restart_required': {}})
+ pppoe['server_type'] = 'pppoe'
return pppoe
def verify(pppoe):
@@ -69,12 +79,7 @@ def verify(pppoe):
for interface in pppoe['interface']:
verify_interface_exists(interface)
- # local ippool and gateway settings config checks
- if not (dict_search('client_ip_pool.subnet', pppoe) or
- (dict_search('client_ip_pool.name', pppoe) or
- (dict_search('client_ip_pool.start', pppoe) and
- dict_search('client_ip_pool.stop', pppoe)))):
- print('Warning: No PPPoE client pool defined')
+ verify_accel_ppp_ip_pool(pppoe)
if dict_search('authentication.radius.dynamic_author.server', pppoe):
if not dict_search('authentication.radius.dynamic_author.key', pppoe):
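
Both the IPoE and PPPoE scripts now delegate pool ordering to vyos.accel_ppp_util.get_pools_in_order(). A usage sketch based on the docstring example removed from service_ipoe-server.py above; it assumes the vyos Python package is importable on the host.

# Usage sketch for the shared pool-ordering helper; data taken from the
# docstring example removed from service_ipoe-server.py above.
# Requires the vyos Python package to be importable.
from vyos.accel_ppp_util import get_pools_in_order

config_data = {
    'first-pool': {'next_pool': 'second-pool', 'subnet': '192.0.2.0/25'},
    'second-pool': {'next_pool': 'third-pool', 'subnet': '203.0.113.0/25'},
    'third-pool': {'subnet': '198.51.100.0/24'},
    'foo': {'subnet': '100.64.0.0/24', 'next_pool': 'second-pool'},
}

# Pools referenced via 'next_pool' are ordered before the pools that use them.
for entry in get_pools_in_order(config_data):
    print(entry)
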
diff --git a/src/conf_mode/salt-minion.py b/src/conf_mode/service_salt-minion.py
index a8fce8e01..a8fce8e01 100755
--- a/src/conf_mode/salt-minion.py
+++ b/src/conf_mode/service_salt-minion.py
diff --git a/src/conf_mode/snmp.py b/src/conf_mode/service_snmp.py
index 7882f8510..f65d0d6bc 100755
--- a/src/conf_mode/snmp.py
+++ b/src/conf_mode/service_snmp.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2021 VyOS maintainers and contributors
+# Copyright (C) 2018-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -54,7 +54,7 @@ def get_config(config=None):
if not conf.exists(base):
snmp.update({'deleted' : ''})
- if conf.exists(['service', 'lldp', 'snmp', 'enable']):
+ if conf.exists(['service', 'lldp', 'snmp']):
snmp.update({'lldp_snmp' : ''})
if 'deleted' in snmp:
@@ -86,7 +86,7 @@ def get_config(config=None):
return snmp
def verify(snmp):
- if not snmp:
+ if 'deleted' in snmp:
return None
if {'deleted', 'lldp_snmp'} <= set(snmp):
@@ -178,8 +178,6 @@ def verify(snmp):
return None
def generate(snmp):
-
- #
# As we are manipulating the snmpd user database we have to stop it first!
# This is safe even if the service is going to be removed
call(f'systemctl stop {systemd_service}')
@@ -190,7 +188,7 @@ def generate(snmp):
if os.path.isfile(file):
os.unlink(file)
- if not snmp:
+ if 'deleted' in snmp:
return None
if 'v3' in snmp:
@@ -244,7 +242,7 @@ def apply(snmp):
# Always reload systemd manager configuration
call('systemctl daemon-reload')
- if not snmp:
+ if 'deleted' in snmp:
return None
# start SNMP daemon
@@ -257,9 +255,7 @@ def apply(snmp):
'bgpd', 'ospf6d', 'ospfd', 'ripd', 'ripngd', 'isisd', 'ldpd', 'zebra'
]
for frr_daemon in frr_daemons_list:
- call(
- f'vtysh -c "configure terminal" -d {frr_daemon} -c "agentx" >/dev/null'
- )
+ call(f'vtysh -c "configure terminal" -d {frr_daemon} -c "agentx" >/dev/null')
return None
diff --git a/src/conf_mode/ssh.py b/src/conf_mode/service_ssh.py
index ee5e1eca2..ee5e1eca2 100755
--- a/src/conf_mode/ssh.py
+++ b/src/conf_mode/service_ssh.py
diff --git a/src/conf_mode/tftp_server.py b/src/conf_mode/service_tftp-server.py
index 3ad346e2e..3ad346e2e 100755
--- a/src/conf_mode/tftp_server.py
+++ b/src/conf_mode/service_tftp-server.py
diff --git a/src/conf_mode/intel_qat.py b/src/conf_mode/system_acceleration.py
index e4b248675..e4b248675 100755
--- a/src/conf_mode/intel_qat.py
+++ b/src/conf_mode/system_acceleration.py
diff --git a/src/conf_mode/config_mgmt.py b/src/conf_mode/system_config-management.py
index c681a8405..c681a8405 100755
--- a/src/conf_mode/config_mgmt.py
+++ b/src/conf_mode/system_config-management.py
diff --git a/src/conf_mode/conntrack.py b/src/conf_mode/system_conntrack.py
index 9c43640a9..9c43640a9 100755
--- a/src/conf_mode/conntrack.py
+++ b/src/conf_mode/system_conntrack.py
diff --git a/src/conf_mode/flow_accounting_conf.py b/src/conf_mode/system_flow-accounting.py
index 71acd69fa..f29fc94fb 100755
--- a/src/conf_mode/flow_accounting_conf.py
+++ b/src/conf_mode/system_flow-accounting.py
@@ -28,6 +28,7 @@ from vyos.ifconfig import Section
from vyos.template import render
from vyos.utils.process import call
from vyos.utils.process import cmd
+from vyos.utils.process import run
from vyos.utils.network import is_addr_assigned
from vyos import ConfigError
from vyos import airbag
@@ -116,6 +117,30 @@ def _nftables_config(configured_ifaces, direction, length=None):
cmd(command, raising=ConfigError)
+def _nftables_trigger_setup(operation: str) -> None:
+ """Add a dummy rule to unlock the main pmacct loop with a packet-trigger
+
+ Args:
+ operation (str): 'add' or 'delete' a trigger
+ """
+ # check if the pmacct table already exists
+ table_exists = False
+ if run('nft -snj list table ip pmacct') == 0:
+ table_exists = True
+
+ if operation == 'delete' and table_exists:
+ nft_cmd: str = 'nft delete table ip pmacct'
+ cmd(nft_cmd, raising=ConfigError)
+ if operation == 'add' and not table_exists:
+ nft_cmds: list[str] = [
+ 'nft add table ip pmacct',
+ 'nft add chain ip pmacct pmacct_out { type filter hook output priority raw - 50 \\; policy accept \\; }',
+ 'nft add rule ip pmacct pmacct_out oif lo ip daddr 127.0.254.0 counter log group 2 snaplen 1 queue-threshold 0 comment NFLOG_TRIGGER'
+ ]
+ for nft_cmd in nft_cmds:
+ cmd(nft_cmd, raising=ConfigError)
+
+
def get_config(config=None):
if config:
conf = config
@@ -252,7 +277,6 @@ def generate(flow_config):
call('systemctl daemon-reload')
def apply(flow_config):
- action = 'restart'
# Check if flow-accounting was removed and define command
if not flow_config:
_nftables_config([], 'ingress')
@@ -262,6 +286,10 @@ def apply(flow_config):
call(f'systemctl stop {systemd_service}')
if os.path.exists(uacctd_conf_path):
os.unlink(uacctd_conf_path)
+
+ # must be done after systemctl
+ _nftables_trigger_setup('delete')
+
return
# Start/reload flow-accounting daemon
@@ -277,6 +305,10 @@ def apply(flow_config):
else:
_nftables_config([], 'egress')
+ # add a trigger for signal processing
+ _nftables_trigger_setup('add')
+
+
if __name__ == '__main__':
try:
config = get_config()
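
The _nftables_trigger_setup() helper above is idempotent: the pmacct table is only created when absent and only deleted when present. A rough standalone equivalent using subprocess with the same nft commands follows (must run as root; the vyos process helpers are replaced purely for illustration).

# Rough standalone equivalent of _nftables_trigger_setup() above, using
# subprocess instead of the vyos.utils.process helpers. Must run as root.
import subprocess

def nftables_trigger_setup(operation: str) -> None:
    # 'nft list table' exits 0 only when the table already exists
    table_exists = subprocess.run(['nft', '-snj', 'list', 'table', 'ip', 'pmacct'],
                                  capture_output=True).returncode == 0

    if operation == 'delete' and table_exists:
        subprocess.run(['nft', 'delete', 'table', 'ip', 'pmacct'], check=True)
    if operation == 'add' and not table_exists:
        subprocess.run(['nft', 'add', 'table', 'ip', 'pmacct'], check=True)
        subprocess.run(['nft', 'add', 'chain', 'ip', 'pmacct', 'pmacct_out',
                        '{ type filter hook output priority raw - 50 ; policy accept ; }'],
                       check=True)
        subprocess.run(['nft', 'add', 'rule', 'ip', 'pmacct', 'pmacct_out',
                        'oif', 'lo', 'ip', 'daddr', '127.0.254.0', 'counter',
                        'log', 'group', '2', 'snaplen', '1', 'queue-threshold', '0',
                        'comment', 'NFLOG_TRIGGER'],
                       check=True)
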
diff --git a/src/conf_mode/system_frr.py b/src/conf_mode/system_frr.py
index fb252238a..d43b172a6 100755
--- a/src/conf_mode/system_frr.py
+++ b/src/conf_mode/system_frr.py
@@ -22,17 +22,14 @@ from vyos import airbag
from vyos.config import Config
from vyos.logger import syslog
from vyos.template import render_to_string
+from vyos.utils.boot import boot_configuration_complete
from vyos.utils.file import read_file
from vyos.utils.file import write_file
-from vyos.utils.process import run
+from vyos.utils.process import call
airbag.enable()
# path to daemons config and config status files
config_file = '/etc/frr/daemons'
-vyos_status_file = '/tmp/vyos-config-status'
-# path to watchfrr for FRR control
-watchfrr = '/usr/lib/frr/watchfrr.sh'
-
def get_config(config=None):
if config:
@@ -41,16 +38,16 @@ def get_config(config=None):
conf = Config()
base = ['system', 'frr']
- frr_config = conf.get_config_dict(base, get_first_key=True)
+ frr_config = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ with_recursive_defaults=True)
return frr_config
-
def verify(frr_config):
# Nothing to verify here
pass
-
def generate(frr_config):
# read daemons config file
daemons_config_current = read_file(config_file)
@@ -62,25 +59,21 @@ def generate(frr_config):
write_file(config_file, daemons_config_new)
frr_config['config_file_changed'] = True
-
def apply(frr_config):
- # check if this is initial commit during boot or intiated by CLI
- # if the file exists, this must be CLI commit
- commit_type_cli = Path(vyos_status_file).exists()
# display warning to user
- if commit_type_cli and frr_config.get('config_file_changed'):
+ if boot_configuration_complete() and frr_config.get('config_file_changed'):
# Since an FRR restart is not a safe operation, better to give
# control over this to users
print('''
You need to reboot a router (preferred) or restart FRR
to apply changes in modules settings
''')
- # restart FRR automatically. DUring the initial boot this should be
- # safe in most cases
- if not commit_type_cli and frr_config.get('config_file_changed'):
- syslog.warning('Restarting FRR to apply changes in modules')
- run(f'{watchfrr} restart')
+ # restart FRR automatically
+ # During initial boot this should be safe in most cases
+ if not boot_configuration_complete() and frr_config.get('config_file_changed'):
+ syslog.warning('Restarting FRR to apply changes in modules')
+ call(f'systemctl restart frr.service')
if __name__ == '__main__':
try:
diff --git a/src/conf_mode/host_name.py b/src/conf_mode/system_host-name.py
index 36d1f6493..6204cf247 100755
--- a/src/conf_mode/host_name.py
+++ b/src/conf_mode/system_host-name.py
@@ -61,8 +61,9 @@ def get_config(config=None):
hosts['domain_name'] = conf.return_value(['system', 'domain-name'])
hosts['domain_search'].append(hosts['domain_name'])
- for search in conf.return_values(['system', 'domain-search', 'domain']):
- hosts['domain_search'].append(search)
+ if conf.exists(['system', 'domain-search']):
+ for search in conf.return_values(['system', 'domain-search']):
+ hosts['domain_search'].append(search)
if conf.exists(['system', 'name-server']):
for ns in conf.return_values(['system', 'name-server']):
diff --git a/src/conf_mode/system-ip.py b/src/conf_mode/system_ip.py
index 5e4e5ec28..7612e2c0d 100755
--- a/src/conf_mode/system-ip.py
+++ b/src/conf_mode/system_ip.py
@@ -20,10 +20,12 @@ from vyos.config import Config
from vyos.configdict import dict_merge
from vyos.configverify import verify_route_map
from vyos.template import render_to_string
-from vyos.utils.process import call
from vyos.utils.dict import dict_search
from vyos.utils.file import write_file
+from vyos.utils.process import call
+from vyos.utils.process import is_systemd_service_active
from vyos.utils.system import sysctl_write
+
from vyos import ConfigError
from vyos import frr
from vyos import airbag
@@ -115,16 +117,20 @@ def apply(opt):
value = '48' if (tmp is None) else tmp
sysctl_write('net.ipv4.tcp_mtu_probe_floor', value)
- zebra_daemon = 'zebra'
- # Save original configuration prior to starting any commit actions
- frr_cfg = frr.FRRConfig()
-
- # The route-map used for the FIB (zebra) is part of the zebra daemon
- frr_cfg.load_configuration(zebra_daemon)
- frr_cfg.modify_section(r'ip protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)')
- if 'frr_zebra_config' in opt:
- frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config'])
- frr_cfg.commit_configuration(zebra_daemon)
+ # During startup of vyos-router, which brings up FRR, the service is not yet
+ # running when this script is first called. Skip this part and wait for the
+ # initial commit of the configuration to trigger this statement.
+ if is_systemd_service_active('frr.service'):
+ zebra_daemon = 'zebra'
+ # Save original configuration prior to starting any commit actions
+ frr_cfg = frr.FRRConfig()
+
+ # The route-map used for the FIB (zebra) is part of the zebra daemon
+ frr_cfg.load_configuration(zebra_daemon)
+ frr_cfg.modify_section(r'ip protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)')
+ if 'frr_zebra_config' in opt:
+ frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config'])
+ frr_cfg.commit_configuration(zebra_daemon)
if __name__ == '__main__':
try:
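
The new guard only commits the zebra route-map configuration when FRR is actually running. Below is an illustrative stand-in for is_systemd_service_active() built directly on systemctl; the real vyos helper may be implemented differently.

# Illustrative stand-in for vyos.utils.process.is_systemd_service_active();
# the real helper may be implemented differently.
import subprocess

def service_is_active(unit: str) -> bool:
    # 'systemctl is-active --quiet' exits 0 only when the unit is active
    return subprocess.run(['systemctl', 'is-active', '--quiet', unit]).returncode == 0

if service_is_active('frr.service'):
    print('FRR is running - safe to commit zebra route-map configuration')
else:
    print('FRR not running yet - skip, the initial commit will apply it later')
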
diff --git a/src/conf_mode/system-ipv6.py b/src/conf_mode/system_ipv6.py
index e40ed38e2..90a1a8087 100755
--- a/src/conf_mode/system-ipv6.py
+++ b/src/conf_mode/system_ipv6.py
@@ -22,8 +22,9 @@ from vyos.configdict import dict_merge
from vyos.configverify import verify_route_map
from vyos.template import render_to_string
from vyos.utils.dict import dict_search
-from vyos.utils.system import sysctl_write
from vyos.utils.file import write_file
+from vyos.utils.process import is_systemd_service_active
+from vyos.utils.system import sysctl_write
from vyos import ConfigError
from vyos import frr
from vyos import airbag
@@ -93,16 +94,20 @@ def apply(opt):
if name == 'accept_dad':
write_file(os.path.join(root, name), value)
- zebra_daemon = 'zebra'
- # Save original configuration prior to starting any commit actions
- frr_cfg = frr.FRRConfig()
+ # During startup of vyos-router, which brings up FRR, the service is not yet
+ # running when this script is first called. Skip this part and wait for the
+ # initial commit of the configuration to trigger this statement.
+ if is_systemd_service_active('frr.service'):
+ zebra_daemon = 'zebra'
+ # Save original configuration prior to starting any commit actions
+ frr_cfg = frr.FRRConfig()
- # The route-map used for the FIB (zebra) is part of the zebra daemon
- frr_cfg.load_configuration(zebra_daemon)
- frr_cfg.modify_section(r'ipv6 protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)')
- if 'frr_zebra_config' in opt:
- frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config'])
- frr_cfg.commit_configuration(zebra_daemon)
+ # The route-map used for the FIB (zebra) is part of the zebra daemon
+ frr_cfg.load_configuration(zebra_daemon)
+ frr_cfg.modify_section(r'ipv6 protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)')
+ if 'frr_zebra_config' in opt:
+ frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config'])
+ frr_cfg.commit_configuration(zebra_daemon)
if __name__ == '__main__':
try:
diff --git a/src/conf_mode/system-login.py b/src/conf_mode/system_login.py
index 02c97afaa..30e823bd4 100755
--- a/src/conf_mode/system-login.py
+++ b/src/conf_mode/system_login.py
@@ -20,6 +20,7 @@ from passlib.hosts import linux_context
from psutil import users
from pwd import getpwall
from pwd import getpwnam
+from pwd import getpwuid
from sys import exit
from time import sleep
@@ -29,6 +30,7 @@ from vyos.defaults import directories
from vyos.template import render
from vyos.template import is_ipv4
from vyos.utils.dict import dict_search
+from vyos.utils.file import chown
from vyos.utils.process import cmd
from vyos.utils.process import call
from vyos.utils.process import rc_cmd
@@ -104,6 +106,9 @@ def get_config(config=None):
# prune TACACS global defaults if not set by user
if login.from_defaults(['tacacs']):
del login['tacacs']
+ # same for RADIUS
+ if login.from_defaults(['radius']):
+ del login['radius']
# create a list of all users, cli and users
all_users = list(set(local_users + cli_users))
@@ -303,6 +308,7 @@ def generate(login):
def apply(login):
+ enable_otp = False
if 'user' in login:
for user, user_config in login['user'].items():
# make new user using vyatta shell and make home directory (-m),
@@ -330,13 +336,19 @@ def apply(login):
command += f' --groups frr,frrvty,vyattacfg,sudo,adm,dip,disk {user}'
try:
cmd(command)
-
# we should not rely on the value stored in
# user_config['home_directory'], as a crazy user will choose
# username root or any other system user which will fail.
#
# XXX: Should we deny using root at all?
home_dir = getpwnam(user).pw_dir
+ # T5875: ensure UID is properly set on home directory if user is re-added
+ # the home directory will always exist, as it's created above by --create-home,
+ # retrieve current owner of home directory and adjust it on demand
+ dir_owner = getpwuid(os.stat(home_dir).st_uid).pw_name
+ if dir_owner != user:
+ chown(home_dir, user=user, recursive=True)
+
render(f'{home_dir}/.ssh/authorized_keys', 'login/authorized_keys.j2',
user_config, permission=0o600,
formater=lambda _: _.replace("&quot;", '"'),
@@ -347,6 +359,7 @@ def apply(login):
# Generate 2FA/MFA One-Time-Pad configuration
if dict_search('authentication.otp.key', user_config):
+ enable_otp = True
render(f'{home_dir}/.google_authenticator', 'login/pam_otp_ga.conf.j2',
user_config, permission=0o400, user=user, group='users')
else:
@@ -377,17 +390,28 @@ def apply(login):
except Exception as e:
raise ConfigError(f'Deleting user "{user}" raised exception: {e}')
- # Enable RADIUS in PAM configuration
- pam_cmd = '--remove'
+ # Enable/disable RADIUS in PAM configuration
+ cmd('pam-auth-update --disable radius-mandatory radius-optional')
if 'radius' in login:
- pam_cmd = '--enable'
- cmd(f'pam-auth-update --package {pam_cmd} radius')
-
- # Enable/Disable TACACS in PAM configuration
- pam_cmd = '--remove'
+ if login['radius'].get('security_mode', '') == 'mandatory':
+ pam_profile = 'radius-mandatory'
+ else:
+ pam_profile = 'radius-optional'
+ cmd(f'pam-auth-update --enable {pam_profile}')
+
+ # Enable/disable TACACS+ in PAM configuration
+ cmd('pam-auth-update --disable tacplus-mandatory tacplus-optional')
if 'tacacs' in login:
- pam_cmd = '--enable'
- cmd(f'pam-auth-update --package {pam_cmd} tacplus')
+ if login['tacacs'].get('security_mode', '') == 'mandatory':
+ pam_profile = 'tacplus-mandatory'
+ else:
+ pam_profile = 'tacplus-optional'
+ cmd(f'pam-auth-update --enable {pam_profile}')
+
+ # Enable/disable Google authenticator
+ cmd('pam-auth-update --disable mfa-google-authenticator')
+ if enable_otp:
+ cmd(f'pam-auth-update --enable mfa-google-authenticator')
return None
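
The T5875 fix compares the current owner of the home directory with the user being re-added and only chowns when they differ. A standalone sketch of that ownership check using only the standard library follows; the user name 'vyos' is an example and must exist on the host.

# Standalone sketch of the home-directory ownership check added for T5875.
# 'vyos' is an example user name; it must exist on the system running this.
import os
import pwd

def home_dir_owner_matches(user: str) -> bool:
    home_dir = pwd.getpwnam(user).pw_dir
    dir_owner = pwd.getpwuid(os.stat(home_dir).st_uid).pw_name
    return dir_owner == user

user = 'vyos'
if not home_dir_owner_matches(user):
    # the conf-mode script would call chown(home_dir, user=user, recursive=True) here
    print(f'home directory of "{user}" is owned by someone else - needs chown')
else:
    print(f'home directory ownership for "{user}" is correct')
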
diff --git a/src/conf_mode/system-login-banner.py b/src/conf_mode/system_login_banner.py
index 65fa04417..65fa04417 100755
--- a/src/conf_mode/system-login-banner.py
+++ b/src/conf_mode/system_login_banner.py
diff --git a/src/conf_mode/system-logs.py b/src/conf_mode/system_logs.py
index 8ad4875d4..8ad4875d4 100755
--- a/src/conf_mode/system-logs.py
+++ b/src/conf_mode/system_logs.py
diff --git a/src/conf_mode/system-option.py b/src/conf_mode/system_option.py
index d92121b3d..d92121b3d 100755
--- a/src/conf_mode/system-option.py
+++ b/src/conf_mode/system_option.py
diff --git a/src/conf_mode/system-proxy.py b/src/conf_mode/system_proxy.py
index 079c43e7e..079c43e7e 100755
--- a/src/conf_mode/system-proxy.py
+++ b/src/conf_mode/system_proxy.py
diff --git a/src/conf_mode/system-syslog.py b/src/conf_mode/system_syslog.py
index 07fbb0734..07fbb0734 100755
--- a/src/conf_mode/system-syslog.py
+++ b/src/conf_mode/system_syslog.py
diff --git a/src/conf_mode/task_scheduler.py b/src/conf_mode/system_task-scheduler.py
index 129be5d3c..129be5d3c 100755
--- a/src/conf_mode/task_scheduler.py
+++ b/src/conf_mode/system_task-scheduler.py
diff --git a/src/conf_mode/system-timezone.py b/src/conf_mode/system_timezone.py
index cd3d4b229..cd3d4b229 100755
--- a/src/conf_mode/system-timezone.py
+++ b/src/conf_mode/system_timezone.py
diff --git a/src/conf_mode/system_update_check.py b/src/conf_mode/system_update-check.py
index 8d641a97d..8d641a97d 100755
--- a/src/conf_mode/system_update_check.py
+++ b/src/conf_mode/system_update-check.py
diff --git a/src/conf_mode/vpn_ipsec.py b/src/conf_mode/vpn_ipsec.py
index fa271cbdb..adbac0405 100755
--- a/src/conf_mode/vpn_ipsec.py
+++ b/src/conf_mode/vpn_ipsec.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2021-2023 VyOS maintainers and contributors
+# Copyright (C) 2021-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -29,7 +29,10 @@ from vyos.configdict import leaf_node_changed
from vyos.configverify import verify_interface_exists
from vyos.defaults import directories
from vyos.ifconfig import Interface
+from vyos.pki import encode_certificate
from vyos.pki import encode_public_key
+from vyos.pki import find_chain
+from vyos.pki import load_certificate
from vyos.pki import load_private_key
from vyos.pki import wrap_certificate
from vyos.pki import wrap_crl
@@ -40,6 +43,7 @@ from vyos.template import is_ipv4
from vyos.template import is_ipv6
from vyos.template import render
from vyos.utils.network import is_ipv6_link_local
+from vyos.utils.network import interface_exists
from vyos.utils.dict import dict_search
from vyos.utils.dict import dict_search_args
from vyos.utils.process import call
@@ -62,11 +66,11 @@ default_install_routes = 'yes'
vici_socket = '/var/run/charon.vici'
-CERT_PATH = f'{swanctl_dir}/x509/'
+CERT_PATH = f'{swanctl_dir}/x509/'
PUBKEY_PATH = f'{swanctl_dir}/pubkey/'
-KEY_PATH = f'{swanctl_dir}/private/'
-CA_PATH = f'{swanctl_dir}/x509ca/'
-CRL_PATH = f'{swanctl_dir}/x509crl/'
+KEY_PATH = f'{swanctl_dir}/private/'
+CA_PATH = f'{swanctl_dir}/x509ca/'
+CRL_PATH = f'{swanctl_dir}/x509crl/'
DHCP_HOOK_IFLIST = '/tmp/ipsec_dhcp_waiting'
@@ -84,15 +88,13 @@ def get_config(config=None):
ipsec = conf.get_config_dict(base, key_mangling=('-', '_'),
no_tag_node_value_mangle=True,
get_first_key=True,
- with_recursive_defaults=True)
+ with_recursive_defaults=True,
+ with_pki=True)
ipsec['dhcp_no_address'] = {}
ipsec['install_routes'] = 'no' if conf.exists(base + ["options", "disable-route-autoinstall"]) else default_install_routes
ipsec['interface_change'] = leaf_node_changed(conf, base + ['interface'])
ipsec['nhrp_exists'] = conf.exists(['protocols', 'nhrp', 'tunnel'])
- ipsec['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True)
tmp = conf.get_config_dict(l2tp_base, key_mangling=('-', '_'),
no_tag_node_value_mangle=True,
@@ -157,7 +159,7 @@ def verify(ipsec):
if 'id' not in psk_config or 'secret' not in psk_config:
raise ConfigError(f'Authentication psk "{psk}" missing "id" or "secret"')
- if 'interfaces' in ipsec :
+ if 'interface' in ipsec:
for ifname in ipsec['interface']:
verify_interface_exists(ifname)
@@ -393,7 +395,7 @@ def verify(ipsec):
if 'bind' in peer_conf['vti']:
vti_interface = peer_conf['vti']['bind']
- if not os.path.exists(f'/sys/class/net/{vti_interface}'):
+ if not interface_exists(vti_interface):
raise ConfigError(f'VTI interface {vti_interface} for site-to-site peer {peer} does not exist!')
if 'vti' not in peer_conf and 'tunnel' not in peer_conf:
@@ -431,15 +433,23 @@ def generate_pki_files_x509(pki, x509_conf):
ca_cert_name = x509_conf['ca_certificate']
ca_cert_data = dict_search_args(pki, 'ca', ca_cert_name, 'certificate')
ca_cert_crls = dict_search_args(pki, 'ca', ca_cert_name, 'crl') or []
+ ca_index = 1
crl_index = 1
+ ca_cert = load_certificate(ca_cert_data)
+ pki_ca_certs = [load_certificate(ca['certificate']) for ca in pki['ca'].values()]
+
+ ca_cert_chain = find_chain(ca_cert, pki_ca_certs)
+
cert_name = x509_conf['certificate']
cert_data = dict_search_args(pki, 'certificate', cert_name, 'certificate')
key_data = dict_search_args(pki, 'certificate', cert_name, 'private', 'key')
protected = 'passphrase' in x509_conf
- with open(os.path.join(CA_PATH, f'{ca_cert_name}.pem'), 'w') as f:
- f.write(wrap_certificate(ca_cert_data))
+ for ca_cert_obj in ca_cert_chain:
+ with open(os.path.join(CA_PATH, f'{ca_cert_name}_{ca_index}.pem'), 'w') as f:
+ f.write(encode_certificate(ca_cert_obj))
+ ca_index += 1
for crl in ca_cert_crls:
with open(os.path.join(CRL_PATH, f'{ca_cert_name}_{crl_index}.pem'), 'w') as f:
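
find_chain() is used above to walk from the configured CA towards its root so every certificate in the chain is written out as {ca_cert_name}_{index}.pem. Below is an illustrative subject/issuer chain walk built directly on the cryptography library; it is a stand-in only, does not verify signatures, and may differ from vyos.pki.find_chain.

# Illustrative subject/issuer chain walk; a stand-in for vyos.pki.find_chain().
# It does not verify signatures and may differ from the real helper.
from cryptography import x509

def build_chain(leaf_ca: x509.Certificate,
                candidates: list) -> list:
    chain = [leaf_ca]
    current = leaf_ca
    while current.subject != current.issuer:          # stop at a self-signed root
        parent = next((c for c in candidates
                       if c.subject == current.issuer and c is not current), None)
        if parent is None or parent in chain:         # missing or circular issuer
            break
        chain.append(parent)
        current = parent
    # generate_pki_files_x509() above writes each element of this list to
    # CA_PATH as '{ca_cert_name}_{index}.pem'
    return chain
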
diff --git a/src/conf_mode/vpn_l2tp.py b/src/conf_mode/vpn_l2tp.py
index 6232ce64a..03a27d3cd 100755
--- a/src/conf_mode/vpn_l2tp.py
+++ b/src/conf_mode/vpn_l2tp.py
@@ -15,321 +15,48 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import re
-from copy import deepcopy
-from stat import S_IRUSR, S_IWUSR, S_IRGRP
from sys import exit
-from ipaddress import ip_network
-
from vyos.config import Config
-from vyos.template import is_ipv4
+from vyos.configdict import get_accel_dict
from vyos.template import render
from vyos.utils.process import call
-from vyos.utils.system import get_half_cpus
-from vyos.utils.network import check_port_availability
-from vyos.utils.network import is_listen_port_bind_service
+from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import verify_accel_ppp_base_service
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
+from vyos.accel_ppp_util import get_pools_in_order
+from vyos.base import Warning
from vyos import ConfigError
from vyos import airbag
airbag.enable()
+
l2tp_conf = '/run/accel-pppd/l2tp.conf'
l2tp_chap_secrets = '/run/accel-pppd/l2tp.chap-secrets'
-default_config_data = {
- 'auth_mode': 'local',
- 'auth_ppp_mppe': 'prefer',
- 'auth_proto': ['auth_mschap_v2'],
- 'chap_secrets_file': l2tp_chap_secrets, # used in Jinja2 template
- 'client_ip_pool': None,
- 'client_ip_subnets': [],
- 'client_ipv6_pool': [],
- 'client_ipv6_pool_configured': False,
- 'client_ipv6_delegate_prefix': [],
- 'dnsv4': [],
- 'dnsv6': [],
- 'gateway_address': '10.255.255.0',
- 'local_users' : [],
- 'mtu': '1436',
- 'outside_addr': '',
- 'ppp_mppe': 'prefer',
- 'ppp_echo_failure' : '3',
- 'ppp_echo_interval' : '30',
- 'ppp_echo_timeout': '0',
- 'ppp_ipv6_accept_peer_intf_id': False,
- 'ppp_ipv6_intf_id': None,
- 'ppp_ipv6_peer_intf_id': None,
- 'radius_server': [],
- 'radius_acct_inter_jitter': '',
- 'radius_acct_interim_interval': None,
- 'radius_acct_tmo': '3',
- 'radius_max_try': '3',
- 'radius_timeout': '3',
- 'radius_nas_id': '',
- 'radius_nas_ip': '',
- 'radius_source_address': '',
- 'radius_shaper_attr': '',
- 'radius_shaper_vendor': '',
- 'radius_dynamic_author': {},
- 'wins': [],
- 'ip6_column': [],
- 'thread_cnt': get_half_cpus()
-}
-
def get_config(config=None):
if config:
conf = config
else:
conf = Config()
- base_path = ['vpn', 'l2tp', 'remote-access']
- if not conf.exists(base_path):
+ base = ['vpn', 'l2tp', 'remote-access']
+ if not conf.exists(base):
return None
- conf.set_level(base_path)
- l2tp = deepcopy(default_config_data)
-
- ### general options ###
- if conf.exists(['name-server']):
- for name_server in conf.return_values(['name-server']):
- if is_ipv4(name_server):
- l2tp['dnsv4'].append(name_server)
- else:
- l2tp['dnsv6'].append(name_server)
-
- if conf.exists(['wins-server']):
- l2tp['wins'] = conf.return_values(['wins-server'])
-
- if conf.exists('outside-address'):
- l2tp['outside_addr'] = conf.return_value('outside-address')
-
- if conf.exists(['authentication', 'mode']):
- l2tp['auth_mode'] = conf.return_value(['authentication', 'mode'])
-
- if conf.exists(['authentication', 'require']):
- l2tp['auth_proto'] = []
- auth_mods = {
- 'pap': 'auth_pap',
- 'chap': 'auth_chap_md5',
- 'mschap': 'auth_mschap_v1',
- 'mschap-v2': 'auth_mschap_v2'
- }
-
- for proto in conf.return_values(['authentication', 'require']):
- l2tp['auth_proto'].append(auth_mods[proto])
-
- if conf.exists(['authentication', 'mppe']):
- l2tp['auth_ppp_mppe'] = conf.return_value(['authentication', 'mppe'])
-
- #
- # local auth
- if conf.exists(['authentication', 'local-users']):
- for username in conf.list_nodes(['authentication', 'local-users', 'username']):
- user = {
- 'name' : username,
- 'password' : '',
- 'state' : 'enabled',
- 'ip' : '*',
- 'upload' : None,
- 'download' : None
- }
-
- conf.set_level(base_path + ['authentication', 'local-users', 'username', username])
-
- if conf.exists(['password']):
- user['password'] = conf.return_value(['password'])
-
- if conf.exists(['disable']):
- user['state'] = 'disable'
-
- if conf.exists(['static-ip']):
- user['ip'] = conf.return_value(['static-ip'])
-
- if conf.exists(['rate-limit', 'download']):
- user['download'] = conf.return_value(['rate-limit', 'download'])
-
- if conf.exists(['rate-limit', 'upload']):
- user['upload'] = conf.return_value(['rate-limit', 'upload'])
-
- l2tp['local_users'].append(user)
-
- #
- # RADIUS auth and settings
- conf.set_level(base_path + ['authentication', 'radius'])
- if conf.exists(['server']):
- for server in conf.list_nodes(['server']):
- radius = {
- 'server' : server,
- 'key' : '',
- 'fail_time' : 0,
- 'port' : '1812',
- 'acct_port' : '1813'
- }
-
- conf.set_level(base_path + ['authentication', 'radius', 'server', server])
-
- if conf.exists(['disable-accounting']):
- radius['acct_port'] = '0'
-
- if conf.exists(['fail-time']):
- radius['fail_time'] = conf.return_value(['fail-time'])
-
- if conf.exists(['port']):
- radius['port'] = conf.return_value(['port'])
-
- if conf.exists(['acct-port']):
- radius['acct_port'] = conf.return_value(['acct-port'])
-
- if conf.exists(['key']):
- radius['key'] = conf.return_value(['key'])
-
- if not conf.exists(['disable']):
- l2tp['radius_server'].append(radius)
-
- #
- # advanced radius-setting
- conf.set_level(base_path + ['authentication', 'radius'])
-
- if conf.exists(['accounting-interim-interval']):
- l2tp['radius_acct_interim_interval'] = conf.return_value(['accounting-interim-interval'])
-
- if conf.exists(['acct-interim-jitter']):
- l2tp['radius_acct_inter_jitter'] = conf.return_value(['acct-interim-jitter'])
-
- if conf.exists(['acct-timeout']):
- l2tp['radius_acct_tmo'] = conf.return_value(['acct-timeout'])
-
- if conf.exists(['max-try']):
- l2tp['radius_max_try'] = conf.return_value(['max-try'])
-
- if conf.exists(['timeout']):
- l2tp['radius_timeout'] = conf.return_value(['timeout'])
-
- if conf.exists(['nas-identifier']):
- l2tp['radius_nas_id'] = conf.return_value(['nas-identifier'])
-
- if conf.exists(['nas-ip-address']):
- l2tp['radius_nas_ip'] = conf.return_value(['nas-ip-address'])
-
- if conf.exists(['source-address']):
- l2tp['radius_source_address'] = conf.return_value(['source-address'])
-
- # Dynamic Authorization Extensions (DOA)/Change Of Authentication (COA)
- if conf.exists(['dae-server']):
- dae = {
- 'port' : '',
- 'server' : '',
- 'key' : ''
- }
-
- if conf.exists(['dae-server', 'ip-address']):
- dae['server'] = conf.return_value(['dae-server', 'ip-address'])
-
- if conf.exists(['dae-server', 'port']):
- dae['port'] = conf.return_value(['dae-server', 'port'])
-
- if conf.exists(['dae-server', 'secret']):
- dae['key'] = conf.return_value(['dae-server', 'secret'])
-
- l2tp['radius_dynamic_author'] = dae
-
- if conf.exists(['rate-limit', 'enable']):
- l2tp['radius_shaper_attr'] = 'Filter-Id'
- c_attr = ['rate-limit', 'enable', 'attribute']
- if conf.exists(c_attr):
- l2tp['radius_shaper_attr'] = conf.return_value(c_attr)
-
- c_vendor = ['rate-limit', 'enable', 'vendor']
- if conf.exists(c_vendor):
- l2tp['radius_shaper_vendor'] = conf.return_value(c_vendor)
-
- conf.set_level(base_path)
- if conf.exists(['client-ip-pool']):
- if conf.exists(['client-ip-pool', 'start']) and conf.exists(['client-ip-pool', 'stop']):
- start = conf.return_value(['client-ip-pool', 'start'])
- stop = conf.return_value(['client-ip-pool', 'stop'])
- l2tp['client_ip_pool'] = start + '-' + re.search('[0-9]+$', stop).group(0)
-
- if conf.exists(['client-ip-pool', 'subnet']):
- l2tp['client_ip_subnets'] = conf.return_values(['client-ip-pool', 'subnet'])
-
- if conf.exists(['client-ipv6-pool', 'prefix']):
- l2tp['client_ipv6_pool_configured'] = True
- l2tp['ip6_column'].append('ip6')
- for prefix in conf.list_nodes(['client-ipv6-pool', 'prefix']):
- tmp = {
- 'prefix': prefix,
- 'mask': '64'
- }
-
- if conf.exists(['client-ipv6-pool', 'prefix', prefix, 'mask']):
- tmp['mask'] = conf.return_value(['client-ipv6-pool', 'prefix', prefix, 'mask'])
-
- l2tp['client_ipv6_pool'].append(tmp)
-
- if conf.exists(['client-ipv6-pool', 'delegate']):
+ # retrieve common dictionary keys
+ l2tp = get_accel_dict(conf, base, l2tp_chap_secrets)
+ if dict_search('client_ip_pool', l2tp):
+ # Multiple named pools require ordered values T5099
+ l2tp['ordered_named_pools'] = get_pools_in_order(
+ dict_search('client_ip_pool', l2tp))
+ l2tp['ip6_column'] = []
+ if dict_search('client_ipv6_pool.prefix', l2tp):
+ l2tp['ip6_column'].append('ipv6')
+ if dict_search('client_ipv6_pool.delegate', l2tp):
l2tp['ip6_column'].append('ip6-db')
- for prefix in conf.list_nodes(['client-ipv6-pool', 'delegate']):
- tmp = {
- 'prefix': prefix,
- 'mask': ''
- }
-
- if conf.exists(['client-ipv6-pool', 'delegate', prefix, 'delegation-prefix']):
- tmp['mask'] = conf.return_value(['client-ipv6-pool', 'delegate', prefix, 'delegation-prefix'])
-
- l2tp['client_ipv6_delegate_prefix'].append(tmp)
-
- if conf.exists(['mtu']):
- l2tp['mtu'] = conf.return_value(['mtu'])
-
- # gateway address
- if conf.exists(['gateway-address']):
- l2tp['gateway_address'] = conf.return_value(['gateway-address'])
- else:
- # calculate gw-ip-address
- if conf.exists(['client-ip-pool', 'start']):
- # use start ip as gw-ip-address
- l2tp['gateway_address'] = conf.return_value(['client-ip-pool', 'start'])
-
- elif conf.exists(['client-ip-pool', 'subnet']):
- # use first ip address from first defined pool
- subnet = conf.return_values(['client-ip-pool', 'subnet'])[0]
- subnet = ip_network(subnet)
- l2tp['gateway_address'] = str(list(subnet.hosts())[0])
-
- # LNS secret
- if conf.exists(['lns', 'shared-secret']):
- l2tp['lns_shared_secret'] = conf.return_value(['lns', 'shared-secret'])
- if conf.exists(['lns', 'host-name']):
- l2tp['lns_host_name'] = conf.return_value(['lns', 'host-name'])
-
- if conf.exists(['ccp-disable']):
- l2tp['ccp_disable'] = True
-
- # PPP options
- if conf.exists(['idle']):
- l2tp['ppp_echo_timeout'] = conf.return_value(['idle'])
-
- if conf.exists(['ppp-options', 'lcp-echo-failure']):
- l2tp['ppp_echo_failure'] = conf.return_value(['ppp-options', 'lcp-echo-failure'])
-
- if conf.exists(['ppp-options', 'lcp-echo-interval']):
- l2tp['ppp_echo_interval'] = conf.return_value(['ppp-options', 'lcp-echo-interval'])
-
- if conf.exists(['ppp-options', 'ipv6']):
- l2tp['ppp_ipv6'] = conf.return_value(['ppp-options', 'ipv6'])
-
- if conf.exists(['ppp-options', 'ipv6-accept-peer-intf-id']):
- l2tp['ppp_ipv6_accept_peer_intf_id'] = True
-
- if conf.exists(['ppp-options', 'ipv6-intf-id']):
- l2tp['ppp_ipv6_intf_id'] = conf.return_value(['ppp-options', 'ipv6-intf-id'])
-
- if conf.exists(['ppp-options', 'ipv6-peer-intf-id']):
- l2tp['ppp_ipv6_peer_intf_id'] = conf.return_value(['ppp-options', 'ipv6-peer-intf-id'])
-
+ l2tp['server_type'] = 'l2tp'
return l2tp
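
The rewritten get_config() above delegates the CLI walk to get_accel_dict() and only post-processes the result; get_pools_in_order() exists because accel-ppp cares about the order in which chained, named client IP pools are written out (T5099). A minimal sketch of such an ordering helper, assuming each pool may name a successor under a 'next_pool' key — an illustration only, not the vyos.accel_ppp_util implementation:

    def order_pools(pools: dict) -> list:
        """Order pool names so that a pool referenced through 'next_pool'
        appears before any pool that chains to it (chains assumed acyclic)."""
        ordered = []

        def visit(name):
            if name in ordered or name not in pools:
                return
            next_pool = (pools[name] or {}).get('next_pool')
            if next_pool:
                visit(next_pool)   # emit the referenced pool first
            ordered.append(name)

        for name in pools:
            visit(name)
        return ordered

    # {'POOL2': {'next_pool': 'POOL1'}, 'POOL1': {'range': '192.0.2.10-192.0.2.100'}}
    # -> ['POOL1', 'POOL2']
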
@@ -337,56 +64,24 @@ def verify(l2tp):
if not l2tp:
return None
- if l2tp['auth_mode'] == 'local':
- if not l2tp['local_users']:
- raise ConfigError('L2TP local auth mode requires local users to be configured!')
-
- for user in l2tp['local_users']:
- if not user['password']:
- raise ConfigError(f"Password required for user {user['name']}")
+ verify_accel_ppp_base_service(l2tp)
- elif l2tp['auth_mode'] == 'radius':
- if len(l2tp['radius_server']) == 0:
- raise ConfigError("RADIUS authentication requires at least one server")
+ if dict_search('authentication.radius.dynamic_author.server', l2tp):
+ if not dict_search('authentication.radius.dynamic_author.key', l2tp):
+ raise ConfigError('DAE/CoA server key required!')
- for radius in l2tp['radius_server']:
- if not radius['key']:
- raise ConfigError(f"Missing RADIUS secret for server { radius['key'] }")
+ if dict_search('authentication.mode', l2tp) in ['local', 'noauth']:
+ if not l2tp['client_ip_pool'] and not l2tp['client_ipv6_pool']:
+ raise ConfigError(
+ "L2TP local auth mode requires local client-ip-pool or client-ipv6-pool to be configured!")
+ if dict_search('client_ip_pool', l2tp) and not dict_search('default_pool', l2tp):
+ Warning("'default-pool' is not defined")
- if l2tp['radius_dynamic_author']:
- if not l2tp['radius_dynamic_author']['server']:
- raise ConfigError("Missing ip-address for dae-server")
- if not l2tp['radius_dynamic_author']['key']:
- raise ConfigError("Missing secret for dae-server")
- address = l2tp['radius_dynamic_author']['server']
- port = l2tp['radius_dynamic_author']['port']
- proto = 'tcp'
- # check if dae listen port is not used by another service
- if check_port_availability(address, int(port), proto) is not True and \
- not is_listen_port_bind_service(int(port), 'accel-pppd'):
- raise ConfigError(f'"{proto}" port "{port}" is used by another service')
+ verify_accel_ppp_ip_pool(l2tp)
- # check for the existence of a client ip pool
- if not (l2tp['client_ip_pool'] or l2tp['client_ip_subnets']):
+ if 'wins_server' in l2tp and len(l2tp['wins_server']) > 2:
raise ConfigError(
- "set vpn l2tp remote-access client-ip-pool requires subnet or start/stop IP pool")
-
- # check ipv6
- if l2tp['client_ipv6_delegate_prefix'] and not l2tp['client_ipv6_pool']:
- raise ConfigError('IPv6 prefix delegation requires client-ipv6-pool prefix')
-
- for prefix in l2tp['client_ipv6_delegate_prefix']:
- if not prefix['mask']:
- raise ConfigError('Delegation-prefix required for individual delegated networks')
-
- if len(l2tp['wins']) > 2:
- raise ConfigError('Not more then two IPv4 WINS name-servers can be configured')
-
- if len(l2tp['dnsv4']) > 2:
- raise ConfigError('Not more then two IPv4 DNS name-servers can be configured')
-
- if len(l2tp['dnsv6']) > 3:
- raise ConfigError('Not more then three IPv6 DNS name-servers can be configured')
+ 'Not more than two WINS name-servers can be configured')
return None
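
The checks above lean on dict_search(), which takes a dotted path and returns None as soon as any path component is missing, avoiding chains of nested 'in' tests. A minimal sketch of that lookup behaviour (illustrative only, not the vyos.utils.dict source):

    def dict_search(path: str, my_dict: dict):
        """Return the value at a dotted path, or None if any key is absent."""
        if not isinstance(my_dict, dict) or not path:
            return None
        current = my_dict
        for key in path.split('.'):
            if not isinstance(current, dict) or key not in current:
                return None
            current = current[key]
        return current

    l2tp = {'authentication': {'radius': {'dynamic_author': {'server': '192.0.2.1'}}}}
    dict_search('authentication.radius.dynamic_author.server', l2tp)  # '192.0.2.1'
    dict_search('authentication.radius.dynamic_author.key', l2tp)     # None
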
@@ -397,13 +92,9 @@ def generate(l2tp):
render(l2tp_conf, 'accel-ppp/l2tp.config.j2', l2tp)
- if l2tp['auth_mode'] == 'local':
- render(l2tp_chap_secrets, 'accel-ppp/chap-secrets.j2', l2tp)
- os.chmod(l2tp_chap_secrets, S_IRUSR | S_IWUSR | S_IRGRP)
-
- else:
- if os.path.exists(l2tp_chap_secrets):
- os.unlink(l2tp_chap_secrets)
+ if dict_search('authentication.mode', l2tp) == 'local':
+ render(l2tp_chap_secrets, 'accel-ppp/chap-secrets.config_dict.j2',
+ l2tp, permission=0o640)
return None
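
The chap-secrets file keeps its restrictive mode: the permission=0o640 argument handed to render() is numerically identical to the S_IRUSR | S_IWUSR | S_IRGRP combination the removed os.chmod() call assembled, so owner read/write plus group read is preserved. A quick sanity check:

    from stat import S_IRUSR, S_IWUSR, S_IRGRP

    # rw- for the owner, r-- for the group, nothing for others
    assert S_IRUSR | S_IWUSR | S_IRGRP == 0o640
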
@@ -419,12 +110,14 @@ def apply(l2tp):
call('systemctl restart accel-ppp@l2tp.service')
+
if __name__ == '__main__':
try:
c = get_config()
verify(c)
generate(c)
apply(c)
+
except ConfigError as e:
print(e)
exit(1)
diff --git a/src/conf_mode/vpn_openconnect.py b/src/conf_mode/vpn_openconnect.py
index a039172c4..421ac6997 100755
--- a/src/conf_mode/vpn_openconnect.py
+++ b/src/conf_mode/vpn_openconnect.py
@@ -56,12 +56,8 @@ def get_config(config=None):
ocserv = conf.get_config_dict(base, key_mangling=('-', '_'),
get_first_key=True,
- with_recursive_defaults=True)
-
- if ocserv:
- ocserv['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True)
+ with_recursive_defaults=True,
+ with_pki=True)
return ocserv
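
Passing with_pki=True makes the helper attach the shared PKI subtree to the returned dictionary, replacing the separate ['pki'] lookup every consumer used to repeat by hand. A sketch of what that flag amounts to, using a hypothetical wrapper name (the real handling lives in vyos.config / vyos.configdict):

    def get_config_dict_with_pki(conf, base):
        """Fetch a service subtree and attach the PKI subtree to it."""
        service = conf.get_config_dict(base, key_mangling=('-', '_'),
                                       get_first_key=True,
                                       with_recursive_defaults=True)
        if service:
            # the same lookup each script previously duplicated
            service['pki'] = conf.get_config_dict(['pki'],
                                                  key_mangling=('-', '_'),
                                                  no_tag_node_value_mangle=True,
                                                  get_first_key=True)
        return service
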
diff --git a/src/conf_mode/vpn_pptp.py b/src/conf_mode/vpn_pptp.py
index d542f57fe..f769be39f 100755
--- a/src/conf_mode/vpn_pptp.py
+++ b/src/conf_mode/vpn_pptp.py
@@ -15,17 +15,17 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import re
-
-from copy import deepcopy
-from stat import S_IRUSR, S_IWUSR, S_IRGRP
from sys import exit
+
from vyos.config import Config
from vyos.template import render
-from vyos.utils.system import get_half_cpus
from vyos.utils.process import call
+from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
+from vyos.accel_ppp_util import get_pools_in_order
from vyos import ConfigError
+from vyos.configdict import get_accel_dict
from vyos import airbag
airbag.enable()
@@ -33,241 +33,70 @@ airbag.enable()
pptp_conf = '/run/accel-pppd/pptp.conf'
pptp_chap_secrets = '/run/accel-pppd/pptp.chap-secrets'
-default_pptp = {
- 'auth_mode' : 'local',
- 'local_users' : [],
- 'radius_server' : [],
- 'radius_acct_inter_jitter': '',
- 'radius_acct_interim_interval': None,
- 'radius_acct_tmo' : '30',
- 'radius_max_try' : '3',
- 'radius_timeout' : '30',
- 'radius_nas_id' : '',
- 'radius_nas_ip' : '',
- 'radius_source_address' : '',
- 'radius_shaper_attr' : '',
- 'radius_shaper_enable': False,
- 'radius_shaper_multiplier': '',
- 'radius_shaper_vendor': '',
- 'radius_dynamic_author' : '',
- 'chap_secrets_file': pptp_chap_secrets, # used in Jinja2 template
- 'outside_addr': '',
- 'dnsv4': [],
- 'wins': [],
- 'client_ip_pool': '',
- 'mtu': '1436',
- 'auth_proto' : ['auth_mschap_v2'],
- 'ppp_mppe' : 'prefer',
- 'thread_cnt': get_half_cpus()
-}
def get_config(config=None):
if config:
conf = config
else:
conf = Config()
- base_path = ['vpn', 'pptp', 'remote-access']
- if not conf.exists(base_path):
+ base = ['vpn', 'pptp', 'remote-access']
+ if not conf.exists(base):
return None
- pptp = deepcopy(default_pptp)
- conf.set_level(base_path)
-
- if conf.exists(['name-server']):
- pptp['dnsv4'] = conf.return_values(['name-server'])
-
- if conf.exists(['wins-server']):
- pptp['wins'] = conf.return_values(['wins-server'])
-
- if conf.exists(['outside-address']):
- pptp['outside_addr'] = conf.return_value(['outside-address'])
-
- if conf.exists(['authentication', 'mode']):
- pptp['auth_mode'] = conf.return_value(['authentication', 'mode'])
-
- #
- # local auth
- if conf.exists(['authentication', 'local-users']):
- for username in conf.list_nodes(['authentication', 'local-users', 'username']):
- user = {
- 'name': username,
- 'password' : '',
- 'state' : 'enabled',
- 'ip' : '*',
- }
-
- conf.set_level(base_path + ['authentication', 'local-users', 'username', username])
-
- if conf.exists(['password']):
- user['password'] = conf.return_value(['password'])
-
- if conf.exists(['disable']):
- user['state'] = 'disable'
-
- if conf.exists(['static-ip']):
- user['ip'] = conf.return_value(['static-ip'])
-
- if not conf.exists(['disable']):
- pptp['local_users'].append(user)
-
- #
- # RADIUS auth and settings
- conf.set_level(base_path + ['authentication', 'radius'])
- if conf.exists(['server']):
- for server in conf.list_nodes(['server']):
- radius = {
- 'server' : server,
- 'key' : '',
- 'fail_time' : 0,
- 'port' : '1812',
- 'acct_port' : '1813'
- }
-
- conf.set_level(base_path + ['authentication', 'radius', 'server', server])
-
- if conf.exists(['disable-accounting']):
- radius['acct_port'] = '0'
-
- if conf.exists(['fail-time']):
- radius['fail_time'] = conf.return_value(['fail-time'])
-
- if conf.exists(['port']):
- radius['port'] = conf.return_value(['port'])
-
- if conf.exists(['acct-port']):
- radius['acct_port'] = conf.return_value(['acct-port'])
-
- if conf.exists(['key']):
- radius['key'] = conf.return_value(['key'])
-
- if not conf.exists(['disable']):
- pptp['radius_server'].append(radius)
-
- #
- # advanced radius-setting
- conf.set_level(base_path + ['authentication', 'radius'])
-
- if conf.exists(['accounting-interim-interval']):
- pptp['radius_acct_interim_interval'] = conf.return_value(['accounting-interim-interval'])
-
- if conf.exists(['acct-interim-jitter']):
- pptp['radius_acct_inter_jitter'] = conf.return_value(['acct-interim-jitter'])
-
- if conf.exists(['acct-timeout']):
- pptp['radius_acct_tmo'] = conf.return_value(['acct-timeout'])
-
- if conf.exists(['max-try']):
- pptp['radius_max_try'] = conf.return_value(['max-try'])
-
- if conf.exists(['timeout']):
- pptp['radius_timeout'] = conf.return_value(['timeout'])
-
- if conf.exists(['nas-identifier']):
- pptp['radius_nas_id'] = conf.return_value(['nas-identifier'])
-
- if conf.exists(['nas-ip-address']):
- pptp['radius_nas_ip'] = conf.return_value(['nas-ip-address'])
-
- if conf.exists(['source-address']):
- pptp['radius_source_address'] = conf.return_value(['source-address'])
-
- # Dynamic Authorization Extensions (DOA)/Change Of Authentication (COA)
- if conf.exists(['dae-server']):
- dae = {
- 'port' : '',
- 'server' : '',
- 'key' : ''
- }
-
- if conf.exists(['dynamic-author', 'ip-address']):
- dae['server'] = conf.return_value(['dynamic-author', 'ip-address'])
-
- if conf.exists(['dynamic-author', 'port']):
- dae['port'] = conf.return_value(['dynamic-author', 'port'])
-
- if conf.exists(['dynamic-author', 'key']):
- dae['key'] = conf.return_value(['dynamic-author', 'key'])
-
- pptp['radius_dynamic_author'] = dae
-
- # Rate limit
- if conf.exists(['rate-limit', 'attribute']):
- pptp['radius_shaper_attr'] = conf.return_value(['rate-limit', 'attribute'])
-
- if conf.exists(['rate-limit', 'enable']):
- pptp['radius_shaper_enable'] = True
-
- if conf.exists(['rate-limit', 'multiplier']):
- pptp['radius_shaper_multiplier'] = conf.return_value(['rate-limit', 'multiplier'])
-
- if conf.exists(['rate-limit', 'vendor']):
- pptp['radius_shaper_vendor'] = conf.return_value(['rate-limit', 'vendor'])
-
- conf.set_level(base_path)
- if conf.exists(['client-ip-pool']):
- if conf.exists(['client-ip-pool', 'start']) and conf.exists(['client-ip-pool', 'stop']):
- start = conf.return_value(['client-ip-pool', 'start'])
- stop = conf.return_value(['client-ip-pool', 'stop'])
- pptp['client_ip_pool'] = start + '-' + re.search('[0-9]+$', stop).group(0)
-
- if conf.exists(['mtu']):
- pptp['mtu'] = conf.return_value(['mtu'])
-
- # gateway address
- if conf.exists(['gateway-address']):
- pptp['gw_ip'] = conf.return_value(['gateway-address'])
- else:
- # calculate gw-ip-address
- if conf.exists(['client-ip-pool', 'start']):
- # use start ip as gw-ip-address
- pptp['gateway_address'] = conf.return_value(['client-ip-pool', 'start'])
-
- if conf.exists(['authentication', 'require']):
- # clear default list content, now populate with actual CLI values
- pptp['auth_proto'] = []
- auth_mods = {
- 'pap': 'auth_pap',
- 'chap': 'auth_chap_md5',
- 'mschap': 'auth_mschap_v1',
- 'mschap-v2': 'auth_mschap_v2'
- }
-
- for proto in conf.return_values(['authentication', 'require']):
- pptp['auth_proto'].append(auth_mods[proto])
-
- if conf.exists(['authentication', 'mppe']):
- pptp['ppp_mppe'] = conf.return_value(['authentication', 'mppe'])
+ # retrieve common dictionary keys
+ pptp = get_accel_dict(conf, base, pptp_chap_secrets)
+ if dict_search('client_ip_pool', pptp):
+ # Multiple named pools require ordered values T5099
+ pptp['ordered_named_pools'] = get_pools_in_order(
+ dict_search('client_ip_pool', pptp))
+ pptp['chap_secrets_file'] = pptp_chap_secrets
+ pptp['server_type'] = 'pptp'
return pptp
def verify(pptp):
if not pptp:
return None
-
- if pptp['auth_mode'] == 'local':
- if not pptp['local_users']:
- raise ConfigError('PPTP local auth mode requires local users to be configured!')
-
- for user in pptp['local_users']:
- username = user['name']
- if not user['password']:
- raise ConfigError(f'Password required for local user "{username}"')
-
- elif pptp['auth_mode'] == 'radius':
- if len(pptp['radius_server']) == 0:
- raise ConfigError('RADIUS authentication requires at least one server')
-
- for radius in pptp['radius_server']:
- if not radius['key']:
- server = radius['server']
- raise ConfigError(f'Missing RADIUS secret key for server "{ server }"')
-
- if len(pptp['dnsv4']) > 2:
- raise ConfigError('Not more then two IPv4 DNS name-servers can be configured')
-
- if len(pptp['wins']) > 2:
- raise ConfigError('Not more then two IPv4 WINS name-servers can be configured')
+ auth_mode = dict_search('authentication.mode', pptp)
+ if auth_mode == 'local':
+ if not dict_search('authentication.local_users', pptp):
+ raise ConfigError(
+ 'PPTP local auth mode requires local users to be configured!')
+
+ for user in dict_search('authentication.local_users.username', pptp):
+ user_config = pptp['authentication']['local_users']['username'][
+ user]
+ if 'password' not in user_config:
+ raise ConfigError(f'Password required for local user "{user}"')
+
+ elif auth_mode == 'radius':
+ if not dict_search('authentication.radius.server', pptp):
+ raise ConfigError(
+ 'RADIUS authentication requires at least one server')
+ for server in dict_search('authentication.radius.server', pptp):
+ radius_config = pptp['authentication']['radius']['server'][server]
+ if 'key' not in radius_config:
+ raise ConfigError(
+ f'Missing RADIUS secret key for server "{server}"')
+
+ if auth_mode == 'local' or auth_mode == 'noauth':
+ if not dict_search('client_ip_pool', pptp):
+ raise ConfigError(
+ 'PPTP local auth mode requires local client-ip-pool '
+ 'to be configured!')
+
+ verify_accel_ppp_ip_pool(pptp)
+
+ if 'name_server' in pptp:
+ if len(pptp['name_server']) > 2:
+ raise ConfigError(
+ 'Not more than two IPv4 DNS name-servers can be configured'
+ )
+
+ if 'wins_server' in pptp and len(pptp['wins_server']) > 2:
+ raise ConfigError(
+ 'Not more than two WINS name-servers can be configured')
def generate(pptp):
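
The rewritten verify() above indexes the nested dictionary produced by get_accel_dict() instead of the flat lists the old code built by hand. A small usage example of the shape those checks assume (keys follow the '-' to '_' mangling; the user names and values are placeholders):

    pptp = {
        'authentication': {
            'mode': 'local',
            'local_users': {
                'username': {
                    'alice': {'password': 's3cret'},
                    'bob': {},     # no password -> triggers the ConfigError
                }
            }
        }
    }

    for user in pptp['authentication']['local_users']['username']:
        user_config = pptp['authentication']['local_users']['username'][user]
        if 'password' not in user_config:
            print(f'Password required for local user "{user}"')
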
@@ -276,13 +105,11 @@ def generate(pptp):
render(pptp_conf, 'accel-ppp/pptp.config.j2', pptp)
- if pptp['local_users']:
- render(pptp_chap_secrets, 'accel-ppp/chap-secrets.j2', pptp)
- os.chmod(pptp_chap_secrets, S_IRUSR | S_IWUSR | S_IRGRP)
- else:
- if os.path.exists(pptp_chap_secrets):
- os.unlink(pptp_chap_secrets)
+ if dict_search('authentication.mode', pptp) == 'local':
+ render(pptp_chap_secrets, 'accel-ppp/chap-secrets.config_dict.j2',
+ pptp, permission=0o640)
+ return None
def apply(pptp):
if not pptp:
@@ -295,6 +122,7 @@ def apply(pptp):
call('systemctl restart accel-ppp@pptp.service')
+
if __name__ == '__main__':
try:
c = get_config()
diff --git a/src/conf_mode/vpn_sstp.py b/src/conf_mode/vpn_sstp.py
index e98d8385b..6bf9307e1 100755
--- a/src/conf_mode/vpn_sstp.py
+++ b/src/conf_mode/vpn_sstp.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2022 VyOS maintainers and contributors
+# Copyright (C) 2018-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -21,13 +21,15 @@ from sys import exit
from vyos.config import Config
from vyos.configdict import get_accel_dict
from vyos.configdict import dict_merge
-from vyos.configverify import verify_accel_ppp_base_service
from vyos.pki import wrap_certificate
from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.utils.process import call
from vyos.utils.network import check_port_availability
from vyos.utils.dict import dict_search
+from vyos.accel_ppp_util import verify_accel_ppp_base_service
+from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
+from vyos.accel_ppp_util import get_pools_in_order
from vyos.utils.network import is_listen_port_bind_service
from vyos.utils.file import write_file
from vyos import ConfigError
@@ -52,14 +54,15 @@ def get_config(config=None):
return None
# retrieve common dictionary keys
- sstp = get_accel_dict(conf, base, sstp_chap_secrets)
- if sstp:
- sstp['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
- get_first_key=True,
- no_tag_node_value_mangle=True)
+ sstp = get_accel_dict(conf, base, sstp_chap_secrets, with_pki=True)
+ if dict_search('client_ip_pool', sstp):
+ # Multiple named pools require ordered values T5099
+ sstp['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', sstp))
+ sstp['server_type'] = 'sstp'
return sstp
+
def verify(sstp):
if not sstp:
return None
@@ -75,6 +78,7 @@ def verify(sstp):
if 'client_ip_pool' not in sstp and 'client_ipv6_pool' not in sstp:
raise ConfigError('Client IP subnet required')
+ verify_accel_ppp_ip_pool(sstp)
#
# SSL certificate checks
#
diff --git a/src/conf_mode/vpp.py b/src/conf_mode/vpp.py
deleted file mode 100755
index 82c2f236e..000000000
--- a/src/conf_mode/vpp.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2023 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from psutil import virtual_memory
-
-from pathlib import Path
-from re import search as re_search, MULTILINE as re_M
-
-from vyos.config import Config
-from vyos.configdep import set_dependents, call_dependents
-from vyos.configdict import node_changed
-from vyos.ifconfig import Section
-from vyos.utils.boot import boot_configuration_complete
-from vyos.utils.process import call
-from vyos.utils.process import rc_cmd
-from vyos.utils.system import sysctl_read
-from vyos.utils.system import sysctl_apply
-from vyos.template import render
-
-from vyos import ConfigError
-from vyos import airbag
-from vyos.vpp import VPPControl
-from vyos.vpp import HostControl
-
-airbag.enable()
-
-service_name = 'vpp'
-service_conf = Path(f'/run/vpp/{service_name}.conf')
-systemd_override = '/run/systemd/system/vpp.service.d/10-override.conf'
-
-# Free memory required for VPP
-# 2 GB for hugepages + 1 GB for other services
-MIN_AVAILABLE_MEMORY: int = 3 * 1024**3
-
-
-def _get_pci_address_by_interface(iface) -> str:
- rc, out = rc_cmd(f'ethtool -i {iface}')
- # if ethtool command was successful
- if rc == 0 and out:
- regex_filter = r'^bus-info: (?P<address>\w+:\w+:\w+\.\w+)$'
- re_obj = re_search(regex_filter, out, re_M)
- # if bus-info with PCI address found
- if re_obj:
- address = re_obj.groupdict().get('address', '')
- return address
- # use VPP - maybe interface already attached to it
- vpp_control = VPPControl(attempts=20, interval=500)
- pci_addr = vpp_control.get_pci_addr(iface)
- if pci_addr:
- return pci_addr
- # raise error if PCI address was not found
- raise ConfigError(f'Cannot find PCI address for interface {iface}')
-
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
-
- base = ['vpp']
- base_ethernet = ['interfaces', 'ethernet']
-
- # find interfaces removed from VPP
- removed_ifaces = []
- tmp = node_changed(conf, base + ['interface'])
- if tmp:
- for removed_iface in tmp:
- pci_address: str = _get_pci_address_by_interface(removed_iface)
- removed_ifaces.append({
- 'iface_name': removed_iface,
- 'iface_pci_addr': pci_address
- })
- # add an interface to a list of interfaces that need
- # to be reinitialized after the commit
- set_dependents('ethernet', conf, removed_iface)
-
- if not conf.exists(base):
- return {'removed_ifaces': removed_ifaces}
-
- config = conf.get_config_dict(base, key_mangling=('-', '_'),
- no_tag_node_value_mangle=True,
- get_first_key=True,
- with_recursive_defaults=True)
-
- if 'interface' in config:
- for iface, iface_config in config['interface'].items():
- # add an interface to a list of interfaces that need
- # to be reinitialized after the commit
- set_dependents('ethernet', conf, iface)
-
- # Get PCI address auto
- if iface_config['pci'] == 'auto':
- config['interface'][iface]['pci'] = _get_pci_address_by_interface(iface)
-
- config['other_interfaces'] = conf.get_config_dict(base_ethernet, key_mangling=('-', '_'),
- get_first_key=True, no_tag_node_value_mangle=True)
-
- if removed_ifaces:
- config['removed_ifaces'] = removed_ifaces
-
- return config
-
-
-def verify(config):
- # bail out early - looks like removal from running config
- if not config or (len(config) == 1 and 'removed_ifaces' in config):
- return None
-
- if 'interface' not in config:
- raise ConfigError('"interface" is required but not set!')
-
- if 'cpu' in config:
- if 'corelist_workers' in config['cpu'] and 'main_core' not in config[
- 'cpu']:
- raise ConfigError('"cpu main-core" is required but not set!')
-
- memory_available: int = virtual_memory().available
- if memory_available < MIN_AVAILABLE_MEMORY:
- raise ConfigError(
- 'Not enough free memory to start VPP:\n'
- f'available: {round(memory_available / 1024**3, 1)}GB\n'
- f'required: {round(MIN_AVAILABLE_MEMORY / 1024**3, 1)}GB')
-
-
-def generate(config):
- if not config or (len(config) == 1 and 'removed_ifaces' in config):
- # Remove old config and return
- service_conf.unlink(missing_ok=True)
- return None
-
- render(service_conf, 'vpp/startup.conf.j2', config)
- render(systemd_override, 'vpp/override.conf.j2', config)
-
- # apply default sysctl values from
- # https://github.com/FDio/vpp/blob/v23.06/src/vpp/conf/80-vpp.conf
- sysctl_config: dict[str, str] = {
- 'vm.nr_hugepages': '1024',
- 'vm.max_map_count': '3096',
- 'vm.hugetlb_shm_group': '0',
- 'kernel.shmmax': '2147483648'
- }
- # we do not want to reduce `kernel.shmmax`
- kernel_shmnax_current: str = sysctl_read('kernel.shmmax')
- if int(kernel_shmnax_current) > int(sysctl_config['kernel.shmmax']):
- sysctl_config['kernel.shmmax'] = kernel_shmnax_current
-
- if not sysctl_apply(sysctl_config):
- raise ConfigError('Cannot configure sysctl parameters for VPP')
-
- return None
-
-
-def apply(config):
- if not config or (len(config) == 1 and 'removed_ifaces' in config):
- call(f'systemctl stop {service_name}.service')
- else:
- call('systemctl daemon-reload')
- call(f'systemctl restart {service_name}.service')
-
- # Initialize interfaces removed from VPP
- for iface in config.get('removed_ifaces', []):
- host_control = HostControl()
- # rescan PCI to use a proper driver
- host_control.pci_rescan(iface['iface_pci_addr'])
- # rename to the proper name
- iface_new_name: str = host_control.get_eth_name(iface['iface_pci_addr'])
- host_control.rename_iface(iface_new_name, iface['iface_name'])
-
- if 'interface' in config:
- # connect to VPP
- # must be performed multiple attempts because API is not available
- # immediately after the service restart
- vpp_control = VPPControl(attempts=20, interval=500)
- for iface, _ in config['interface'].items():
- # Create lcp
- if iface not in Section.interfaces():
- vpp_control.lcp_pair_add(iface, iface)
-
- # reinitialize interfaces, but not during the first boot
- if boot_configuration_complete():
- call_dependents()
-
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
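
vpp.py is deleted in its entirety. For reference, the PCI-address lookup it performed is easy to reproduce standalone: the regular expression below is the one from the removed helper, run here against captured, purely illustrative `ethtool -i` output:

    from re import search as re_search, MULTILINE as re_M

    sample = ('driver: ixgbe\n'
              'version: 5.1.0-k\n'
              'bus-info: 0000:3b:00.0\n')

    regex_filter = r'^bus-info: (?P<address>\w+:\w+:\w+\.\w+)$'
    re_obj = re_search(regex_filter, sample, re_M)
    assert re_obj and re_obj.groupdict()['address'] == '0000:3b:00.0'
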
diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py
index 37625142c..9b1b6355f 100755
--- a/src/conf_mode/vrf.py
+++ b/src/conf_mode/vrf.py
@@ -214,6 +214,18 @@ def apply(vrf):
# Delete the VRF Kernel interface
call(f'ip link delete dev {tmp}')
+ # Enable/Disable VRF strict mode
+ # When net.vrf.strict_mode=0 (default) it is possible to associate multiple
+ # VRF devices with the same table. Conversely, when net.vrf.strict_mode=1 a
+ # table can be associated with a single VRF device only.
+ #
+ # A VRF table can be used by the VyOS CLI only once (ensured by verify());
+ # this sysctl simply adds an additional Kernel safety net.
+ strict_mode = '0'
+ # Set to 1 if any VRF is defined
+ if 'name' in vrf: strict_mode = '1'
+ sysctl_write('net.vrf.strict_mode', strict_mode)
+
if 'name' in vrf:
# Separate VRFs in conntrack table
# check if table already exists