Diffstat (limited to 'src/conf_mode')
-rwxr-xr-x  src/conf_mode/config_mgmt.py | 96
-rwxr-xr-x  src/conf_mode/container.py | 194
-rwxr-xr-x  src/conf_mode/firewall.py | 88
-rwxr-xr-x  src/conf_mode/flow_accounting_conf.py | 4
-rwxr-xr-x  src/conf_mode/high-availability.py | 30
-rwxr-xr-x  src/conf_mode/http-api.py | 10
-rwxr-xr-x  src/conf_mode/https.py | 2
-rwxr-xr-x  src/conf_mode/interfaces-bonding.py | 7
-rwxr-xr-x  src/conf_mode/interfaces-ethernet.py | 2
-rwxr-xr-x  src/conf_mode/interfaces-geneve.py | 10
-rwxr-xr-x  src/conf_mode/interfaces-input.py | 70
-rwxr-xr-x  src/conf_mode/interfaces-pppoe.py | 4
-rwxr-xr-x  src/conf_mode/interfaces-pseudo-ethernet.py | 4
-rwxr-xr-x  src/conf_mode/interfaces-sstpc.py | 145
-rwxr-xr-x  src/conf_mode/interfaces-tunnel.py | 4
-rwxr-xr-x  src/conf_mode/interfaces-virtual-ethernet.py | 114
-rwxr-xr-x  src/conf_mode/interfaces-vxlan.py | 8
-rwxr-xr-x  src/conf_mode/interfaces-wireguard.py | 7
-rwxr-xr-x  src/conf_mode/nat.py | 85
-rwxr-xr-x  src/conf_mode/ntp.py | 23
-rwxr-xr-x  src/conf_mode/pki.py | 76
-rwxr-xr-x  src/conf_mode/policy-route-interface.py | 132
-rwxr-xr-x  src/conf_mode/policy-route.py | 109
-rwxr-xr-x  src/conf_mode/policy.py | 5
-rwxr-xr-x  src/conf_mode/protocols_bgp.py | 169
-rwxr-xr-x  src/conf_mode/protocols_failover.py | 121
-rwxr-xr-x  src/conf_mode/protocols_mpls.py | 5
-rwxr-xr-x  src/conf_mode/protocols_ospfv3.py | 4
-rwxr-xr-x  src/conf_mode/protocols_static.py | 8
-rwxr-xr-x  src/conf_mode/qos.py | 203
-rwxr-xr-x  src/conf_mode/service_console-server.py | 2
-rwxr-xr-x  src/conf_mode/service_monitoring_telegraf.py | 2
-rwxr-xr-x  src/conf_mode/service_pppoe-server.py | 13
-rwxr-xr-x  src/conf_mode/service_sla.py | 6
-rwxr-xr-x  src/conf_mode/service_webproxy.py | 102
-rwxr-xr-x  src/conf_mode/snmp.py | 6
-rwxr-xr-x  src/conf_mode/ssh.py | 2
-rwxr-xr-x  src/conf_mode/system-option.py | 17
-rwxr-xr-x  src/conf_mode/vpn_ipsec.py | 48
-rwxr-xr-x  src/conf_mode/vpn_l2tp.py | 51
-rwxr-xr-x  src/conf_mode/vpn_openconnect.py | 101
-rwxr-xr-x  src/conf_mode/vrf.py | 6
42 files changed, 1532 insertions, 563 deletions
diff --git a/src/conf_mode/config_mgmt.py b/src/conf_mode/config_mgmt.py
new file mode 100755
index 000000000..c681a8405
--- /dev/null
+++ b/src/conf_mode/config_mgmt.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+
+from vyos import ConfigError
+from vyos.config import Config
+from vyos.config_mgmt import ConfigMgmt
+from vyos.config_mgmt import commit_post_hook_dir, commit_hooks
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['system', 'config-management']
+ if not conf.exists(base):
+ return None
+
+ mgmt = ConfigMgmt(config=conf)
+
+ return mgmt
+
+def verify(_mgmt):
+ return
+
+def generate(mgmt):
+ if mgmt is None:
+ return
+
+ mgmt.initialize_revision()
+
+def apply(mgmt):
+ if mgmt is None:
+ return
+
+ locations = mgmt.locations
+ archive_target = os.path.join(commit_post_hook_dir,
+ commit_hooks['commit_archive'])
+ if locations:
+ try:
+ os.symlink('/usr/bin/config-mgmt', archive_target)
+ except FileExistsError:
+ pass
+ except OSError as exc:
+ raise ConfigError from exc
+ else:
+ try:
+ os.unlink(archive_target)
+ except FileNotFoundError:
+ pass
+ except OSError as exc:
+ raise ConfigError from exc
+
+ revisions = mgmt.max_revisions
+ revision_target = os.path.join(commit_post_hook_dir,
+ commit_hooks['commit_revision'])
+ if revisions > 0:
+ try:
+ os.symlink('/usr/bin/config-mgmt', revision_target)
+ except FileExistsError:
+ pass
+ except OSError as exc:
+ raise ConfigError from exc
+ else:
+ try:
+ os.unlink(revision_target)
+ except FileNotFoundError:
+ pass
+ except OSError as exc:
+ raise ConfigError from exc
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ sys.exit(1)
diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py
index ac3dc536b..08861053d 100755
--- a/src/conf_mode/container.py
+++ b/src/conf_mode/container.py
@@ -40,20 +40,7 @@ airbag.enable()
config_containers_registry = '/etc/containers/registries.conf'
config_containers_storage = '/etc/containers/storage.conf'
-
-def _run_rerun(container_cmd):
- counter = 0
- while True:
- if counter >= 10:
- break
- try:
- _cmd(container_cmd)
- break
- except:
- counter = counter +1
- sleep(0.5)
-
- return None
+systemd_unit_path = '/run/systemd/system'
def _cmd(command):
if os.path.exists('/tmp/vyos.container.debug'):
@@ -86,9 +73,28 @@ def get_config(config=None):
# Merge per-container default values
if 'name' in container:
default_values = defaults(base + ['name'])
+ if 'port' in default_values:
+ del default_values['port']
+ if 'volume' in default_values:
+ del default_values['volume']
for name in container['name']:
container['name'][name] = dict_merge(default_values, container['name'][name])
+ # XXX: T2665: we can not safely rely on the defaults() when there are
+ # tagNodes in place, it is better to blend in the defaults manually.
+ if 'port' in container['name'][name]:
+ for port in container['name'][name]['port']:
+ default_values = defaults(base + ['name', 'port'])
+ container['name'][name]['port'][port] = dict_merge(
+ default_values, container['name'][name]['port'][port])
+ # XXX: T2665: we can not safely rely on the defaults() when there are
+ # tagNodes in place, it is better to blend in the defaults manually.
+ if 'volume' in container['name'][name]:
+ for volume in container['name'][name]['volume']:
+ default_values = defaults(base + ['name', 'volume'])
+ container['name'][name]['volume'][volume] = dict_merge(
+ default_values, container['name'][name]['volume'][volume])
+
# Delete container network, delete containers
tmp = node_changed(conf, base + ['network'])
if tmp: container.update({'network_remove' : tmp})
@@ -122,7 +128,7 @@ def verify(container):
# of image upgrade and deletion.
image = container_config['image']
if run(f'podman image exists {image}') != 0:
- Warning(f'Image "{image}" used in contianer "{name}" does not exist '\
+ Warning(f'Image "{image}" used in container "{name}" does not exist '\
f'locally. Please use "add container image {image}" to add it '\
f'to the system! Container "{name}" will not be started!')
@@ -136,9 +142,6 @@ def verify(container):
raise ConfigError(f'Container network "{network_name}" does not exist!')
if 'address' in container_config['network'][network_name]:
- if 'network' not in container_config:
- raise ConfigError(f'Can not use "address" without "network" for container "{name}"!')
-
address = container_config['network'][network_name]['address']
network = None
if is_ipv4(address):
@@ -184,6 +187,11 @@ def verify(container):
if not os.path.exists(source):
raise ConfigError(f'Volume "{volume}" source path "{source}" does not exist!')
+ if 'port' in container_config:
+ for tmp in container_config['port']:
+ if not {'source', 'destination'} <= set(container_config['port'][tmp]):
+ raise ConfigError(f'Both "source" and "destination" must be specified for a port mapping!')
+
# If 'allow-host-networks' or 'network' not set.
if 'allow_host_networks' not in container_config and 'network' not in container_config:
raise ConfigError(f'Must either set "network" or "allow-host-networks" for container "{name}"!')
@@ -220,6 +228,69 @@ def verify(container):
return None
+def generate_run_arguments(name, container_config):
+ image = container_config['image']
+ memory = container_config['memory']
+ shared_memory = container_config['shared_memory']
+ restart = container_config['restart']
+
+ # Add capability options. Should be in uppercase
+ cap_add = ''
+ if 'cap_add' in container_config:
+ for c in container_config['cap_add']:
+ c = c.upper()
+ c = c.replace('-', '_')
+ cap_add += f' --cap-add={c}'
+
+ # Add a host device to the container /dev/x:/dev/x
+ device = ''
+ if 'device' in container_config:
+ for dev, dev_config in container_config['device'].items():
+ source_dev = dev_config['source']
+ dest_dev = dev_config['destination']
+ device += f' --device={source_dev}:{dest_dev}'
+
+ # Check/set environment options "-e foo=bar"
+ env_opt = ''
+ if 'environment' in container_config:
+ for k, v in container_config['environment'].items():
+ env_opt += f" --env \"{k}={v['value']}\""
+
+ # Publish ports
+ port = ''
+ if 'port' in container_config:
+ protocol = ''
+ for portmap in container_config['port']:
+ protocol = container_config['port'][portmap]['protocol']
+ sport = container_config['port'][portmap]['source']
+ dport = container_config['port'][portmap]['destination']
+ port += f' --publish {sport}:{dport}/{protocol}'
+
+ # Bind volume
+ volume = ''
+ if 'volume' in container_config:
+ for vol, vol_config in container_config['volume'].items():
+ svol = vol_config['source']
+ dvol = vol_config['destination']
+ mode = vol_config['mode']
+ volume += f' --volume {svol}:{dvol}:{mode}'
+
+ container_base_cmd = f'--detach --interactive --tty --replace {cap_add} ' \
+ f'--memory {memory}m --shm-size {shared_memory}m --memory-swap 0 --restart {restart} ' \
+ f'--name {name} {device} {port} {volume} {env_opt}'
+
+ if 'allow_host_networks' in container_config:
+ return f'{container_base_cmd} --net host {image}'
+
+ ip_param = ''
+ networks = ",".join(container_config['network'])
+ for network in container_config['network']:
+ if 'address' in container_config['network'][network]:
+ address = container_config['network'][network]['address']
+ ip_param = f'--ip {address}'
+
+ return f'{container_base_cmd} --net {networks} {ip_param} {image}'
+
def generate(container):
# bail out early - looks like removal from running config
if not container:
@@ -263,6 +334,15 @@ def generate(container):
render(config_containers_registry, 'container/registries.conf.j2', container)
render(config_containers_storage, 'container/storage.conf.j2', container)
+ if 'name' in container:
+ for name, container_config in container['name'].items():
+ if 'disable' in container_config:
+ continue
+
+ file_path = os.path.join(systemd_unit_path, f'vyos-container-{name}.service')
+ run_args = generate_run_arguments(name, container_config)
+ render(file_path, 'container/systemd-unit.j2', {'name': name, 'run_args': run_args})
+
return None
def apply(container):
@@ -270,8 +350,12 @@ def apply(container):
# Option "--force" allows to delete containers with any status
if 'container_remove' in container:
for name in container['container_remove']:
- call(f'podman stop --time 3 {name}')
- call(f'podman rm --force {name}')
+ file_path = os.path.join(systemd_unit_path, f'vyos-container-{name}.service')
+ call(f'systemctl stop vyos-container-{name}.service')
+ if os.path.exists(file_path):
+ os.unlink(file_path)
+
+ call('systemctl daemon-reload')
# Delete old networks if needed
if 'network_remove' in container:
@@ -282,6 +366,7 @@ def apply(container):
os.unlink(tmp)
# Add container
+ disabled_new = False
if 'name' in container:
for name, container_config in container['name'].items():
image = container_config['image']
@@ -295,70 +380,17 @@ def apply(container):
# check if there is a container by that name running
tmp = _cmd('podman ps -a --format "{{.Names}}"')
if name in tmp:
- _cmd(f'podman stop --time 3 {name}')
- _cmd(f'podman rm --force {name}')
+ file_path = os.path.join(systemd_unit_path, f'vyos-container-{name}.service')
+ call(f'systemctl stop vyos-container-{name}.service')
+ if os.path.exists(file_path):
+ disabled_new = True
+ os.unlink(file_path)
continue
- memory = container_config['memory']
- restart = container_config['restart']
+ cmd(f'systemctl restart vyos-container-{name}.service')
- # Add capability options. Should be in uppercase
- cap_add = ''
- if 'cap_add' in container_config:
- for c in container_config['cap_add']:
- c = c.upper()
- c = c.replace('-', '_')
- cap_add += f' --cap-add={c}'
-
- # Add a host device to the container /dev/x:/dev/x
- device = ''
- if 'device' in container_config:
- for dev, dev_config in container_config['device'].items():
- source_dev = dev_config['source']
- dest_dev = dev_config['destination']
- device += f' --device={source_dev}:{dest_dev}'
-
- # Check/set environment options "-e foo=bar"
- env_opt = ''
- if 'environment' in container_config:
- for k, v in container_config['environment'].items():
- env_opt += f" -e \"{k}={v['value']}\""
-
- # Publish ports
- port = ''
- if 'port' in container_config:
- protocol = ''
- for portmap in container_config['port']:
- if 'protocol' in container_config['port'][portmap]:
- protocol = container_config['port'][portmap]['protocol']
- protocol = f'/{protocol}'
- else:
- protocol = '/tcp'
- sport = container_config['port'][portmap]['source']
- dport = container_config['port'][portmap]['destination']
- port += f' -p {sport}:{dport}{protocol}'
-
- # Bind volume
- volume = ''
- if 'volume' in container_config:
- for vol, vol_config in container_config['volume'].items():
- svol = vol_config['source']
- dvol = vol_config['destination']
- volume += f' -v {svol}:{dvol}'
-
- container_base_cmd = f'podman run --detach --interactive --tty --replace {cap_add} ' \
- f'--memory {memory}m --memory-swap 0 --restart {restart} ' \
- f'--name {name} {device} {port} {volume} {env_opt}'
- if 'allow_host_networks' in container_config:
- _run_rerun(f'{container_base_cmd} --net host {image}')
- else:
- for network in container_config['network']:
- ipparam = ''
- if 'address' in container_config['network'][network]:
- address = container_config['network'][network]['address']
- ipparam = f'--ip {address}'
-
- _run_rerun(f'{container_base_cmd} --net {network} {ipparam} {image}')
+ if disabled_new:
+ call('systemctl daemon-reload')
return None
diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py
index cbd9cbe90..20cf1ead1 100755
--- a/src/conf_mode/firewall.py
+++ b/src/conf_mode/firewall.py
@@ -26,13 +26,10 @@ from vyos.config import Config
from vyos.configdict import dict_merge
from vyos.configdict import node_changed
from vyos.configdiff import get_config_diff, Diff
+from vyos.configdep import set_dependents, call_dependents
# from vyos.configverify import verify_interface_exists
+from vyos.firewall import fqdn_config_parse
from vyos.firewall import geoip_update
-from vyos.firewall import get_ips_domains_dict
-from vyos.firewall import nft_add_set_elements
-from vyos.firewall import nft_flush_set
-from vyos.firewall import nft_init_set
-from vyos.firewall import nft_update_set_elements
from vyos.template import render
from vyos.util import call
from vyos.util import cmd
@@ -45,7 +42,8 @@ from vyos import ConfigError
from vyos import airbag
airbag.enable()
-policy_route_conf_script = '/usr/libexec/vyos/conf_mode/policy-route.py'
+nat_conf_script = 'nat.py'
+policy_route_conf_script = 'policy-route.py'
nftables_conf = '/run/nftables.conf'
@@ -67,7 +65,8 @@ valid_groups = [
'address_group',
'domain_group',
'network_group',
- 'port_group'
+ 'port_group',
+ 'interface_group'
]
nested_group_types = [
@@ -162,7 +161,10 @@ def get_config(config=None):
for zone in firewall['zone']:
firewall['zone'][zone] = dict_merge(default_values, firewall['zone'][zone])
- firewall['policy_resync'] = bool('group' in firewall or node_changed(conf, base + ['group']))
+ firewall['group_resync'] = bool('group' in firewall or node_changed(conf, base + ['group']))
+ if firewall['group_resync']:
+ # Update nat and policy-route as firewall groups were updated
+ set_dependents('group_resync', conf)
if 'config_trap' in firewall and firewall['config_trap'] == 'enable':
diff = get_config_diff(conf)
@@ -173,6 +175,8 @@ def get_config(config=None):
firewall['geoip_updated'] = geoip_updated(conf, firewall)
+ fqdn_config_parse(firewall)
+
return firewall
def verify_rule(firewall, rule_conf, ipv6):
@@ -232,29 +236,28 @@ def verify_rule(firewall, rule_conf, ipv6):
if side in rule_conf:
side_conf = rule_conf[side]
- if dict_search_args(side_conf, 'geoip', 'country_code'):
- if 'address' in side_conf:
- raise ConfigError('Address and GeoIP cannot both be defined')
-
- if dict_search_args(side_conf, 'group', 'address_group'):
- raise ConfigError('Address-group and GeoIP cannot both be defined')
-
- if dict_search_args(side_conf, 'group', 'network_group'):
- raise ConfigError('Network-group and GeoIP cannot both be defined')
+ if len({'address', 'fqdn', 'geoip'} & set(side_conf)) > 1:
+ raise ConfigError('Only one of address, fqdn or geoip can be specified')
if 'group' in side_conf:
- if {'address_group', 'network_group'} <= set(side_conf['group']):
- raise ConfigError('Only one address-group or network-group can be specified')
+ if len({'address_group', 'network_group', 'domain_group'} & set(side_conf['group'])) > 1:
+ raise ConfigError('Only one address-group, network-group or domain-group can be specified')
for group in valid_groups:
if group in side_conf['group']:
group_name = side_conf['group'][group]
+ fw_group = f'ipv6_{group}' if ipv6 and group in ['address_group', 'network_group'] else group
+ error_group = fw_group.replace("_", "-")
+
+ if group in ['address_group', 'network_group', 'domain_group']:
+ types = [t for t in ['address', 'fqdn', 'geoip'] if t in side_conf]
+ if types:
+ raise ConfigError(f'{error_group} and {types[0]} cannot both be defined')
+
if group_name and group_name[0] == '!':
group_name = group_name[1:]
- fw_group = f'ipv6_{group}' if ipv6 and group in ['address_group', 'network_group'] else group
- error_group = fw_group.replace("_", "-")
group_obj = dict_search_args(firewall, 'group', fw_group, group_name)
if group_obj is None:
@@ -274,6 +277,8 @@ def verify_nested_group(group_name, group, groups, seen):
if 'include' not in group:
return
+ seen.append(group_name)
+
for g in group['include']:
if g not in groups:
raise ConfigError(f'Nested group "{g}" does not exist')
@@ -281,8 +286,6 @@ def verify_nested_group(group_name, group, groups, seen):
if g in seen:
raise ConfigError(f'Group "{group_name}" has a circular reference')
- seen.append(g)
-
if 'include' in groups[g]:
verify_nested_group(g, groups[g], groups, seen)
@@ -466,42 +469,23 @@ def post_apply_trap(firewall):
cmd(base_cmd + ' '.join(objects))
-def resync_policy_route():
- # Update policy route as firewall groups were updated
- tmp, out = rc_cmd(policy_route_conf_script)
- if tmp > 0:
- Warning(f'Failed to re-apply policy route configuration! {out}')
-
def apply(firewall):
install_result, output = rc_cmd(f'nft -f {nftables_conf}')
if install_result == 1:
raise ConfigError(f'Failed to apply firewall: {output}')
- # set firewall group domain-group xxx
- if 'group' in firewall:
- if 'domain_group' in firewall['group']:
- # T970 Enable a resolver (systemd daemon) that checks
- # domain-group addresses and update entries for domains by timeout
- # If router loaded without internet connection or for synchronization
- call('systemctl restart vyos-domain-group-resolve.service')
- for group, group_config in firewall['group']['domain_group'].items():
- domains = []
- if group_config.get('address') is not None:
- for address in group_config.get('address'):
- domains.append(address)
- # Add elements to domain-group, try to resolve domain => ip
- # and add elements to nft set
- ip_dict = get_ips_domains_dict(domains)
- elements = sum(ip_dict.values(), [])
- nft_init_set(f'D_{group}')
- nft_add_set_elements(f'D_{group}', elements)
- else:
- call('systemctl stop vyos-domain-group-resolve.service')
-
apply_sysfs(firewall)
- if firewall['policy_resync']:
- resync_policy_route()
+ if firewall['group_resync']:
+ call_dependents()
+
+ # T970 Enable a resolver (systemd daemon) that checks
+ # domain-group/fqdn addresses and update entries for domains by timeout
+ # If router loaded without internet connection or for synchronization
+ domain_action = 'stop'
+ if dict_search_args(firewall, 'group', 'domain_group') or firewall['ip_fqdn'] or firewall['ip6_fqdn']:
+ domain_action = 'restart'
+ call(f'systemctl {domain_action} vyos-domain-resolver.service')
if firewall['geoip_updated']:
# Call helper script to Update set contents
diff --git a/src/conf_mode/flow_accounting_conf.py b/src/conf_mode/flow_accounting_conf.py
index 7e16235c1..f67f1710e 100755
--- a/src/conf_mode/flow_accounting_conf.py
+++ b/src/conf_mode/flow_accounting_conf.py
@@ -38,7 +38,7 @@ airbag.enable()
uacctd_conf_path = '/run/pmacct/uacctd.conf'
systemd_service = 'uacctd.service'
-systemd_override = f'/etc/systemd/system/{systemd_service}.d/override.conf'
+systemd_override = f'/run/systemd/system/{systemd_service}.d/override.conf'
nftables_nflog_table = 'raw'
nftables_nflog_chain = 'VYOS_CT_PREROUTING_HOOK'
egress_nftables_nflog_table = 'inet mangle'
@@ -192,7 +192,7 @@ def verify(flow_config):
raise ConfigError("All sFlow servers must use the same IP protocol")
else:
sflow_collector_ipver = ip_address(server).version
-
+
# check if vrf is defined for Sflow
sflow_vrf = None
if 'vrf' in flow_config:
diff --git a/src/conf_mode/high-availability.py b/src/conf_mode/high-availability.py
index 8a959dc79..79e407efd 100755
--- a/src/conf_mode/high-availability.py
+++ b/src/conf_mode/high-availability.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2022 VyOS maintainers and contributors
+# Copyright (C) 2018-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -28,6 +28,7 @@ from vyos.template import render
from vyos.template import is_ipv4
from vyos.template import is_ipv6
from vyos.util import call
+from vyos.util import dict_search
from vyos.xml import defaults
from vyos import ConfigError
from vyos import airbag
@@ -49,10 +50,27 @@ def get_config(config=None):
# We have gathered the dict representation of the CLI, but there are default
# options which we need to update into the dictionary retrived.
if 'vrrp' in ha:
+ if dict_search('vrrp.global_parameters.garp', ha) != None:
+ default_values = defaults(base_vrrp + ['global-parameters', 'garp'])
+ ha['vrrp']['global_parameters']['garp'] = dict_merge(
+ default_values, ha['vrrp']['global_parameters']['garp'])
+
if 'group' in ha['vrrp']:
- default_values_vrrp = defaults(base_vrrp + ['group'])
+ default_values = defaults(base_vrrp + ['group'])
+ default_values_garp = defaults(base_vrrp + ['group', 'garp'])
+
+ # XXX: T2665: we can not safely rely on the defaults() when there are
+ # tagNodes in place, it is better to blend in the defaults manually.
+ if 'garp' in default_values:
+ del default_values['garp']
for group in ha['vrrp']['group']:
- ha['vrrp']['group'][group] = dict_merge(default_values_vrrp, ha['vrrp']['group'][group])
+ ha['vrrp']['group'][group] = dict_merge(default_values, ha['vrrp']['group'][group])
+
+ # XXX: T2665: we can not safely rely on the defaults() when there are
+ # tagNodes in place, it is better to blend in the defaults manually.
+ if 'garp' in ha['vrrp']['group'][group]:
+ ha['vrrp']['group'][group]['garp'] = dict_merge(
+ default_values_garp, ha['vrrp']['group'][group]['garp'])
# Merge per virtual-server default values
if 'virtual_server' in ha:
@@ -144,8 +162,10 @@ def verify(ha):
# Virtual-server
if 'virtual_server' in ha:
for vs, vs_config in ha['virtual_server'].items():
- if 'port' not in vs_config:
- raise ConfigError(f'Port is required but not set for virtual-server "{vs}"')
+ if 'port' not in vs_config and 'fwmark' not in vs_config:
+ raise ConfigError(f'Port or fwmark is required but not set for virtual-server "{vs}"')
+ if 'port' in vs_config and 'fwmark' in vs_config:
+ raise ConfigError(f'Cannot set both port and fwmark for virtual-server "{vs}"')
if 'real_server' not in vs_config:
raise ConfigError(f'Real-server ip is required but not set for virtual-server "{vs}"')
# Real-server
diff --git a/src/conf_mode/http-api.py b/src/conf_mode/http-api.py
index c196e272b..6328294c1 100755
--- a/src/conf_mode/http-api.py
+++ b/src/conf_mode/http-api.py
@@ -25,6 +25,7 @@ import vyos.defaults
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configdep import set_dependents, call_dependents
from vyos.template import render
from vyos.util import cmd
from vyos.util import call
@@ -61,6 +62,11 @@ def get_config(config=None):
else:
conf = Config()
+ # reset on creation/deletion of 'api' node
+ https_base = ['service', 'https']
+ if conf.exists(https_base):
+ set_dependents("https", conf)
+
base = ['service', 'https', 'api']
if not conf.exists(base):
return None
@@ -86,7 +92,7 @@ def get_config(config=None):
if 'api_keys' in api_dict:
keys_added = True
- if 'gql' in api_dict:
+ if 'graphql' in api_dict:
api_dict = dict_merge(defaults(base), api_dict)
http_api.update(api_dict)
@@ -132,7 +138,7 @@ def apply(http_api):
# Let uvicorn settle before restarting Nginx
sleep(1)
- cmd(f'{vyos_conf_scripts_dir}/https.py', raising=ConfigError)
+ call_dependents()
if __name__ == '__main__':
try:
diff --git a/src/conf_mode/https.py b/src/conf_mode/https.py
index 7cd7ea42e..ce5e63928 100755
--- a/src/conf_mode/https.py
+++ b/src/conf_mode/https.py
@@ -37,7 +37,7 @@ from vyos import airbag
airbag.enable()
config_file = '/etc/nginx/sites-available/default'
-systemd_override = r'/etc/systemd/system/nginx.service.d/override.conf'
+systemd_override = r'/run/systemd/system/nginx.service.d/override.conf'
cert_dir = '/etc/ssl/certs'
key_dir = '/etc/ssl/private'
certbot_dir = vyos.defaults.directories['certbot']
diff --git a/src/conf_mode/interfaces-bonding.py b/src/conf_mode/interfaces-bonding.py
index 21cf204fc..9936620c8 100755
--- a/src/conf_mode/interfaces-bonding.py
+++ b/src/conf_mode/interfaces-bonding.py
@@ -21,6 +21,7 @@ from netifaces import interfaces
from vyos.config import Config
from vyos.configdict import get_interface_dict
+from vyos.configdict import is_node_changed
from vyos.configdict import leaf_node_changed
from vyos.configdict import is_member
from vyos.configdict import is_source_interface
@@ -81,10 +82,10 @@ def get_config(config=None):
if 'mode' in bond:
bond['mode'] = get_bond_mode(bond['mode'])
- tmp = leaf_node_changed(conf, base + [ifname, 'mode'])
+ tmp = is_node_changed(conf, base + [ifname, 'mode'])
if tmp: bond['shutdown_required'] = {}
- tmp = leaf_node_changed(conf, base + [ifname, 'lacp-rate'])
+ tmp = is_node_changed(conf, base + [ifname, 'lacp-rate'])
if tmp: bond['shutdown_required'] = {}
# determine which members have been removed
@@ -116,7 +117,7 @@ def get_config(config=None):
if dict_search('member.interface', bond):
for interface, interface_config in bond['member']['interface'].items():
# Check if member interface is a new member
- if not conf.exists_effective(['member', 'interface', interface]):
+ if not conf.exists_effective(base + [ifname, 'member', 'interface', interface]):
bond['shutdown_required'] = {}
# Check if member interface is disabled
diff --git a/src/conf_mode/interfaces-ethernet.py b/src/conf_mode/interfaces-ethernet.py
index e02841831..b49c945cd 100755
--- a/src/conf_mode/interfaces-ethernet.py
+++ b/src/conf_mode/interfaces-ethernet.py
@@ -175,7 +175,7 @@ def generate(ethernet):
loaded_pki_cert = load_certificate(pki_cert['certificate'])
loaded_ca_certs = {load_certificate(c['certificate'])
- for c in ethernet['pki']['ca'].values()}
+ for c in ethernet['pki']['ca'].values()} if 'ca' in ethernet['pki'] else {}
cert_full_chain = find_chain(loaded_pki_cert, loaded_ca_certs)
diff --git a/src/conf_mode/interfaces-geneve.py b/src/conf_mode/interfaces-geneve.py
index 08cc3a48d..f6694ddde 100755
--- a/src/conf_mode/interfaces-geneve.py
+++ b/src/conf_mode/interfaces-geneve.py
@@ -14,14 +14,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
-
from sys import exit
from netifaces import interfaces
from vyos.config import Config
from vyos.configdict import get_interface_dict
-from vyos.configdict import leaf_node_changed
from vyos.configdict import is_node_changed
from vyos.configverify import verify_address
from vyos.configverify import verify_mtu_ipv6
@@ -49,13 +46,10 @@ def get_config(config=None):
# GENEVE interfaces are picky and require recreation if certain parameters
# change. But a GENEVE interface should - of course - not be re-created if
# it's description or IP address is adjusted. Feels somehow logic doesn't it?
- for cli_option in ['remote', 'vni']:
- if leaf_node_changed(conf, base + [ifname, cli_option]):
+ for cli_option in ['remote', 'vni', 'parameters']:
+ if is_node_changed(conf, base + [ifname, cli_option]):
geneve.update({'rebuild_required': {}})
- if is_node_changed(conf, base + [ifname, 'parameters']):
- geneve.update({'rebuild_required': {}})
-
return geneve
def verify(geneve):
diff --git a/src/conf_mode/interfaces-input.py b/src/conf_mode/interfaces-input.py
new file mode 100755
index 000000000..ad248843d
--- /dev/null
+++ b/src/conf_mode/interfaces-input.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2023 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from sys import exit
+
+from vyos.config import Config
+from vyos.configdict import get_interface_dict
+from vyos.configverify import verify_mirror_redirect
+from vyos.ifconfig import InputIf
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+def get_config(config=None):
+ """
+    Retrieve CLI config as dictionary. Dictionary can never be empty, as at
+ least the interface name will be added or a deleted flag
+ """
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['interfaces', 'input']
+ _, ifb = get_interface_dict(conf, base)
+
+ return ifb
+
+def verify(ifb):
+ if 'deleted' in ifb:
+ return None
+
+ verify_mirror_redirect(ifb)
+ return None
+
+def generate(ifb):
+ return None
+
+def apply(ifb):
+ d = InputIf(ifb['ifname'])
+
+ # Remove input interface
+ if 'deleted' in ifb:
+ d.remove()
+ else:
+ d.update(ifb)
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
diff --git a/src/conf_mode/interfaces-pppoe.py b/src/conf_mode/interfaces-pppoe.py
index e2fdc7a42..5f0b76f90 100755
--- a/src/conf_mode/interfaces-pppoe.py
+++ b/src/conf_mode/interfaces-pppoe.py
@@ -23,7 +23,6 @@ from netifaces import interfaces
from vyos.config import Config
from vyos.configdict import get_interface_dict
from vyos.configdict import is_node_changed
-from vyos.configdict import leaf_node_changed
from vyos.configdict import get_pppoe_interfaces
from vyos.configverify import verify_authentication
from vyos.configverify import verify_source_interface
@@ -55,7 +54,8 @@ def get_config(config=None):
# All parameters that can be changed on-the-fly (like interface description)
# should not lead to a reconnect!
for options in ['access-concentrator', 'connect-on-demand', 'service-name',
- 'source-interface', 'vrf', 'no-default-route', 'authentication']:
+ 'source-interface', 'vrf', 'no-default-route',
+ 'authentication', 'host_uniq']:
if is_node_changed(conf, base + [ifname, options]):
pppoe.update({'shutdown_required': {}})
# bail out early - no need to further process other nodes
diff --git a/src/conf_mode/interfaces-pseudo-ethernet.py b/src/conf_mode/interfaces-pseudo-ethernet.py
index 4c65bc0b6..dce5c2358 100755
--- a/src/conf_mode/interfaces-pseudo-ethernet.py
+++ b/src/conf_mode/interfaces-pseudo-ethernet.py
@@ -21,7 +21,7 @@ from vyos.config import Config
from vyos.configdict import get_interface_dict
from vyos.configdict import is_node_changed
from vyos.configdict import is_source_interface
-from vyos.configdict import leaf_node_changed
+from vyos.configdict import is_node_changed
from vyos.configverify import verify_vrf
from vyos.configverify import verify_address
from vyos.configverify import verify_bridge_delete
@@ -51,7 +51,7 @@ def get_config(config=None):
mode = is_node_changed(conf, ['mode'])
if mode: peth.update({'shutdown_required' : {}})
- if leaf_node_changed(conf, base + [ifname, 'mode']):
+ if is_node_changed(conf, base + [ifname, 'mode']):
peth.update({'rebuild_required': {}})
if 'source_interface' in peth:
diff --git a/src/conf_mode/interfaces-sstpc.py b/src/conf_mode/interfaces-sstpc.py
new file mode 100755
index 000000000..b5cc4cf4e
--- /dev/null
+++ b/src/conf_mode/interfaces-sstpc.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from sys import exit
+
+from vyos.config import Config
+from vyos.configdict import get_interface_dict
+from vyos.configdict import is_node_changed
+from vyos.configverify import verify_authentication
+from vyos.configverify import verify_vrf
+from vyos.ifconfig import SSTPCIf
+from vyos.pki import encode_certificate
+from vyos.pki import find_chain
+from vyos.pki import load_certificate
+from vyos.template import render
+from vyos.util import call
+from vyos.util import dict_search
+from vyos.util import is_systemd_service_running
+from vyos.util import write_file
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+def get_config(config=None):
+ """
+    Retrieve CLI config as dictionary. Dictionary can never be empty, as at least the
+ interface name will be added or a deleted flag
+ """
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['interfaces', 'sstpc']
+ ifname, sstpc = get_interface_dict(conf, base)
+
+ # We should only terminate the SSTP client session if critical parameters
+ # change. All parameters that can be changed on-the-fly (like interface
+ # description) should not lead to a reconnect!
+ for options in ['authentication', 'no_peer_dns', 'no_default_route',
+ 'server', 'ssl']:
+ if is_node_changed(conf, base + [ifname, options]):
+ sstpc.update({'shutdown_required': {}})
+ # bail out early - no need to further process other nodes
+ break
+
+ # Load PKI certificates for later processing
+ sstpc['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+ return sstpc
+
+def verify(sstpc):
+ if 'deleted' in sstpc:
+ return None
+
+ verify_authentication(sstpc)
+ verify_vrf(sstpc)
+
+ if not dict_search('server', sstpc):
+ raise ConfigError('Remote SSTP server must be specified!')
+
+ if not dict_search('ssl.ca_certificate', sstpc):
+ raise ConfigError('Missing mandatory CA certificate!')
+
+ return None
+
+def generate(sstpc):
+ ifname = sstpc['ifname']
+ config_sstpc = f'/etc/ppp/peers/{ifname}'
+
+ sstpc['ca_file_path'] = f'/run/sstpc/{ifname}_ca-cert.pem'
+
+ if 'deleted' in sstpc:
+ for file in [sstpc['ca_file_path'], config_sstpc]:
+ if os.path.exists(file):
+ os.unlink(file)
+ return None
+
+ ca_name = sstpc['ssl']['ca_certificate']
+ pki_ca_cert = sstpc['pki']['ca'][ca_name]
+
+ loaded_ca_cert = load_certificate(pki_ca_cert['certificate'])
+ loaded_ca_certs = {load_certificate(c['certificate'])
+ for c in sstpc['pki']['ca'].values()} if 'ca' in sstpc['pki'] else {}
+
+ ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs)
+
+ write_file(sstpc['ca_file_path'], '\n'.join(encode_certificate(c) for c in ca_full_chain))
+ render(config_sstpc, 'sstp-client/peer.j2', sstpc, permission=0o640)
+
+ return None
+
+def apply(sstpc):
+ ifname = sstpc['ifname']
+ if 'deleted' in sstpc or 'disable' in sstpc:
+ if os.path.isdir(f'/sys/class/net/{ifname}'):
+ p = SSTPCIf(ifname)
+ p.remove()
+ call(f'systemctl stop ppp@{ifname}.service')
+ return None
+
+ # reconnect should only be necessary when specific options change,
+ # like server, authentication ... (see get_config() for details)
+ if ((not is_systemd_service_running(f'ppp@{ifname}.service')) or
+ 'shutdown_required' in sstpc):
+
+ # cleanup system (e.g. FRR routes first)
+ if os.path.isdir(f'/sys/class/net/{ifname}'):
+ p = SSTPCIf(ifname)
+ p.remove()
+
+ call(f'systemctl restart ppp@{ifname}.service')
+ # When interface comes "live" a hook is called:
+ # /etc/ppp/ip-up.d/96-vyos-sstpc-callback
+ # which triggers SSTPCIf.update()
+ else:
+ if os.path.isdir(f'/sys/class/net/{ifname}'):
+ p = SSTPCIf(ifname)
+ p.update(sstpc)
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
diff --git a/src/conf_mode/interfaces-tunnel.py b/src/conf_mode/interfaces-tunnel.py
index acef1fda7..e2701d9d3 100755
--- a/src/conf_mode/interfaces-tunnel.py
+++ b/src/conf_mode/interfaces-tunnel.py
@@ -21,7 +21,7 @@ from netifaces import interfaces
from vyos.config import Config
from vyos.configdict import get_interface_dict
-from vyos.configdict import leaf_node_changed
+from vyos.configdict import is_node_changed
from vyos.configverify import verify_address
from vyos.configverify import verify_bridge_delete
from vyos.configverify import verify_interface_exists
@@ -52,7 +52,7 @@ def get_config(config=None):
ifname, tunnel = get_interface_dict(conf, base)
if 'deleted' not in tunnel:
- tmp = leaf_node_changed(conf, base + [ifname, 'encapsulation'])
+ tmp = is_node_changed(conf, base + [ifname, 'encapsulation'])
if tmp: tunnel.update({'encapsulation_changed': {}})
# We also need to inspect other configured tunnels as there are Kernel
diff --git a/src/conf_mode/interfaces-virtual-ethernet.py b/src/conf_mode/interfaces-virtual-ethernet.py
new file mode 100755
index 000000000..8efe89c41
--- /dev/null
+++ b/src/conf_mode/interfaces-virtual-ethernet.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from sys import exit
+
+from netifaces import interfaces
+from vyos import ConfigError
+from vyos import airbag
+from vyos.config import Config
+from vyos.configdict import get_interface_dict
+from vyos.configverify import verify_address
+from vyos.configverify import verify_bridge_delete
+from vyos.configverify import verify_vrf
+from vyos.ifconfig import VethIf
+
+airbag.enable()
+
+def get_config(config=None):
+ """
+    Retrieve CLI config as dictionary. Dictionary can never be empty, as at
+ least the interface name will be added or a deleted flag
+ """
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['interfaces', 'virtual-ethernet']
+ ifname, veth = get_interface_dict(conf, base)
+
+    # We need to know all other veth-related interfaces as veth requires a 1:1
+    # mapping for the peer names. The Linux kernel automatically creates both
+    # interfaces, the local one and its peer, but VyOS also needs the peer
+    # interface configured on the CLI so we can assign proper IP addresses etc.
+ veth['other_interfaces'] = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True, no_tag_node_value_mangle=True)
+
+ return veth
+
+
+def verify(veth):
+ if 'deleted' in veth:
+ verify_bridge_delete(veth)
+        # Prevent deleting a veth interface that is still referenced by another "vethX peer-name"
+ for iface, iface_config in veth['other_interfaces'].items():
+ if veth['ifname'] in iface_config['peer_name']:
+ ifname = veth['ifname']
+ raise ConfigError(
+ f'Cannot delete "{ifname}" used for "interface {iface} peer-name"'
+ )
+ return None
+
+ verify_vrf(veth)
+ verify_address(veth)
+
+ if 'peer_name' not in veth:
+ raise ConfigError(f'Remote peer name must be set for "{veth["ifname"]}"!')
+
+ peer_name = veth['peer_name']
+ ifname = veth['ifname']
+
+ if veth['peer_name'] not in veth['other_interfaces']:
+ raise ConfigError(f'Used peer-name "{peer_name}" on interface "{ifname}" ' \
+ 'is not configured!')
+
+ if veth['other_interfaces'][peer_name]['peer_name'] != ifname:
+ raise ConfigError(
+ f'Configuration mismatch between "{ifname}" and "{peer_name}"!')
+
+ if peer_name == ifname:
+ raise ConfigError(
+ f'Peer-name "{peer_name}" cannot be the same as interface "{ifname}"!')
+
+ return None
+
+
+def generate(peth):
+ return None
+
+def apply(veth):
+ # Check if the Veth interface already exists
+ if 'rebuild_required' in veth or 'deleted' in veth:
+ if veth['ifname'] in interfaces():
+ p = VethIf(veth['ifname'])
+ p.remove()
+
+ if 'deleted' not in veth:
+ p = VethIf(**veth)
+ p.update(veth)
+
+ return None
+
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
diff --git a/src/conf_mode/interfaces-vxlan.py b/src/conf_mode/interfaces-vxlan.py
index af2d0588d..b1536148c 100755
--- a/src/conf_mode/interfaces-vxlan.py
+++ b/src/conf_mode/interfaces-vxlan.py
@@ -52,13 +52,11 @@ def get_config(config=None):
# VXLAN interfaces are picky and require recreation if certain parameters
# change. But a VXLAN interface should - of course - not be re-created if
# it's description or IP address is adjusted. Feels somehow logic doesn't it?
- for cli_option in ['external', 'gpe', 'group', 'port', 'remote',
+ for cli_option in ['parameters', 'external', 'gpe', 'group', 'port', 'remote',
'source-address', 'source-interface', 'vni']:
- if leaf_node_changed(conf, base + [ifname, cli_option]):
+ if is_node_changed(conf, base + [ifname, cli_option]):
vxlan.update({'rebuild_required': {}})
-
- if is_node_changed(conf, base + [ifname, 'parameters']):
- vxlan.update({'rebuild_required': {}})
+ break
# We need to verify that no other VXLAN tunnel is configured when external
# mode is in use - Linux Kernel limitation
diff --git a/src/conf_mode/interfaces-wireguard.py b/src/conf_mode/interfaces-wireguard.py
index 8d738f55e..762bad94f 100755
--- a/src/conf_mode/interfaces-wireguard.py
+++ b/src/conf_mode/interfaces-wireguard.py
@@ -87,6 +87,8 @@ def verify(wireguard):
'cannot be used for the interface!')
# run checks on individual configured WireGuard peer
+ public_keys = []
+
for tmp in wireguard['peer']:
peer = wireguard['peer'][tmp]
@@ -100,6 +102,11 @@ def verify(wireguard):
raise ConfigError('Both Wireguard port and address must be defined '
f'for peer "{tmp}" if either one of them is set!')
+ if peer['public_key'] in public_keys:
+ raise ConfigError(f'Duplicate public-key defined on peer "{tmp}"')
+
+ public_keys.append(peer['public_key'])
+
def apply(wireguard):
tmp = WireGuardIf(wireguard['ifname'])
if 'deleted' in wireguard:
diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py
index 8b1a5a720..9f8221514 100755
--- a/src/conf_mode/nat.py
+++ b/src/conf_mode/nat.py
@@ -32,6 +32,7 @@ from vyos.util import cmd
from vyos.util import run
from vyos.util import check_kmod
from vyos.util import dict_search
+from vyos.util import dict_search_args
from vyos.validate import is_addr_assigned
from vyos.xml import defaults
from vyos import ConfigError
@@ -47,6 +48,13 @@ else:
nftables_nat_config = '/run/nftables_nat.conf'
nftables_static_nat_conf = '/run/nftables_static-nat-rules.nft'
+valid_groups = [
+ 'address_group',
+ 'domain_group',
+ 'network_group',
+ 'port_group'
+]
+
def get_handler(json, chain, target):
""" Get nftable rule handler number of given chain/target combination.
Handler is required when adding NAT/Conntrack helper targets """
@@ -60,7 +68,7 @@ def get_handler(json, chain, target):
return None
-def verify_rule(config, err_msg):
+def verify_rule(config, err_msg, groups_dict):
""" Common verify steps used for both source and destination NAT """
if (dict_search('translation.port', config) != None or
@@ -78,6 +86,45 @@ def verify_rule(config, err_msg):
'statically maps a whole network of addresses onto another\n' \
'network of addresses')
+ for side in ['destination', 'source']:
+ if side in config:
+ side_conf = config[side]
+
+ if len({'address', 'fqdn'} & set(side_conf)) > 1:
+ raise ConfigError('Only one of address, fqdn or geoip can be specified')
+
+ if 'group' in side_conf:
+ if len({'address_group', 'network_group', 'domain_group'} & set(side_conf['group'])) > 1:
+ raise ConfigError('Only one address-group, network-group or domain-group can be specified')
+
+ for group in valid_groups:
+ if group in side_conf['group']:
+ group_name = side_conf['group'][group]
+ error_group = group.replace("_", "-")
+
+ if group in ['address_group', 'network_group', 'domain_group']:
+ types = [t for t in ['address', 'fqdn'] if t in side_conf]
+ if types:
+ raise ConfigError(f'{error_group} and {types[0]} cannot both be defined')
+
+ if group_name and group_name[0] == '!':
+ group_name = group_name[1:]
+
+ group_obj = dict_search_args(groups_dict, group, group_name)
+
+ if group_obj is None:
+ raise ConfigError(f'Invalid {error_group} "{group_name}" on firewall rule')
+
+ if not group_obj:
+ Warning(f'{error_group} "{group_name}" has no members!')
+
+ if dict_search_args(side_conf, 'group', 'port_group'):
+ if 'protocol' not in config:
+ raise ConfigError('Protocol must be defined if specifying a port-group')
+
+ if config['protocol'] not in ['tcp', 'udp', 'tcp_udp']:
+ raise ConfigError('Protocol must be tcp, udp, or tcp_udp when specifying a port-group')
+
def get_config(config=None):
if config:
conf = config
@@ -105,16 +152,20 @@ def get_config(config=None):
condensed_json = jmespath.search(pattern, nftable_json)
if not conf.exists(base):
- nat['helper_functions'] = 'remove'
-
- # Retrieve current table handler positions
- nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_HELPER')
- nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'NAT_CONNTRACK')
- nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_HELPER')
- nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'NAT_CONNTRACK')
+ if get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_HELPER'):
+ nat['helper_functions'] = 'remove'
+
+ # Retrieve current table handler positions
+ nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_HELPER')
+ nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'NAT_CONNTRACK')
+ nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_HELPER')
+ nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'NAT_CONNTRACK')
nat['deleted'] = ''
return nat
+ nat['firewall_group'] = conf.get_config_dict(['firewall', 'group'], key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
# check if NAT connection tracking helpers need to be set up - this has to
# be done only once
if not get_handler(condensed_json, 'PREROUTING', 'NAT_CONNTRACK'):
@@ -146,6 +197,10 @@ def verify(nat):
if config['outbound_interface'] not in 'any' and config['outbound_interface'] not in interfaces():
Warning(f'rule "{rule}" interface "{config["outbound_interface"]}" does not exist on this system')
+ if not dict_search('translation.address', config) and not dict_search('translation.port', config):
+ if 'exclude' not in config:
+ raise ConfigError(f'{err_msg} translation requires address and/or port')
+
addr = dict_search('translation.address', config)
if addr != None and addr != 'masquerade' and not is_ip_network(addr):
for ip in addr.split('-'):
@@ -153,7 +208,7 @@ def verify(nat):
Warning(f'IP address {ip} does not exist on the system!')
# common rule verification
- verify_rule(config, err_msg)
+ verify_rule(config, err_msg, nat['firewall_group'])
if dict_search('destination.rule', nat):
@@ -166,8 +221,12 @@ def verify(nat):
elif config['inbound_interface'] not in 'any' and config['inbound_interface'] not in interfaces():
Warning(f'rule "{rule}" interface "{config["inbound_interface"]}" does not exist on this system')
+ if not dict_search('translation.address', config) and not dict_search('translation.port', config):
+ if 'exclude' not in config:
+ raise ConfigError(f'{err_msg} translation requires address and/or port')
+
# common rule verification
- verify_rule(config, err_msg)
+ verify_rule(config, err_msg, nat['firewall_group'])
if dict_search('static.rule', nat):
for rule, config in dict_search('static.rule', nat).items():
@@ -178,7 +237,7 @@ def verify(nat):
'inbound-interface not specified')
# common rule verification
- verify_rule(config, err_msg)
+ verify_rule(config, err_msg, nat['firewall_group'])
return None
@@ -204,6 +263,10 @@ def apply(nat):
cmd(f'nft -f {nftables_nat_config}')
cmd(f'nft -f {nftables_static_nat_conf}')
+ if not nat or 'deleted' in nat:
+ os.unlink(nftables_nat_config)
+ os.unlink(nftables_static_nat_conf)
+
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/ntp.py b/src/conf_mode/ntp.py
index 0ecb4d736..92cb73aab 100755
--- a/src/conf_mode/ntp.py
+++ b/src/conf_mode/ntp.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2022 VyOS maintainers and contributors
+# Copyright (C) 2018-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -21,26 +21,29 @@ from vyos.configdict import is_node_changed
from vyos.configverify import verify_vrf
from vyos.configverify import verify_interface_exists
from vyos.util import call
+from vyos.util import chmod_750
from vyos.util import get_interface_config
from vyos.template import render
from vyos import ConfigError
from vyos import airbag
airbag.enable()
-config_file = r'/run/ntpd/ntpd.conf'
-systemd_override = r'/etc/systemd/system/ntp.service.d/override.conf'
+config_file = r'/run/chrony/chrony.conf'
+systemd_override = r'/run/systemd/system/chrony.service.d/override.conf'
+user_group = '_chrony'
def get_config(config=None):
if config:
conf = config
else:
conf = Config()
- base = ['system', 'ntp']
+ base = ['service', 'ntp']
if not conf.exists(base):
return None
ntp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
ntp['config_file'] = config_file
+ ntp['user'] = user_group
tmp = is_node_changed(conf, base + ['vrf'])
if tmp: ntp.update({'restart_required': {}})
@@ -52,7 +55,7 @@ def verify(ntp):
if not ntp:
return None
- if 'allow_clients' in ntp and 'server' not in ntp:
+ if 'server' not in ntp:
raise ConfigError('NTP server not configured')
verify_vrf(ntp)
@@ -77,13 +80,17 @@ def generate(ntp):
if not ntp:
return None
- render(config_file, 'ntp/ntpd.conf.j2', ntp)
- render(systemd_override, 'ntp/override.conf.j2', ntp)
+ render(config_file, 'chrony/chrony.conf.j2', ntp, user=user_group, group=user_group)
+ render(systemd_override, 'chrony/override.conf.j2', ntp, user=user_group, group=user_group)
+
+ # Ensure proper permission for chrony command socket
+ config_dir = os.path.dirname(config_file)
+ chmod_750(config_dir)
return None
def apply(ntp):
- systemd_service = 'ntp.service'
+ systemd_service = 'chrony.service'
# Reload systemd manager configuration
call('systemctl daemon-reload')
diff --git a/src/conf_mode/pki.py b/src/conf_mode/pki.py
index 29ed7b1b7..54de467ca 100755
--- a/src/conf_mode/pki.py
+++ b/src/conf_mode/pki.py
@@ -16,20 +16,16 @@
from sys import exit
-import jmespath
-
from vyos.config import Config
+from vyos.configdep import set_dependents, call_dependents
from vyos.configdict import dict_merge
from vyos.configdict import node_changed
from vyos.pki import is_ca_certificate
from vyos.pki import load_certificate
-from vyos.pki import load_certificate_request
from vyos.pki import load_public_key
from vyos.pki import load_private_key
from vyos.pki import load_crl
from vyos.pki import load_dh_parameters
-from vyos.util import ask_input
-from vyos.util import call
from vyos.util import dict_search_args
from vyos.util import dict_search_recursive
from vyos.xml import defaults
@@ -55,6 +51,11 @@ sync_search = [
'script': '/usr/libexec/vyos/conf_mode/interfaces-openvpn.py'
},
{
+ 'keys': ['ca_certificate'],
+ 'path': ['interfaces', 'sstpc'],
+ 'script': '/usr/libexec/vyos/conf_mode/interfaces-sstpc.py'
+ },
+ {
'keys': ['certificate', 'ca_certificate', 'local_key', 'remote_key'],
'path': ['vpn', 'ipsec'],
'script': '/usr/libexec/vyos/conf_mode/vpn_ipsec.py'
@@ -121,6 +122,39 @@ def get_config(config=None):
get_first_key=True,
no_tag_node_value_mangle=True)
+ if 'changed' in pki:
+ for search in sync_search:
+ for key in search['keys']:
+ changed_key = sync_translate[key]
+
+ if changed_key not in pki['changed']:
+ continue
+
+ for item_name in pki['changed'][changed_key]:
+ node_present = False
+ if changed_key == 'openvpn':
+ node_present = dict_search_args(pki, 'openvpn', 'shared_secret', item_name)
+ else:
+ node_present = dict_search_args(pki, changed_key, item_name)
+
+ if node_present:
+ search_dict = dict_search_args(pki['system'], *search['path'])
+
+ if not search_dict:
+ continue
+
+ for found_name, found_path in dict_search_recursive(search_dict, key):
+ if found_name == item_name:
+ path = search['path']
+ path_str = ' '.join(path + found_path)
+ print(f'pki: Updating config: {path_str} {found_name}')
+
+ if path[0] == 'interfaces':
+ ifname = found_path[0]
+ set_dependents(path[1], conf, ifname)
+ else:
+ set_dependents(path[1], conf)
+
return pki
def is_valid_certificate(raw_data):
@@ -259,37 +293,7 @@ def apply(pki):
return None
if 'changed' in pki:
- for search in sync_search:
- for key in search['keys']:
- changed_key = sync_translate[key]
-
- if changed_key not in pki['changed']:
- continue
-
- for item_name in pki['changed'][changed_key]:
- node_present = False
- if changed_key == 'openvpn':
- node_present = dict_search_args(pki, 'openvpn', 'shared_secret', item_name)
- else:
- node_present = dict_search_args(pki, changed_key, item_name)
-
- if node_present:
- search_dict = dict_search_args(pki['system'], *search['path'])
-
- if not search_dict:
- continue
-
- for found_name, found_path in dict_search_recursive(search_dict, key):
- if found_name == item_name:
- path_str = ' '.join(search['path'] + found_path)
- print(f'pki: Updating config: {path_str} {found_name}')
-
- script = search['script']
- if found_path[0] == 'interfaces':
- ifname = found_path[2]
- call(f'VYOS_TAGNODE_VALUE={ifname} {script}')
- else:
- call(script)
+ call_dependents()
return None
diff --git a/src/conf_mode/policy-route-interface.py b/src/conf_mode/policy-route-interface.py
deleted file mode 100755
index 58c5fd93d..000000000
--- a/src/conf_mode/policy-route-interface.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2021 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-
-from sys import argv
-from sys import exit
-
-from vyos.config import Config
-from vyos.ifconfig import Section
-from vyos.template import render
-from vyos.util import cmd
-from vyos.util import run
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
-
- ifname = argv[1]
- ifpath = Section.get_config_path(ifname)
- if_policy_path = f'interfaces {ifpath} policy'
-
- if_policy = conf.get_config_dict(if_policy_path, key_mangling=('-', '_'), get_first_key=True,
- no_tag_node_value_mangle=True)
-
- if_policy['ifname'] = ifname
- if_policy['policy'] = conf.get_config_dict(['policy'], key_mangling=('-', '_'), get_first_key=True,
- no_tag_node_value_mangle=True)
-
- return if_policy
-
-def verify_chain(table, chain):
- # Verify policy route applied
- code = run(f'nft list chain {table} {chain}')
- return code == 0
-
-def verify(if_policy):
- # bail out early - looks like removal from running config
- if not if_policy:
- return None
-
- for route in ['route', 'route6']:
- if route in if_policy:
- if route not in if_policy['policy']:
- raise ConfigError('Policy route not configured')
-
- route_name = if_policy[route]
-
- if route_name not in if_policy['policy'][route]:
- raise ConfigError(f'Invalid policy route name "{name}"')
-
- nft_prefix = 'VYOS_PBR6_' if route == 'route6' else 'VYOS_PBR_'
- nft_table = 'ip6 mangle' if route == 'route6' else 'ip mangle'
-
- if not verify_chain(nft_table, nft_prefix + route_name):
- raise ConfigError('Policy route did not apply')
-
- return None
-
-def generate(if_policy):
- return None
-
-def cleanup_rule(table, chain, ifname, new_name=None):
- results = cmd(f'nft -a list chain {table} {chain}').split("\n")
- retval = None
- for line in results:
- if f'ifname "{ifname}"' in line:
- if new_name and f'jump {new_name}' in line:
- # new_name is used to clear rules for any previously referenced chains
- # returns true when rule exists and doesn't need to be created
- retval = True
- continue
-
- handle_search = re.search('handle (\d+)', line)
- if handle_search:
- cmd(f'nft delete rule {table} {chain} handle {handle_search[1]}')
- return retval
-
-def apply(if_policy):
- ifname = if_policy['ifname']
-
- route_chain = 'VYOS_PBR_PREROUTING'
- ipv6_route_chain = 'VYOS_PBR6_PREROUTING'
-
- if 'route' in if_policy:
- name = 'VYOS_PBR_' + if_policy['route']
- rule_exists = cleanup_rule('ip mangle', route_chain, ifname, name)
-
- if not rule_exists:
- cmd(f'nft insert rule ip mangle {route_chain} iifname {ifname} counter jump {name}')
- else:
- cleanup_rule('ip mangle', route_chain, ifname)
-
- if 'route6' in if_policy:
- name = 'VYOS_PBR6_' + if_policy['route6']
- rule_exists = cleanup_rule('ip6 mangle', ipv6_route_chain, ifname, name)
-
- if not rule_exists:
- cmd(f'nft insert rule ip6 mangle {ipv6_route_chain} iifname {ifname} counter jump {name}')
- else:
- cleanup_rule('ip6 mangle', ipv6_route_chain, ifname)
-
- return None
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/conf_mode/policy-route.py b/src/conf_mode/policy-route.py
index 00539b9c7..40a32efb3 100755
--- a/src/conf_mode/policy-route.py
+++ b/src/conf_mode/policy-route.py
@@ -15,7 +15,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import re
from json import loads
from sys import exit
@@ -25,7 +24,6 @@ from vyos.config import Config
from vyos.template import render
from vyos.util import cmd
from vyos.util import dict_search_args
-from vyos.util import dict_search_recursive
from vyos.util import run
from vyos import ConfigError
from vyos import airbag
@@ -34,48 +32,14 @@ airbag.enable()
mark_offset = 0x7FFFFFFF
nftables_conf = '/run/nftables_policy.conf'
-ROUTE_PREFIX = 'VYOS_PBR_'
-ROUTE6_PREFIX = 'VYOS_PBR6_'
-
-preserve_chains = [
- 'VYOS_PBR_PREROUTING',
- 'VYOS_PBR_POSTROUTING',
- 'VYOS_PBR6_PREROUTING',
- 'VYOS_PBR6_POSTROUTING'
-]
-
valid_groups = [
'address_group',
+ 'domain_group',
'network_group',
- 'port_group'
+ 'port_group',
+ 'interface_group'
]
-group_set_prefix = {
- 'A_': 'address_group',
- 'A6_': 'ipv6_address_group',
-# 'D_': 'domain_group',
- 'M_': 'mac_group',
- 'N_': 'network_group',
- 'N6_': 'ipv6_network_group',
- 'P_': 'port_group'
-}
-
-def get_policy_interfaces(conf):
- out = {}
- interfaces = conf.get_config_dict(['interfaces'], key_mangling=('-', '_'), get_first_key=True,
- no_tag_node_value_mangle=True)
- def find_interfaces(iftype_conf, output={}, prefix=''):
- for ifname, if_conf in iftype_conf.items():
- if 'policy' in if_conf:
- output[prefix + ifname] = if_conf['policy']
- for vif in ['vif', 'vif_s', 'vif_c']:
- if vif in if_conf:
- output.update(find_interfaces(if_conf[vif], output, f'{prefix}{ifname}.'))
- return output
- for iftype, iftype_conf in interfaces.items():
- out.update(find_interfaces(iftype_conf))
- return out
-
def get_config(config=None):
if config:
conf = config
@@ -88,7 +52,6 @@ def get_config(config=None):
policy['firewall_group'] = conf.get_config_dict(['firewall', 'group'], key_mangling=('-', '_'), get_first_key=True,
no_tag_node_value_mangle=True)
- policy['interfaces'] = get_policy_interfaces(conf)
return policy
@@ -132,8 +95,8 @@ def verify_rule(policy, name, rule_conf, ipv6, rule_id):
side_conf = rule_conf[side]
if 'group' in side_conf:
- if {'address_group', 'network_group'} <= set(side_conf['group']):
- raise ConfigError('Only one address-group or network-group can be specified')
+ if len({'address_group', 'domain_group', 'network_group'} & set(side_conf['group'])) > 1:
+ raise ConfigError('Only one address-group, domain-group or network-group can be specified')
for group in valid_groups:
if group in side_conf['group']:
@@ -168,73 +131,11 @@ def verify(policy):
for rule_id, rule_conf in pol_conf['rule'].items():
verify_rule(policy, name, rule_conf, ipv6, rule_id)
- for ifname, if_policy in policy['interfaces'].items():
- name = dict_search_args(if_policy, 'route')
- ipv6_name = dict_search_args(if_policy, 'route6')
-
- if name and not dict_search_args(policy, 'route', name):
- raise ConfigError(f'Policy route "{name}" is still referenced on interface {ifname}')
-
- if ipv6_name and not dict_search_args(policy, 'route6', ipv6_name):
- raise ConfigError(f'Policy route6 "{ipv6_name}" is still referenced on interface {ifname}')
-
return None
-def cleanup_commands(policy):
- commands = []
- commands_chains = []
- commands_sets = []
- for table in ['ip mangle', 'ip6 mangle']:
- route_node = 'route' if table == 'ip mangle' else 'route6'
- chain_prefix = ROUTE_PREFIX if table == 'ip mangle' else ROUTE6_PREFIX
-
- json_str = cmd(f'nft -t -j list table {table}')
- obj = loads(json_str)
- if 'nftables' not in obj:
- continue
- for item in obj['nftables']:
- if 'chain' in item:
- chain = item['chain']['name']
- if chain in preserve_chains or not chain.startswith("VYOS_PBR"):
- continue
-
- if dict_search_args(policy, route_node, chain.replace(chain_prefix, "", 1)) != None:
- commands.append(f'flush chain {table} {chain}')
- else:
- commands_chains.append(f'delete chain {table} {chain}')
-
- if 'rule' in item:
- rule = item['rule']
- chain = rule['chain']
- handle = rule['handle']
-
- if chain not in preserve_chains:
- continue
-
- target, _ = next(dict_search_recursive(rule['expr'], 'target'))
-
- if target.startswith(chain_prefix):
- if dict_search_args(policy, route_node, target.replace(chain_prefix, "", 1)) == None:
- commands.append(f'delete rule {table} {chain} handle {handle}')
-
- if 'set' in item:
- set_name = item['set']['name']
-
- for prefix, group_type in group_set_prefix.items():
- if set_name.startswith(prefix):
- group_name = set_name.replace(prefix, "", 1)
- if dict_search_args(policy, 'firewall_group', group_type, group_name) != None:
- commands_sets.append(f'flush set {table} {set_name}')
- else:
- commands_sets.append(f'delete set {table} {set_name}')
-
- return commands + commands_chains + commands_sets
-
def generate(policy):
if not os.path.exists(nftables_conf):
policy['first_install'] = True
- else:
- policy['cleanup_commands'] = cleanup_commands(policy)
render(nftables_conf, 'firewall/nftables-policy.j2', policy)
return None
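
A small illustration of the tightened group check above: at most one of address-group, domain-group or network-group may be referenced on one side of a rule. The sample dicts are invented.

def exclusive_group_ok(group_conf: dict) -> bool:
    # True when at most one of the mutually exclusive group types is present
    return len({'address_group', 'domain_group', 'network_group'} & set(group_conf)) <= 1

print(exclusive_group_ok({'address_group': 'LAN-HOSTS'}))                          # True
print(exclusive_group_ok({'address_group': 'LAN-HOSTS', 'domain_group': 'ADS'}))   # False -> ConfigError
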
diff --git a/src/conf_mode/policy.py b/src/conf_mode/policy.py
index a0d288e91..331194fec 100755
--- a/src/conf_mode/policy.py
+++ b/src/conf_mode/policy.py
@@ -167,6 +167,11 @@ def verify(policy):
continue
for rule, rule_config in route_map_config['rule'].items():
+ # Action 'deny' cannot be used with "continue"
+ # FRR does not validate this (T4827)
+ if rule_config['action'] == 'deny' and 'continue' in rule_config:
+ raise ConfigError(f'rule {rule} "continue" cannot be used with action deny!')
+
# Specified community-list must exist
tmp = dict_search('match.community.community_list',
rule_config)
diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py
index ff568d470..4f05957fa 100755
--- a/src/conf_mode/protocols_bgp.py
+++ b/src/conf_mode/protocols_bgp.py
@@ -14,8 +14,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
-
from sys import exit
from sys import argv
@@ -57,13 +55,18 @@ def get_config(config=None):
# instead of the VRF instance.
if vrf: bgp.update({'vrf' : vrf})
+ bgp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'],
+ key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ bgp['dependent_vrfs'].update({'default': {'protocols': {
+ 'bgp': conf.get_config_dict(base_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)}}})
if not conf.exists(base):
+ # If the BGP instance is deleted, mark it as such
bgp.update({'deleted' : ''})
- if not vrf:
- # We are running in the default VRF context, thus we can not delete
- # our main BGP instance if there are dependent BGP VRF instances.
- bgp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'],
- key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True)
return bgp
# We also need some additional information from the config, prefix-lists
@@ -74,9 +77,91 @@ def get_config(config=None):
tmp = conf.get_config_dict(['policy'])
# Merge policy dict into "regular" config dict
bgp = dict_merge(tmp, bgp)
-
return bgp
+
+def verify_vrf_as_import(search_vrf_name: str, afi_name: str, vrfs_config: dict) -> bool:
+ """
+ :param search_vrf_name: search vrf name in import list
+ :type search_vrf_name: str
+ :param afi_name: afi/safi name
+ :type afi_name: str
+ :param vrfs_config: configuration of dependent VRFs
+ :type vrfs_config: dict
+ :return: True if the VRF is in an import list, otherwise False
+ :rtype: bool
+ """
+ for vrf_name, vrf_config in vrfs_config.items():
+ import_list = dict_search(
+ f'protocols.bgp.address_family.{afi_name}.import.vrf',
+ vrf_config)
+ if import_list:
+ if search_vrf_name in import_list:
+ return True
+ return False
+
+def verify_vrf_import_options(afi_config: dict) -> bool:
+ """
+ Check whether the AFI contains one of the VPN rd/route-target options
+ :param afi_config: afi/safi
+ :type afi_config: dict
+ :return: True if rd and route-target options are present, otherwise False
+ :rtype: bool
+ """
+ options = [
+ f'rd.vpn.export',
+ f'route_target.vpn.import',
+ f'route_target.vpn.export',
+ f'route_target.vpn.both'
+ ]
+ for option in options:
+ if dict_search(option, afi_config):
+ return True
+ return False
+
+def verify_vrf_import(vrf_name: str, vrfs_config: dict, afi_name: str) -> bool:
+ """
+ Verify that the VRF exists and contains the VPN options
+ :param vrf_name: name of VRF
+ :type vrf_name: str
+ :param vrfs_config: dependent vrfs config
+ :type vrfs_config: dict
+ :param afi_name: afi/safi name
+ :type afi_name: str
+ :return: True if the VRF contains rd and route-target options, otherwise False
+ :rtype: bool
+ """
+ if vrf_name != 'default':
+ verify_vrf({'vrf': vrf_name})
+ if dict_search(f'{vrf_name}.protocols.bgp.address_family.{afi_name}',
+ vrfs_config):
+ afi_config = \
+ vrfs_config[vrf_name]['protocols']['bgp']['address_family'][
+ afi_name]
+ if verify_vrf_import_options(afi_config):
+ return True
+ return False
+
+def verify_vrflist_import(afi_name: str, afi_config: dict, vrfs_config: dict) -> bool:
+ """
+ Check every VRF in the import list and return True
+ if a specific VRF contains rd and route-target
+ options, otherwise False
+
+ :param afi_name: afi/safi name
+ :type afi_name: str
+ :param afi_config: afi/safi configuration
+ :type afi_config: dict
+ :param vrfs_config: dependent vrfs config
+ :type vrfs_config: dict
+ :return: True if a VRF contains rd and route-target options, otherwise False
+ :rtype: bool
+ """
+ for vrf_name in afi_config['import']['vrf']:
+ if verify_vrf_import(vrf_name, vrfs_config, afi_name):
+ return True
+ return False
+
def verify_remote_as(peer_config, bgp_config):
if 'remote_as' in peer_config:
return peer_config['remote_as']
@@ -113,12 +198,22 @@ def verify_afi(peer_config, bgp_config):
return False
def verify(bgp):
- if not bgp or 'deleted' in bgp:
- if 'dependent_vrfs' in bgp:
- for vrf, vrf_options in bgp['dependent_vrfs'].items():
- if dict_search('protocols.bgp', vrf_options) != None:
- raise ConfigError('Cannot delete default BGP instance, ' \
- 'dependent VRF instance(s) exist!')
+ if 'deleted' in bgp:
+ if 'vrf' in bgp:
+ # Cannot delete vrf if it exists in import vrf list in other vrfs
+ for tmp_afi in ['ipv4_unicast', 'ipv6_unicast']:
+ if verify_vrf_as_import(bgp['vrf'],tmp_afi,bgp['dependent_vrfs']):
+ raise ConfigError(f'Cannot delete vrf {bgp["vrf"]} instance, ' \
+ 'please unconfigure "import vrf" commands first!')
+ else:
+ # We are running in the default VRF context, thus we can not delete
+ # our main BGP instance if there are dependent BGP VRF instances.
+ if 'dependent_vrfs' in bgp:
+ for vrf, vrf_options in bgp['dependent_vrfs'].items():
+ if vrf != 'default':
+ if dict_search('protocols.bgp', vrf_options):
+ raise ConfigError('Cannot delete default BGP instance, ' \
+ 'dependent VRF instance(s) exist!')
return None
if 'system_as' not in bgp:
@@ -140,6 +235,11 @@ def verify(bgp):
raise ConfigError(f'Specified peer-group "{peer_group}" for '\
f'neighbor "{neighbor}" does not exist!')
+ if 'local_role' in peer_config:
+ #Ensure Local Role has only one value.
+ if len(peer_config['local_role']) > 1:
+ raise ConfigError(f'Only one local role can be specified for peer "{peer}"!')
+
if 'local_as' in peer_config:
if len(peer_config['local_as']) > 1:
raise ConfigError(f'Only one local-as number can be specified for peer "{peer}"!')
@@ -324,9 +424,43 @@ def verify(bgp):
f'{afi} administrative distance {key}!')
if afi in ['ipv4_unicast', 'ipv6_unicast']:
- if 'import' in afi_config and 'vrf' in afi_config['import']:
- # Check if VRF exists
- verify_vrf(afi_config['import']['vrf'])
+
+ vrf_name = bgp['vrf'] if dict_search('vrf', bgp) else 'default'
+ # Verify if the current VRF contains rd and route-target options
+ # and does not exist in import list in other VRFs
+ if dict_search(f'rd.vpn.export', afi_config):
+ if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']):
+ raise ConfigError(
+ 'Command "import vrf" conflicts with "rd vpn export" command!')
+
+ if dict_search('route_target.vpn.both', afi_config):
+ if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']):
+ raise ConfigError(
+ 'Command "import vrf" conflicts with "route-target vpn both" command!')
+
+ if dict_search('route_target.vpn.import', afi_config):
+ if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']):
+ raise ConfigError(
+ 'Command "import vrf conflicts" with "route-target vpn import" command!')
+
+ if dict_search('route_target.vpn.export', afi_config):
+ if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']):
+ raise ConfigError(
+ 'Command "import vrf" conflicts with "route-target vpn export" command!')
+
+ # Verify if VRFs in import do not contain rd
+ # and route-target options
+ if dict_search('import.vrf', afi_config) is not None:
+ # Verify if VRF with import does not contain rd
+ # and route-target options
+ if verify_vrf_import_options(afi_config):
+ raise ConfigError(
+ 'Please unconfigure "import vrf" commands before using vpn commands in the same VRF!')
+ # Verify if VRFs in import list do not contain rd
+ # and route-target options
+ if verify_vrflist_import(afi, afi_config, bgp['dependent_vrfs']):
+ raise ConfigError(
+ 'Please unconfigure import vrf commands before using vpn commands in dependent VRFs!')
# FRR error: please unconfigure vpn to vrf commands before
# using import vrf commands
@@ -339,7 +473,6 @@ def verify(bgp):
tmp = dict_search(f'route_map.vpn.{export_import}', afi_config)
if tmp: verify_route_map(tmp, bgp)
-
return None
def generate(bgp):
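
To make the new import-vrf conflict checks easier to follow, here is a self-contained sketch of the lookup performed by verify_vrf_as_import(): a VRF that sets rd/route-target vpn options must not appear in another VRF's import list. Plain dict access stands in for vyos.util.dict_search, and the sample data is invented.

def vrf_is_imported(search_vrf: str, afi: str, vrfs: dict) -> bool:
    # scan every dependent VRF for an 'import vrf <search_vrf>' statement
    for vrf_config in vrfs.values():
        import_list = (vrf_config.get('protocols', {})
                                 .get('bgp', {})
                                 .get('address_family', {})
                                 .get(afi, {})
                                 .get('import', {})
                                 .get('vrf', []))
        if search_vrf in import_list:
            return True
    return False

vrfs = {'blue': {'protocols': {'bgp': {'address_family': {
            'ipv4_unicast': {'import': {'vrf': ['red']}}}}}}}
# 'red' is imported by 'blue', so 'rd vpn export' under vrf 'red' would be rejected
print(vrf_is_imported('red', 'ipv4_unicast', vrfs))   # True
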
diff --git a/src/conf_mode/protocols_failover.py b/src/conf_mode/protocols_failover.py
new file mode 100755
index 000000000..85e984afe
--- /dev/null
+++ b/src/conf_mode/protocols_failover.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+
+from pathlib import Path
+
+from vyos.config import Config
+from vyos.configdict import dict_merge
+from vyos.template import render
+from vyos.util import call
+from vyos.xml import defaults
+from vyos import ConfigError
+from vyos import airbag
+
+airbag.enable()
+
+
+service_name = 'vyos-failover'
+service_conf = Path(f'/run/{service_name}.conf')
+systemd_service = '/run/systemd/system/vyos-failover.service'
+rt_proto_failover = '/etc/iproute2/rt_protos.d/failover.conf'
+
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['protocols', 'failover']
+ failover = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+
+ # Set default values only if routes are configured
+ if failover.get('route'):
+ for route, route_config in failover.get('route').items():
+ for next_hop, next_hop_config in route_config.get('next_hop').items():
+ default_values = defaults(base + ['route'])
+ failover['route'][route]['next_hop'][next_hop] = dict_merge(
+ default_values['next_hop'], failover['route'][route]['next_hop'][next_hop])
+
+ return failover
+
+def verify(failover):
+ # bail out early - looks like removal from running config
+ if not failover:
+ return None
+
+ if 'route' not in failover:
+ raise ConfigError(f'Failover "route" is mandatory!')
+
+ for route, route_config in failover['route'].items():
+ if not route_config.get('next_hop'):
+ raise ConfigError(f'Next-hop for "{route}" is mandatory!')
+
+ for next_hop, next_hop_config in route_config.get('next_hop').items():
+ if 'interface' not in next_hop_config:
+ raise ConfigError(f'Interface for route "{route}" next-hop "{next_hop}" is mandatory!')
+
+ if not next_hop_config.get('check'):
+ raise ConfigError(f'Check target for next-hop "{next_hop}" is mandatory!')
+
+ if 'target' not in next_hop_config['check']:
+ raise ConfigError(f'Check target for next-hop "{next_hop}" is mandatory!')
+
+ check_type = next_hop_config['check']['type']
+ if check_type == 'tcp' and 'port' not in next_hop_config['check']:
+ raise ConfigError(f'Check port for next-hop "{next_hop}" and type TCP is mandatory!')
+
+ return None
+
+def generate(failover):
+ if not failover:
+ service_conf.unlink(missing_ok=True)
+ return None
+
+ # Register our own routing protocol 'failover' so that all routes
+ # installed by this service can be identified via 'proto failover'
+ with open(rt_proto_failover, 'w') as f:
+ f.write('111 failover\n')
+
+ # Write configuration file
+ conf_json = json.dumps(failover, indent=4)
+ service_conf.write_text(conf_json)
+ render(systemd_service, 'protocols/systemd_vyos_failover_service.j2', failover)
+
+ return None
+
+def apply(failover):
+ if not failover:
+ call(f'systemctl stop {service_name}.service')
+ call('ip route flush protocol failover')
+ else:
+ call('systemctl daemon-reload')
+ call(f'systemctl restart {service_name}.service')
+ call(f'ip route flush protocol failover')
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
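
For reference, an illustrative example of the JSON that generate() above writes to /run/vyos-failover.conf, built here from a hand-rolled dict. The route, next-hop and check values are invented; the actual default keys come from the XML definitions, not from this sketch.

import json

failover = {
    'route': {
        '203.0.113.0/24': {
            'next_hop': {
                '192.0.2.1': {
                    'interface': 'eth0',
                    'check': {'type': 'icmp', 'target': '192.0.2.1'},
                }
            }
        }
    }
}
# generate() serialises the merged config dict in exactly this way
print(json.dumps(failover, indent=4))
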
diff --git a/src/conf_mode/protocols_mpls.py b/src/conf_mode/protocols_mpls.py
index 5da8e7b06..73af6595b 100755
--- a/src/conf_mode/protocols_mpls.py
+++ b/src/conf_mode/protocols_mpls.py
@@ -24,6 +24,7 @@ from vyos.template import render_to_string
from vyos.util import dict_search
from vyos.util import read_file
from vyos.util import sysctl_write
+from vyos.configverify import verify_interface_exists
from vyos import ConfigError
from vyos import frr
from vyos import airbag
@@ -46,6 +47,10 @@ def verify(mpls):
if not mpls:
return None
+ if 'interface' in mpls:
+ for interface in mpls['interface']:
+ verify_interface_exists(interface)
+
# Checks to see if LDP is properly configured
if 'ldp' in mpls:
# If router ID not defined
diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py
index ee4eaf59d..ed0a8fba2 100755
--- a/src/conf_mode/protocols_ospfv3.py
+++ b/src/conf_mode/protocols_ospfv3.py
@@ -117,6 +117,10 @@ def verify(ospfv3):
if 'area_type' in area_config:
if len(area_config['area_type']) > 1:
raise ConfigError(f'Can only configure one area-type for OSPFv3 area "{area}"!')
+ if 'range' in area_config:
+ for range, range_config in area_config['range'].items():
+ if {'not_advertise', 'advertise'} <= range_config.keys():
+ raise ConfigError(f'"not-advertise" and "advertise" for "range {range}" cannot be both configured at the same time!')
if 'interface' in ospfv3:
for interface, interface_config in ospfv3['interface'].items():
diff --git a/src/conf_mode/protocols_static.py b/src/conf_mode/protocols_static.py
index 58e202928..3e5ebb805 100755
--- a/src/conf_mode/protocols_static.py
+++ b/src/conf_mode/protocols_static.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2021 VyOS maintainers and contributors
+# Copyright (C) 2021-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -25,12 +25,15 @@ from vyos.configdict import get_dhcp_interfaces
from vyos.configdict import get_pppoe_interfaces
from vyos.configverify import verify_common_route_maps
from vyos.configverify import verify_vrf
+from vyos.template import render
from vyos.template import render_to_string
from vyos import ConfigError
from vyos import frr
from vyos import airbag
airbag.enable()
+config_file = '/etc/iproute2/rt_tables.d/vyos-static.conf'
+
def get_config(config=None):
if config:
conf = config
@@ -94,6 +97,9 @@ def verify(static):
def generate(static):
if not static:
return None
+
+ # Put routing table names in /etc/iproute2/rt_tables
+ render(config_file, 'iproute2/static.conf.j2', static)
static['new_frr_config'] = render_to_string('frr/staticd.frr.j2', static)
return None
diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py
index dbe3be225..dca713283 100755
--- a/src/conf_mode/qos.py
+++ b/src/conf_mode/qos.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2022 VyOS maintainers and contributors
+# Copyright (C) 2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -14,15 +14,62 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import os
+
from sys import exit
+from netifaces import interfaces
+from vyos.base import Warning
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.qos import CAKE
+from vyos.qos import DropTail
+from vyos.qos import FairQueue
+from vyos.qos import FQCodel
+from vyos.qos import Limiter
+from vyos.qos import NetEm
+from vyos.qos import Priority
+from vyos.qos import RandomDetect
+from vyos.qos import RateLimiter
+from vyos.qos import RoundRobin
+from vyos.qos import TrafficShaper
+from vyos.qos import TrafficShaperHFSC
+from vyos.util import call
+from vyos.util import dict_search_recursive
from vyos.xml import defaults
from vyos import ConfigError
from vyos import airbag
airbag.enable()
+map_vyops_tc = {
+ 'cake' : CAKE,
+ 'drop_tail' : DropTail,
+ 'fair_queue' : FairQueue,
+ 'fq_codel' : FQCodel,
+ 'limiter' : Limiter,
+ 'network_emulator' : NetEm,
+ 'priority_queue' : Priority,
+ 'random_detect' : RandomDetect,
+ 'rate_control' : RateLimiter,
+ 'round_robin' : RoundRobin,
+ 'shaper' : TrafficShaper,
+ 'shaper_hfsc' : TrafficShaperHFSC,
+}
+
+def get_shaper(qos, interface_config, direction):
+ policy_name = interface_config[direction]
+ # An interface references a QoS policy by name only - look up which
+ # policy that name belongs to. 'path' will hold the dict path to the
+ # referenced element, which is of the form:
+ #
+ # ['policy', 'drop_tail', 'foo-dtail'] <- we are only interested in
+ # drop_tail as the policy/shaper type
+ _, path = next(dict_search_recursive(qos, policy_name))
+ shaper_type = path[1]
+ shaper_config = qos['policy'][shaper_type][policy_name]
+
+ return (map_vyops_tc[shaper_type], shaper_config)
+
def get_config(config=None):
if config:
conf = config
@@ -32,48 +79,172 @@ def get_config(config=None):
if not conf.exists(base):
return None
- qos = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+ qos = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
if 'policy' in qos:
for policy in qos['policy']:
- # CLI mangles - to _ for better Jinja2 compatibility - do we need
- # Jinja2 here?
- policy = policy.replace('-','_')
+ # when calling defaults() we need to use the real CLI node, thus we
+ # need a hyphen
+ policy_hyphen = policy.replace('_', '-')
- default_values = defaults(base + ['policy', policy])
-
- # class is another tag node which requires individual handling
- class_default_values = defaults(base + ['policy', policy, 'class'])
- if 'class' in default_values:
- del default_values['class']
+ if policy in ['random_detect']:
+ for rd_name, rd_config in qos['policy'][policy].items():
+ # There are eight precedence levels - ensure all are present
+ # to be filled later down with the appropriate default values
+ default_precedence = {'precedence' : { '0' : {}, '1' : {}, '2' : {}, '3' : {},
+ '4' : {}, '5' : {}, '6' : {}, '7' : {} }}
+ qos['policy']['random_detect'][rd_name] = dict_merge(
+ default_precedence, qos['policy']['random_detect'][rd_name])
for p_name, p_config in qos['policy'][policy].items():
+ default_values = defaults(base + ['policy', policy_hyphen])
+
+ if policy in ['priority_queue']:
+ if 'default' not in p_config:
+ raise ConfigError(f'QoS policy {p_name} is missing the "default" class!')
+
+ # XXX: T2665: we can not safely rely on the defaults() when there are
+ # tagNodes in place, it is better to blend in the defaults manually.
+ if 'class' in default_values:
+ del default_values['class']
+ if 'precedence' in default_values:
+ del default_values['precedence']
+
qos['policy'][policy][p_name] = dict_merge(
default_values, qos['policy'][policy][p_name])
+ # class is another tag node which requires individual handling
if 'class' in p_config:
+ default_values = defaults(base + ['policy', policy_hyphen, 'class'])
for p_class in p_config['class']:
qos['policy'][policy][p_name]['class'][p_class] = dict_merge(
- class_default_values, qos['policy'][policy][p_name]['class'][p_class])
+ default_values, qos['policy'][policy][p_name]['class'][p_class])
+
+ if 'precedence' in p_config:
+ default_values = defaults(base + ['policy', policy_hyphen, 'precedence'])
+ # precedence values are a bit more complex as they are calculated
+ # under specific circumstances - thus we need to iterate two times.
+ # first blend in the defaults from XML / CLI
+ for precedence in p_config['precedence']:
+ qos['policy'][policy][p_name]['precedence'][precedence] = dict_merge(
+ default_values, qos['policy'][policy][p_name]['precedence'][precedence])
+ # second calculate defaults based on actual dictionary
+ for precedence in p_config['precedence']:
+ max_thr = int(qos['policy'][policy][p_name]['precedence'][precedence]['maximum_threshold'])
+ if 'minimum_threshold' not in qos['policy'][policy][p_name]['precedence'][precedence]:
+ qos['policy'][policy][p_name]['precedence'][precedence]['minimum_threshold'] = str(
+ int((9 + int(precedence)) * max_thr) // 18);
+
+ if 'queue_limit' not in qos['policy'][policy][p_name]['precedence'][precedence]:
+ qos['policy'][policy][p_name]['precedence'][precedence]['queue_limit'] = \
+ str(int(4 * max_thr))
- import pprint
- pprint.pprint(qos)
return qos
def verify(qos):
- if not qos:
+ if not qos or 'interface' not in qos:
return None
# network policy emulator
# reorder requires delay to be set
+ if 'policy' in qos:
+ for policy_type in qos['policy']:
+ for policy, policy_config in qos['policy'][policy_type].items():
+ # a policy with a given name is only allowed to exist once
+ # on the system. This is because an interface selects a policy
+ # for ingress/egress traffic, and thus there can only be one
+ # policy with a given name.
+ #
+ # We check if the policy name occurs more than once - error out
+ # if it does
+ counter = 0
+ for _, path in dict_search_recursive(qos['policy'], policy):
+ counter += 1
+ if counter > 1:
+ raise ConfigError(f'Conflicting policy name "{policy}", already in use!')
+
+ if 'class' in policy_config:
+ for cls, cls_config in policy_config['class'].items():
+ # bandwidth is not mandatory for priority-queue - that is why this is on the exception list
+ if 'bandwidth' not in cls_config and policy_type not in ['priority_queue', 'round_robin']:
+ raise ConfigError(f'Bandwidth must be defined for policy "{policy}" class "{cls}"!')
+ if 'match' in cls_config:
+ for match, match_config in cls_config['match'].items():
+ if {'ip', 'ipv6'} <= set(match_config):
+ raise ConfigError(f'Can not use both IPv6 and IPv4 in one match ({match})!')
+
+ if policy_type in ['random_detect']:
+ if 'precedence' in policy_config:
+ for precedence, precedence_config in policy_config['precedence'].items():
+ max_tr = int(precedence_config['maximum_threshold'])
+ if {'maximum_threshold', 'minimum_threshold'} <= set(precedence_config):
+ min_tr = int(precedence_config['minimum_threshold'])
+ if min_tr >= max_tr:
+ raise ConfigError(f'Policy "{policy}" uses min-threshold "{min_tr}" >= max-threshold "{max_tr}"!')
+
+ if {'maximum_threshold', 'queue_limit'} <= set(precedence_config):
+ queue_lim = int(precedence_config['queue_limit'])
+ if queue_lim < max_tr:
+ raise ConfigError(f'Policy "{policy}" uses queue-limit "{queue_lim}" < max-threshold "{max_tr}"!')
+
+ if 'default' in policy_config:
+ if 'bandwidth' not in policy_config['default'] and policy_type not in ['priority_queue', 'round_robin']:
+ raise ConfigError('Bandwidth not defined for default traffic!')
+
+ # we should check interface ingress/egress configuration after verifying that
+ # the policy name is used only once - this makes the logic easier!
+ for interface, interface_config in qos['interface'].items():
+ for direction in ['egress', 'ingress']:
+ # bail out early if shaper for given direction is not used at all
+ if direction not in interface_config:
+ continue
+
+ policy_name = interface_config[direction]
+ if 'policy' not in qos or list(dict_search_recursive(qos['policy'], policy_name)) == []:
+ raise ConfigError(f'Selected QoS policy "{policy_name}" does not exist!')
+
+ shaper_type, shaper_config = get_shaper(qos, interface_config, direction)
+ tmp = shaper_type(interface).get_direction()
+ if direction not in tmp:
+ raise ConfigError(f'Selected QoS policy on interface "{interface}" only supports "{tmp}"!')
- raise ConfigError('123')
return None
def generate(qos):
+ if not qos or 'interface' not in qos:
+ return None
+
return None
def apply(qos):
+ # Always delete "old" shapers first
+ for interface in interfaces():
+ # Ignore errors (may have no qdisc)
+ call(f'tc qdisc del dev {interface} parent ffff:')
+ call(f'tc qdisc del dev {interface} root')
+
+ if not qos or 'interface' not in qos:
+ return None
+
+ for interface, interface_config in qos['interface'].items():
+ if not os.path.exists(f'/sys/class/net/{interface}'):
+ # When shaper is bound to a dialup (e.g. PPPoE) interface it is
+ # possible that it is not yet available when the QoS code runs.
+ # Skip the configuration and inform the user
+ Warning(f'Interface "{interface}" does not exist!')
+ continue
+
+ for direction in ['egress', 'ingress']:
+ # bail out early if shaper for given direction is not used at all
+ if direction not in interface_config:
+ continue
+
+ shaper_type, shaper_config = get_shaper(qos, interface_config, direction)
+ tmp = shaper_type(interface)
+ tmp.update(shaper_config, direction)
+
return None
if __name__ == '__main__':
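
A toy version of the policy-name resolution done by get_shaper() above: an interface only stores the policy name, so the shaper type has to be recovered by searching the policy subtree. The loop below replaces vyos.util.dict_search_recursive for the sake of a self-contained example; the sample config is invented.

def find_policy_type(qos: dict, policy_name: str) -> str:
    # walk 'policy' -> <shaper type> -> <policy name> and return the type
    for shaper_type, policies in qos.get('policy', {}).items():
        if policy_name in policies:
            return shaper_type
    raise KeyError(f'QoS policy "{policy_name}" does not exist')

qos = {'policy': {'drop_tail': {'foo-dtail': {'queue_limit': '10'}}},
       'interface': {'eth0': {'egress': 'foo-dtail'}}}
print(find_policy_type(qos, qos['interface']['eth0']['egress']))   # drop_tail
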
diff --git a/src/conf_mode/service_console-server.py b/src/conf_mode/service_console-server.py
index ee4fe42ab..60eff6543 100755
--- a/src/conf_mode/service_console-server.py
+++ b/src/conf_mode/service_console-server.py
@@ -27,7 +27,7 @@ from vyos.xml import defaults
from vyos import ConfigError
config_file = '/run/conserver/conserver.cf'
-dropbear_systemd_file = '/etc/systemd/system/dropbear@{port}.service.d/override.conf'
+dropbear_systemd_file = '/run/systemd/system/dropbear@{port}.service.d/override.conf'
def get_config(config=None):
if config:
diff --git a/src/conf_mode/service_monitoring_telegraf.py b/src/conf_mode/service_monitoring_telegraf.py
index aafece47a..363408679 100755
--- a/src/conf_mode/service_monitoring_telegraf.py
+++ b/src/conf_mode/service_monitoring_telegraf.py
@@ -38,7 +38,7 @@ cache_dir = f'/etc/telegraf/.cache'
config_telegraf = f'/run/telegraf/telegraf.conf'
custom_scripts_dir = '/etc/telegraf/custom_scripts'
syslog_telegraf = '/etc/rsyslog.d/50-telegraf.conf'
-systemd_override = '/etc/systemd/system/telegraf.service.d/10-override.conf'
+systemd_override = '/run/systemd/system/telegraf.service.d/10-override.conf'
def get_nft_filter_chains():
""" Get nft chains for table filter """
diff --git a/src/conf_mode/service_pppoe-server.py b/src/conf_mode/service_pppoe-server.py
index ba0249efd..600ba4e92 100755
--- a/src/conf_mode/service_pppoe-server.py
+++ b/src/conf_mode/service_pppoe-server.py
@@ -20,6 +20,7 @@ from sys import exit
from vyos.config import Config
from vyos.configdict import get_accel_dict
+from vyos.configdict import is_node_changed
from vyos.configverify import verify_accel_ppp_base_service
from vyos.configverify import verify_interface_exists
from vyos.template import render
@@ -43,6 +44,13 @@ def get_config(config=None):
# retrieve common dictionary keys
pppoe = get_accel_dict(conf, base, pppoe_chap_secrets)
+
+ # reload-or-restart is not implemented in accel-ppp;
+ # use this workaround until it is implemented
+ # https://phabricator.accel-ppp.org/T3
+ if is_node_changed(conf, base + ['client-ip-pool']) or is_node_changed(
+ conf, base + ['client-ipv6-pool']):
+ pppoe.update({'restart_required': {}})
return pppoe
def verify(pppoe):
@@ -95,7 +103,10 @@ def apply(pppoe):
os.unlink(file)
return None
- call(f'systemctl reload-or-restart {systemd_service}')
+ if 'restart_required' in pppoe:
+ call(f'systemctl restart {systemd_service}')
+ else:
+ call(f'systemctl reload-or-restart {systemd_service}')
if __name__ == '__main__':
try:
diff --git a/src/conf_mode/service_sla.py b/src/conf_mode/service_sla.py
index e7c3ca59c..b1e22f37b 100755
--- a/src/conf_mode/service_sla.py
+++ b/src/conf_mode/service_sla.py
@@ -27,15 +27,13 @@ from vyos import ConfigError
from vyos import airbag
airbag.enable()
-
owamp_config_dir = '/etc/owamp-server'
owamp_config_file = f'{owamp_config_dir}/owamp-server.conf'
-systemd_override_owamp = r'/etc/systemd/system/owamp-server.d/20-override.conf'
+systemd_override_owamp = r'/run/systemd/system/owamp-server.d/20-override.conf'
twamp_config_dir = '/etc/twamp-server'
twamp_config_file = f'{twamp_config_dir}/twamp-server.conf'
-systemd_override_twamp = r'/etc/systemd/system/twamp-server.d/20-override.conf'
-
+systemd_override_twamp = r'/run/systemd/system/twamp-server.d/20-override.conf'
def get_config(config=None):
if config:
diff --git a/src/conf_mode/service_webproxy.py b/src/conf_mode/service_webproxy.py
index 32af31bde..658e496a6 100755
--- a/src/conf_mode/service_webproxy.py
+++ b/src/conf_mode/service_webproxy.py
@@ -28,8 +28,10 @@ from vyos.util import dict_search
from vyos.util import write_file
from vyos.validate import is_addr_assigned
from vyos.xml import defaults
+from vyos.base import Warning
from vyos import ConfigError
from vyos import airbag
+
airbag.enable()
squid_config_file = '/etc/squid/squid.conf'
@@ -37,24 +39,57 @@ squidguard_config_file = '/etc/squidguard/squidGuard.conf'
squidguard_db_dir = '/opt/vyatta/etc/config/url-filtering/squidguard/db'
user_group = 'proxy'
-def generate_sg_localdb(category, list_type, role, proxy):
+
+def check_blacklist_categorydb(config_section):
+ if 'block_category' in config_section:
+ for category in config_section['block_category']:
+ check_categorydb(category)
+ if 'allow_category' in config_section:
+ for category in config_section['allow_category']:
+ check_categorydb(category)
+
+
+def check_categorydb(category: str):
+ """
+ Check if the category's database exists
+ :param category: category name
+ :type category: str
+ """
+ path_to_cat: str = f'{squidguard_db_dir}/{category}'
+ if not os.path.exists(f'{path_to_cat}/domains.db') \
+ and not os.path.exists(f'{path_to_cat}/urls.db') \
+ and not os.path.exists(f'{path_to_cat}/expressions.db'):
+ Warning(f'DB of category {category} does not exist.\n '
+ f'Use [update webproxy blacklists] '
+ f'or delete undefined category!')
+
+
+def generate_sg_rule_localdb(category, list_type, role, proxy):
+ if not category or not list_type or not role:
+ return None
+
cat_ = category.replace('-', '_')
- if isinstance(dict_search(f'url_filtering.squidguard.{cat_}', proxy),
- list):
+ if role == 'default':
+ path_to_cat = f'{cat_}'
+ else:
+ path_to_cat = f'rule.{role}.{cat_}'
+ if isinstance(
+ dict_search(f'url_filtering.squidguard.{path_to_cat}', proxy),
+ list):
# local block databases must be generated "on-the-fly"
tmp = {
- 'squidguard_db_dir' : squidguard_db_dir,
- 'category' : f'{category}-default',
- 'list_type' : list_type,
- 'rule' : role
+ 'squidguard_db_dir': squidguard_db_dir,
+ 'category': f'{category}-{role}',
+ 'list_type': list_type,
+ 'rule': role
}
sg_tmp_file = '/tmp/sg.conf'
- db_file = f'{category}-default/{list_type}'
- domains = '\n'.join(dict_search(f'url_filtering.squidguard.{cat_}', proxy))
-
+ db_file = f'{category}-{role}/{list_type}'
+ domains = '\n'.join(
+ dict_search(f'url_filtering.squidguard.{path_to_cat}', proxy))
# local file
- write_file(f'{squidguard_db_dir}/{category}-default/local', '',
+ write_file(f'{squidguard_db_dir}/{category}-{role}/local', '',
user=user_group, group=user_group)
# database input file
write_file(f'{squidguard_db_dir}/{db_file}', domains,
@@ -64,17 +99,18 @@ def generate_sg_localdb(category, list_type, role, proxy):
render(sg_tmp_file, 'squid/sg_acl.conf.j2', tmp,
user=user_group, group=user_group)
- call(f'su - {user_group} -c "squidGuard -d -c {sg_tmp_file} -C {db_file}"')
+ call(
+ f'su - {user_group} -c "squidGuard -d -c {sg_tmp_file} -C {db_file}"')
if os.path.exists(sg_tmp_file):
os.unlink(sg_tmp_file)
-
else:
# if category is not part of our configuration, clean out the
# squidguard lists
- tmp = f'{squidguard_db_dir}/{category}-default'
+ tmp = f'{squidguard_db_dir}/{category}-{role}'
if os.path.exists(tmp):
- rmtree(f'{squidguard_db_dir}/{category}-default')
+ rmtree(f'{squidguard_db_dir}/{category}-{role}')
+
def get_config(config=None):
if config:
@@ -85,7 +121,8 @@ def get_config(config=None):
if not conf.exists(base):
return None
- proxy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+ proxy = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True)
# We have gathered the dict representation of the CLI, but there are default
# options which we need to update into the dictionary retrived.
default_values = defaults(base)
@@ -110,10 +147,11 @@ def get_config(config=None):
default_values = defaults(base + ['cache-peer'])
for peer in proxy['cache_peer']:
proxy['cache_peer'][peer] = dict_merge(default_values,
- proxy['cache_peer'][peer])
+ proxy['cache_peer'][peer])
return proxy
+
def verify(proxy):
if not proxy:
return None
@@ -170,17 +208,30 @@ def generate(proxy):
render(squidguard_config_file, 'squid/squidGuard.conf.j2', proxy)
cat_dict = {
- 'local-block' : 'domains',
- 'local-block-keyword' : 'expressions',
- 'local-block-url' : 'urls',
- 'local-ok' : 'domains',
- 'local-ok-url' : 'urls'
+ 'local-block': 'domains',
+ 'local-block-keyword': 'expressions',
+ 'local-block-url': 'urls',
+ 'local-ok': 'domains',
+ 'local-ok-url': 'urls'
}
- for category, list_type in cat_dict.items():
- generate_sg_localdb(category, list_type, 'default', proxy)
+ if dict_search(f'url_filtering.squidguard', proxy) is not None:
+ squidgard_config_section = proxy['url_filtering']['squidguard']
+
+ for category, list_type in cat_dict.items():
+ generate_sg_rule_localdb(category, list_type, 'default', proxy)
+ check_blacklist_categorydb(squidgard_config_section)
+
+ if 'rule' in squidgard_config_section:
+ for rule in squidgard_config_section['rule']:
+ rule_config_section = squidgard_config_section['rule'][
+ rule]
+ for category, list_type in cat_dict.items():
+ generate_sg_rule_localdb(category, list_type, rule, proxy)
+ check_blacklist_categorydb(rule_config_section)
return None
+
def apply(proxy):
if not proxy:
# proxy is removed in the commit
@@ -195,9 +246,10 @@ def apply(proxy):
if os.path.exists(squidguard_db_dir):
chmod_755(squidguard_db_dir)
- call('systemctl restart squid.service')
+ call('systemctl reload-or-restart squid.service')
return None
+
if __name__ == '__main__':
try:
c = get_config()
diff --git a/src/conf_mode/snmp.py b/src/conf_mode/snmp.py
index 5cd24db32..ab2ccf99e 100755
--- a/src/conf_mode/snmp.py
+++ b/src/conf_mode/snmp.py
@@ -40,7 +40,7 @@ config_file_client = r'/etc/snmp/snmp.conf'
config_file_daemon = r'/etc/snmp/snmpd.conf'
config_file_access = r'/usr/share/snmp/snmpd.conf'
config_file_user = r'/var/lib/snmp/snmpd.conf'
-systemd_override = r'/etc/systemd/system/snmpd.service.d/override.conf'
+systemd_override = r'/run/systemd/system/snmpd.service.d/override.conf'
systemd_service = 'snmpd.service'
def get_config(config=None):
@@ -166,6 +166,10 @@ def verify(snmp):
if 'community' not in trap_config:
raise ConfigError(f'Trap target "{trap}" requires a community to be set!')
+ if 'oid_enable' in snmp:
+ Warning(f'Custom OIDs are enabled and may lead to system instability and high resource consumption')
+
+
verify_vrf(snmp)
# bail out early if SNMP v3 is not configured
diff --git a/src/conf_mode/ssh.py b/src/conf_mode/ssh.py
index 8746cc701..8de0617af 100755
--- a/src/conf_mode/ssh.py
+++ b/src/conf_mode/ssh.py
@@ -32,7 +32,7 @@ from vyos import airbag
airbag.enable()
config_file = r'/run/sshd/sshd_config'
-systemd_override = r'/etc/systemd/system/ssh.service.d/override.conf'
+systemd_override = r'/run/systemd/system/ssh.service.d/override.conf'
sshguard_config_file = '/etc/sshguard/sshguard.conf'
sshguard_whitelist = '/etc/sshguard/whitelist'
diff --git a/src/conf_mode/system-option.py b/src/conf_mode/system-option.py
index 36dbf155b..e6c7a0ed2 100755
--- a/src/conf_mode/system-option.py
+++ b/src/conf_mode/system-option.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2020 VyOS maintainers and contributors
+# Copyright (C) 2019-2022 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -22,17 +22,19 @@ from time import sleep
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configverify import verify_source_interface
from vyos.template import render
from vyos.util import cmd
from vyos.util import is_systemd_service_running
from vyos.validate import is_addr_assigned
+from vyos.validate import is_intf_addr_assigned
from vyos.xml import defaults
from vyos import ConfigError
from vyos import airbag
airbag.enable()
curlrc_config = r'/etc/curlrc'
-ssh_config = r'/etc/ssh/ssh_config'
+ssh_config = r'/etc/ssh/ssh_config.d/91-vyos-ssh-client-options.conf'
systemd_action_file = '/lib/systemd/system/ctrl-alt-del.target'
def get_config(config=None):
@@ -68,8 +70,17 @@ def verify(options):
if 'ssh_client' in options:
config = options['ssh_client']
if 'source_address' in config:
+ address = config['source_address']
if not is_addr_assigned(config['source_address']):
- raise ConfigError('No interface with give address specified!')
+ raise ConfigError(f'No interface with address "{address}" configured!')
+
+ if 'source_interface' in config:
+ verify_source_interface(config)
+ if 'source_address' in config:
+ address = config['source_address']
+ interface = config['source_interface']
+ if not is_intf_addr_assigned(interface, address):
+ raise ConfigError(f'Address "{address}" not assigned on interface "{interface}"!')
return None
diff --git a/src/conf_mode/vpn_ipsec.py b/src/conf_mode/vpn_ipsec.py
index 77a425f8b..ce4f13d27 100755
--- a/src/conf_mode/vpn_ipsec.py
+++ b/src/conf_mode/vpn_ipsec.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2021-2022 VyOS maintainers and contributors
+# Copyright (C) 2021-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -17,11 +17,13 @@
import ipaddress
import os
import re
+import jmespath
from sys import exit
from time import sleep
from time import time
+from vyos.base import Warning
from vyos.config import Config
from vyos.configdict import leaf_node_changed
from vyos.configverify import verify_interface_exists
@@ -94,6 +96,7 @@ def get_config(config=None):
del default_values['esp_group']
del default_values['ike_group']
del default_values['remote_access']
+ del default_values['site_to_site']
ipsec = dict_merge(default_values, ipsec)
if 'esp_group' in ipsec:
@@ -117,18 +120,39 @@ def get_config(config=None):
ipsec['ike_group'][group]['proposal'][proposal] = dict_merge(default_values,
ipsec['ike_group'][group]['proposal'][proposal])
- if 'remote_access' in ipsec and 'connection' in ipsec['remote_access']:
+ # XXX: T2665: we can not safely rely on the defaults() when there are
+ # tagNodes in place, it is better to blend in the defaults manually.
+ if dict_search('remote_access.connection', ipsec):
default_values = defaults(base + ['remote-access', 'connection'])
for rw in ipsec['remote_access']['connection']:
ipsec['remote_access']['connection'][rw] = dict_merge(default_values,
ipsec['remote_access']['connection'][rw])
- if 'remote_access' in ipsec and 'radius' in ipsec['remote_access'] and 'server' in ipsec['remote_access']['radius']:
+ # XXX: T2665: we can not safely rely on the defaults() when there are
+ # tagNodes in place, it is better to blend in the defaults manually.
+ if dict_search('remote_access.radius.server', ipsec):
+ # First handle the "base" stuff like RADIUS timeout
+ default_values = defaults(base + ['remote-access', 'radius'])
+ if 'server' in default_values:
+ del default_values['server']
+ ipsec['remote_access']['radius'] = dict_merge(default_values,
+ ipsec['remote_access']['radius'])
+
+ # Take care about individual RADIUS servers implemented as tagNodes - this
+ # requires special treatment
default_values = defaults(base + ['remote-access', 'radius', 'server'])
for server in ipsec['remote_access']['radius']['server']:
ipsec['remote_access']['radius']['server'][server] = dict_merge(default_values,
ipsec['remote_access']['radius']['server'][server])
+ # XXX: T2665: we can not safely rely on the defaults() when there are
+ # tagNodes in place, it is better to blend in the defaults manually.
+ if dict_search('site_to_site.peer', ipsec):
+ default_values = defaults(base + ['site-to-site', 'peer'])
+ for peer in ipsec['site_to_site']['peer']:
+ ipsec['site_to_site']['peer'][peer] = dict_merge(default_values,
+ ipsec['site_to_site']['peer'][peer])
+
ipsec['dhcp_no_address'] = {}
ipsec['install_routes'] = 'no' if conf.exists(base + ["options", "disable-route-autoinstall"]) else default_install_routes
ipsec['interface_change'] = leaf_node_changed(conf, base + ['interface'])
@@ -196,6 +220,12 @@ def verify(ipsec):
if not ipsec:
return None
+ if 'authentication' in ipsec:
+ if 'psk' in ipsec['authentication']:
+ for psk, psk_config in ipsec['authentication']['psk'].items():
+ if 'id' not in psk_config or 'secret' not in psk_config:
+ raise ConfigError(f'Authentication psk "{psk}" missing "id" or "secret"')
+
if 'interfaces' in ipsec :
for ifname in ipsec['interface']:
verify_interface_exists(ifname)
@@ -425,6 +455,10 @@ def verify(ipsec):
if 'local_address' in peer_conf and 'dhcp_interface' in peer_conf:
raise ConfigError(f"A single local-address or dhcp-interface is required when using VTI on site-to-site peer {peer}")
+ if dict_search('options.disable_route_autoinstall',
+ ipsec) == None:
+ Warning('It\'s recommended to use IPsec VTI with the following command:\n[set vpn ipsec options disable-route-autoinstall]')
+
if 'bind' in peer_conf['vti']:
vti_interface = peer_conf['vti']['bind']
if not os.path.exists(f'/sys/class/net/{vti_interface}'):
@@ -575,6 +609,14 @@ def generate(ipsec):
ipsec['site_to_site']['peer'][peer]['tunnel'][tunnel]['passthrough'] = passthrough
+ # auth psk <tag> dhcp-interface <xxx>
+ if jmespath.search('authentication.psk.*.dhcp_interface', ipsec):
+ for psk, psk_config in ipsec['authentication']['psk'].items():
+ if 'dhcp_interface' in psk_config:
+ for iface in psk_config['dhcp_interface']:
+ id = get_dhcp_address(iface)
+ if id:
+ ipsec['authentication']['psk'][psk]['id'].append(id)
render(ipsec_conf, 'ipsec/ipsec.conf.j2', ipsec)
render(ipsec_secrets, 'ipsec/ipsec.secrets.j2', ipsec)
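
The "XXX: T2665" blocks above all follow the same pattern: remove the tagNode subtree from the global defaults, then merge the remaining defaults into every tag instance individually. A minimal sketch of that pattern, with a simplified stand-in for vyos.configdict.dict_merge and invented default/config values:

def dict_merge(defaults: dict, config: dict) -> dict:
    # simplified recursive merge - config values win over defaults
    out = dict(defaults)
    for key, value in config.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = dict_merge(out[key], value)
        else:
            out[key] = value
    return out

defaults_radius = {'timeout': '10', 'server': {'port': '1812'}}   # hypothetical defaults
config = {'server': {'10.0.0.1': {'key': 'secret'},
                     '10.0.0.2': {'port': '1645'}}}

server_defaults = defaults_radius.pop('server')     # tagNode defaults handled separately
merged = dict_merge(defaults_radius, config)        # blend in the non-tagNode defaults
for server in merged['server']:
    merged['server'][server] = dict_merge(server_defaults, merged['server'][server])
print(merged)
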
diff --git a/src/conf_mode/vpn_l2tp.py b/src/conf_mode/vpn_l2tp.py
index fd5a4acd8..65623c2b1 100755
--- a/src/conf_mode/vpn_l2tp.py
+++ b/src/conf_mode/vpn_l2tp.py
@@ -26,7 +26,10 @@ from ipaddress import ip_network
from vyos.config import Config
from vyos.template import is_ipv4
from vyos.template import render
-from vyos.util import call, get_half_cpus
+from vyos.util import call
+from vyos.util import get_half_cpus
+from vyos.util import check_port_availability
+from vyos.util import is_listen_port_bind_service
from vyos import ConfigError
from vyos import airbag
@@ -43,6 +46,7 @@ default_config_data = {
'client_ip_pool': None,
'client_ip_subnets': [],
'client_ipv6_pool': [],
+ 'client_ipv6_pool_configured': False,
'client_ipv6_delegate_prefix': [],
'dnsv4': [],
'dnsv6': [],
@@ -54,6 +58,9 @@ default_config_data = {
'ppp_echo_failure' : '3',
'ppp_echo_interval' : '30',
'ppp_echo_timeout': '0',
+ 'ppp_ipv6_accept_peer_intf_id': False,
+ 'ppp_ipv6_intf_id': None,
+ 'ppp_ipv6_peer_intf_id': None,
'radius_server': [],
'radius_acct_inter_jitter': '',
'radius_acct_tmo': '3',
@@ -64,7 +71,7 @@ default_config_data = {
'radius_source_address': '',
'radius_shaper_attr': '',
'radius_shaper_vendor': '',
- 'radius_dynamic_author': '',
+ 'radius_dynamic_author': {},
'wins': [],
'ip6_column': [],
'thread_cnt': get_half_cpus()
@@ -205,21 +212,21 @@ def get_config(config=None):
l2tp['radius_source_address'] = conf.return_value(['source-address'])
# Dynamic Authorization Extensions (DOA)/Change Of Authentication (COA)
- if conf.exists(['dynamic-author']):
+ if conf.exists(['dae-server']):
dae = {
'port' : '',
'server' : '',
'key' : ''
}
- if conf.exists(['dynamic-author', 'server']):
- dae['server'] = conf.return_value(['dynamic-author', 'server'])
+ if conf.exists(['dae-server', 'ip-address']):
+ dae['server'] = conf.return_value(['dae-server', 'ip-address'])
- if conf.exists(['dynamic-author', 'port']):
- dae['port'] = conf.return_value(['dynamic-author', 'port'])
+ if conf.exists(['dae-server', 'port']):
+ dae['port'] = conf.return_value(['dae-server', 'port'])
- if conf.exists(['dynamic-author', 'key']):
- dae['key'] = conf.return_value(['dynamic-author', 'key'])
+ if conf.exists(['dae-server', 'secret']):
+ dae['key'] = conf.return_value(['dae-server', 'secret'])
l2tp['radius_dynamic_author'] = dae
@@ -244,6 +251,7 @@ def get_config(config=None):
l2tp['client_ip_subnets'] = conf.return_values(['client-ip-pool', 'subnet'])
if conf.exists(['client-ipv6-pool', 'prefix']):
+ l2tp['client_ipv6_pool_configured'] = True
l2tp['ip6_column'].append('ip6')
for prefix in conf.list_nodes(['client-ipv6-pool', 'prefix']):
tmp = {
@@ -306,6 +314,18 @@ def get_config(config=None):
if conf.exists(['ppp-options', 'lcp-echo-interval']):
l2tp['ppp_echo_interval'] = conf.return_value(['ppp-options', 'lcp-echo-interval'])
+ if conf.exists(['ppp-options', 'ipv6']):
+ l2tp['ppp_ipv6'] = conf.return_value(['ppp-options', 'ipv6'])
+
+ if conf.exists(['ppp-options', 'ipv6-accept-peer-intf-id']):
+ l2tp['ppp_ipv6_accept_peer_intf_id'] = True
+
+ if conf.exists(['ppp-options', 'ipv6-intf-id']):
+ l2tp['ppp_ipv6_intf_id'] = conf.return_value(['ppp-options', 'ipv6-intf-id'])
+
+ if conf.exists(['ppp-options', 'ipv6-peer-intf-id']):
+ l2tp['ppp_ipv6_peer_intf_id'] = conf.return_value(['ppp-options', 'ipv6-peer-intf-id'])
+
return l2tp
@@ -329,6 +349,19 @@ def verify(l2tp):
if not radius['key']:
raise ConfigError(f"Missing RADIUS secret for server { radius['key'] }")
+ if l2tp['radius_dynamic_author']:
+ if not l2tp['radius_dynamic_author']['server']:
+ raise ConfigError("Missing ip-address for dae-server")
+ if not l2tp['radius_dynamic_author']['key']:
+ raise ConfigError("Missing secret for dae-server")
+ address = l2tp['radius_dynamic_author']['server']
+ port = l2tp['radius_dynamic_author']['port']
+ proto = 'tcp'
+ # check if dae listen port is not used by another service
+ if check_port_availability(address, int(port), proto) is not True and \
+ not is_listen_port_bind_service(int(port), 'accel-pppd'):
+ raise ConfigError(f'"{proto}" port "{port}" is used by another service')
+
# check for the existence of a client ip pool
if not (l2tp['client_ip_pool'] or l2tp['client_ip_subnets']):
raise ConfigError(
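
A rough, standard-library-only equivalent of the DAE listen-port check added above. vyos.util.check_port_availability and is_listen_port_bind_service are not reimplemented exactly here (the real helpers also consider which service owns the port), so this is only a sketch of the idea.

import socket

def port_is_free(address: str, port: int, proto: str = 'tcp') -> bool:
    # try to bind the socket - if that fails, another service already uses the port
    kind = socket.SOCK_STREAM if proto == 'tcp' else socket.SOCK_DGRAM
    with socket.socket(socket.AF_INET, kind) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sock.bind((address, port))
            return True
        except OSError:
            return False

print(port_is_free('127.0.0.1', 3799))   # 3799 is the usual RADIUS DAE/CoA port
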
diff --git a/src/conf_mode/vpn_openconnect.py b/src/conf_mode/vpn_openconnect.py
index c050b796b..63ffe2a41 100755
--- a/src/conf_mode/vpn_openconnect.py
+++ b/src/conf_mode/vpn_openconnect.py
@@ -46,6 +46,65 @@ radius_servers = cfg_dir + '/radius_servers'
def get_hash(password):
return crypt(password, mksalt(METHOD_SHA512))
+
+def T2665_default_dict_cleanup(origin: dict, default_values: dict) -> dict:
+ """
+ https://phabricator.vyos.net/T2665
+ Clear unnecessary keys, introduced by the dict_merge function, from the merged config
+ :param origin: config
+ :type origin: dict
+ :param default_values: default values
+ :type default_values: dict
+ :return: merged dict
+ :rtype: dict
+ """
+ if 'mode' in origin["authentication"] and "local" in \
+ origin["authentication"]["mode"]:
+ del origin['authentication']['local_users']['username']['otp']
+ if not origin["authentication"]["local_users"]["username"]:
+ raise ConfigError(
+ 'OpenConnect local authentication mode requires at least one user')
+ default_ocserv_usr_values = \
+ default_values['authentication']['local_users']['username']['otp']
+ for user, params in origin['authentication']['local_users'][
+ 'username'].items():
+ # Not every configuration requires OTP settings
+ if origin['authentication']['local_users']['username'][user].get(
+ 'otp'):
+ origin['authentication']['local_users']['username'][user][
+ 'otp'] = dict_merge(default_ocserv_usr_values,
+ origin['authentication'][
+ 'local_users']['username'][user][
+ 'otp'])
+
+ if 'mode' in origin["authentication"] and "radius" in \
+ origin["authentication"]["mode"]:
+ del origin['authentication']['radius']['server']['port']
+ if not origin["authentication"]['radius']['server']:
+ raise ConfigError(
+ 'OpenConnect RADIUS authentication mode requires at least one RADIUS server')
+ default_values_radius_port = \
+ default_values['authentication']['radius']['server']['port']
+ for server, params in origin['authentication']['radius'][
+ 'server'].items():
+ if 'port' not in params:
+ params['port'] = default_values_radius_port
+
+ if 'mode' in origin["accounting"] and "radius" in \
+ origin["accounting"]["mode"]:
+ del origin['accounting']['radius']['server']['port']
+ if not origin["accounting"]['radius']['server']:
+ raise ConfigError(
+ 'OpenConnect RADIUS accounting mode requires at least one RADIUS server')
+ default_values_radius_port = \
+ default_values['accounting']['radius']['server']['port']
+ for server, params in origin['accounting']['radius'][
+ 'server'].items():
+ if 'port' not in params:
+ params['port'] = default_values_radius_port
+ return origin
+
+
def get_config():
conf = Config()
base = ['vpn', 'openconnect']
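A minimal sketch of the T2665 behaviour the new helper works around, with made-up example values and a simplified stand-in for the dict_merge used by this script: defaults for a tag node come back keyed by a placeholder child (here, the RADIUS server 'port'), so a naive merge leaves that placeholder sitting next to the real, user-configured servers.

# Illustrative sketch only.
defaults = {'authentication': {'radius': {'server': {'port': '1812'}}}}
config   = {'authentication': {'radius': {'server': {'192.0.2.1': {'key': 's3cret'}}}}}

def dict_merge(base: dict, overlay: dict) -> dict:
    """Recursive merge where the overlay wins -- same idea as the real dict_merge."""
    out = dict(base)
    for key, value in overlay.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = dict_merge(out[key], value)
        else:
            out[key] = value
    return out

merged = dict_merge(defaults, config)
print(merged['authentication']['radius']['server'])
# {'port': '1812', '192.0.2.1': {'key': 's3cret'}}
# The bogus top-level 'port' entry is what T2665_default_dict_cleanup()
# deletes before re-applying the default port to each configured server.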
@@ -57,18 +116,8 @@ def get_config():
# options which we need to update into the dictionary retrieved.
default_values = defaults(base)
ocserv = dict_merge(default_values, ocserv)
-
- if "local" in ocserv["authentication"]["mode"]:
- # workaround a "know limitation" - https://phabricator.vyos.net/T2665
- del ocserv['authentication']['local_users']['username']['otp']
- if not ocserv["authentication"]["local_users"]["username"]:
- raise ConfigError('openconnect mode local required at least one user')
- default_ocserv_usr_values = default_values['authentication']['local_users']['username']['otp']
- for user, params in ocserv['authentication']['local_users']['username'].items():
- # Not every configuration requires OTP settings
- if ocserv['authentication']['local_users']['username'][user].get('otp'):
- ocserv['authentication']['local_users']['username'][user]['otp'] = dict_merge(default_ocserv_usr_values, ocserv['authentication']['local_users']['username'][user]['otp'])
-
+ # workaround a "know limitation" - https://phabricator.vyos.net/T2665
+ ocserv = T2665_default_dict_cleanup(ocserv, default_values)
if ocserv:
ocserv['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
get_first_key=True, no_tag_node_value_mangle=True)
@@ -85,6 +134,14 @@ def verify(ocserv):
not is_listen_port_bind_service(int(port), 'ocserv-main'):
raise ConfigError(f'"{proto}" port "{port}" is used by another service')
+ # Check accounting
+ if "accounting" in ocserv:
+ if "mode" in ocserv["accounting"] and "radius" in ocserv["accounting"]["mode"]:
+ if "authentication" not in ocserv or "mode" not in ocserv["authentication"]:
+ raise ConfigError('Accounting depends on OpenConnect authentication configuration')
+ elif "radius" not in ocserv["authentication"]["mode"]:
+ raise ConfigError('RADIUS accounting must be used with RADIUS authentication')
+
# Check authentication
if "authentication" in ocserv:
if "mode" in ocserv["authentication"]:
@@ -157,7 +214,7 @@ def verify(ocserv):
ocserv["network_settings"]["push_route"].remove("0.0.0.0/0")
ocserv["network_settings"]["push_route"].append("default")
else:
- ocserv["network_settings"]["push_route"] = "default"
+ ocserv["network_settings"]["push_route"] = ["default"]
else:
raise ConfigError('openconnect network settings required')
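The ["default"] wrapping in the hunk above matters because later consumers treat push_route as an iterable of route strings (the surrounding code appends to and removes from it, and the Jinja2 template presumably loops over it). A bare string would be iterated character by character. Quick sketch:

# Illustrative sketch only.
push_route = "default"
print([route for route in push_route])   # ['d', 'e', 'f', 'a', 'u', 'l', 't']

push_route = ["default"]
print([route for route in push_route])   # ['default']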
@@ -166,10 +223,18 @@ def generate(ocserv):
return None
if "radius" in ocserv["authentication"]["mode"]:
- # Render radius client configuration
- render(radius_cfg, 'ocserv/radius_conf.j2', ocserv["authentication"]["radius"])
- # Render radius servers
- render(radius_servers, 'ocserv/radius_servers.j2', ocserv["authentication"]["radius"])
+ if dict_search(ocserv, 'accounting.mode.radius'):
+ # Render radius client configuration
+ render(radius_cfg, 'ocserv/radius_conf.j2', ocserv)
+ # Merge the accounting and authentication servers into a single dictionary
+ merged_servers = ocserv["accounting"]["radius"]["server"] | ocserv["authentication"]["radius"]["server"]
+ # Render radius servers
+ render(radius_servers, 'ocserv/radius_servers.j2', {'server': merged_servers})
+ else:
+ # Render radius client configuration
+ render(radius_cfg, 'ocserv/radius_conf.j2', ocserv)
+ # Render radius servers
+ render(radius_servers, 'ocserv/radius_servers.j2', ocserv["authentication"]["radius"])
elif "local" in ocserv["authentication"]["mode"]:
# if mode "OTP", generate OTP users file parameters
if "otp" in ocserv["authentication"]["mode"]["local"]:
@@ -247,7 +312,7 @@ def apply(ocserv):
if os.path.exists(file):
os.unlink(file)
else:
- call('systemctl restart ocserv.service')
+ call('systemctl reload-or-restart ocserv.service')
counter = 0
while True:
# exit early when service runs
diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py
index 1b4156895..c17cca3bd 100755
--- a/src/conf_mode/vrf.py
+++ b/src/conf_mode/vrf.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2020-2022 VyOS maintainers and contributors
+# Copyright (C) 2020-2023 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -140,11 +140,9 @@ def verify(vrf):
def generate(vrf):
- render(config_file, 'vrf/vrf.conf.j2', vrf)
+ render(config_file, 'iproute2/vrf.conf.j2', vrf)
# Render nftables zones config
-
render(nft_vrf_config, 'firewall/nftables-vrf-zones.j2', vrf)
-
return None