Diffstat (limited to 'src/conf_mode')
-rwxr-xr-x  src/conf_mode/conntrack.py | 39
-rwxr-xr-x  src/conf_mode/containers.py | 39
-rwxr-xr-x  src/conf_mode/dhcp_server.py | 12
-rwxr-xr-x  src/conf_mode/dhcpv6_server.py | 2
-rwxr-xr-x  src/conf_mode/dns_forwarding.py | 220
-rwxr-xr-x  src/conf_mode/dynamic_dns.py | 4
-rwxr-xr-x  src/conf_mode/firewall-interface.py | 146
-rwxr-xr-x  src/conf_mode/firewall.py | 267
-rwxr-xr-x  src/conf_mode/flow_accounting_conf.py | 436
-rwxr-xr-x  src/conf_mode/host_name.py | 2
-rwxr-xr-x  src/conf_mode/http-api.py | 54
-rwxr-xr-x  src/conf_mode/https.py | 30
-rwxr-xr-x  src/conf_mode/interfaces-openvpn.py | 73
-rwxr-xr-x  src/conf_mode/interfaces-tunnel.py | 23
-rwxr-xr-x  src/conf_mode/interfaces-vxlan.py | 31
-rwxr-xr-x  src/conf_mode/interfaces-wireless.py | 5
-rwxr-xr-x  src/conf_mode/interfaces-wwan.py | 53
-rwxr-xr-x  src/conf_mode/nat.py | 23
-rwxr-xr-x  src/conf_mode/nat66.py | 24
-rwxr-xr-x  src/conf_mode/netns.py | 118
-rwxr-xr-x  src/conf_mode/policy-route-interface.py | 120
-rwxr-xr-x  src/conf_mode/policy-route.py | 154
-rwxr-xr-x  src/conf_mode/policy.py | 21
-rwxr-xr-x  src/conf_mode/protocols_bfd.py | 35
-rwxr-xr-x  src/conf_mode/protocols_bgp.py | 45
-rwxr-xr-x  src/conf_mode/protocols_isis.py | 26
-rwxr-xr-x  src/conf_mode/protocols_mpls.py | 38
-rwxr-xr-x  src/conf_mode/protocols_nhrp.py | 27
-rwxr-xr-x  src/conf_mode/protocols_ospf.py | 27
-rwxr-xr-x  src/conf_mode/protocols_ospfv3.py | 116
-rwxr-xr-x  src/conf_mode/protocols_rip.py | 35
-rwxr-xr-x  src/conf_mode/protocols_ripng.py | 29
-rwxr-xr-x  src/conf_mode/protocols_rpki.py | 17
-rwxr-xr-x  src/conf_mode/protocols_static.py | 24
-rwxr-xr-x  src/conf_mode/service_mdns-repeater.py | 12
-rwxr-xr-x  src/conf_mode/service_pppoe-server.py | 15
-rwxr-xr-x  src/conf_mode/snmp.py | 639
-rwxr-xr-x  src/conf_mode/system-login-banner.py | 38
-rwxr-xr-x  src/conf_mode/system-logs.py | 83
-rwxr-xr-x  src/conf_mode/system-option.py | 6
-rwxr-xr-x  src/conf_mode/system_console.py | 70
-rwxr-xr-x  src/conf_mode/tftp_server.py | 9
-rwxr-xr-x  src/conf_mode/vpn_l2tp.py | 2
-rwxr-xr-x  src/conf_mode/vpn_openconnect.py | 12
-rwxr-xr-x  src/conf_mode/vpn_sstp.py | 42
-rwxr-xr-x  src/conf_mode/vrf.py | 14
-rwxr-xr-x  src/conf_mode/vrf_vni.py | 31
-rwxr-xr-x  src/conf_mode/vrrp.py | 8
-rwxr-xr-x  src/conf_mode/zone_policy.py | 196
49 files changed, 2350 insertions, 1142 deletions
diff --git a/src/conf_mode/conntrack.py b/src/conf_mode/conntrack.py
index 68877f794..c65ef9540 100755
--- a/src/conf_mode/conntrack.py
+++ b/src/conf_mode/conntrack.py
@@ -15,11 +15,14 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
+import re
from sys import exit
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.firewall import find_nftables_rule
+from vyos.firewall import remove_nftables_rule
from vyos.util import cmd
from vyos.util import run
from vyos.util import process_named_running
@@ -43,8 +46,8 @@ module_map = {
'ko' : ['nf_nat_h323', 'nf_conntrack_h323'],
},
'nfs' : {
- 'iptables' : ['VYATTA_CT_HELPER --table raw --proto tcp --dport 111 --jump CT --helper rpc',
- 'VYATTA_CT_HELPER --table raw --proto udp --dport 111 --jump CT --helper rpc'],
+ 'nftables' : ['ct helper set "rpc_tcp" tcp dport "{111}" return',
+ 'ct helper set "rpc_udp" udp dport "{111}" return']
},
'pptp' : {
'ko' : ['nf_nat_pptp', 'nf_conntrack_pptp'],
@@ -53,9 +56,7 @@ module_map = {
'ko' : ['nf_nat_sip', 'nf_conntrack_sip'],
},
'sqlnet' : {
- 'iptables' : ['VYATTA_CT_HELPER --table raw --proto tcp --dport 1521 --jump CT --helper tns',
- 'VYATTA_CT_HELPER --table raw --proto tcp --dport 1525 --jump CT --helper tns',
- 'VYATTA_CT_HELPER --table raw --proto tcp --dport 1536 --jump CT --helper tns'],
+ 'nftables' : ['ct helper set "tns_tcp" tcp dport "{1521,1525,1536}" return']
},
'tftp' : {
'ko' : ['nf_nat_tftp', 'nf_conntrack_tftp'],
@@ -93,6 +94,17 @@ def generate(conntrack):
return None
+def find_nftables_ct_rule(rule):
+ helper_search = re.search('ct helper set "(\w+)"', rule)
+ if helper_search:
+ rule = helper_search[1]
+ return find_nftables_rule('raw', 'VYOS_CT_HELPER', [rule])
+
+def find_remove_rule(rule):
+ handle = find_nftables_ct_rule(rule)
+ if handle:
+ remove_nftables_rule('raw', 'VYOS_CT_HELPER', handle)
+
def apply(conntrack):
# Depending on the enable/disable state of the ALG (Application Layer Gateway)
# modules we need to either insmod or rmmod the helpers.
@@ -103,20 +115,17 @@ def apply(conntrack):
# Only remove the module if it's loaded
if os.path.exists(f'/sys/module/{mod}'):
cmd(f'rmmod {mod}')
- if 'iptables' in module_config:
- for rule in module_config['iptables']:
- # Only install iptables rule if it does not exist
- tmp = run(f'iptables --check {rule}')
- if tmp == 0: cmd(f'iptables --delete {rule}')
+ if 'nftables' in module_config:
+ for rule in module_config['nftables']:
+ find_remove_rule(rule)
else:
if 'ko' in module_config:
for mod in module_config['ko']:
cmd(f'modprobe {mod}')
- if 'iptables' in module_config:
- for rule in module_config['iptables']:
- # Only install iptables rule if it does not exist
- tmp = run(f'iptables --check {rule}')
- if tmp > 0: cmd(f'iptables --insert {rule}')
+ if 'nftables' in module_config:
+ for rule in module_config['nftables']:
+ if not find_nftables_ct_rule(rule):
+ cmd(f'nft insert rule ip raw VYOS_CT_HELPER {rule}')
if process_named_running('conntrackd'):
# Reload conntrack-sync daemon to fetch new sysctl values
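
The nftables variant of the conntrack helper handling keys off the helper name embedded in each rule string rather than the full iptables rule text. Below is a minimal standalone sketch of that extraction step, with a stubbed lookup standing in for vyos.firewall.find_nftables_rule (the stub and its return values are illustrative only, not the real helper):

    import re

    # Illustrative stand-in for vyos.firewall.find_nftables_rule: pretends the
    # rpc_tcp helper rule is already installed and returns its rule handle.
    def find_nftables_rule_stub(table, chain, rule_parts):
        installed = {'rpc_tcp': '12'}
        return installed.get(rule_parts[0])

    def find_ct_rule(rule):
        # Match on the helper name ("rpc_tcp", "tns_tcp", ...) as the new
        # find_nftables_ct_rule() above does, falling back to the raw string.
        helper = re.search(r'ct helper set "(\w+)"', rule)
        needle = helper[1] if helper else rule
        return find_nftables_rule_stub('raw', 'VYOS_CT_HELPER', [needle])

    print(find_ct_rule('ct helper set "rpc_tcp" tcp dport "{111}" return'))            # '12'
    print(find_ct_rule('ct helper set "tns_tcp" tcp dport "{1521,1525,1536}" return')) # None
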
diff --git a/src/conf_mode/containers.py b/src/conf_mode/containers.py
index 1e0197a13..2e14e0b25 100755
--- a/src/conf_mode/containers.py
+++ b/src/conf_mode/containers.py
@@ -30,8 +30,6 @@ from vyos.util import cmd
from vyos.util import run
from vyos.util import read_file
from vyos.util import write_file
-from vyos.util import is_systemd_service_active
-from vyos.util import is_systemd_service_running
from vyos.template import inc_ip
from vyos.template import is_ipv4
from vyos.template import is_ipv6
@@ -102,7 +100,7 @@ def verify(container):
# Check if the specified container network exists
network_name = list(container_config['network'])[0]
if network_name not in container['network']:
- raise ConfigError('Container network "{network_name}" does not exist!')
+ raise ConfigError(f'Container network "{network_name}" does not exist!')
if 'address' in container_config['network'][network_name]:
if 'network' not in container_config:
@@ -160,7 +158,7 @@ def verify(container):
v6_prefix = 0
# If ipv4-prefix not defined for user-defined network
if 'prefix' not in network_config:
- raise ConfigError(f'prefix for network "{net}" must be defined!')
+ raise ConfigError(f'prefix for network "{network}" must be defined!')
for prefix in network_config['prefix']:
if is_ipv4(prefix): v4_prefix += 1
@@ -237,17 +235,6 @@ def apply(container):
if os.path.exists(tmp):
os.unlink(tmp)
- service_name = 'podman.service'
- if 'network' in container or 'name' in container:
- # Start podman if it's required and not yet running
- if not is_systemd_service_active(service_name):
- _cmd(f'systemctl start {service_name}')
- # Wait for podman to be running
- while not is_systemd_service_running(service_name):
- sleep(0.250)
- else:
- _cmd(f'systemctl stop {service_name}')
-
# Add container
if 'name' in container:
for name, container_config in container['name'].items():
@@ -271,6 +258,14 @@ def apply(container):
tmp = run(f'podman image exists {image}')
if tmp != 0: print(os.system(f'podman pull {image}'))
+ # Add capability options. Should be in uppercase
+ cap_add = ''
+ if 'cap_add' in container_config:
+ for c in container_config['cap_add']:
+ c = c.upper()
+ c = c.replace('-', '_')
+ cap_add += f' --cap-add={c}'
+
# Check/set environment options "-e foo=bar"
env_opt = ''
if 'environment' in container_config:
@@ -299,7 +294,7 @@ def apply(container):
dvol = vol_config['destination']
volume += f' -v {svol}:{dvol}'
- container_base_cmd = f'podman run --detach --interactive --tty --replace ' \
+ container_base_cmd = f'podman run --detach --interactive --tty --replace {cap_add} ' \
f'--memory {memory}m --memory-swap 0 --restart {restart} ' \
f'--name {name} {port} {volume} {env_opt}'
if 'allow_host_networks' in container_config:
@@ -310,7 +305,17 @@ def apply(container):
if 'address' in container_config['network'][network]:
address = container_config['network'][network]['address']
ipparam = f'--ip {address}'
- _cmd(f'{container_base_cmd} --net {network} {ipparam} {image}')
+
+ counter = 0
+ while True:
+ if counter >= 10:
+ break
+ try:
+ _cmd(f'{container_base_cmd} --net {network} {ipparam} {image}')
+ break
+ except:
+ counter = counter +1
+ sleep(0.5)
return None
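
The previously unbounded container start is now wrapped in a bounded retry loop around `podman run`. The same pattern can be read as a small generic helper; this is only a sketch of the retry logic (the helper name and the test command are made up, not part of this commit):

    from subprocess import run, CalledProcessError
    from time import sleep

    def run_with_retry(command, attempts=10, delay=0.5):
        """Retry a failing shell command a fixed number of times before giving up."""
        for _ in range(attempts):
            try:
                run(command, shell=True, check=True)
                return True
            except CalledProcessError:
                sleep(delay)
        return False

    # Illustrative usage only; containers.py retries the actual 'podman run ...' line
    run_with_retry('true', attempts=3)
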
diff --git a/src/conf_mode/dhcp_server.py b/src/conf_mode/dhcp_server.py
index 28f2a4ca5..a8cef5ebf 100755
--- a/src/conf_mode/dhcp_server.py
+++ b/src/conf_mode/dhcp_server.py
@@ -151,9 +151,15 @@ def verify(dhcp):
listen_ok = False
subnets = []
failover_ok = False
+ shared_networks = len(dhcp['shared_network_name'])
+ disabled_shared_networks = 0
+
# A shared-network requires a subnet definition
for network, network_config in dhcp['shared_network_name'].items():
+ if 'disable' in network_config:
+ disabled_shared_networks += 1
+
if 'subnet' not in network_config:
raise ConfigError(f'No subnets defined for {network}. At least one\n' \
'lease subnet must be configured.')
@@ -226,7 +232,7 @@ def verify(dhcp):
# There must be one subnet connected to a listen interface.
# This only counts if the network itself is not disabled!
if 'disable' not in network_config:
- if is_subnet_connected(subnet, primary=True):
+ if is_subnet_connected(subnet, primary=False):
listen_ok = True
# Subnets must be non overlapping
@@ -243,6 +249,10 @@ def verify(dhcp):
if net.overlaps(net2):
raise ConfigError('Conflicting subnet ranges: "{net}" overlaps "{net2}"!')
+ # Prevent 'disable' for shared-network if only one network is configured
+ if (shared_networks - disabled_shared_networks) < 1:
+ raise ConfigError(f'At least one shared network must be active!')
+
if 'failover' in dhcp:
if not failover_ok:
raise ConfigError('DHCP failover must be enabled for at least one subnet!')
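
The new shared-network check is a straight count: the number of configured networks minus the disabled ones must stay above zero. A condensed sketch of that logic on a plain dict (the network names and subnets are invented):

    # Example CLI-style dict: two shared networks, both disabled
    shared_network_name = {
        'LAN': {'disable': {}, 'subnet': {'10.0.0.0/24': {}}},
        'DMZ': {'disable': {}, 'subnet': {'192.0.2.0/24': {}}},
    }

    shared_networks = len(shared_network_name)
    disabled_shared_networks = sum(1 for conf in shared_network_name.values()
                                   if 'disable' in conf)

    if (shared_networks - disabled_shared_networks) < 1:
        # dhcp_server.py raises ConfigError at this point
        print('At least one shared network must be active!')
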
diff --git a/src/conf_mode/dhcpv6_server.py b/src/conf_mode/dhcpv6_server.py
index 175300bb0..e6a2e4486 100755
--- a/src/conf_mode/dhcpv6_server.py
+++ b/src/conf_mode/dhcpv6_server.py
@@ -128,7 +128,7 @@ def verify(dhcpv6):
# Subnets must be unique
if subnet in subnets:
- raise ConfigError('DHCPv6 subnets must be unique! Subnet {0} defined multiple times!'.format(subnet['network']))
+ raise ConfigError(f'DHCPv6 subnets must be unique! Subnet {subnet} defined multiple times!')
subnets.append(subnet)
# DHCPv6 requires at least one configured address range or one static mapping
diff --git a/src/conf_mode/dns_forwarding.py b/src/conf_mode/dns_forwarding.py
index c44e6c974..23a16df63 100755
--- a/src/conf_mode/dns_forwarding.py
+++ b/src/conf_mode/dns_forwarding.py
@@ -17,6 +17,7 @@
import os
from sys import exit
+from glob import glob
from vyos.config import Config
from vyos.configdict import dict_merge
@@ -50,10 +51,12 @@ def get_config(config=None):
if not conf.exists(base):
return None
- dns = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+ dns = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True)
# We have gathered the dict representation of the CLI, but there are default
- # options which we need to update into the dictionary retrived.
+ # options which we need to update into the dictionary retrieved.
default_values = defaults(base)
+ # T2665 due to how defaults under tag nodes work, we must clear these out before we merge
+ del default_values['authoritative_domain']
dns = dict_merge(default_values, dns)
# some additions to the default dictionary
@@ -66,20 +69,182 @@ def get_config(config=None):
if conf.exists(base_nameservers_dhcp):
dns.update({'system_name_server_dhcp': conf.return_values(base_nameservers_dhcp)})
- # Split the source_address property into separate IPv4 and IPv6 lists
- # NOTE: In future versions of pdns-recursor (> 4.4.0), this logic can be removed
- # as both IPv4 and IPv6 addresses can be specified in a single setting.
- source_address_v4 = []
- source_address_v6 = []
-
- for source_address in dns['source_address']:
- if is_ipv6(source_address):
- source_address_v6.append(source_address)
- else:
- source_address_v4.append(source_address)
-
- dns.update({'source_address_v4': source_address_v4})
- dns.update({'source_address_v6': source_address_v6})
+ if 'authoritative_domain' in dns:
+ dns['authoritative_zones'] = []
+ dns['authoritative_zone_errors'] = []
+ for node in dns['authoritative_domain']:
+ zonedata = dns['authoritative_domain'][node]
+ if ('disable' in zonedata) or (not 'records' in zonedata):
+ continue
+ zone = {
+ 'name': node,
+ 'file': "{}/zone.{}.conf".format(pdns_rec_run_dir, node),
+ 'records': [],
+ }
+
+ recorddata = zonedata['records']
+
+ for rtype in [ 'a', 'aaaa', 'cname', 'mx', 'ptr', 'txt', 'spf', 'srv', 'naptr' ]:
+ if rtype not in recorddata:
+ continue
+ for subnode in recorddata[rtype]:
+ if 'disable' in recorddata[rtype][subnode]:
+ continue
+
+ rdata = recorddata[rtype][subnode]
+
+ if rtype in [ 'a', 'aaaa' ]:
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'address' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one address is required'.format(subnode, node))
+ continue
+
+ for address in rdata['address']:
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': address
+ })
+ elif rtype in ['cname', 'ptr']:
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'target' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: target is required'.format(subnode, node))
+ continue
+
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '{}.'.format(rdata['target'])
+ })
+ elif rtype == 'mx':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ del rdefaults['server']
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'server' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one server is required'.format(subnode, node))
+ continue
+
+ for servername in rdata['server']:
+ serverdata = rdata['server'][servername]
+ serverdefaults = defaults(base + ['authoritative-domain', 'records', rtype, 'server']) # T2665
+ serverdata = dict_merge(serverdefaults, serverdata)
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '{} {}.'.format(serverdata['priority'], servername)
+ })
+ elif rtype == 'txt':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'value' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one value is required'.format(subnode, node))
+ continue
+
+ for value in rdata['value']:
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': "\"{}\"".format(value.replace("\"", "\\\""))
+ })
+ elif rtype == 'spf':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'value' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: value is required'.format(subnode, node))
+ continue
+
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '"{}"'.format(rdata['value'].replace("\"", "\\\""))
+ })
+ elif rtype == 'srv':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ del rdefaults['entry']
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'entry' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one entry is required'.format(subnode, node))
+ continue
+
+ for entryno in rdata['entry']:
+ entrydata = rdata['entry'][entryno]
+ entrydefaults = defaults(base + ['authoritative-domain', 'records', rtype, 'entry']) # T2665
+ entrydata = dict_merge(entrydefaults, entrydata)
+
+ if not 'hostname' in entrydata:
+ dns['authoritative_zone_errors'].append('{}.{}: hostname is required for entry {}'.format(subnode, node, entryno))
+ continue
+
+ if not 'port' in entrydata:
+ dns['authoritative_zone_errors'].append('{}.{}: port is required for entry {}'.format(subnode, node, entryno))
+ continue
+
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '{} {} {} {}.'.format(entrydata['priority'], entrydata['weight'], entrydata['port'], entrydata['hostname'])
+ })
+ elif rtype == 'naptr':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ del rdefaults['rule']
+ rdata = dict_merge(rdefaults, rdata)
+
+
+ if not 'rule' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one rule is required'.format(subnode, node))
+ continue
+
+ for ruleno in rdata['rule']:
+ ruledata = rdata['rule'][ruleno]
+ ruledefaults = defaults(base + ['authoritative-domain', 'records', rtype, 'rule']) # T2665
+ ruledata = dict_merge(ruledefaults, ruledata)
+ flags = ""
+ if 'lookup-srv' in ruledata:
+ flags += "S"
+ if 'lookup-a' in ruledata:
+ flags += "A"
+ if 'resolve-uri' in ruledata:
+ flags += "U"
+ if 'protocol-specific' in ruledata:
+ flags += "P"
+
+ if 'order' in ruledata:
+ order = ruledata['order']
+ else:
+ order = ruleno
+
+ if 'regexp' in ruledata:
+ regexp= ruledata['regexp'].replace("\"", "\\\"")
+ else:
+ regexp = ''
+
+ if ruledata['replacement']:
+ replacement = '{}.'.format(ruledata['replacement'])
+ else:
+ replacement = ''
+
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '{} {} "{}" "{}" "{}" {}'.format(order, ruledata['preference'], flags, ruledata['service'], regexp, replacement)
+ })
+
+ dns['authoritative_zones'].append(zone)
return dns
@@ -101,6 +266,11 @@ def verify(dns):
if 'server' not in dns['domain'][domain]:
raise ConfigError(f'No server configured for domain {domain}!')
+ if ('authoritative_zone_errors' in dns) and dns['authoritative_zone_errors']:
+ for error in dns['authoritative_zone_errors']:
+ print(error)
+ raise ConfigError('Invalid authoritative records have been defined')
+
if 'system' in dns:
if not ('system_name_server' in dns or 'system_name_server_dhcp' in dns):
print("Warning: No 'system name-server' or 'system " \
@@ -119,6 +289,15 @@ def generate(dns):
render(pdns_rec_lua_conf_file, 'dns-forwarding/recursor.conf.lua.tmpl',
dns, user=pdns_rec_user, group=pdns_rec_group)
+ for zone_filename in glob(f'{pdns_rec_run_dir}/zone.*.conf'):
+ os.unlink(zone_filename)
+
+ if 'authoritative_zones' in dns:
+ for zone in dns['authoritative_zones']:
+ render(zone['file'], 'dns-forwarding/recursor.zone.conf.tmpl',
+ zone, user=pdns_rec_user, group=pdns_rec_group)
+
+
# if vyos-hostsd didn't create its files yet, create them (empty)
for file in [pdns_rec_hostsd_lua_conf_file, pdns_rec_hostsd_zones_file]:
with open(file, 'a'):
@@ -134,6 +313,9 @@ def apply(dns):
if os.path.isfile(pdns_rec_config_file):
os.unlink(pdns_rec_config_file)
+
+ for zone_filename in glob(f'{pdns_rec_run_dir}/zone.*.conf'):
+ os.unlink(zone_filename)
else:
### first apply vyos-hostsd config
hc = hostsd_client()
@@ -168,6 +350,12 @@ def apply(dns):
if 'domain' in dns:
hc.add_forward_zones(dns['domain'])
+ # hostsd generates NTAs for the authoritative zones
+ # the list and keys() are required as get returns a dict, not list
+ hc.delete_authoritative_zones(list(hc.get_authoritative_zones()))
+ if 'authoritative_zones' in dns:
+ hc.add_authoritative_zones(list(map(lambda zone: zone['name'], dns['authoritative_zones'])))
+
# call hostsd to generate forward-zones and its lua-config-file
hc.apply()
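
Each authoritative record ends up as a flat name/type/ttl/value dict that the zone template renders. As an illustration, here is the NAPTR value assembly from the hunk above pulled out into a standalone snippet (the sample rule data is invented):

    # Invented NAPTR rule data in the shape produced by get_config_dict()
    ruledata = {
        'preference': '10',
        'service': 'SIP+D2U',
        'lookup-srv': {},                     # presence of the node adds the "S" flag
        'replacement': '_sip._udp.example.com',
    }
    ruleno = '10'

    flags = ''
    if 'lookup-srv' in ruledata:        flags += 'S'
    if 'lookup-a' in ruledata:          flags += 'A'
    if 'resolve-uri' in ruledata:       flags += 'U'
    if 'protocol-specific' in ruledata: flags += 'P'

    order = ruledata.get('order', ruleno)
    regexp = ruledata.get('regexp', '').replace('"', '\\"')
    replacement = f"{ruledata['replacement']}." if ruledata.get('replacement') else ''

    value = f'{order} {ruledata["preference"]} "{flags}" "{ruledata["service"]}" "{regexp}" {replacement}'
    print(value)   # 10 10 "S" "SIP+D2U" "" _sip._udp.example.com.
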
diff --git a/src/conf_mode/dynamic_dns.py b/src/conf_mode/dynamic_dns.py
index c979feca7..a31e5ed75 100755
--- a/src/conf_mode/dynamic_dns.py
+++ b/src/conf_mode/dynamic_dns.py
@@ -131,9 +131,7 @@ def generate(dyndns):
if not dyndns:
return None
- render(config_file, 'dynamic-dns/ddclient.conf.tmpl', dyndns,
- permission=0o600)
-
+ render(config_file, 'dynamic-dns/ddclient.conf.tmpl', dyndns)
return None
def apply(dyndns):
diff --git a/src/conf_mode/firewall-interface.py b/src/conf_mode/firewall-interface.py
new file mode 100755
index 000000000..3a17dc5a4
--- /dev/null
+++ b/src/conf_mode/firewall-interface.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+
+from sys import argv
+from sys import exit
+
+from vyos.config import Config
+from vyos.configdict import leaf_node_changed
+from vyos.ifconfig import Section
+from vyos.template import render
+from vyos.util import cmd
+from vyos.util import dict_search_args
+from vyos.util import run
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+NFT_CHAINS = {
+ 'in': 'VYOS_FW_IN',
+ 'out': 'VYOS_FW_OUT',
+ 'local': 'VYOS_FW_LOCAL'
+}
+NFT6_CHAINS = {
+ 'in': 'VYOS_FW6_IN',
+ 'out': 'VYOS_FW6_OUT',
+ 'local': 'VYOS_FW6_LOCAL'
+}
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ ifname = argv[1]
+ ifpath = Section.get_config_path(ifname)
+ if_firewall_path = f'interfaces {ifpath} firewall'
+
+ if_firewall = conf.get_config_dict(if_firewall_path, key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ if_firewall['ifname'] = ifname
+ if_firewall['firewall'] = conf.get_config_dict(['firewall'], key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ return if_firewall
+
+def verify(if_firewall):
+ # bail out early - looks like removal from running config
+ if not if_firewall:
+ return None
+
+ for direction in ['in', 'out', 'local']:
+ if direction in if_firewall:
+ if 'name' in if_firewall[direction]:
+ name = if_firewall[direction]['name']
+
+ if 'name' not in if_firewall['firewall']:
+ raise ConfigError('Firewall name not configured')
+
+ if name not in if_firewall['firewall']['name']:
+ raise ConfigError(f'Invalid firewall name "{name}"')
+
+ if 'ipv6_name' in if_firewall[direction]:
+ name = if_firewall[direction]['ipv6_name']
+
+ if 'ipv6_name' not in if_firewall['firewall']:
+ raise ConfigError('Firewall ipv6-name not configured')
+
+ if name not in if_firewall['firewall']['ipv6_name']:
+ raise ConfigError(f'Invalid firewall ipv6-name "{name}"')
+
+ return None
+
+def generate(if_firewall):
+ return None
+
+def cleanup_rule(table, chain, ifname, new_name=None):
+ results = cmd(f'nft -a list chain {table} {chain}').split("\n")
+ retval = None
+ for line in results:
+ if f'ifname "{ifname}"' in line:
+ if new_name and f'jump {new_name}' in line:
+ # new_name is used to clear rules for any previously referenced chains
+ # returns true when rule exists and doesn't need to be created
+ retval = True
+ continue
+
+ handle_search = re.search('handle (\d+)', line)
+ if handle_search:
+ run(f'nft delete rule {table} {chain} handle {handle_search[1]}')
+ return retval
+
+def apply(if_firewall):
+ ifname = if_firewall['ifname']
+
+ for direction in ['in', 'out', 'local']:
+ chain = NFT_CHAINS[direction]
+ ipv6_chain = NFT6_CHAINS[direction]
+ if_prefix = 'i' if direction in ['in', 'local'] else 'o'
+
+ name = dict_search_args(if_firewall, direction, 'name')
+ if name:
+ rule_exists = cleanup_rule('ip filter', chain, ifname, name)
+
+ if not rule_exists:
+ run(f'nft insert rule ip filter {chain} {if_prefix}ifname {ifname} counter jump {name}')
+ else:
+ cleanup_rule('ip filter', chain, ifname)
+
+ ipv6_name = dict_search_args(if_firewall, direction, 'ipv6_name')
+ if ipv6_name:
+ rule_exists = cleanup_rule('ip6 filter', ipv6_chain, ifname, ipv6_name)
+
+ if not rule_exists:
+ run(f'nft insert rule ip6 filter {ipv6_chain} {if_prefix}ifname {ifname} counter jump {ipv6_name}')
+ else:
+ cleanup_rule('ip6 filter', ipv6_chain, ifname)
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
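
cleanup_rule() works by scraping rule handles out of `nft -a list chain` output and deleting them by handle. A standalone sketch of that handle extraction against a canned output line (the sample line and chain name are illustrative, not captured from a live system):

    import re

    # Sample line in the style of 'nft -a list chain ip filter VYOS_FW_IN' output
    sample = 'iifname "eth0" counter packets 0 bytes 0 jump NAME_WAN-IN # handle 23'

    def extract_handle(line, ifname):
        """Return the rule handle if the line matches the given interface."""
        if f'ifname "{ifname}"' not in line:
            return None
        match = re.search(r'handle (\d+)', line)
        return match[1] if match else None

    print(extract_handle(sample, 'eth0'))   # 23
    # firewall-interface.py would then run:
    #   nft delete rule ip filter VYOS_FW_IN handle 23
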
diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py
index 8e6ce5b14..5ac48c9ba 100755
--- a/src/conf_mode/firewall.py
+++ b/src/conf_mode/firewall.py
@@ -16,50 +16,295 @@
import os
+from glob import glob
+from json import loads
from sys import exit
from vyos.config import Config
from vyos.configdict import dict_merge
-from vyos.configdict import node_changed
-from vyos.configdict import leaf_node_changed
+from vyos.configdiff import get_config_diff, Diff
from vyos.template import render
-from vyos.util import call
+from vyos.util import cmd
+from vyos.util import dict_search_args
+from vyos.util import process_named_running
+from vyos.util import run
+from vyos.xml import defaults
from vyos import ConfigError
from vyos import airbag
-from pprint import pprint
airbag.enable()
+nftables_conf = '/run/nftables.conf'
-def get_config(config=None):
+sysfs_config = {
+ 'all_ping': {'sysfs': '/proc/sys/net/ipv4/icmp_echo_ignore_all', 'enable': '0', 'disable': '1'},
+ 'broadcast_ping': {'sysfs': '/proc/sys/net/ipv4/icmp_echo_ignore_broadcasts', 'enable': '0', 'disable': '1'},
+ 'ip_src_route': {'sysfs': '/proc/sys/net/ipv4/conf/*/accept_source_route'},
+ 'ipv6_receive_redirects': {'sysfs': '/proc/sys/net/ipv6/conf/*/accept_redirects'},
+ 'ipv6_src_route': {'sysfs': '/proc/sys/net/ipv6/conf/*/accept_source_route', 'enable': '0', 'disable': '-1'},
+ 'log_martians': {'sysfs': '/proc/sys/net/ipv4/conf/all/log_martians'},
+ 'receive_redirects': {'sysfs': '/proc/sys/net/ipv4/conf/*/accept_redirects'},
+ 'send_redirects': {'sysfs': '/proc/sys/net/ipv4/conf/*/send_redirects'},
+ 'source_validation': {'sysfs': '/proc/sys/net/ipv4/conf/*/rp_filter', 'disable': '0', 'strict': '1', 'loose': '2'},
+ 'syn_cookies': {'sysfs': '/proc/sys/net/ipv4/tcp_syncookies'},
+ 'twa_hazards_protection': {'sysfs': '/proc/sys/net/ipv4/tcp_rfc1337'}
+}
+
+preserve_chains = [
+ 'INPUT',
+ 'FORWARD',
+ 'OUTPUT',
+ 'VYOS_FW_IN',
+ 'VYOS_FW_OUT',
+ 'VYOS_FW_LOCAL',
+ 'VYOS_FW_OUTPUT',
+ 'VYOS_POST_FW',
+ 'VYOS_FRAG_MARK',
+ 'VYOS_FW6_IN',
+ 'VYOS_FW6_OUT',
+ 'VYOS_FW6_LOCAL',
+ 'VYOS_FW6_OUTPUT',
+ 'VYOS_POST_FW6',
+ 'VYOS_FRAG6_MARK'
+]
+
+valid_groups = [
+ 'address_group',
+ 'network_group',
+ 'port_group'
+]
+
+snmp_change_type = {
+ 'unknown': 0,
+ 'add': 1,
+ 'delete': 2,
+ 'change': 3
+}
+snmp_event_source = 1
+snmp_trap_mib = 'VYATTA-TRAP-MIB'
+snmp_trap_name = 'mgmtEventTrap'
+def get_firewall_interfaces(conf):
+ out = {}
+ interfaces = conf.get_config_dict(['interfaces'], key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+ def find_interfaces(iftype_conf, output={}, prefix=''):
+ for ifname, if_conf in iftype_conf.items():
+ if 'firewall' in if_conf:
+ output[prefix + ifname] = if_conf['firewall']
+ for vif in ['vif', 'vif_s', 'vif_c']:
+ if vif in if_conf:
+ output.update(find_interfaces(if_conf[vif], output, f'{prefix}{ifname}.'))
+ return output
+ for iftype, iftype_conf in interfaces.items():
+ out.update(find_interfaces(iftype_conf))
+ return out
+
+def get_config(config=None):
if config:
conf = config
else:
conf = Config()
- base = ['nfirewall']
+ base = ['firewall']
+
+ if not conf.exists(base):
+ return {}
+
firewall = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True,
no_tag_node_value_mangle=True)
- pprint(firewall)
+ default_values = defaults(base)
+ firewall = dict_merge(default_values, firewall)
+
+ firewall['interfaces'] = get_firewall_interfaces(conf)
+
+ if 'config_trap' in firewall and firewall['config_trap'] == 'enable':
+ diff = get_config_diff(conf)
+ firewall['trap_diff'] = diff.get_child_nodes_diff_str(base)
+ firewall['trap_targets'] = conf.get_config_dict(['service', 'snmp', 'trap-target'],
+ key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
return firewall
+def verify_rule(firewall, rule_conf, ipv6):
+ if 'action' not in rule_conf:
+ raise ConfigError('Rule action must be defined')
+
+ if 'fragment' in rule_conf:
+ if {'match_frag', 'match_non_frag'} <= set(rule_conf['fragment']):
+ raise ConfigError('Cannot specify both "match-frag" and "match-non-frag"')
+
+ if 'ipsec' in rule_conf:
+ if {'match_ipsec', 'match_non_ipsec'} <= set(rule_conf['ipsec']):
+ raise ConfigError('Cannot specify both "match-ipsec" and "match-non-ipsec"')
+
+ if 'recent' in rule_conf:
+ if not {'count', 'time'} <= set(rule_conf['recent']):
+ raise ConfigError('Recent "count" and "time" values must be defined')
+
+ for side in ['destination', 'source']:
+ if side in rule_conf:
+ side_conf = rule_conf[side]
+
+ if 'group' in side_conf:
+ if {'address_group', 'network_group'} <= set(side_conf['group']):
+ raise ConfigError('Only one address-group or network-group can be specified')
+
+ for group in valid_groups:
+ if group in side_conf['group']:
+ group_name = side_conf['group'][group]
+
+ fw_group = f'ipv6_{group}' if ipv6 and group in ['address_group', 'network_group'] else group
+
+ if not dict_search_args(firewall, 'group', fw_group):
+ error_group = fw_group.replace("_", "-")
+ raise ConfigError(f'Group defined in rule but {error_group} is not configured')
+
+ if group_name not in firewall['group'][fw_group]:
+ error_group = group.replace("_", "-")
+ raise ConfigError(f'Invalid {error_group} "{group_name}" on firewall rule')
+
+ if 'port' in side_conf or dict_search_args(side_conf, 'group', 'port_group'):
+ if 'protocol' not in rule_conf:
+ raise ConfigError('Protocol must be defined if specifying a port or port-group')
+
+ if rule_conf['protocol'] not in ['tcp', 'udp', 'tcp_udp']:
+ raise ConfigError('Protocol must be tcp, udp, or tcp_udp when specifying a port or port-group')
+
def verify(firewall):
# bail out early - looks like removal from running config
if not firewall:
return None
+ if 'config_trap' in firewall and firewall['config_trap'] == 'enable':
+ if not firewall['trap_targets']:
+ raise ConfigError(f'Firewall config-trap enabled but "service snmp trap-target" is not defined')
+
+ for name in ['name', 'ipv6_name']:
+ if name in firewall:
+ for name_id, name_conf in firewall[name].items():
+ if name_id in preserve_chains:
+ raise ConfigError(f'Firewall name "{name_id}" is reserved for VyOS')
+
+ if 'rule' in name_conf:
+ for rule_id, rule_conf in name_conf['rule'].items():
+ verify_rule(firewall, rule_conf, name == 'ipv6_name')
+
+ for ifname, if_firewall in firewall['interfaces'].items():
+ for direction in ['in', 'out', 'local']:
+ name = dict_search_args(if_firewall, direction, 'name')
+ ipv6_name = dict_search_args(if_firewall, direction, 'ipv6_name')
+
+ if name and not dict_search_args(firewall, 'name', name):
+ raise ConfigError(f'Firewall name "{name}" is still referenced on interface {ifname}')
+
+ if ipv6_name and not dict_search_args(firewall, 'ipv6_name', ipv6_name):
+ raise ConfigError(f'Firewall ipv6-name "{ipv6_name}" is still referenced on interface {ifname}')
+
return None
+def cleanup_commands(firewall):
+ commands = []
+ for table in ['ip filter', 'ip6 filter']:
+ json_str = cmd(f'nft -j list table {table}')
+ obj = loads(json_str)
+ if 'nftables' not in obj:
+ continue
+ for item in obj['nftables']:
+ if 'chain' in item:
+ if item['chain']['name'] not in preserve_chains:
+ chain = item['chain']['name']
+ if table == 'ip filter' and dict_search_args(firewall, 'name', chain):
+ commands.append(f'flush chain {table} {chain}')
+ elif table == 'ip6 filter' and dict_search_args(firewall, 'ipv6_name', chain):
+ commands.append(f'flush chain {table} {chain}')
+ else:
+ commands.append(f'delete chain {table} {chain}')
+ return commands
+
def generate(firewall):
- if not firewall:
- return None
+ if not os.path.exists(nftables_conf):
+ firewall['first_install'] = True
+ else:
+ firewall['cleanup_commands'] = cleanup_commands(firewall)
+ render(nftables_conf, 'firewall/nftables.tmpl', firewall)
return None
-def apply(firewall):
- if not firewall:
+def apply_sysfs(firewall):
+ for name, conf in sysfs_config.items():
+ paths = glob(conf['sysfs'])
+ value = None
+
+ if name in firewall:
+ conf_value = firewall[name]
+
+ if conf_value in conf:
+ value = conf[conf_value]
+ elif conf_value == 'enable':
+ value = '1'
+ elif conf_value == 'disable':
+ value = '0'
+
+ if value:
+ for path in paths:
+ with open(path, 'w') as f:
+ f.write(value)
+
+def post_apply_trap(firewall):
+ if 'first_install' in firewall:
return None
+ if 'config_trap' not in firewall or firewall['config_trap'] != 'enable':
+ return None
+
+ if not process_named_running('snmpd'):
+ return None
+
+ trap_username = os.getlogin()
+
+ for host, target_conf in firewall['trap_targets'].items():
+ community = target_conf['community'] if 'community' in target_conf else 'public'
+ port = int(target_conf['port']) if 'port' in target_conf else 162
+
+ base_cmd = f'snmptrap -v2c -c {community} {host}:{port} 0 {snmp_trap_mib}::{snmp_trap_name} '
+
+ for change_type, changes in firewall['trap_diff'].items():
+ for path_str, value in changes.items():
+ objects = [
+ f'mgmtEventUser s "{trap_username}"',
+ f'mgmtEventSource i {snmp_event_source}',
+ f'mgmtEventType i {snmp_change_type[change_type]}'
+ ]
+
+ if change_type == 'add':
+ objects.append(f'mgmtEventCurrCfg s "{path_str} {value}"')
+ elif change_type == 'delete':
+ objects.append(f'mgmtEventPrevCfg s "{path_str} {value}"')
+ elif change_type == 'change':
+ objects.append(f'mgmtEventPrevCfg s "{path_str} {value[0]}"')
+ objects.append(f'mgmtEventCurrCfg s "{path_str} {value[1]}"')
+
+ cmd(base_cmd + ' '.join(objects))
+
+def apply(firewall):
+ if 'first_install' in firewall:
+ run('nfct helper add rpc inet tcp')
+ run('nfct helper add rpc inet udp')
+ run('nfct helper add tns inet tcp')
+
+ install_result = run(f'nft -f {nftables_conf}')
+ if install_result == 1:
+ raise ConfigError('Failed to apply firewall')
+
+ if 'state_policy' in firewall:
+ for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
+ cmd(f'nft insert rule ip filter {chain} jump VYOS_STATE_POLICY')
+ cmd(f'nft insert rule ip6 filter {chain} jump VYOS_STATE_POLICY6')
+
+ apply_sysfs(firewall)
+
+ post_apply_trap(firewall)
+
return None
if __name__ == '__main__':
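
apply_sysfs() resolves each CLI value through the per-option sysfs_config table first and only then falls back to the generic enable/disable mapping. A reduced sketch of that lookup order for the source-validation option (values printed here instead of written to /proc):

    # Entry copied from sysfs_config above: explicit per-option values win,
    # otherwise 'enable' -> '1' and 'disable' -> '0'
    conf = {'sysfs': '/proc/sys/net/ipv4/conf/*/rp_filter',
            'disable': '0', 'strict': '1', 'loose': '2'}

    def resolve_value(conf, conf_value):
        if conf_value in conf:            # option-specific mapping, e.g. 'loose' -> '2'
            return conf[conf_value]
        if conf_value == 'enable':
            return '1'
        if conf_value == 'disable':
            return '0'
        return None

    for cli_value in ['strict', 'loose', 'disable']:
        print(cli_value, '->', resolve_value(conf, cli_value))
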
diff --git a/src/conf_mode/flow_accounting_conf.py b/src/conf_mode/flow_accounting_conf.py
index 9cae29481..975f19acf 100755
--- a/src/conf_mode/flow_accounting_conf.py
+++ b/src/conf_mode/flow_accounting_conf.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2020 VyOS maintainers and contributors
+# Copyright (C) 2018-2021 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -16,121 +16,83 @@
import os
import re
+
from sys import exit
import ipaddress
from ipaddress import ip_address
-from jinja2 import FileSystemLoader, Environment
+from vyos.config import Config
+from vyos.configdict import dict_merge
from vyos.ifconfig import Section
from vyos.ifconfig import Interface
-from vyos.config import Config
-from vyos import ConfigError
-from vyos.util import cmd
from vyos.template import render
-
+from vyos.util import cmd
+from vyos.validate import is_addr_assigned
+from vyos.xml import defaults
+from vyos import ConfigError
from vyos import airbag
airbag.enable()
-# default values
-default_sflow_server_port = 6343
-default_netflow_server_port = 2055
-default_plugin_pipe_size = 10
-default_captured_packet_size = 128
-default_netflow_version = '9'
-default_sflow_agentip = 'auto'
-uacctd_conf_path = '/etc/pmacct/uacctd.conf'
-iptables_nflog_table = 'raw'
-iptables_nflog_chain = 'VYATTA_CT_PREROUTING_HOOK'
-egress_iptables_nflog_table = 'mangle'
-egress_iptables_nflog_chain = 'FORWARD'
-
-# helper functions
-# check if node exists and return True if this is true
-def _node_exists(path):
- vyos_config = Config()
- if vyos_config.exists(path):
- return True
-
-# get sFlow agent-ip if agent-address is "auto" (default behaviour)
-def _sflow_default_agentip(config):
- # check if any of BGP, OSPF, OSPFv3 protocols are configured and use router-id from there
- if config.exists('protocols bgp'):
- bgp_router_id = config.return_value("protocols bgp {} parameters router-id".format(config.list_nodes('protocols bgp')[0]))
- if bgp_router_id:
- return bgp_router_id
- if config.return_value('protocols ospf parameters router-id'):
- return config.return_value('protocols ospf parameters router-id')
- if config.return_value('protocols ospfv3 parameters router-id'):
- return config.return_value('protocols ospfv3 parameters router-id')
-
- # if router-id was not found, use first available ip of any interface
- for iface in Section.interfaces():
- for address in Interface(iface).get_addr():
- # return an IP, if this is not loopback
- regex_filter = re.compile('^(?!(127)|(::1)|(fe80))(?P<ipaddr>[a-f\d\.:]+)/\d+$')
- if regex_filter.search(address):
- return regex_filter.search(address).group('ipaddr')
-
- # return nothing by default
- return None
-
-# get iptables rule dict for chain in table
-def _iptables_get_nflog(chain, table):
+uacctd_conf_path = '/run/pmacct/uacctd.conf'
+nftables_nflog_table = 'raw'
+nftables_nflog_chain = 'VYOS_CT_PREROUTING_HOOK'
+egress_nftables_nflog_table = 'inet mangle'
+egress_nftables_nflog_chain = 'FORWARD'
+
+# get nftables rule dict for chain in table
+def _nftables_get_nflog(chain, table):
# define list with rules
rules = []
# prepare regex for parsing rules
- rule_pattern = "^-A (?P<rule_definition>{0} (\-i|\-o) (?P<interface>[\w\.\*\-]+).*--comment FLOW_ACCOUNTING_RULE.* -j NFLOG.*$)".format(chain)
+ rule_pattern = '[io]ifname "(?P<interface>[\w\.\*\-]+)".*handle (?P<handle>[\d]+)'
rule_re = re.compile(rule_pattern)
- for iptables_variant in ['iptables', 'ip6tables']:
- # run iptables, save output and split it by lines
- iptables_command = f'{iptables_variant} -t {table} -S {chain}'
- tmp = cmd(iptables_command, message='Failed to get flows list')
-
- # parse each line and add information to list
- for current_rule in tmp.splitlines():
- current_rule_parsed = rule_re.search(current_rule)
- if current_rule_parsed:
- rules.append({ 'interface': current_rule_parsed.groupdict()["interface"], 'iptables_variant': iptables_variant, 'table': table, 'rule_definition': current_rule_parsed.groupdict()["rule_definition"] })
+ # run nftables, save output and split it by lines
+ nftables_command = f'nft -a list chain {table} {chain}'
+ tmp = cmd(nftables_command, message='Failed to get flows list')
+ # parse each line and add information to list
+ for current_rule in tmp.splitlines():
+ if 'FLOW_ACCOUNTING_RULE' not in current_rule:
+ continue
+ current_rule_parsed = rule_re.search(current_rule)
+ if current_rule_parsed:
+ groups = current_rule_parsed.groupdict()
+ rules.append({ 'interface': groups["interface"], 'table': table, 'handle': groups["handle"] })
# return list with rules
return rules
-# modify iptables rules
-def _iptables_config(configured_ifaces, direction):
- # define list of iptables commands to modify settings
- iptable_commands = []
- iptables_chain = iptables_nflog_chain
- iptables_table = iptables_nflog_table
+def _nftables_config(configured_ifaces, direction, length=None):
+ # define list of nftables commands to modify settings
+ nftable_commands = []
+ nftables_chain = nftables_nflog_chain
+ nftables_table = nftables_nflog_table
if direction == "egress":
- iptables_chain = egress_iptables_nflog_chain
- iptables_table = egress_iptables_nflog_table
+ nftables_chain = egress_nftables_nflog_chain
+ nftables_table = egress_nftables_nflog_table
# prepare extended list with configured interfaces
configured_ifaces_extended = []
for iface in configured_ifaces:
- configured_ifaces_extended.append({ 'iface': iface, 'iptables_variant': 'iptables' })
- configured_ifaces_extended.append({ 'iface': iface, 'iptables_variant': 'ip6tables' })
+ configured_ifaces_extended.append({ 'iface': iface })
- # get currently configured interfaces with iptables rules
- active_nflog_rules = _iptables_get_nflog(iptables_chain, iptables_table)
+ # get currently configured interfaces with nftables rules
+ active_nflog_rules = _nftables_get_nflog(nftables_chain, nftables_table)
# compare current active list with configured one and delete excessive interfaces, add missed
active_nflog_ifaces = []
for rule in active_nflog_rules:
- iptables = rule['iptables_variant']
interface = rule['interface']
if interface not in configured_ifaces:
table = rule['table']
- rule = rule['rule_definition']
- iptable_commands.append(f'{iptables} -t {table} -D {rule}')
+ handle = rule['handle']
+ nftable_commands.append(f'nft delete rule {table} {nftables_chain} handle {handle}')
else:
active_nflog_ifaces.append({
'iface': interface,
- 'iptables_variant': iptables,
})
# do not create new rules for already configured interfaces
@@ -141,244 +103,166 @@ def _iptables_config(configured_ifaces, direction):
# create missed rules
for iface_extended in configured_ifaces_extended:
iface = iface_extended['iface']
- iptables = iface_extended['iptables_variant']
- iptables_op = "-i"
- if direction == "egress":
- iptables_op = "-o"
-
- rule_definition = f'{iptables_chain} {iptables_op} {iface} -m comment --comment FLOW_ACCOUNTING_RULE -j NFLOG --nflog-group 2 --nflog-size {default_captured_packet_size} --nflog-threshold 100'
- iptable_commands.append(f'{iptables} -t {iptables_table} -I {rule_definition}')
+ iface_prefix = "o" if direction == "egress" else "i"
+ rule_definition = f'{iface_prefix}ifname "{iface}" counter log group 2 snaplen {length} queue-threshold 100 comment "FLOW_ACCOUNTING_RULE"'
+ nftable_commands.append(f'nft insert rule {nftables_table} {nftables_chain} {rule_definition}')
- # change iptables
- for command in iptable_commands:
+ # change nftables
+ for command in nftable_commands:
cmd(command, raising=ConfigError)
-def get_config():
- vc = Config()
- vc.set_level('')
- # Convert the VyOS config to an abstract internal representation
- flow_config = {
- 'flow-accounting-configured': vc.exists('system flow-accounting'),
- 'buffer-size': vc.return_value('system flow-accounting buffer-size'),
- 'enable-egress': _node_exists('system flow-accounting enable-egress'),
- 'disable-imt': _node_exists('system flow-accounting disable-imt'),
- 'syslog-facility': vc.return_value('system flow-accounting syslog-facility'),
- 'interfaces': None,
- 'sflow': {
- 'configured': vc.exists('system flow-accounting sflow'),
- 'agent-address': vc.return_value('system flow-accounting sflow agent-address'),
- 'sampling-rate': vc.return_value('system flow-accounting sflow sampling-rate'),
- 'servers': None
- },
- 'netflow': {
- 'configured': vc.exists('system flow-accounting netflow'),
- 'engine-id': vc.return_value('system flow-accounting netflow engine-id'),
- 'max-flows': vc.return_value('system flow-accounting netflow max-flows'),
- 'sampling-rate': vc.return_value('system flow-accounting netflow sampling-rate'),
- 'source-ip': vc.return_value('system flow-accounting netflow source-ip'),
- 'version': vc.return_value('system flow-accounting netflow version'),
- 'timeout': {
- 'expint': vc.return_value('system flow-accounting netflow timeout expiry-interval'),
- 'general': vc.return_value('system flow-accounting netflow timeout flow-generic'),
- 'icmp': vc.return_value('system flow-accounting netflow timeout icmp'),
- 'maxlife': vc.return_value('system flow-accounting netflow timeout max-active-life'),
- 'tcp.fin': vc.return_value('system flow-accounting netflow timeout tcp-fin'),
- 'tcp': vc.return_value('system flow-accounting netflow timeout tcp-generic'),
- 'tcp.rst': vc.return_value('system flow-accounting netflow timeout tcp-rst'),
- 'udp': vc.return_value('system flow-accounting netflow timeout udp')
- },
- 'servers': None
- }
- }
-
- # get interfaces list
- if vc.exists('system flow-accounting interface'):
- flow_config['interfaces'] = vc.return_values('system flow-accounting interface')
-
- # get sFlow collectors list
- if vc.exists('system flow-accounting sflow server'):
- flow_config['sflow']['servers'] = []
- sflow_collectors = vc.list_nodes('system flow-accounting sflow server')
- for collector in sflow_collectors:
- port = default_sflow_server_port
- if vc.return_value("system flow-accounting sflow server {} port".format(collector)):
- port = vc.return_value("system flow-accounting sflow server {} port".format(collector))
- flow_config['sflow']['servers'].append({ 'address': collector, 'port': port })
-
- # get NetFlow collectors list
- if vc.exists('system flow-accounting netflow server'):
- flow_config['netflow']['servers'] = []
- netflow_collectors = vc.list_nodes('system flow-accounting netflow server')
- for collector in netflow_collectors:
- port = default_netflow_server_port
- if vc.return_value("system flow-accounting netflow server {} port".format(collector)):
- port = vc.return_value("system flow-accounting netflow server {} port".format(collector))
- flow_config['netflow']['servers'].append({ 'address': collector, 'port': port })
-
- # get sflow agent-id
- if flow_config['sflow']['agent-address'] == None or flow_config['sflow']['agent-address'] == 'auto':
- flow_config['sflow']['agent-address'] = _sflow_default_agentip(vc)
-
- # get NetFlow version
- if not flow_config['netflow']['version']:
- flow_config['netflow']['version'] = default_netflow_version
-
- # convert NetFlow engine-id format, if this is necessary
- if flow_config['netflow']['engine-id'] and flow_config['netflow']['version'] == '5':
- regex_filter = re.compile('^\d+$')
- if regex_filter.search(flow_config['netflow']['engine-id']):
- flow_config['netflow']['engine-id'] = "{}:0".format(flow_config['netflow']['engine-id'])
-
- # return dict with flow-accounting configuration
- return flow_config
-
-def verify(config):
- # Verify that configuration is valid
- # skip all checks if flow-accounting was removed
- if not config['flow-accounting-configured']:
- return True
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['system', 'flow-accounting']
+ if not conf.exists(base):
+ return None
+
+ flow_accounting = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to update into the dictionary retrived.
+ default_values = defaults(base)
+
+ # delete individual flow type default - should only be added if user uses
+ # this feature
+ for flow_type in ['sflow', 'netflow']:
+ if flow_type in default_values:
+ del default_values[flow_type]
+ flow_accounting = dict_merge(default_values, flow_accounting)
+
+ for flow_type in ['sflow', 'netflow']:
+ if flow_type in flow_accounting:
+ default_values = defaults(base + [flow_type])
+ # we need to merge individual server configurations
+ if 'server' in default_values:
+ del default_values['server']
+ flow_accounting[flow_type] = dict_merge(default_values, flow_accounting[flow_type])
+
+ if 'server' in flow_accounting[flow_type]:
+ default_values = defaults(base + [flow_type, 'server'])
+ for server in flow_accounting[flow_type]['server']:
+ flow_accounting[flow_type]['server'][server] = dict_merge(
+ default_values,flow_accounting[flow_type]['server'][server])
+
+ return flow_accounting
+
+def verify(flow_config):
+ if not flow_config:
+ return None
# check if at least one collector is enabled
- if not (config['sflow']['configured'] or config['netflow']['configured'] or not config['disable-imt']):
- raise ConfigError("You need to configure at least one sFlow or NetFlow protocol, or not set \"disable-imt\" for flow-accounting")
+ if 'sflow' not in flow_config and 'netflow' not in flow_config and 'disable_imt' in flow_config:
+ raise ConfigError('You need to configure at least sFlow or NetFlow, ' \
+ 'or not set "disable-imt" for flow-accounting!')
# Check if at least one interface is configured
- if not config['interfaces']:
- raise ConfigError("You need to configure at least one interface for flow-accounting")
+ if 'interface' not in flow_config:
+ raise ConfigError('Flow accounting requires at least one interface to ' \
+ 'be configured!')
# check that all configured interfaces exists in the system
- for iface in config['interfaces']:
- if not iface in Section.interfaces():
- # chnged from error to warning to allow adding dynamic interfaces and interface templates
- # raise ConfigError("The {} interface is not presented in the system".format(iface))
- print("Warning: the {} interface is not presented in the system".format(iface))
+ for interface in flow_config['interface']:
+ if interface not in Section.interfaces():
+ # Changed from error to warning to allow adding dynamic interfaces
+ # and interface templates
+ print(f'Warning: Interface "{interface}" is not presented in the system')
# check sFlow configuration
- if config['sflow']['configured']:
- # check if at least one sFlow collector is configured if sFlow configuration is presented
- if not config['sflow']['servers']:
- raise ConfigError("You need to configure at least one sFlow server")
+ if 'sflow' in flow_config:
+ # check if at least one sFlow collector is configured
+ if 'server' not in flow_config['sflow']:
+ raise ConfigError('You need to configure at least one sFlow server!')
# check that all sFlow collectors use the same IP protocol version
sflow_collector_ipver = None
- for sflow_collector in config['sflow']['servers']:
+ for server in flow_config['sflow']['server']:
if sflow_collector_ipver:
- if sflow_collector_ipver != ip_address(sflow_collector['address']).version:
+ if sflow_collector_ipver != ip_address(server).version:
raise ConfigError("All sFlow servers must use the same IP protocol")
else:
- sflow_collector_ipver = ip_address(sflow_collector['address']).version
-
+ sflow_collector_ipver = ip_address(server).version
# check agent-id for sFlow: we should avoid mixing IPv4 agent-id with IPv6 collectors and vice-versa
- for sflow_collector in config['sflow']['servers']:
- if ip_address(sflow_collector['address']).version != ip_address(config['sflow']['agent-address']).version:
- raise ConfigError("Different IP address versions cannot be mixed in \"sflow agent-address\" and \"sflow server\". You need to set manually the same IP version for \"agent-address\" as for all sFlow servers")
-
- # check if configured sFlow agent-id exist in the system
- agent_id_presented = None
- for iface in Section.interfaces():
- for address in Interface(iface).get_addr():
- # check an IP, if this is not loopback
- regex_filter = re.compile('^(?!(127)|(::1)|(fe80))(?P<ipaddr>[a-f\d\.:]+)/\d+$')
- if regex_filter.search(address):
- if regex_filter.search(address).group('ipaddr') == config['sflow']['agent-address']:
- agent_id_presented = True
- break
- if not agent_id_presented:
- raise ConfigError("Your \"sflow agent-address\" does not exist in the system")
+ for server in flow_config['sflow']['server']:
+ if 'agent_address' in flow_config['sflow']:
+ if ip_address(server).version != ip_address(flow_config['sflow']['agent_address']).version:
+ raise ConfigError('IPv4 and IPv6 addresses can not be mixed in "sflow agent-address" and "sflow '\
+ 'server". You need to set the same IP version for both "agent-address" and '\
+ 'all sFlow servers')
+
+ if 'agent_address' in flow_config['sflow']:
+ tmp = flow_config['sflow']['agent_address']
+ if not is_addr_assigned(tmp):
+ print(f'Warning: Configured "sflow agent-address {tmp}" does not exist in the system!')
# check NetFlow configuration
- if config['netflow']['configured']:
+ if 'netflow' in flow_config:
# check if at least one NetFlow collector is configured if NetFlow configuration is presented
- if not config['netflow']['servers']:
- raise ConfigError("You need to configure at least one NetFlow server")
-
- # check if configured netflow source-ip exist in the system
- if config['netflow']['source-ip']:
- source_ip_presented = None
- for iface in Section.interfaces():
- for address in Interface(iface).get_addr():
- # check an IP
- regex_filter = re.compile('^(?!(127)|(::1)|(fe80))(?P<ipaddr>[a-f\d\.:]+)/\d+$')
- if regex_filter.search(address):
- if regex_filter.search(address).group('ipaddr') == config['netflow']['source-ip']:
- source_ip_presented = True
- break
- if not source_ip_presented:
- raise ConfigError("Your \"netflow source-ip\" does not exist in the system")
-
- # check if engine-id compatible with selected protocol version
- if config['netflow']['engine-id']:
+ if 'server' not in flow_config['netflow']:
+ raise ConfigError('You need to configure at least one NetFlow server!')
+
+ # Check if configured netflow source-address exist in the system
+ if 'source_address' in flow_config['netflow']:
+ if not is_addr_assigned(flow_config['netflow']['source_address']):
+ tmp = flow_config['netflow']['source_address']
+ print(f'Warning: Configured "netflow source-address {tmp}" does not exist on the system!')
+
+ # Check if engine-id compatible with selected protocol version
+ if 'engine_id' in flow_config['netflow']:
v5_filter = '^(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]):(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$'
v9v10_filter = '^(\d|[1-9]\d{1,8}|[1-3]\d{9}|4[01]\d{8}|42[0-8]\d{7}|429[0-3]\d{6}|4294[0-8]\d{5}|42949[0-5]\d{4}|429496[0-6]\d{3}|4294967[01]\d{2}|42949672[0-8]\d|429496729[0-5])$'
- if config['netflow']['version'] == '5':
+ engine_id = flow_config['netflow']['engine_id']
+ version = flow_config['netflow']['version']
+
+ if flow_config['netflow']['version'] == '5':
regex_filter = re.compile(v5_filter)
- if not regex_filter.search(config['netflow']['engine-id']):
- raise ConfigError("You cannot use NetFlow engine-id {} together with NetFlow protocol version {}".format(config['netflow']['engine-id'], config['netflow']['version']))
+ if not regex_filter.search(engine_id):
+ raise ConfigError(f'You cannot use NetFlow engine-id "{engine_id}" '\
+ f'together with NetFlow protocol version "{version}"!')
else:
regex_filter = re.compile(v9v10_filter)
- if not regex_filter.search(config['netflow']['engine-id']):
- raise ConfigError("You cannot use NetFlow engine-id {} together with NetFlow protocol version {}".format(config['netflow']['engine-id'], config['netflow']['version']))
+ if not regex_filter.search(flow_config['netflow']['engine_id']):
+ raise ConfigError(f'Can not use NetFlow engine-id "{engine_id}" together '\
+ f'with NetFlow protocol version "{version}"!')
# return True if all checks were passed
return True
-def generate(config):
- # skip all checks if flow-accounting was removed
- if not config['flow-accounting-configured']:
- return True
+def generate(flow_config):
+ if not flow_config:
+ return None
- # Calculate all necessary values
- if config['buffer-size']:
- # circular queue size
- config['plugin_pipe_size'] = int(config['buffer-size']) * 1024**2
- else:
- config['plugin_pipe_size'] = default_plugin_pipe_size * 1024**2
- # transfer buffer size
- # recommended value from pmacct developers 1/1000 of pipe size
- config['plugin_buffer_size'] = int(config['plugin_pipe_size'] / 1000)
-
- # Prepare a timeouts string
- timeout_string = ''
- for timeout_type, timeout_value in config['netflow']['timeout'].items():
- if timeout_value:
- if timeout_string == '':
- timeout_string = "{}{}={}".format(timeout_string, timeout_type, timeout_value)
- else:
- timeout_string = "{}:{}={}".format(timeout_string, timeout_type, timeout_value)
- config['netflow']['timeout_string'] = timeout_string
+ render(uacctd_conf_path, 'netflow/uacctd.conf.tmpl', flow_config)
- render(uacctd_conf_path, 'netflow/uacctd.conf.tmpl', {
- 'templatecfg': config,
- 'snaplen': default_captured_packet_size,
- })
-
-
-def apply(config):
- # define variables
- command = None
+def apply(flow_config):
+ action = 'restart'
# Check if flow-accounting was removed and define command
- if not config['flow-accounting-configured']:
- command = 'systemctl stop uacctd.service'
- else:
- command = 'systemctl restart uacctd.service'
+ if not flow_config:
+ _nftables_config([], 'ingress')
+ _nftables_config([], 'egress')
- # run command to start or stop flow-accounting
- cmd(command, raising=ConfigError, message='Failed to start/stop flow-accounting')
+ # Stop flow-accounting daemon and remove configuration file
+ cmd('systemctl stop uacctd.service')
+ if os.path.exists(uacctd_conf_path):
+ os.unlink(uacctd_conf_path)
+ return
- # configure iptables rules for defined interfaces
- if config['interfaces']:
- _iptables_config(config['interfaces'], 'ingress')
+ # Start/reload flow-accounting daemon
+ cmd(f'systemctl restart uacctd.service')
+
+ # configure nftables rules for defined interfaces
+ if 'interface' in flow_config:
+ _nftables_config(flow_config['interface'], 'ingress', flow_config['packet_length'])
# configure egress the same way if configured otherwise remove it
- if config['enable-egress']:
- _iptables_config(config['interfaces'], 'egress')
+ if 'enable_egress' in flow_config:
+ _nftables_config(flow_config['interface'], 'egress', flow_config['packet_length'])
else:
- _iptables_config([], 'egress')
- else:
- _iptables_config([], 'ingress')
- _iptables_config([], 'egress')
+ _nftables_config([], 'egress')
if __name__ == '__main__':
try:
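
A note on the engine-id validation above: it is plain anchored regex matching, with the v5 pattern accepting "A:B" (each part 0-255) and the v9/v10 pattern accepting a single unsigned 32-bit integer. A minimal standalone sketch of the same check (patterns copied from the hunk above, function name is illustrative):

    import re

    # NetFlow v5 engine-id: "engine:vrf-engine", each component 0-255
    V5 = r'^(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]):(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$'
    # NetFlow v9 / IPFIX engine-id: a single unsigned 32-bit integer (0-4294967295)
    V9V10 = r'^(\d|[1-9]\d{1,8}|[1-3]\d{9}|4[01]\d{8}|42[0-8]\d{7}|429[0-3]\d{6}|4294[0-8]\d{5}|42949[0-5]\d{4}|429496[0-6]\d{3}|4294967[01]\d{2}|42949672[0-8]\d|429496729[0-5])$'

    def engine_id_valid(engine_id: str, version: str) -> bool:
        """Return True if engine_id is acceptable for the given NetFlow version."""
        pattern = V5 if version == '5' else V9V10
        return re.search(pattern, engine_id) is not None

    assert engine_id_valid('1:100', '5')
    assert not engine_id_valid('256:0', '5')
    assert engine_id_valid('4294967295', '10')
    assert not engine_id_valid('4294967296', '10')
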
diff --git a/src/conf_mode/host_name.py b/src/conf_mode/host_name.py
index a7135911d..87bad0dc6 100755
--- a/src/conf_mode/host_name.py
+++ b/src/conf_mode/host_name.py
@@ -79,7 +79,7 @@ def get_config(config=None):
# system static-host-mapping
for hn in conf.list_nodes(['system', 'static-host-mapping', 'host-name']):
hosts['static_host_mapping'][hn] = {}
- hosts['static_host_mapping'][hn]['address'] = conf.return_value(['system', 'static-host-mapping', 'host-name', hn, 'inet'])
+ hosts['static_host_mapping'][hn]['address'] = conf.return_values(['system', 'static-host-mapping', 'host-name', hn, 'inet'])
hosts['static_host_mapping'][hn]['aliases'] = conf.return_values(['system', 'static-host-mapping', 'host-name', hn, 'alias'])
return hosts
diff --git a/src/conf_mode/http-api.py b/src/conf_mode/http-api.py
index 7e4b117c8..b5f5e919f 100755
--- a/src/conf_mode/http-api.py
+++ b/src/conf_mode/http-api.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019 VyOS maintainers and contributors
+# Copyright (C) 2019-2021 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -13,25 +13,26 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-#
import sys
import os
import json
-import time
+
+from time import sleep
from copy import deepcopy
import vyos.defaults
+
from vyos.config import Config
-from vyos import ConfigError
+from vyos.template import render
from vyos.util import cmd
from vyos.util import call
-
+from vyos import ConfigError
from vyos import airbag
airbag.enable()
-config_file = '/etc/vyos/http-api.conf'
+api_conf_file = '/etc/vyos/http-api.conf'
+systemd_service = '/run/systemd/system/vyos-http-api.service'
vyos_conf_scripts_dir=vyos.defaults.directories['conf_mode']
@@ -49,21 +50,35 @@ def get_config(config=None):
else:
conf = Config()
- if not conf.exists('service https api'):
+ base = ['service', 'https', 'api']
+ if not conf.exists(base):
return None
- else:
- conf.set_level('service https api')
+ # Do we run inside a VRF context?
+ vrf_path = ['service', 'https', 'vrf']
+ if conf.exists(vrf_path):
+ http_api['vrf'] = conf.return_value(vrf_path)
+
+ conf.set_level('service https api')
if conf.exists('strict'):
- http_api['strict'] = 'true'
+ http_api['strict'] = True
if conf.exists('debug'):
- http_api['debug'] = 'true'
+ http_api['debug'] = True
+
+ if conf.exists('socket'):
+ http_api['socket'] = True
if conf.exists('port'):
port = conf.return_value('port')
http_api['port'] = port
+ if conf.exists('cors'):
+ http_api['cors'] = {}
+ if conf.exists('cors allow-origin'):
+ origins = conf.return_values('cors allow-origin')
+ http_api['cors']['origins'] = origins[:]
+
if conf.exists('keys'):
for name in conf.list_nodes('keys id'):
if conf.exists('keys id {0} key'.format(name)):
@@ -83,24 +98,31 @@ def verify(http_api):
def generate(http_api):
if http_api is None:
+ if os.path.exists(systemd_service):
+ os.unlink(systemd_service)
return None
if not os.path.exists('/etc/vyos'):
os.mkdir('/etc/vyos')
- with open(config_file, 'w') as f:
+ with open(api_conf_file, 'w') as f:
json.dump(http_api, f, indent=2)
+ render(systemd_service, 'https/vyos-http-api.service.tmpl', http_api)
return None
def apply(http_api):
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+ service_name = 'vyos-http-api.service'
+
if http_api is not None:
- call('systemctl restart vyos-http-api.service')
+ call(f'systemctl restart {service_name}')
else:
- call('systemctl stop vyos-http-api.service')
+ call(f'systemctl stop {service_name}')
# Let uvicorn settle before restarting Nginx
- time.sleep(2)
+ sleep(1)
cmd(f'{vyos_conf_scripts_dir}/https.py', raising=ConfigError)
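
The ordering in the new apply() matters: the unit file now lives under /run/systemd/system, so systemd has to re-read its unit files before the service is restarted, and nginx is only reconfigured afterwards. A rough, generic sketch of that pattern outside of VyOS (plain subprocess instead of vyos.util; unit name and sleep time are illustrative):

    import subprocess
    from time import sleep

    def reload_and_restart(unit: str, enabled: bool) -> None:
        """Re-read unit definitions, then restart or stop the given service."""
        # pick up freshly rendered unit/override files under /run/systemd/system
        subprocess.run(['systemctl', 'daemon-reload'], check=True)
        if enabled:
            subprocess.run(['systemctl', 'restart', unit], check=True)
            # give the backend a moment to bind its socket before the proxy is reloaded
            sleep(1)
        else:
            subprocess.run(['systemctl', 'stop', unit], check=True)

    # reload_and_restart('vyos-http-api.service', enabled=True)
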
diff --git a/src/conf_mode/https.py b/src/conf_mode/https.py
index be4380462..37fa36797 100755
--- a/src/conf_mode/https.py
+++ b/src/conf_mode/https.py
@@ -23,16 +23,19 @@ import vyos.defaults
import vyos.certbot_util
from vyos.config import Config
+from vyos.configverify import verify_vrf
from vyos import ConfigError
from vyos.pki import wrap_certificate
from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.util import call
+from vyos.util import write_file
from vyos import airbag
airbag.enable()
config_file = '/etc/nginx/sites-available/default'
+systemd_override = r'/etc/systemd/system/nginx.service.d/override.conf'
cert_dir = '/etc/ssl/certs'
key_dir = '/etc/ssl/private'
certbot_dir = vyos.defaults.directories['certbot']
@@ -58,10 +61,11 @@ def get_config(config=None):
else:
conf = Config()
- if not conf.exists('service https'):
+ base = ['service', 'https']
+ if not conf.exists(base):
return None
- https = conf.get_config_dict('service https', get_first_key=True)
+ https = conf.get_config_dict(base, get_first_key=True)
if https:
https['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
@@ -102,6 +106,8 @@ def verify(https):
if not domains_found:
raise ConfigError("At least one 'virtual-host <id> server-name' "
"matching the 'certbot domain-name' is required.")
+
+ verify_vrf(https)
return None
def generate(https):
@@ -139,15 +145,17 @@ def generate(https):
cert_path = os.path.join(cert_dir, f'{cert_name}.pem')
key_path = os.path.join(key_dir, f'{cert_name}.pem')
- with open(cert_path, 'w') as f:
- f.write(wrap_certificate(pki_cert['certificate']))
+ server_cert = str(wrap_certificate(pki_cert['certificate']))
+ if 'ca-certificate' in cert_dict:
+ ca_cert = cert_dict['ca-certificate']
+ server_cert += '\n' + str(wrap_certificate(https['pki']['ca'][ca_cert]['certificate']))
- with open(key_path, 'w') as f:
- f.write(wrap_private_key(pki_cert['private']['key']))
+ write_file(cert_path, server_cert)
+ write_file(key_path, wrap_private_key(pki_cert['private']['key']))
vyos_cert_data = {
- "crt": cert_path,
- "key": key_path
+ 'crt': cert_path,
+ 'key': key_path
}
for block in server_block_list:
@@ -184,6 +192,8 @@ def generate(https):
vhosts = https.get('api-restrict', {}).get('virtual-host', [])
if vhosts:
api_data['vhost'] = vhosts[:]
+ if 'socket' in list(api_settings):
+ api_data['socket'] = True
if api_data:
vhost_list = api_data.get('vhost', [])
@@ -205,10 +215,12 @@ def generate(https):
}
render(config_file, 'https/nginx.default.tmpl', data)
-
+ render(systemd_override, 'https/override.conf.tmpl', https)
return None
def apply(https):
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
if https is not None:
call('systemctl restart nginx.service')
else:
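
The generate() change above builds the full server chain (leaf certificate plus optional CA certificate) before writing it next to the private key. A minimal sketch of the same idea with plain file I/O, assuming the inputs are already PEM-encoded strings (the VyOS helpers wrap_certificate/write_file are not reproduced here, and the key permissions are an illustrative choice):

    import os

    def write_cert_chain(cert_path: str, key_path: str, server_pem: str,
                         key_pem: str, ca_pem: str = None) -> None:
        """Write the server certificate (optionally followed by its CA) and the key."""
        chain = server_pem
        if ca_pem:
            # nginx expects the issuing CA appended after the leaf certificate
            chain += '\n' + ca_pem
        with open(cert_path, 'w') as f:
            f.write(chain)
        with open(key_path, 'w') as f:
            f.write(key_pem)
        # keep the private key readable by root only (illustrative, not from the patch)
        os.chmod(key_path, 0o600)
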
diff --git a/src/conf_mode/interfaces-openvpn.py b/src/conf_mode/interfaces-openvpn.py
index 02b7f83bf..3b8fae710 100755
--- a/src/conf_mode/interfaces-openvpn.py
+++ b/src/conf_mode/interfaces-openvpn.py
@@ -16,6 +16,7 @@
import os
import re
+import tempfile
from cryptography.hazmat.primitives.asymmetric import ec
from glob import glob
@@ -26,6 +27,7 @@ from ipaddress import IPv6Address
from ipaddress import IPv6Network
from ipaddress import summarize_address_range
from netifaces import interfaces
+from secrets import SystemRandom
from shutil import rmtree
from vyos.config import Config
@@ -48,6 +50,7 @@ from vyos.util import chown
from vyos.util import dict_search
from vyos.util import dict_search_args
from vyos.util import makedir
+from vyos.util import read_file
from vyos.util import write_file
from vyos.validate import is_addr_assigned
@@ -60,6 +63,10 @@ group = 'openvpn'
cfg_dir = '/run/openvpn'
cfg_file = '/run/openvpn/{ifname}.conf'
+otp_path = '/config/auth/openvpn'
+otp_file = '/config/auth/openvpn/{ifname}-otp-secrets'
+secret_chars = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
+service_file = '/run/systemd/system/openvpn@{ifname}.service.d/20-override.conf'
def get_config(config=None):
"""
@@ -80,7 +87,20 @@ def get_config(config=None):
if 'deleted' not in openvpn:
openvpn['pki'] = tmp_pki
+ # We have to get the dict using 'get_config_dict' instead of 'get_interface_dict'
+ # as 'get_interface_dict' merges the defaults in, so we could not tell explicitly set values apart from them.
+ tmp = conf.get_config_dict(base + [openvpn['ifname']], get_first_key=True)
+
+ # We have to clean up the config dict, as default values could enable features
+ # which are not explicitly enabled on the CLI. Example: the defaults under
+ # "server mfa totp" would enable the totp plugin even when it was never set
+ # via CLI, so we need to check for this first and drop those keys
+ if dict_search('server.mfa.totp', tmp) == None:
+ del openvpn['server']['mfa']
+
openvpn['auth_user_pass_file'] = '/run/openvpn/{ifname}.pw'.format(**openvpn)
+
return openvpn
def is_ec_private_key(pki, cert_name):
@@ -134,7 +154,7 @@ def verify_pki(openvpn):
if tls['certificate'] not in pki['certificate']:
raise ConfigError(f'Invalid certificate on openvpn interface {interface}')
- if dict_search_args(pki, 'certificate', tls['certificate'], 'private', 'password_protected'):
+ if dict_search_args(pki, 'certificate', tls['certificate'], 'private', 'password_protected') is not None:
raise ConfigError(f'Cannot use encrypted private key on openvpn interface {interface}')
if mode == 'server' and 'dh_params' not in tls and not is_ec_private_key(pki, tls['certificate']):
@@ -169,6 +189,10 @@ def verify_pki(openvpn):
def verify(openvpn):
if 'deleted' in openvpn:
+ # remove totp secrets file if totp is not configured
+ if os.path.isfile(otp_file.format(**openvpn)):
+ os.remove(otp_file.format(**openvpn))
+
verify_bridge_delete(openvpn)
return None
@@ -309,10 +333,10 @@ def verify(openvpn):
if 'is_bridge_member' not in openvpn:
raise ConfigError('Must specify "server subnet" or add interface to bridge in server mode')
-
- for client in (dict_search('client', openvpn) or []):
- if len(client['ip']) > 1 or len(client['ipv6_ip']) > 1:
- raise ConfigError(f'Server client "{client["name"]}": cannot specify more than 1 IPv4 and 1 IPv6 IP')
+ if hasattr(dict_search('server.client', openvpn), '__iter__'):
+ for client_k, client_v in dict_search('server.client', openvpn).items():
+ if (client_v.get('ip') and len(client_v['ip']) > 1) or (client_v.get('ipv6_ip') and len(client_v['ipv6_ip']) > 1):
+ raise ConfigError(f'Server client "{client_k}": cannot specify more than 1 IPv4 and 1 IPv6 IP')
if dict_search('server.client_ip_pool', openvpn):
if not (dict_search('server.client_ip_pool.start', openvpn) and dict_search('server.client_ip_pool.stop', openvpn)):
@@ -360,6 +384,29 @@ def verify(openvpn):
if IPv6Address(client['ipv6_ip'][0]) in v6PoolNet:
print(f'Warning: Client "{client["name"]}" IP {client["ipv6_ip"][0]} is in server IP pool, it is not reserved for this client.')
+ # add mfa users to the file the mfa plugin uses
+ if dict_search('server.mfa.totp', openvpn):
+ user_data = ''
+ if not os.path.isfile(otp_file.format(**openvpn)):
+ write_file(otp_file.format(**openvpn), user_data,
+ user=user, group=group, mode=0o644)
+
+ ovpn_users = read_file(otp_file.format(**openvpn))
+ for client in (dict_search('server.client', openvpn) or []):
+ exists = None
+ for ovpn_user in ovpn_users.split('\n'):
+ if re.search('^' + client + ' ', ovpn_user):
+ user_data += f'{ovpn_user}\n'
+ exists = 'true'
+
+ if not exists:
+ random = SystemRandom()
+ totp_secret = ''.join(random.choice(secret_chars) for _ in range(16))
+ user_data += f'{client} otp totp:sha1:base32:{totp_secret}::xxx *\n'
+
+ write_file(otp_file.format(**openvpn), user_data,
+ user=user, group=group, mode=0o644)
+
else:
# checks for both client and site-to-site go here
if dict_search('server.reject_unconfigured_clients', openvpn):
@@ -525,6 +572,7 @@ def generate_pki_files(openvpn):
def generate(openvpn):
interface = openvpn['ifname']
directory = os.path.dirname(cfg_file.format(**openvpn))
+ plugin_dir = '/usr/lib/openvpn'
# create base config directory on demand
makedir(directory, user, group)
# enforce proper permissions on /run/openvpn
@@ -536,6 +584,11 @@ def generate(openvpn):
if os.path.isdir(ccd_dir):
rmtree(ccd_dir, ignore_errors=True)
+ # Remove systemd directories with overrides
+ service_dir = os.path.dirname(service_file.format(**openvpn))
+ if os.path.isdir(service_dir):
+ rmtree(service_dir, ignore_errors=True)
+
if 'deleted' in openvpn or 'disable' in openvpn:
return None
@@ -571,14 +624,20 @@ def generate(openvpn):
render(cfg_file.format(**openvpn), 'openvpn/server.conf.tmpl', openvpn,
formater=lambda _: _.replace("&quot;", '"'), user=user, group=group)
+ # Render 20-override.conf for OpenVPN service
+ render(service_file.format(**openvpn), 'openvpn/service-override.conf.tmpl', openvpn,
+ formater=lambda _: _.replace("&quot;", '"'), user=user, group=group)
+ # Reload systemd services config to apply an override
+ call(f'systemctl daemon-reload')
+
return None
def apply(openvpn):
interface = openvpn['ifname']
- call(f'systemctl stop openvpn@{interface}.service')
# Do some cleanup when OpenVPN is disabled/deleted
if 'deleted' in openvpn or 'disable' in openvpn:
+ call(f'systemctl stop openvpn@{interface}.service')
for cleanup_file in glob(f'/run/openvpn/{interface}.*'):
if os.path.isfile(cleanup_file):
os.unlink(cleanup_file)
@@ -590,7 +649,7 @@ def apply(openvpn):
# No matching OpenVPN process running - maybe it got killed or none
# existed - nevertheless, spawn new OpenVPN process
- call(f'systemctl start openvpn@{interface}.service')
+ call(f'systemctl reload-or-restart openvpn@{interface}.service')
o = VTunIf(**openvpn)
o.update(openvpn)
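
The MFA hunk above keeps one otp-secrets line per client and only generates a secret for clients that do not have one yet. A standalone sketch of just the generation step (alphabet and line format taken from the hunk above, file handling omitted):

    from secrets import SystemRandom

    # base32 alphabet without padding, as used by the otp plugin secrets above
    SECRET_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'

    def totp_secret_line(client: str, length: int = 16) -> str:
        """Return an openvpn-otp secrets line with a freshly generated base32 secret."""
        rng = SystemRandom()
        secret = ''.join(rng.choice(SECRET_CHARS) for _ in range(length))
        return f'{client} otp totp:sha1:base32:{secret}::xxx *\n'

    print(totp_secret_line('alice'), end='')
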
diff --git a/src/conf_mode/interfaces-tunnel.py b/src/conf_mode/interfaces-tunnel.py
index ef385d2e7..30f57ec0c 100755
--- a/src/conf_mode/interfaces-tunnel.py
+++ b/src/conf_mode/interfaces-tunnel.py
@@ -98,7 +98,7 @@ def verify(tunnel):
# If tunnel source address any and key not set
if tunnel['encapsulation'] in ['gre'] and \
- tunnel['source_address'] == '0.0.0.0' and \
+ dict_search('source_address', tunnel) == '0.0.0.0' and \
dict_search('parameters.ip.key', tunnel) == None:
raise ConfigError('Tunnel parameters ip key must be set!')
@@ -107,19 +107,22 @@ def verify(tunnel):
# Check pairs tunnel source-address/encapsulation/key with exists tunnels.
# Prevent the same key for 2 tunnels with same source-address/encap. T2920
for tunnel_if in Section.interfaces('tunnel'):
+ # There is no point in running the re-used GRE key check against the
+ # interface we are currently configuring
+ if tunnel['ifname'] == tunnel_if:
+ continue
tunnel_cfg = get_interface_config(tunnel_if)
- exist_encap = tunnel_cfg['linkinfo']['info_kind']
- exist_source_address = tunnel_cfg['address']
- exist_key = tunnel_cfg['linkinfo']['info_data']['ikey']
- new_source_address = tunnel['source_address']
+ # no match on encapsulation - bail out
+ if dict_search('linkinfo.info_kind', tunnel_cfg) != tunnel['encapsulation']:
+ continue
+ new_source_address = dict_search('source_address', tunnel)
# Convert tunnel key to ip key, format "ip -j link show"
# 1 => 0.0.0.1, 999 => 0.0.3.231
- orig_new_key = int(tunnel['parameters']['ip']['key'])
- new_key = IPv4Address(orig_new_key)
+ orig_new_key = dict_search('parameters.ip.key', tunnel)
+ new_key = IPv4Address(int(orig_new_key))
new_key = str(new_key)
- if tunnel['encapsulation'] == exist_encap and \
- new_source_address == exist_source_address and \
- new_key == exist_key:
+ if dict_search('address', tunnel_cfg) == new_source_address and \
+ dict_search('linkinfo.info_data.ikey', tunnel_cfg) == new_key:
raise ConfigError(f'Key "{orig_new_key}" for source-address "{new_source_address}" ' \
f'is already used for tunnel "{tunnel_if}"!')
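
The duplicate-key check above compares the CLI key with the ikey reported by "ip -j link show", which prints GRE keys as dotted quads. The conversion is just an IPv4Address round-trip:

    from ipaddress import IPv4Address

    def gre_key_to_quad(key: int) -> str:
        """Convert a numeric GRE key to the dotted-quad form used by iproute2."""
        return str(IPv4Address(key))

    assert gre_key_to_quad(1) == '0.0.0.1'
    assert gre_key_to_quad(999) == '0.0.3.231'
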
diff --git a/src/conf_mode/interfaces-vxlan.py b/src/conf_mode/interfaces-vxlan.py
index 804f2d14f..1f097c4e3 100755
--- a/src/conf_mode/interfaces-vxlan.py
+++ b/src/conf_mode/interfaces-vxlan.py
@@ -44,6 +44,20 @@ def get_config(config=None):
base = ['interfaces', 'vxlan']
vxlan = get_interface_dict(conf, base)
+ # We need to verify that no other VXLAN tunnel is configured when external
+ # mode is in use - Linux Kernel limitation
+ conf.set_level(base)
+ vxlan['other_tunnels'] = conf.get_config_dict([], key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # This if-clause is just to be sure - it will always evaluate to true
+ ifname = vxlan['ifname']
+ if ifname in vxlan['other_tunnels']:
+ del vxlan['other_tunnels'][ifname]
+ if len(vxlan['other_tunnels']) == 0:
+ del vxlan['other_tunnels']
+
return vxlan
def verify(vxlan):
@@ -63,8 +77,21 @@ def verify(vxlan):
if not any(tmp in ['group', 'remote', 'source_address'] for tmp in vxlan):
raise ConfigError('Group, remote or source-address must be configured')
- if 'vni' not in vxlan:
- raise ConfigError('Must configure VNI for VXLAN')
+ if 'vni' not in vxlan and 'external' not in vxlan:
+ raise ConfigError(
+ 'Must either configure VXLAN "vni" or use "external" CLI option!')
+
+ if {'external', 'vni'} <= set(vxlan):
+ raise ConfigError('Can not specify both "external" and "VNI"!')
+
+ if {'external', 'other_tunnels'} <= set(vxlan):
+ other_tunnels = ', '.join(vxlan['other_tunnels'])
+ raise ConfigError(f'Only one VXLAN tunnel is supported when "external" '\
+ f'CLI option is used. Additional tunnels: {other_tunnels}')
+
+ if 'gpe' in vxlan and 'external' not in vxlan:
+ raise ConfigError(f'VXLAN-GPE is only supported when "external" '\
+ f'CLI option is used.')
if 'source_interface' in vxlan:
# VXLAN adds at least an overhead of 50 byte - we need to check the
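
The new VXLAN checks rely on set comparisons against the config dictionary keys; {'a', 'b'} <= set(d) is true only when both nodes are present. A tiny sketch with a hypothetical config dict:

    def verify_exclusive(config: dict, a: str, b: str) -> None:
        """Raise if two mutually exclusive nodes are both present in the config dict."""
        if {a, b} <= set(config):
            raise ValueError(f'Can not specify both "{a}" and "{b}"!')

    verify_exclusive({'external': {}, 'group': '239.1.1.1'}, 'external', 'vni')  # passes
    # verify_exclusive({'external': {}, 'vni': '10'}, 'external', 'vni')         # raises
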
diff --git a/src/conf_mode/interfaces-wireless.py b/src/conf_mode/interfaces-wireless.py
index 7b3de6e8a..af35b5f03 100755
--- a/src/conf_mode/interfaces-wireless.py
+++ b/src/conf_mode/interfaces-wireless.py
@@ -82,11 +82,12 @@ def get_config(config=None):
tmp = conf.get_config_dict([], key_mangling=('-', '_'), get_first_key=True)
if not (dict_search('security.wpa.passphrase', tmp) or
dict_search('security.wpa.radius', tmp)):
- del wifi['security']['wpa']
+ if 'deleted' not in wifi:
+ del wifi['security']['wpa']
# defaults include RADIUS server specifics per TAG node which need to be
# added to individual RADIUS servers instead - so we can simply delete them
- if dict_search('security.wpa.radius.server.port', wifi):
+ if dict_search('security.wpa.radius.server.port', wifi) != None:
del wifi['security']['wpa']['radius']['server']['port']
if not len(wifi['security']['wpa']['radius']['server']):
del wifi['security']['wpa']['radius']
diff --git a/src/conf_mode/interfaces-wwan.py b/src/conf_mode/interfaces-wwan.py
index faa5eb628..a4b033374 100755
--- a/src/conf_mode/interfaces-wwan.py
+++ b/src/conf_mode/interfaces-wwan.py
@@ -17,6 +17,7 @@
import os
from sys import exit
+from time import sleep
from vyos.config import Config
from vyos.configdict import get_interface_dict
@@ -25,11 +26,18 @@ from vyos.configverify import verify_interface_exists
from vyos.configverify import verify_vrf
from vyos.ifconfig import WWANIf
from vyos.util import cmd
+from vyos.util import call
from vyos.util import dict_search
+from vyos.util import DEVNULL
+from vyos.util import is_systemd_service_active
+from vyos.util import write_file
from vyos import ConfigError
from vyos import airbag
airbag.enable()
+service_name = 'ModemManager.service'
+cron_script = '/etc/cron.d/wwan'
+
def get_config(config=None):
"""
Retrieve CLI config as dictionary. Dictionary can never be empty, as at least the
@@ -42,6 +50,20 @@ def get_config(config=None):
base = ['interfaces', 'wwan']
wwan = get_interface_dict(conf, base)
+ # We need to know the amount of other WWAN interfaces as ModemManager needs
+ # to be started or stopped.
+ conf.set_level(base)
+ wwan['other_interfaces'] = conf.get_config_dict([], key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # This if-clause is just to be sure - it will always evaluate to true
+ ifname = wwan['ifname']
+ if ifname in wwan['other_interfaces']:
+ del wwan['other_interfaces'][ifname]
+ if len(wwan['other_interfaces']) == 0:
+ del wwan['other_interfaces']
+
return wwan
def verify(wwan):
@@ -59,9 +81,26 @@ def verify(wwan):
return None
def generate(wwan):
+ if 'deleted' in wwan:
+ return None
+
+ if not os.path.exists(cron_script):
+ write_file(cron_script, '*/5 * * * * root /usr/libexec/vyos/vyos-check-wwan.py')
return None
def apply(wwan):
+ if not is_systemd_service_active(service_name):
+ cmd(f'systemctl start {service_name}')
+
+ counter = 100
+ # Wait until a modem is detected and then we can continue
+ while counter > 0:
+ counter -= 1
+ tmp = cmd('mmcli -L')
+ if tmp != 'No modems were found':
+ break
+ sleep(0.250)
+
# we only need the modem number. wwan0 -> 0, wwan1 -> 1
modem = wwan['ifname'].lstrip('wwan')
base_cmd = f'mmcli --modem {modem}'
@@ -71,6 +110,15 @@ def apply(wwan):
w = WWANIf(wwan['ifname'])
if 'deleted' in wwan or 'disable' in wwan:
w.remove()
+
+ # There are no other WWAN interfaces - stop the daemon
+ if 'other_interfaces' not in wwan:
+ cmd(f'systemctl stop {service_name}')
+ # Clean up the CRON helper script which is used to re-connect when
+ # RF signal is lost
+ if os.path.exists(cron_script):
+ os.unlink(cron_script)
+
return None
ip_type = 'ipv4'
@@ -88,9 +136,12 @@ def apply(wwan):
options += ',user={user},password={password}'.format(**wwan['authentication'])
command = f'{base_cmd} --simple-connect="{options}"'
- cmd(command)
+ call(command, stdout=DEVNULL)
w.update(wwan)
+ if 'other_interfaces' not in wwan and 'deleted' in wwan:
+ cmd(f'systemctl start {service_name}')
+
return None
if __name__ == '__main__':
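
apply() above busy-waits until ModemManager has detected a modem before issuing any mmcli commands. A generic sketch of the same bounded-polling pattern (predicate, attempt count and interval are illustrative, matching the 100 x 0.25s budget used above):

    from time import sleep

    def wait_for(predicate, attempts: int = 100, interval: float = 0.25) -> bool:
        """Poll predicate() until it returns True or the attempt budget runs out."""
        for _ in range(attempts):
            if predicate():
                return True
            sleep(interval)
        return False

    # e.g. wait_for(lambda: 'No modems were found' not in mmcli_list_output())
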
diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py
index 59939d0fb..96f8f6fb6 100755
--- a/src/conf_mode/nat.py
+++ b/src/conf_mode/nat.py
@@ -42,7 +42,7 @@ if LooseVersion(kernel_version()) > LooseVersion('5.1'):
else:
k_mod = ['nft_nat', 'nft_chain_nat_ipv4']
-iptables_nat_config = '/tmp/vyos-nat-rules.nft'
+nftables_nat_config = '/tmp/vyos-nat-rules.nft'
def get_handler(json, chain, target):
""" Get nftable rule handler number of given chain/target combination.
@@ -93,7 +93,6 @@ def get_config(config=None):
nat[direction]['rule'][rule] = dict_merge(default_values,
nat[direction]['rule'][rule])
-
# read in current nftable (once) for further processing
tmp = cmd('nft -j list table raw')
nftable_json = json.loads(tmp)
@@ -106,9 +105,9 @@ def get_config(config=None):
nat['helper_functions'] = 'remove'
# Retrieve current table handler positions
- nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_HELPER')
+ nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_HELPER')
nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'NAT_CONNTRACK')
- nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_HELPER')
+ nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_HELPER')
nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'NAT_CONNTRACK')
nat['deleted'] = ''
return nat
@@ -119,10 +118,10 @@ def get_config(config=None):
nat['helper_functions'] = 'add'
# Retrieve current table handler positions
- nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_IGNORE')
- nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_PREROUTING_HOOK')
- nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_IGNORE')
- nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_OUTPUT_HOOK')
+ nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_IGNORE')
+ nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_PREROUTING_HOOK')
+ nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_IGNORE')
+ nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_OUTPUT_HOOK')
return nat
@@ -180,14 +179,14 @@ def verify(nat):
return None
def generate(nat):
- render(iptables_nat_config, 'firewall/nftables-nat.tmpl', nat,
+ render(nftables_nat_config, 'firewall/nftables-nat.tmpl', nat,
permission=0o755)
return None
def apply(nat):
- cmd(f'{iptables_nat_config}')
- if os.path.isfile(iptables_nat_config):
- os.unlink(iptables_nat_config)
+ cmd(f'{nftables_nat_config}')
+ if os.path.isfile(nftables_nat_config):
+ os.unlink(nftables_nat_config)
return None
diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py
index f8bc073bb..8bf2e8073 100755
--- a/src/conf_mode/nat66.py
+++ b/src/conf_mode/nat66.py
@@ -35,7 +35,7 @@ airbag.enable()
k_mod = ['nft_nat', 'nft_chain_nat']
-iptables_nat_config = '/tmp/vyos-nat66-rules.nft'
+nftables_nat66_config = '/tmp/vyos-nat66-rules.nft'
ndppd_config = '/run/ndppd/ndppd.conf'
def get_handler(json, chain, target):
@@ -79,9 +79,9 @@ def get_config(config=None):
if not conf.exists(base):
nat['helper_functions'] = 'remove'
- nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_HELPER')
+ nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_HELPER')
nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'NAT_CONNTRACK')
- nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_HELPER')
+ nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_HELPER')
nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'NAT_CONNTRACK')
nat['deleted'] = ''
return nat
@@ -92,10 +92,10 @@ def get_config(config=None):
nat['helper_functions'] = 'add'
# Retrieve current table handler positions
- nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_IGNORE')
- nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'VYATTA_CT_PREROUTING_HOOK')
- nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_IGNORE')
- nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'VYATTA_CT_OUTPUT_HOOK')
+ nat['pre_ct_ignore'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_IGNORE')
+ nat['pre_ct_conntrack'] = get_handler(condensed_json, 'PREROUTING', 'VYOS_CT_PREROUTING_HOOK')
+ nat['out_ct_ignore'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_IGNORE')
+ nat['out_ct_conntrack'] = get_handler(condensed_json, 'OUTPUT', 'VYOS_CT_OUTPUT_HOOK')
else:
nat['helper_functions'] = 'has'
@@ -117,7 +117,7 @@ def verify(nat):
raise ConfigError(f'{err_msg} outbound-interface not specified')
if config['outbound_interface'] not in interfaces():
- print(f'WARNING: rule "{rule}" interface "{config["outbound_interface"]}" does not exist on this system')
+ raise ConfigError(f'rule "{rule}" interface "{config["outbound_interface"]}" does not exist on this system')
addr = dict_search('translation.address', config)
if addr != None:
@@ -145,22 +145,22 @@ def verify(nat):
return None
def generate(nat):
- render(iptables_nat_config, 'firewall/nftables-nat66.tmpl', nat, permission=0o755)
+ render(nftables_nat66_config, 'firewall/nftables-nat66.tmpl', nat, permission=0o755)
render(ndppd_config, 'ndppd/ndppd.conf.tmpl', nat, permission=0o755)
return None
def apply(nat):
if not nat:
return None
- cmd(f'{iptables_nat_config}')
+ cmd(f'{nftables_nat66_config}')
if 'deleted' in nat or not dict_search('source.rule', nat):
cmd('systemctl stop ndppd')
if os.path.isfile(ndppd_config):
os.unlink(ndppd_config)
else:
cmd('systemctl restart ndppd')
- if os.path.isfile(iptables_nat_config):
- os.unlink(iptables_nat_config)
+ if os.path.isfile(nftables_nat66_config):
+ os.unlink(nftables_nat66_config)
return None
diff --git a/src/conf_mode/netns.py b/src/conf_mode/netns.py
new file mode 100755
index 000000000..0924eb616
--- /dev/null
+++ b/src/conf_mode/netns.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+from tempfile import NamedTemporaryFile
+
+from vyos.config import Config
+from vyos.configdict import node_changed
+from vyos.ifconfig import Interface
+from vyos.util import call
+from vyos.util import dict_search
+from vyos.util import get_interface_config
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+
+def netns_interfaces(c, match):
+ """
+ get NETNS bound interfaces
+ """
+ matched = []
+ old_level = c.get_level()
+ c.set_level(['interfaces'])
+ section = c.get_config_dict([], get_first_key=True)
+ for type in section:
+ interfaces = section[type]
+ for name in interfaces:
+ interface = interfaces[name]
+ if 'netns' in interface:
+ v = interface.get('netns', '')
+ if v == match:
+ matched.append(name)
+
+ c.set_level(old_level)
+ return matched
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['netns']
+ netns = conf.get_config_dict(base, get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # determine which NETNS has been removed
+ for name in node_changed(conf, base + ['name']):
+ if 'netns_remove' not in netns:
+ netns.update({'netns_remove' : {}})
+
+ netns['netns_remove'][name] = {}
+ # get NETNS bound interfaces
+ interfaces = netns_interfaces(conf, name)
+ if interfaces: netns['netns_remove'][name]['interface'] = interfaces
+
+ return netns
+
+def verify(netns):
+ # ensure NETNS is not assigned to any interface
+ if 'netns_remove' in netns:
+ for name, config in netns['netns_remove'].items():
+ if 'interface' in config:
+ raise ConfigError(f'Can not remove NETNS "{name}", it still has '\
+ f'member interfaces!')
+
+ if 'name' in netns:
+ for name, config in netns['name'].items():
+ print(name)
+
+ return None
+
+
+def generate(netns):
+ if not netns:
+ return None
+
+ return None
+
+
+def apply(netns):
+
+ for tmp in (dict_search('netns_remove', netns) or []):
+ if os.path.isfile(f'/run/netns/{tmp}'):
+ call(f'ip netns del {tmp}')
+
+ if 'name' in netns:
+ for name, config in netns['name'].items():
+ if not os.path.isfile(f'/run/netns/{name}'):
+ call(f'ip netns add {name}')
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
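
apply() above only touches namespaces whose state differs from what is present under /run/netns. A minimal sketch of that idempotent add/remove step using iproute2 directly (subprocess instead of vyos.util.call):

    import os
    import subprocess

    def ensure_netns(name: str, present: bool) -> None:
        """Create or delete a named network namespace only if its state differs."""
        exists = os.path.isfile(f'/run/netns/{name}')
        if present and not exists:
            subprocess.run(['ip', 'netns', 'add', name], check=True)
        elif not present and exists:
            subprocess.run(['ip', 'netns', 'del', name], check=True)
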
diff --git a/src/conf_mode/policy-route-interface.py b/src/conf_mode/policy-route-interface.py
new file mode 100755
index 000000000..e81135a74
--- /dev/null
+++ b/src/conf_mode/policy-route-interface.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+
+from sys import argv
+from sys import exit
+
+from vyos.config import Config
+from vyos.ifconfig import Section
+from vyos.template import render
+from vyos.util import cmd
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ ifname = argv[1]
+ ifpath = Section.get_config_path(ifname)
+ if_policy_path = f'interfaces {ifpath} policy'
+
+ if_policy = conf.get_config_dict(if_policy_path, key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ if_policy['ifname'] = ifname
+ if_policy['policy'] = conf.get_config_dict(['policy'], key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ return if_policy
+
+def verify(if_policy):
+ # bail out early - looks like removal from running config
+ if not if_policy:
+ return None
+
+ for route in ['route', 'ipv6_route']:
+ if route in if_policy:
+ if route not in if_policy['policy']:
+ raise ConfigError('Policy route not configured')
+
+ route_name = if_policy[route]
+
+ if route_name not in if_policy['policy'][route]:
+ raise ConfigError(f'Invalid policy route name "{route_name}"')
+
+ return None
+
+def generate(if_policy):
+ return None
+
+def cleanup_rule(table, chain, ifname, new_name=None):
+ results = cmd(f'nft -a list chain {table} {chain}').split("\n")
+ retval = None
+ for line in results:
+ if f'oifname "{ifname}"' in line:
+ if new_name and f'jump {new_name}' in line:
+ # new_name is used to clear rules for any previously referenced chains
+ # returns true when rule exists and doesn't need to be created
+ retval = True
+ continue
+
+ handle_search = re.search('handle (\d+)', line)
+ if handle_search:
+ cmd(f'nft delete rule {table} {chain} handle {handle_search[1]}')
+ return retval
+
+def apply(if_policy):
+ ifname = if_policy['ifname']
+
+ route_chain = 'VYOS_PBR_PREROUTING'
+ ipv6_route_chain = 'VYOS_PBR6_PREROUTING'
+
+ if 'route' in if_policy:
+ name = 'VYOS_PBR_' + if_policy['route']
+ rule_exists = cleanup_rule('ip mangle', route_chain, ifname, name)
+
+ if not rule_exists:
+ cmd(f'nft insert rule ip mangle {route_chain} iifname {ifname} counter jump {name}')
+ else:
+ cleanup_rule('ip mangle', route_chain, ifname)
+
+ if 'ipv6_route' in if_policy:
+ name = 'VYOS_PBR6_' + if_policy['ipv6_route']
+ rule_exists = cleanup_rule('ip6 mangle', ipv6_route_chain, ifname, name)
+
+ if not rule_exists:
+ cmd(f'nft insert rule ip6 mangle {ipv6_route_chain} iifname {ifname} counter jump {name}')
+ else:
+ cleanup_rule('ip6 mangle', ipv6_route_chain, ifname)
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
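
cleanup_rule() above depends on "nft -a list chain" printing a "# handle N" suffix for every rule, which allows deleting rules by handle. A small sketch of just that parsing step on a captured listing (the sample line is illustrative):

    import re

    def rule_handles(listing: str, match_expr: str) -> list:
        """Return the nft rule handles of all listed rules containing match_expr."""
        handles = []
        for line in listing.splitlines():
            if match_expr in line:
                found = re.search(r'handle (\d+)', line)
                if found:
                    handles.append(int(found[1]))
        return handles

    sample = 'iifname "eth0" counter jump VYOS_PBR_TEST # handle 12'
    assert rule_handles(sample, 'iifname "eth0"') == [12]
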
diff --git a/src/conf_mode/policy-route.py b/src/conf_mode/policy-route.py
new file mode 100755
index 000000000..d098be68d
--- /dev/null
+++ b/src/conf_mode/policy-route.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from json import loads
+from sys import exit
+
+from vyos.config import Config
+from vyos.template import render
+from vyos.util import cmd
+from vyos.util import dict_search_args
+from vyos.util import run
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+mark_offset = 0x7FFFFFFF
+nftables_conf = '/run/nftables_policy.conf'
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['policy']
+
+ if not conf.exists(base + ['route']) and not conf.exists(base + ['ipv6-route']):
+ return None
+
+ policy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ return policy
+
+def verify(policy):
+ # bail out early - looks like removal from running config
+ if not policy:
+ return None
+
+ for route in ['route', 'ipv6_route']:
+ if route in policy:
+ for name, pol_conf in policy[route].items():
+ if 'rule' in pol_conf:
+ for rule_id, rule_conf in pol_conf['rule'].items():
+ icmp = 'icmp' if route == 'route' else 'icmpv6'
+ if icmp in rule_conf:
+ icmp_defined = False
+ if 'type_name' in rule_conf[icmp]:
+ icmp_defined = True
+ if 'code' in rule_conf[icmp] or 'type' in rule_conf[icmp]:
+ raise ConfigError(f'{name} rule {rule_id}: Cannot use ICMP type/code with ICMP type-name')
+ if 'code' in rule_conf[icmp]:
+ icmp_defined = True
+ if 'type' not in rule_conf[icmp]:
+ raise ConfigError(f'{name} rule {rule_id}: ICMP code can only be defined if ICMP type is defined')
+ if 'type' in rule_conf[icmp]:
+ icmp_defined = True
+
+ if icmp_defined and ('protocol' not in rule_conf or rule_conf['protocol'] != icmp):
+ raise ConfigError(f'{name} rule {rule_id}: ICMP type/code or type-name can only be defined if protocol is ICMP')
+ if 'set' in rule_conf:
+ if 'tcp_mss' in rule_conf['set']:
+ tcp_flags = dict_search_args(rule_conf, 'tcp', 'flags')
+ if not tcp_flags or 'SYN' not in tcp_flags.split(","):
+ raise ConfigError(f'{name} rule {rule_id}: TCP SYN flag must be set to modify TCP-MSS')
+ if 'tcp' in rule_conf:
+ if 'flags' in rule_conf['tcp']:
+ if 'protocol' not in rule_conf or rule_conf['protocol'] != 'tcp':
+ raise ConfigError(f'{name} rule {rule_id}: TCP flags can only be set if protocol is set to TCP')
+
+
+ return None
+
+def generate(policy):
+ if not policy:
+ if os.path.exists(nftables_conf):
+ os.unlink(nftables_conf)
+ return None
+
+ if not os.path.exists(nftables_conf):
+ policy['first_install'] = True
+
+ render(nftables_conf, 'firewall/nftables-policy.tmpl', policy)
+ return None
+
+def apply_table_marks(policy):
+ for route in ['route', 'ipv6_route']:
+ if route in policy:
+ for name, pol_conf in policy[route].items():
+ if 'rule' in pol_conf:
+ for rule_id, rule_conf in pol_conf['rule'].items():
+ set_table = dict_search_args(rule_conf, 'set', 'table')
+ if set_table:
+ if set_table == 'main':
+ set_table = '254'
+ table_mark = mark_offset - int(set_table)
+ cmd(f'ip rule add fwmark {table_mark} table {set_table}')
+
+def cleanup_table_marks():
+ json_rules = cmd('ip -j -N rule list')
+ rules = loads(json_rules)
+ for rule in rules:
+ if 'fwmark' not in rule or 'table' not in rule:
+ continue
+ fwmark = rule['fwmark']
+ table = int(rule['table'])
+ if fwmark[:2] == '0x':
+ fwmark = int(fwmark, 16)
+ if (int(fwmark) == (mark_offset - table)):
+ cmd(f'ip rule del fwmark {fwmark} table {table}')
+
+def apply(policy):
+ if not policy or 'first_install' not in policy:
+ run(f'nft flush table ip mangle')
+ run(f'nft flush table ip6 mangle')
+
+ if not policy:
+ cleanup_table_marks()
+ return None
+
+ install_result = run(f'nft -f {nftables_conf}')
+ if install_result == 1:
+ raise ConfigError('Failed to apply policy based routing')
+
+ if 'first_install' not in policy:
+ cleanup_table_marks()
+
+ apply_table_marks(policy)
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
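
The mark handling above derives the firewall mark from the routing table number via a fixed offset, which is what lets cleanup_table_marks() recognise its own ip rules when listing them back. A small sketch of both directions (offset and the 'main' -> 254 mapping copied from the hunks above):

    MARK_OFFSET = 0x7FFFFFFF

    def table_to_mark(table) -> int:
        """Map a routing table (number or 'main') to the fwmark used for PBR."""
        table_id = 254 if table == 'main' else int(table)
        return MARK_OFFSET - table_id

    def mark_matches_table(fwmark, table) -> bool:
        """True if an 'ip -j rule list' entry carries a mark installed by this scheme."""
        if isinstance(fwmark, str) and fwmark.startswith('0x'):
            fwmark = int(fwmark, 16)
        return int(fwmark) == MARK_OFFSET - int(table)

    assert table_to_mark('main') == 0x7FFFFFFF - 254
    assert mark_matches_table('0x7ffffefe', 257)
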
diff --git a/src/conf_mode/policy.py b/src/conf_mode/policy.py
index 1a03d520b..e251396c7 100755
--- a/src/conf_mode/policy.py
+++ b/src/conf_mode/policy.py
@@ -171,9 +171,7 @@ def verify(policy):
def generate(policy):
if not policy:
- policy['new_frr_config'] = ''
return None
-
policy['new_frr_config'] = render_to_string('frr/policy.frr.tmpl', policy)
return None
@@ -190,8 +188,9 @@ def apply(policy):
frr_cfg.modify_section(r'^bgp community-list .*')
frr_cfg.modify_section(r'^bgp extcommunity-list .*')
frr_cfg.modify_section(r'^bgp large-community-list .*')
- frr_cfg.modify_section(r'^route-map .*')
- frr_cfg.add_before('^line vty', policy['new_frr_config'])
+ frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', remove_stop_mark=True)
+ if 'new_frr_config' in policy:
+ frr_cfg.add_before(frr.default_add_before, policy['new_frr_config'])
frr_cfg.commit_configuration(bgp_daemon)
# The route-map used for the FIB (zebra) is part of the zebra daemon
@@ -200,19 +199,11 @@ def apply(policy):
frr_cfg.modify_section(r'^ipv6 access-list .*')
frr_cfg.modify_section(r'^ip prefix-list .*')
frr_cfg.modify_section(r'^ipv6 prefix-list .*')
- frr_cfg.modify_section(r'^route-map .*')
- frr_cfg.add_before('^line vty', policy['new_frr_config'])
+ frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', remove_stop_mark=True)
+ if 'new_frr_config' in policy:
+ frr_cfg.add_before(frr.default_add_before, policy['new_frr_config'])
frr_cfg.commit_configuration(zebra_daemon)
- # If FRR config is blank, rerun the blank commit x times due to frr-reload
- # behavior/bug not properly clearing out on one commit.
- if policy['new_frr_config'] == '':
- for a in range(5):
- frr_cfg.commit_configuration(zebra_daemon)
-
- # Save configuration to /run/frr/config/frr.conf
- frr.save_configuration()
-
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py
index 348bae59f..4ebc0989c 100755
--- a/src/conf_mode/protocols_bfd.py
+++ b/src/conf_mode/protocols_bfd.py
@@ -16,10 +16,9 @@
import os
-from sys import exit
-
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configverify import verify_vrf
from vyos.template import is_ipv6
from vyos.template import render_to_string
from vyos.validate import is_ipv6_link_local
@@ -35,8 +34,9 @@ def get_config(config=None):
else:
conf = Config()
base = ['protocols', 'bfd']
- bfd = conf.get_config_dict(base, get_first_key=True)
-
+ bfd = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
# Bail out early if configuration tree does not exist
if not conf.exists(base):
return bfd
@@ -79,28 +79,37 @@ def verify(bfd):
# multihop and echo-mode cannot be used together
if 'echo_mode' in peer_config:
- raise ConfigError('Multihop and echo-mode cannot be used together')
+ raise ConfigError('BFD multihop and echo-mode cannot be used together')
# multihop doesn't accept interface names
if 'source' in peer_config and 'interface' in peer_config['source']:
- raise ConfigError('Multihop and source interface cannot be used together')
+ raise ConfigError('BFD multihop and source interface cannot be used together')
+
+ if 'profile' in peer_config:
+ profile_name = peer_config['profile']
+ if 'profile' not in bfd or profile_name not in bfd['profile']:
+ raise ConfigError(f'BFD profile "{profile_name}" does not exist!')
+
+ if 'vrf' in peer_config:
+ verify_vrf(peer_config)
return None
def generate(bfd):
if not bfd:
- bfd['new_frr_config'] = ''
return None
-
- bfd['new_frr_config'] = render_to_string('frr/bfd.frr.tmpl', bfd)
+ bfd['new_frr_config'] = render_to_string('frr/bfdd.frr.tmpl', bfd)
def apply(bfd):
+ bfd_daemon = 'bfdd'
+
# Save original configuration prior to starting any commit actions
frr_cfg = frr.FRRConfig()
- frr_cfg.load_configuration()
- frr_cfg.modify_section('^bfd', '')
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', bfd['new_frr_config'])
- frr_cfg.commit_configuration()
+ frr_cfg.load_configuration(bfd_daemon)
+ frr_cfg.modify_section('^bfd', stop_pattern='^exit', remove_stop_mark=True)
+ if 'new_frr_config' in bfd:
+ frr_cfg.add_before(frr.default_add_before, bfd['new_frr_config'])
+ frr_cfg.commit_configuration(bfd_daemon)
return None
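
The reworked apply() functions in this series all follow the same pattern: load one daemon's running config, drop the stanza between the matching start line and the next "exit", and splice the freshly rendered section in before a fixed anchor. A rough standalone illustration of that stanza replacement on plain text (this is not the vyos.frr API, only the idea behind modify_section with stop_pattern='^exit' and remove_stop_mark=True):

    import re

    def replace_stanza(config: str, start_re: str, new_stanza: str,
                       add_before: str = r'^line vty') -> str:
        """Drop the block from a line matching start_re up to and including the
           next bare 'exit', then insert the replacement before the anchor line."""
        out, skipping = [], False
        for line in config.splitlines():
            if not skipping and re.match(start_re, line):
                skipping = True                      # start dropping the old stanza
                continue
            if skipping:
                if re.match(r'^exit$', line):
                    skipping = False                 # the stop mark is dropped as well
                continue
            if new_stanza and re.match(add_before, line):
                out.extend(new_stanza.rstrip().splitlines())
                new_stanza = None                    # insert only once
            out.append(line)
        return '\n'.join(out) + '\n'

    old = 'bfd\n peer 192.0.2.1\nexit\nline vty\n'
    print(replace_stanza(old, r'^bfd', 'bfd\n peer 192.0.2.2\nexit'))
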
diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py
index 68284e0f9..d8704727c 100755
--- a/src/conf_mode/protocols_bgp.py
+++ b/src/conf_mode/protocols_bgp.py
@@ -183,6 +183,28 @@ def verify(bgp):
raise ConfigError(f'Neighbor "{peer}" cannot have both ipv6-unicast and ipv6-labeled-unicast configured at the same time!')
afi_config = peer_config['address_family'][afi]
+
+ if 'conditionally_advertise' in afi_config:
+ if 'advertise_map' not in afi_config['conditionally_advertise']:
+ raise ConfigError('Must specify advertise-map when conditionally-advertise is in use!')
+ # Verify advertise-map (which is a route-map) exists
+ verify_route_map(afi_config['conditionally_advertise']['advertise_map'], bgp)
+
+ if ('exist_map' not in afi_config['conditionally_advertise'] and
+ 'non_exist_map' not in afi_config['conditionally_advertise']):
+ raise ConfigError('Must either specify exist-map or non-exist-map when ' \
+ 'conditionally-advertise is in use!')
+
+ if {'exist_map', 'non_exist_map'} <= set(afi_config['conditionally_advertise']):
+ raise ConfigError('Can not specify both exist-map and non-exist-map for ' \
+ 'conditionally-advertise!')
+
+ if 'exist_map' in afi_config['conditionally_advertise']:
+ verify_route_map(afi_config['conditionally_advertise']['exist_map'], bgp)
+
+ if 'non_exist_map' in afi_config['conditionally_advertise']:
+ verify_route_map(afi_config['conditionally_advertise']['non_exist_map'], bgp)
+
# Validate if configured Prefix list exists
if 'prefix_list' in afi_config:
for tmp in ['import', 'export']:
@@ -255,21 +277,11 @@ def verify(bgp):
tmp = dict_search(f'route_map.vpn.{export_import}', afi_config)
if tmp: verify_route_map(tmp, bgp)
- if afi in ['l2vpn_evpn'] and 'vrf' not in bgp:
- # Some L2VPN EVPN AFI options are only supported under VRF
- if 'vni' in afi_config:
- for vni, vni_config in afi_config['vni'].items():
- if 'rd' in vni_config:
- raise ConfigError('VNI route-distinguisher is only supported under EVPN VRF')
- if 'route_target' in vni_config:
- raise ConfigError('VNI route-target is only supported under EVPN VRF')
return None
def generate(bgp):
if not bgp or 'deleted' in bgp:
- bgp['frr_bgpd_config'] = ''
- bgp['frr_zebra_config'] = ''
return None
bgp['protocol'] = 'bgp' # required for frr/vrf.route-map.frr.tmpl
@@ -287,8 +299,9 @@ def apply(bgp):
# The route-map used for the FIB (zebra) is part of the zebra daemon
frr_cfg.load_configuration(zebra_daemon)
- frr_cfg.modify_section(r'(\s+)?ip protocol bgp route-map [-a-zA-Z0-9.]+$', '', '(\s|!)')
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', bgp['frr_zebra_config'])
+ frr_cfg.modify_section(r'(\s+)?ip protocol bgp route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)')
+ if 'frr_zebra_config' in bgp:
+ frr_cfg.add_before(frr.default_add_before, bgp['frr_zebra_config'])
frr_cfg.commit_configuration(zebra_daemon)
# Generate empty helper string which can be amended to FRR commands, it
@@ -298,13 +311,11 @@ def apply(bgp):
vrf = ' vrf ' + bgp['vrf']
frr_cfg.load_configuration(bgp_daemon)
- frr_cfg.modify_section(f'^router bgp \d+{vrf}$', '')
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', bgp['frr_bgpd_config'])
+ frr_cfg.modify_section(f'^router bgp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True)
+ if 'frr_bgpd_config' in bgp:
+ frr_cfg.add_before(frr.default_add_before, bgp['frr_bgpd_config'])
frr_cfg.commit_configuration(bgp_daemon)
- # Save configuration to /run/frr/config/frr.conf
- frr.save_configuration()
-
return None
if __name__ == '__main__':
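
The conditionally-advertise checks above boil down to: advertise-map is mandatory, and exactly one of exist-map / non-exist-map must be set. A compact sketch of that rule on a plain dict (key names as in the hunk above, route-map existence checks omitted):

    def verify_conditional_advertise(cond: dict) -> None:
        """Validate the conditionally-advertise sub-tree of a BGP address-family."""
        if 'advertise_map' not in cond:
            raise ValueError('Must specify advertise-map when conditionally-advertise is in use!')
        has_exist = 'exist_map' in cond
        has_non_exist = 'non_exist_map' in cond
        if has_exist == has_non_exist:
            # neither or both selectors present - exactly one is required
            raise ValueError('Specify either exist-map or non-exist-map, but not both!')

    verify_conditional_advertise({'advertise_map': 'ADV', 'exist_map': 'EXIST'})  # passes
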
diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py
index 4505e2496..9b4b215de 100755
--- a/src/conf_mode/protocols_isis.py
+++ b/src/conf_mode/protocols_isis.py
@@ -56,10 +56,10 @@ def get_config(config=None):
# instead of the VRF instance.
if vrf: isis['vrf'] = vrf
- # As we no re-use this Python handler for both VRF and non VRF instances for
- # IS-IS we need to find out if any interfaces changed so properly adjust
- # the FRR configuration and not by acctident change interfaces from a
- # different VRF.
+ # FRR has VRF support for different routing daemons. As interfaces belong
+ # to VRFs - or the global VRF, we need to check for changed interfaces so
+ # that they will be properly rendered for the FRR config. Also this eases
+ # removal of interfaces from the running configuration.
interfaces_removed = node_changed(conf, base + ['interface'])
if interfaces_removed:
isis['interface_removed'] = list(interfaces_removed)
@@ -196,8 +196,6 @@ def verify(isis):
def generate(isis):
if not isis or 'deleted' in isis:
- isis['frr_isisd_config'] = ''
- isis['frr_zebra_config'] = ''
return None
isis['protocol'] = 'isis' # required for frr/vrf.route-map.frr.tmpl
@@ -214,8 +212,9 @@ def apply(isis):
# The route-map used for the FIB (zebra) is part of the zebra daemon
frr_cfg.load_configuration(zebra_daemon)
- frr_cfg.modify_section(r'(\s+)?ip protocol isis route-map [-a-zA-Z0-9.]+$', '', '(\s|!)')
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', isis['frr_zebra_config'])
+ frr_cfg.modify_section('(\s+)?ip protocol isis route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)')
+ if 'frr_zebra_config' in isis:
+ frr_cfg.add_before(frr.default_add_before, isis['frr_zebra_config'])
frr_cfg.commit_configuration(zebra_daemon)
# Generate empty helper string which can be amended to FRR commands, it
@@ -225,19 +224,18 @@ def apply(isis):
vrf = ' vrf ' + isis['vrf']
frr_cfg.load_configuration(isis_daemon)
- frr_cfg.modify_section(f'^router isis VyOS{vrf}$', '')
+ frr_cfg.modify_section(f'^router isis VyOS{vrf}', stop_pattern='^exit', remove_stop_mark=True)
for key in ['interface', 'interface_removed']:
if key not in isis:
continue
for interface in isis[key]:
- frr_cfg.modify_section(f'^interface {interface}{vrf}$', '')
+ frr_cfg.modify_section(f'^interface {interface}{vrf}', stop_pattern='^exit', remove_stop_mark=True)
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', isis['frr_isisd_config'])
- frr_cfg.commit_configuration(isis_daemon)
+ if 'frr_isisd_config' in isis:
+ frr_cfg.add_before(frr.default_add_before, isis['frr_isisd_config'])
- # Save configuration to /run/frr/config/frr.conf
- frr.save_configuration()
+ frr_cfg.commit_configuration(isis_daemon)
return None
diff --git a/src/conf_mode/protocols_mpls.py b/src/conf_mode/protocols_mpls.py
index 3b27608da..0b0c7d07b 100755
--- a/src/conf_mode/protocols_mpls.py
+++ b/src/conf_mode/protocols_mpls.py
@@ -66,36 +66,24 @@ def verify(mpls):
def generate(mpls):
# If there's no MPLS config generated, create dictionary key with no value.
- if not mpls:
- mpls['new_frr_config'] = ''
+ if not mpls or 'deleted' in mpls:
return None
- mpls['new_frr_config'] = render_to_string('frr/ldpd.frr.tmpl', mpls)
+ mpls['frr_ldpd_config'] = render_to_string('frr/ldpd.frr.tmpl', mpls)
return None
def apply(mpls):
- # Define dictionary that will load FRR config
- frr_cfg = {}
+ ldpd_daemon = 'ldpd'
+
# Save original configuration prior to starting any commit actions
- frr_cfg['original_config'] = frr.get_configuration(daemon='ldpd')
- frr_cfg['modified_config'] = frr.replace_section(frr_cfg['original_config'], mpls['new_frr_config'], from_re='mpls.*')
-
- # If FRR config is blank, rerun the blank commit three times due to frr-reload
- # behavior/bug not properly clearing out on one commit.
- if mpls['new_frr_config'] == '':
- for x in range(3):
- frr.reload_configuration(frr_cfg['modified_config'], daemon='ldpd')
- elif not 'ldp' in mpls:
- for x in range(3):
- frr.reload_configuration(frr_cfg['modified_config'], daemon='ldpd')
- else:
- # FRR mark configuration will test for syntax errors and throws an
- # exception if any syntax errors is detected
- frr.mark_configuration(frr_cfg['modified_config'])
+ frr_cfg = frr.FRRConfig()
+
+ frr_cfg.load_configuration(ldpd_daemon)
+ frr_cfg.modify_section(f'^mpls ldp', stop_pattern='^exit', remove_stop_mark=True)
- # Commit resulting configuration to FRR, this will throw CommitError
- # on failure
- frr.reload_configuration(frr_cfg['modified_config'], daemon='ldpd')
+ if 'frr_ldpd_config' in mpls:
+ frr_cfg.add_before(frr.default_add_before, mpls['frr_ldpd_config'])
+ frr_cfg.commit_configuration(ldpd_daemon)
# Set number of entries in the platform label tables
labels = '0'
@@ -122,7 +110,7 @@ def apply(mpls):
system_interfaces = []
# Populate system interfaces list with local MPLS capable interfaces
for interface in glob('/proc/sys/net/mpls/conf/*'):
- system_interfaces.append(os.path.basename(interface))
+ system_interfaces.append(os.path.basename(interface))
# This is where the comparison is done on if an interface needs to be enabled/disabled.
for system_interface in system_interfaces:
interface_state = read_file(f'/proc/sys/net/mpls/conf/{system_interface}/input')
@@ -138,7 +126,7 @@ def apply(mpls):
system_interfaces = []
# If MPLS interfaces are not configured, set MPLS processing disabled
for interface in glob('/proc/sys/net/mpls/conf/*'):
- system_interfaces.append(os.path.basename(interface))
+ system_interfaces.append(os.path.basename(interface))
for system_interface in system_interfaces:
system_interface = system_interface.replace('.', '/')
call(f'sysctl -wq net.mpls.conf.{system_interface}.input=0')
diff --git a/src/conf_mode/protocols_nhrp.py b/src/conf_mode/protocols_nhrp.py
index 12dacdba0..7eeb5cd30 100755
--- a/src/conf_mode/protocols_nhrp.py
+++ b/src/conf_mode/protocols_nhrp.py
@@ -16,6 +16,8 @@
from vyos.config import Config
from vyos.configdict import node_changed
+from vyos.firewall import find_nftables_rule
+from vyos.firewall import remove_nftables_rule
from vyos.template import render
from vyos.util import process_named_running
from vyos.util import run
@@ -88,24 +90,19 @@ def generate(nhrp):
def apply(nhrp):
if 'tunnel' in nhrp:
for tunnel, tunnel_conf in nhrp['tunnel'].items():
- if 'source_address' in tunnel_conf:
- chain = f'VYOS_NHRP_{tunnel}_OUT_HOOK'
- source_address = tunnel_conf['source_address']
+ if 'source_address' in nhrp['if_tunnel'][tunnel]:
+ comment = f'VYOS_NHRP_{tunnel}'
+ source_address = nhrp['if_tunnel'][tunnel]['source_address']
- chain_exists = run(f'sudo iptables --check {chain} -j RETURN') == 0
- if not chain_exists:
- run(f'sudo iptables --new {chain}')
- run(f'sudo iptables --append {chain} -p gre -s {source_address} -d 224.0.0.0/4 -j DROP')
- run(f'sudo iptables --append {chain} -j RETURN')
- run(f'sudo iptables --insert OUTPUT 2 -j {chain}')
+ rule_handle = find_nftables_rule('ip filter', 'VYOS_FW_OUTPUT', ['ip protocol gre', f'ip saddr {source_address}', 'ip daddr 224.0.0.0/4'])
+ if not rule_handle:
+ run(f'sudo nft insert rule ip filter VYOS_FW_OUTPUT ip protocol gre ip saddr {source_address} ip daddr 224.0.0.0/4 counter drop comment "{comment}"')
for tunnel in nhrp['del_tunnels']:
- chain = f'VYOS_NHRP_{tunnel}_OUT_HOOK'
- chain_exists = run(f'sudo iptables --check {chain} -j RETURN') == 0
- if chain_exists:
- run(f'sudo iptables --delete OUTPUT -j {chain}')
- run(f'sudo iptables --flush {chain}')
- run(f'sudo iptables --delete-chain {chain}')
+ comment = f'VYOS_NHRP_{tunnel}'
+ rule_handle = find_nftables_rule('ip filter', 'VYOS_FW_OUTPUT', [f'comment "{comment}"'])
+ if rule_handle:
+ remove_nftables_rule('ip filter', 'VYOS_FW_OUTPUT', rule_handle)
action = 'restart' if nhrp and 'tunnel' in nhrp else 'stop'
run(f'systemctl {action} opennhrp')
diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py
index 6ccda2e5a..4895cde6f 100755
--- a/src/conf_mode/protocols_ospf.py
+++ b/src/conf_mode/protocols_ospf.py
@@ -56,10 +56,10 @@ def get_config(config=None):
# instead of the VRF instance.
if vrf: ospf['vrf'] = vrf
- # As we no re-use this Python handler for both VRF and non VRF instances for
- # OSPF we need to find out if any interfaces changed so properly adjust
- # the FRR configuration and not by acctident change interfaces from a
- # different VRF.
+ # FRR has VRF support for different routing daemons. As interfaces belong
+ # to VRFs - or the global VRF, we need to check for changed interfaces so
+ # that they will be properly rendered for the FRR config. Also this eases
+ # removal of interfaces from the running configuration.
interfaces_removed = node_changed(conf, base + ['interface'])
if interfaces_removed:
ospf['interface_removed'] = list(interfaces_removed)
@@ -177,11 +177,11 @@ def verify(ospf):
raise ConfigError('Can not use OSPF interface area and area ' \
'network configuration at the same time!')
- if 'vrf' in ospf:
# If interface specific options are set, we must ensure that the
# interface is bound to our requesting VRF. Due to the VyOS
# priorities the interface is bound to the VRF after creation of
# the VRF itself, and before any routing protocol is configured.
+ if 'vrf' in ospf:
vrf = ospf['vrf']
tmp = get_interface_config(interface)
if 'master' not in tmp or tmp['master'] != vrf:
@@ -191,8 +191,6 @@ def verify(ospf):
def generate(ospf):
if not ospf or 'deleted' in ospf:
- ospf['frr_ospfd_config'] = ''
- ospf['frr_zebra_config'] = ''
return None
ospf['protocol'] = 'ospf' # required for frr/vrf.route-map.frr.tmpl
@@ -209,8 +207,9 @@ def apply(ospf):
# The route-map used for the FIB (zebra) is part of the zebra daemon
frr_cfg.load_configuration(zebra_daemon)
- frr_cfg.modify_section(r'(\s+)?ip protocol ospf route-map [-a-zA-Z0-9.]+$', '', '(\s|!)')
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', ospf['frr_zebra_config'])
+ frr_cfg.modify_section('(\s+)?ip protocol ospf route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)')
+ if 'frr_zebra_config' in ospf:
+ frr_cfg.add_before(frr.default_add_before, ospf['frr_zebra_config'])
frr_cfg.commit_configuration(zebra_daemon)
# Generate empty helper string which can be ammended to FRR commands, it
@@ -220,20 +219,18 @@ def apply(ospf):
vrf = ' vrf ' + ospf['vrf']
frr_cfg.load_configuration(ospf_daemon)
- frr_cfg.modify_section(f'^router ospf{vrf}$', '')
+ frr_cfg.modify_section(f'^router ospf{vrf}', stop_pattern='^exit', remove_stop_mark=True)
for key in ['interface', 'interface_removed']:
if key not in ospf:
continue
for interface in ospf[key]:
- frr_cfg.modify_section(f'^interface {interface}{vrf}$', '')
+ frr_cfg.modify_section(f'^interface {interface}{vrf}', stop_pattern='^exit', remove_stop_mark=True)
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', ospf['frr_ospfd_config'])
+ if 'frr_ospfd_config' in ospf:
+ frr_cfg.add_before(frr.default_add_before, ospf['frr_ospfd_config'])
frr_cfg.commit_configuration(ospf_daemon)
- # Save configuration to /run/frr/config/frr.conf
- frr.save_configuration()
-
return None
if __name__ == '__main__':
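
The repeated modify_section(..., stop_pattern='^exit', remove_stop_mark=True) calls rely on FRR closing every configuration block with an explicit 'exit' line. A simplified stand-in for that behaviour - not the vyos.frr implementation - operating on a plain list of configuration lines:

import re

def drop_section(lines, start_pattern, stop_pattern=r'^exit', remove_stop_mark=True):
    # Remove everything from the first line matching start_pattern up to
    # (and, if requested, including) the next line matching stop_pattern.
    result, skipping = [], False
    for line in lines:
        if not skipping and re.match(start_pattern, line):
            skipping = True
            continue
        if skipping and re.match(stop_pattern, line):
            skipping = False
            if remove_stop_mark:
                continue
        if not skipping:
            result.append(line)
    return result

running = ['router ospf', ' ospf router-id 1.1.1.1', 'exit', 'line vty']
print(drop_section(running, r'^router ospf'))  # -> ['line vty']
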
diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py
index 536ffa690..f8e733ba5 100755
--- a/src/conf_mode/protocols_ospfv3.py
+++ b/src/conf_mode/protocols_ospfv3.py
@@ -17,32 +17,80 @@
import os
from sys import exit
+from sys import argv
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configdict import node_changed
from vyos.configverify import verify_common_route_maps
+from vyos.configverify import verify_route_map
+from vyos.configverify import verify_interface_exists
from vyos.template import render_to_string
from vyos.ifconfig import Interface
+from vyos.util import dict_search
+from vyos.util import get_interface_config
from vyos.xml import defaults
from vyos import ConfigError
from vyos import frr
from vyos import airbag
airbag.enable()
-frr_daemon = 'ospf6d'
-
def get_config(config=None):
if config:
conf = config
else:
conf = Config()
- base = ['protocols', 'ospfv3']
+
+ vrf = None
+ if len(argv) > 1:
+ vrf = argv[1]
+
+ base_path = ['protocols', 'ospfv3']
+
+    # equivalent of the C foo ? 'a' : 'b' statement
+ base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospfv3'] or base_path
ospfv3 = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+ # Assign the name of our VRF context. This MUST be done before the return
+ # statement below, else on deletion we will delete the default instance
+ # instead of the VRF instance.
+ if vrf: ospfv3['vrf'] = vrf
+
+ # FRR has VRF support for different routing daemons. As interfaces belong
+ # to VRFs - or the global VRF, we need to check for changed interfaces so
+ # that they will be properly rendered for the FRR config. Also this eases
+ # removal of interfaces from the running configuration.
+ interfaces_removed = node_changed(conf, base + ['interface'])
+ if interfaces_removed:
+ ospfv3['interface_removed'] = list(interfaces_removed)
+
# Bail out early if configuration tree does not exist
if not conf.exists(base):
+ ospfv3.update({'deleted' : ''})
return ospfv3
+ # We have gathered the dict representation of the CLI, but there are default
+    # options which we need to update into the dictionary retrieved.
+ # XXX: Note that we can not call defaults(base), as defaults does not work
+ # on an instance of a tag node. As we use the exact same CLI definition for
+ # both the non-vrf and vrf version this is absolutely safe!
+ default_values = defaults(base_path)
+
+ # We have to cleanup the default dict, as default values could enable features
+ # which are not explicitly enabled on the CLI. Example: default-information
+ # originate comes with a default metric-type of 2, which will enable the
+ # entire default-information originate tree, even when not set via CLI so we
+ # need to check this first and probably drop that key.
+ if dict_search('default_information.originate', ospfv3) is None:
+ del default_values['default_information']
+
+ # XXX: T2665: we currently have no nice way for defaults under tag nodes,
+ # clean them out and add them manually :(
+ del default_values['interface']
+
+ # merge in remaining default values
+ ospfv3 = dict_merge(default_values, ospfv3)
+
# We also need some additional information from the config, prefix-lists
# and route-maps for instance. They will be used in verify().
#
@@ -60,34 +108,68 @@ def verify(ospfv3):
verify_common_route_maps(ospfv3)
+ # As we can have a default-information route-map, we need to validate it!
+ route_map_name = dict_search('default_information.originate.route_map', ospfv3)
+ if route_map_name: verify_route_map(route_map_name, ospfv3)
+
+ if 'area' in ospfv3:
+ for area, area_config in ospfv3['area'].items():
+ if 'area_type' in area_config:
+ if len(area_config['area_type']) > 1:
+ raise ConfigError(f'Can only configure one area-type for OSPFv3 area "{area}"!')
+
if 'interface' in ospfv3:
- for ifname, if_config in ospfv3['interface'].items():
- if 'ifmtu' in if_config:
- mtu = Interface(ifname).get_mtu()
- if int(if_config['ifmtu']) > int(mtu):
+ for interface, interface_config in ospfv3['interface'].items():
+ verify_interface_exists(interface)
+ if 'ifmtu' in interface_config:
+ mtu = Interface(interface).get_mtu()
+ if int(interface_config['ifmtu']) > int(mtu):
raise ConfigError(f'OSPFv3 ifmtu can not exceed physical MTU of "{mtu}"')
+ # If interface specific options are set, we must ensure that the
+ # interface is bound to our requesting VRF. Due to the VyOS
+ # priorities the interface is bound to the VRF after creation of
+ # the VRF itself, and before any routing protocol is configured.
+ if 'vrf' in ospfv3:
+ vrf = ospfv3['vrf']
+ tmp = get_interface_config(interface)
+ if 'master' not in tmp or tmp['master'] != vrf:
+ raise ConfigError(f'Interface {interface} is not a member of VRF {vrf}!')
+
return None
def generate(ospfv3):
- if not ospfv3:
- ospfv3['new_frr_config'] = ''
+ if not ospfv3 or 'deleted' in ospfv3:
return None
ospfv3['new_frr_config'] = render_to_string('frr/ospf6d.frr.tmpl', ospfv3)
return None
def apply(ospfv3):
+ ospf6_daemon = 'ospf6d'
+
# Save original configuration prior to starting any commit actions
frr_cfg = frr.FRRConfig()
- frr_cfg.load_configuration(frr_daemon)
- frr_cfg.modify_section(r'^interface \S+', '')
- frr_cfg.modify_section('^router ospf6$', '')
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', ospfv3['new_frr_config'])
- frr_cfg.commit_configuration(frr_daemon)
-
- # Save configuration to /run/frr/config/frr.conf
- frr.save_configuration()
+
+    # Generate empty helper string which can be amended to FRR commands, it
+    # will be either empty (default VRF) or contain the "vrf <name>" statement
+ vrf = ''
+ if 'vrf' in ospfv3:
+ vrf = ' vrf ' + ospfv3['vrf']
+
+ frr_cfg.load_configuration(ospf6_daemon)
+ frr_cfg.modify_section(f'^router ospf6{vrf}', stop_pattern='^exit', remove_stop_mark=True)
+
+ for key in ['interface', 'interface_removed']:
+ if key not in ospfv3:
+ continue
+ for interface in ospfv3[key]:
+ frr_cfg.modify_section(f'^interface {interface}{vrf}', stop_pattern='^exit', remove_stop_mark=True)
+
+ if 'new_frr_config' in ospfv3:
+ frr_cfg.add_before(frr.default_add_before, ospfv3['new_frr_config'])
+
+ frr_cfg.commit_configuration(ospf6_daemon)
return None
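
The 'vrf and [...] or base_path' construct in get_config() above is the old and/or spelling of a conditional expression; it only works because a non-empty list is truthy. A minimal demonstration of the equivalence, using the paths from this handler:

base_path = ['protocols', 'ospfv3']

for vrf in [None, 'red']:
    old_style = vrf and ['vrf', 'name', vrf, 'protocols', 'ospfv3'] or base_path
    new_style = ['vrf', 'name', vrf, 'protocols', 'ospfv3'] if vrf else base_path
    assert old_style == new_style
    print(vrf, old_style)
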
diff --git a/src/conf_mode/protocols_rip.py b/src/conf_mode/protocols_rip.py
index e56eb1f56..300f56489 100755
--- a/src/conf_mode/protocols_rip.py
+++ b/src/conf_mode/protocols_rip.py
@@ -20,6 +20,7 @@ from sys import exit
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configdict import node_changed
from vyos.configverify import verify_common_route_maps
from vyos.configverify import verify_access_list
from vyos.configverify import verify_prefix_list
@@ -39,8 +40,17 @@ def get_config(config=None):
base = ['protocols', 'rip']
rip = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+ # FRR has VRF support for different routing daemons. As interfaces belong
+ # to VRFs - or the global VRF, we need to check for changed interfaces so
+ # that they will be properly rendered for the FRR config. Also this eases
+ # removal of interfaces from the running configuration.
+ interfaces_removed = node_changed(conf, base + ['interface'])
+ if interfaces_removed:
+ rip['interface_removed'] = list(interfaces_removed)
+
# Bail out early if configuration tree does not exist
if not conf.exists(base):
+ rip.update({'deleted' : ''})
return rip
# We have gathered the dict representation of the CLI, but there are default
@@ -89,12 +99,10 @@ def verify(rip):
f'with "split-horizon disable" for "{interface}"!')
def generate(rip):
- if not rip:
- rip['new_frr_config'] = ''
+ if not rip or 'deleted' in rip:
return None
- rip['new_frr_config'] = render_to_string('frr/rip.frr.tmpl', rip)
-
+ rip['new_frr_config'] = render_to_string('frr/ripd.frr.tmpl', rip)
return None
def apply(rip):
@@ -106,19 +114,22 @@ def apply(rip):
# The route-map used for the FIB (zebra) is part of the zebra daemon
frr_cfg.load_configuration(zebra_daemon)
- frr_cfg.modify_section(r'^ip protocol rip route-map [-a-zA-Z0-9.]+$', '')
+ frr_cfg.modify_section('^ip protocol rip route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)')
frr_cfg.commit_configuration(zebra_daemon)
frr_cfg.load_configuration(rip_daemon)
- frr_cfg.modify_section(r'key chain \S+', '')
- frr_cfg.modify_section(r'interface \S+', '')
- frr_cfg.modify_section('^router rip$', '')
+ frr_cfg.modify_section('^key chain \S+', stop_pattern='^exit', remove_stop_mark=True)
+ frr_cfg.modify_section('^router rip', stop_pattern='^exit', remove_stop_mark=True)
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', rip['new_frr_config'])
- frr_cfg.commit_configuration(rip_daemon)
+ for key in ['interface', 'interface_removed']:
+ if key not in rip:
+ continue
+ for interface in rip[key]:
+ frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
- # Save configuration to /run/frr/config/frr.conf
- frr.save_configuration()
+ if 'new_frr_config' in rip:
+ frr_cfg.add_before(frr.default_add_before, rip['new_frr_config'])
+ frr_cfg.commit_configuration(rip_daemon)
return None
diff --git a/src/conf_mode/protocols_ripng.py b/src/conf_mode/protocols_ripng.py
index aaec5dacb..d9b8c0b30 100755
--- a/src/conf_mode/protocols_ripng.py
+++ b/src/conf_mode/protocols_ripng.py
@@ -31,8 +31,6 @@ from vyos import frr
from vyos import airbag
airbag.enable()
-frr_daemon = 'ripngd'
-
def get_config(config=None):
if config:
conf = config
@@ -95,21 +93,28 @@ def generate(ripng):
ripng['new_frr_config'] = ''
return None
- ripng['new_frr_config'] = render_to_string('frr/ripng.frr.tmpl', ripng)
+ ripng['new_frr_config'] = render_to_string('frr/ripngd.frr.tmpl', ripng)
return None
def apply(ripng):
+ ripng_daemon = 'ripngd'
+ zebra_daemon = 'zebra'
+
# Save original configuration prior to starting any commit actions
frr_cfg = frr.FRRConfig()
- frr_cfg.load_configuration(frr_daemon)
- frr_cfg.modify_section(r'key chain \S+', '')
- frr_cfg.modify_section(r'interface \S+', '')
- frr_cfg.modify_section('router ripng', '')
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', ripng['new_frr_config'])
- frr_cfg.commit_configuration(frr_daemon)
-
- # Save configuration to /run/frr/config/frr.conf
- frr.save_configuration()
+
+ # The route-map used for the FIB (zebra) is part of the zebra daemon
+ frr_cfg.load_configuration(zebra_daemon)
+ frr_cfg.modify_section('^ipv6 protocol ripng route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)')
+ frr_cfg.commit_configuration(zebra_daemon)
+
+ frr_cfg.load_configuration(ripng_daemon)
+ frr_cfg.modify_section('key chain \S+', stop_pattern='^exit', remove_stop_mark=True)
+ frr_cfg.modify_section('interface \S+', stop_pattern='^exit', remove_stop_mark=True)
+ frr_cfg.modify_section('^router ripng', stop_pattern='^exit', remove_stop_mark=True)
+ if 'new_frr_config' in ripng:
+ frr_cfg.add_before(frr.default_add_before, ripng['new_frr_config'])
+ frr_cfg.commit_configuration(ripng_daemon)
return None
diff --git a/src/conf_mode/protocols_rpki.py b/src/conf_mode/protocols_rpki.py
index 947c8ab7a..51ad0d315 100755
--- a/src/conf_mode/protocols_rpki.py
+++ b/src/conf_mode/protocols_rpki.py
@@ -28,8 +28,6 @@ from vyos import frr
from vyos import airbag
airbag.enable()
-frr_daemon = 'bgpd'
-
def get_config(config=None):
if config:
conf = config
@@ -38,7 +36,9 @@ def get_config(config=None):
base = ['protocols', 'rpki']
rpki = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+ # Bail out early if configuration tree does not exist
if not conf.exists(base):
+ rpki.update({'deleted' : ''})
return rpki
# We have gathered the dict representation of the CLI, but there are default
@@ -79,17 +79,22 @@ def verify(rpki):
return None
def generate(rpki):
+ if not rpki:
+ return
rpki['new_frr_config'] = render_to_string('frr/rpki.frr.tmpl', rpki)
return None
def apply(rpki):
+ bgp_daemon = 'bgpd'
+
# Save original configuration prior to starting any commit actions
frr_cfg = frr.FRRConfig()
- frr_cfg.load_configuration(frr_daemon)
- frr_cfg.modify_section('rpki', '')
- frr_cfg.add_before(r'(ip prefix-list .*|route-map .*|line vty)', rpki['new_frr_config'])
- frr_cfg.commit_configuration(frr_daemon)
+ frr_cfg.load_configuration(bgp_daemon)
+ frr_cfg.modify_section('^rpki', stop_pattern='^exit', remove_stop_mark=True)
+ if 'new_frr_config' in rpki:
+ frr_cfg.add_before(frr.default_add_before, rpki['new_frr_config'])
+ frr_cfg.commit_configuration(bgp_daemon)
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/protocols_static.py b/src/conf_mode/protocols_static.py
index 338247e30..c1e427b16 100755
--- a/src/conf_mode/protocols_static.py
+++ b/src/conf_mode/protocols_static.py
@@ -21,6 +21,7 @@ from sys import argv
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configdict import get_dhcp_interfaces
from vyos.configverify import verify_common_route_maps
from vyos.configverify import verify_vrf
from vyos.template import render_to_string
@@ -56,6 +57,10 @@ def get_config(config=None):
# Merge policy dict into "regular" config dict
static = dict_merge(tmp, static)
+ # T3680 - get a list of all interfaces currently configured to use DHCP
+ tmp = get_dhcp_interfaces(conf, vrf)
+ if tmp: static['dhcp'] = tmp
+
return static
def verify(static):
@@ -80,7 +85,9 @@ def verify(static):
return None
def generate(static):
- static['new_frr_config'] = render_to_string('frr/static.frr.tmpl', static)
+ if not static:
+ return None
+ static['new_frr_config'] = render_to_string('frr/staticd.frr.tmpl', static)
return None
def apply(static):
@@ -92,24 +99,21 @@ def apply(static):
# The route-map used for the FIB (zebra) is part of the zebra daemon
frr_cfg.load_configuration(zebra_daemon)
- frr_cfg.modify_section(r'^ip protocol static route-map [-a-zA-Z0-9.]+$', '')
+ frr_cfg.modify_section(r'^ip protocol static route-map [-a-zA-Z0-9.]+', '')
frr_cfg.commit_configuration(zebra_daemon)
-
frr_cfg.load_configuration(static_daemon)
if 'vrf' in static:
vrf = static['vrf']
- frr_cfg.modify_section(f'^vrf {vrf}$', '')
+ frr_cfg.modify_section(f'^vrf {vrf}', stop_pattern='^exit', remove_stop_mark=True)
else:
- frr_cfg.modify_section(r'^ip route .*', '')
- frr_cfg.modify_section(r'^ipv6 route .*', '')
+ frr_cfg.modify_section(r'^ip route .*')
+ frr_cfg.modify_section(r'^ipv6 route .*')
- frr_cfg.add_before(r'(interface .*|line vty)', static['new_frr_config'])
+ if 'new_frr_config' in static:
+ frr_cfg.add_before(frr.default_add_before, static['new_frr_config'])
frr_cfg.commit_configuration(static_daemon)
- # Save configuration to /run/frr/config/frr.conf
- frr.save_configuration()
-
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/service_mdns-repeater.py b/src/conf_mode/service_mdns-repeater.py
index c920920ed..d31a0c49e 100755
--- a/src/conf_mode/service_mdns-repeater.py
+++ b/src/conf_mode/service_mdns-repeater.py
@@ -28,7 +28,7 @@ from vyos import ConfigError
from vyos import airbag
airbag.enable()
-config_file = r'/etc/default/mdns-repeater'
+config_file = '/run/avahi-daemon/avahi-daemon.conf'
vrrp_running_file = '/run/mdns_vrrp_active'
def get_config(config=None):
@@ -92,12 +92,12 @@ def generate(mdns):
if len(mdns['interface']) < 2:
return None
- render(config_file, 'mdns-repeater/mdns-repeater.tmpl', mdns)
+ render(config_file, 'mdns-repeater/avahi-daemon.tmpl', mdns)
return None
def apply(mdns):
if not mdns or 'disable' in mdns:
- call('systemctl stop mdns-repeater.service')
+ call('systemctl stop avahi-daemon.service')
if os.path.exists(config_file):
os.unlink(config_file)
@@ -106,16 +106,16 @@ def apply(mdns):
else:
if 'vrrp_disable' not in mdns and os.path.exists(vrrp_running_file):
os.unlink(vrrp_running_file)
-
+
if mdns['vrrp_exists'] and 'vrrp_disable' in mdns:
if not os.path.exists(vrrp_running_file):
os.mknod(vrrp_running_file) # vrrp script looks for this file to update mdns repeater
if len(mdns['interface']) < 2:
- call('systemctl stop mdns-repeater.service')
+ call('systemctl stop avahi-daemon.service')
return None
- call('systemctl restart mdns-repeater.service')
+ call('systemctl restart avahi-daemon.service')
return None
diff --git a/src/conf_mode/service_pppoe-server.py b/src/conf_mode/service_pppoe-server.py
index 9fbd531da..1f31d132d 100755
--- a/src/conf_mode/service_pppoe-server.py
+++ b/src/conf_mode/service_pppoe-server.py
@@ -24,8 +24,11 @@ from vyos.configverify import verify_accel_ppp_base_service
from vyos.template import render
from vyos.util import call
from vyos.util import dict_search
+from vyos.util import get_interface_config
from vyos import ConfigError
from vyos import airbag
+from vyos.range_regex import range_to_regex
+
airbag.enable()
pppoe_conf = r'/run/accel-pppd/pppoe.conf'
@@ -56,6 +59,11 @@ def verify(pppoe):
if 'interface' not in pppoe:
raise ConfigError('At least one listen interface must be defined!')
+    # Check if the interface exists in the system
+ for iface in pppoe['interface']:
+ if not get_interface_config(iface):
+ raise ConfigError(f'Interface {iface} does not exist!')
+
# local ippool and gateway settings config checks
if not (dict_search('client_ip_pool.subnet', pppoe) or
(dict_search('client_ip_pool.start', pppoe) and
@@ -73,6 +81,13 @@ def generate(pppoe):
if not pppoe:
return None
+ # Generate special regex for dynamic interfaces
+ for iface in pppoe['interface']:
+ if 'vlan_range' in pppoe['interface'][iface]:
+ pppoe['interface'][iface]['regex'] = []
+ for vlan_range in pppoe['interface'][iface]['vlan_range']:
+ pppoe['interface'][iface]['regex'].append(range_to_regex(vlan_range))
+
render(pppoe_conf, 'accel-ppp/pppoe.config.tmpl', pppoe)
if dict_search('authentication.mode', pppoe) == 'local':
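
range_to_regex() comes from vyos.range_regex and its implementation is not shown in this diff. A deliberately naive stand-in that simply enumerates the range as an alternation illustrates what the generated pattern is used for - matching VLAN IDs of dynamically created interfaces:

def naive_range_to_regex(vlan_range):
    # Expand '100-105' into '(100|101|102|103|104|105)'. Illustration only;
    # the real helper produces a more compact pattern.
    start, stop = (int(x) for x in vlan_range.split('-'))
    return '(' + '|'.join(str(i) for i in range(start, stop + 1)) + ')'

print(naive_range_to_regex('100-105'))
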
diff --git a/src/conf_mode/snmp.py b/src/conf_mode/snmp.py
index 23e45a5b7..8ce48780b 100755
--- a/src/conf_mode/snmp.py
+++ b/src/conf_mode/snmp.py
@@ -19,70 +19,49 @@ import os
from sys import exit
from vyos.config import Config
+from vyos.configdict import dict_merge
from vyos.configverify import verify_vrf
-from vyos.snmpv3_hashgen import plaintext_to_md5, plaintext_to_sha1, random
+from vyos.snmpv3_hashgen import plaintext_to_md5
+from vyos.snmpv3_hashgen import plaintext_to_sha1
+from vyos.snmpv3_hashgen import random
from vyos.template import render
-from vyos.template import is_ipv4
-from vyos.util import call, chmod_755
+from vyos.util import call
+from vyos.util import chmod_755
+from vyos.util import dict_search
from vyos.validate import is_addr_assigned
from vyos.version import get_version_data
-from vyos import ConfigError, airbag
+from vyos.xml import defaults
+from vyos import ConfigError
+from vyos import airbag
airbag.enable()
config_file_client = r'/etc/snmp/snmp.conf'
config_file_daemon = r'/etc/snmp/snmpd.conf'
config_file_access = r'/usr/share/snmp/snmpd.conf'
config_file_user = r'/var/lib/snmp/snmpd.conf'
-default_script_dir = r'/config/user-data/'
systemd_override = r'/etc/systemd/system/snmpd.service.d/override.conf'
+systemd_service = 'snmpd.service'
-# SNMP OIDs used to mark auth/priv type
-OIDs = {
- 'md5' : '.1.3.6.1.6.3.10.1.1.2',
- 'sha' : '.1.3.6.1.6.3.10.1.1.3',
- 'aes' : '.1.3.6.1.6.3.10.1.2.4',
- 'des' : '.1.3.6.1.6.3.10.1.2.2',
- 'none': '.1.3.6.1.6.3.10.1.2.1'
-}
-
-default_config_data = {
- 'listen_on': [],
- 'listen_address': [],
- 'ipv6_enabled': 'True',
- 'communities': [],
- 'smux_peers': [],
- 'location' : '',
- 'description' : '',
- 'contact' : '',
- 'route_table': 'False',
- 'trap_source': '',
- 'trap_targets': [],
- 'vyos_user': '',
- 'vyos_user_pass': '',
- 'version': '',
- 'v3_enabled': 'False',
- 'v3_engineid': '',
- 'v3_groups': [],
- 'v3_traps': [],
- 'v3_users': [],
- 'v3_views': [],
- 'script_ext': []
-}
-
-def rmfile(file):
- if os.path.isfile(file):
- os.unlink(file)
-
-def get_config():
- snmp = default_config_data
- conf = Config()
- if not conf.exists('service snmp'):
- return None
+def get_config(config=None):
+ if config:
+ conf = config
else:
- if conf.exists('system ipv6 disable'):
- snmp['ipv6_enabled'] = False
+ conf = Config()
+ base = ['service', 'snmp']
+
+ snmp = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True, no_tag_node_value_mangle=True)
+ if not conf.exists(base):
+ snmp.update({'deleted' : ''})
+
+ if conf.exists(['service', 'lldp', 'snmp', 'enable']):
+ snmp.update({'lldp_snmp' : ''})
- conf.set_level('service snmp')
+ if conf.exists(['system', 'ipv6', 'disable']):
+ snmp.update({'ipv6_disabled' : ''})
+
+ if 'deleted' in snmp:
+ return snmp
version_data = get_version_data()
snmp['version'] = version_data['version']
@@ -91,461 +70,207 @@ def get_config():
snmp['vyos_user'] = 'vyos' + random(8)
snmp['vyos_user_pass'] = random(16)
- if conf.exists('community'):
- for name in conf.list_nodes('community'):
- community = {
- 'name': name,
- 'authorization': 'ro',
- 'network_v4': [],
- 'network_v6': [],
- 'has_source' : False
- }
-
- if conf.exists('community {0} authorization'.format(name)):
- community['authorization'] = conf.return_value('community {0} authorization'.format(name))
-
- # Subnet of SNMP client(s) allowed to contact system
- if conf.exists('community {0} network'.format(name)):
- for addr in conf.return_values('community {0} network'.format(name)):
- if is_ipv4(addr):
- community['network_v4'].append(addr)
- else:
- community['network_v6'].append(addr)
-
- # IP address of SNMP client allowed to contact system
- if conf.exists('community {0} client'.format(name)):
- for addr in conf.return_values('community {0} client'.format(name)):
- if is_ipv4(addr):
- community['network_v4'].append(addr)
- else:
- community['network_v6'].append(addr)
-
- if (len(community['network_v4']) > 0) or (len(community['network_v6']) > 0):
- community['has_source'] = True
-
- snmp['communities'].append(community)
-
- if conf.exists('contact'):
- snmp['contact'] = conf.return_value('contact')
-
- if conf.exists('description'):
- snmp['description'] = conf.return_value('description')
-
- if conf.exists('listen-address'):
- for addr in conf.list_nodes('listen-address'):
- port = '161'
- if conf.exists('listen-address {0} port'.format(addr)):
- port = conf.return_value('listen-address {0} port'.format(addr))
-
- snmp['listen_address'].append((addr, port))
+ # We have gathered the dict representation of the CLI, but there are default
+    # options which we need to update into the dictionary retrieved.
+ default_values = defaults(base)
+
+ # We can not merge defaults for tagNodes - those need to be blended in
+ # per tagNode instance
+ if 'listen_address' in default_values:
+ del default_values['listen_address']
+ if 'community' in default_values:
+ del default_values['community']
+ if 'trap_target' in default_values:
+ del default_values['trap_target']
+ if 'v3' in default_values:
+ del default_values['v3']
+ snmp = dict_merge(default_values, snmp)
+
+ if 'listen_address' in snmp:
+ default_values = defaults(base + ['listen-address'])
+ for address in snmp['listen_address']:
+ snmp['listen_address'][address] = dict_merge(
+ default_values, snmp['listen_address'][address])
# Always listen on localhost if an explicit address has been configured
# This is a safety measure to not end up with invalid listen addresses
# that are not configured on this system. See https://phabricator.vyos.net/T850
- if not '127.0.0.1' in conf.list_nodes('listen-address'):
- snmp['listen_address'].append(('127.0.0.1', '161'))
-
- if not '::1' in conf.list_nodes('listen-address'):
- snmp['listen_address'].append(('::1', '161'))
-
- if conf.exists('location'):
- snmp['location'] = conf.return_value('location')
-
- if conf.exists('smux-peer'):
- snmp['smux_peers'] = conf.return_values('smux-peer')
-
- if conf.exists('trap-source'):
- snmp['trap_source'] = conf.return_value('trap-source')
-
- if conf.exists('trap-target'):
- for target in conf.list_nodes('trap-target'):
- trap_tgt = {
- 'target': target,
- 'community': '',
- 'port': ''
- }
-
- if conf.exists('trap-target {0} community'.format(target)):
- trap_tgt['community'] = conf.return_value('trap-target {0} community'.format(target))
-
- if conf.exists('trap-target {0} port'.format(target)):
- trap_tgt['port'] = conf.return_value('trap-target {0} port'.format(target))
-
- snmp['trap_targets'].append(trap_tgt)
-
- if conf.exists('script-extensions'):
- for extname in conf.list_nodes('script-extensions extension-name'):
- conf_script = conf.return_value('script-extensions extension-name {} script'.format(extname))
- # if script has not absolute path, use pre configured path
- if "/" not in conf_script:
- conf_script = default_script_dir + conf_script
-
- extension = {
- 'name': extname,
- 'script' : conf_script
- }
-
- snmp['script_ext'].append(extension)
-
- if conf.exists('oid-enable route-table'):
- snmp['route_table'] = True
-
- if conf.exists('vrf'):
- # Append key to dict but don't place it in the default dictionary.
- # This is required to make the override.conf.tmpl work until we
- # migrate to get_config_dict().
- snmp['vrf'] = conf.return_value('vrf')
-
-
- #########################################################################
- # ____ _ _ __ __ ____ _____ #
- # / ___|| \ | | \/ | _ \ __ _|___ / #
- # \___ \| \| | |\/| | |_) | \ \ / / |_ \ #
- # ___) | |\ | | | | __/ \ V / ___) | #
- # |____/|_| \_|_| |_|_| \_/ |____/ #
- # #
- # now take care about the fancy SNMP v3 stuff, or bail out eraly #
- #########################################################################
- if not conf.exists('v3'):
- return snmp
- else:
- snmp['v3_enabled'] = True
-
- # 'set service snmp v3 engineid'
- if conf.exists('v3 engineid'):
- snmp['v3_engineid'] = conf.return_value('v3 engineid')
-
- # 'set service snmp v3 group'
- if conf.exists('v3 group'):
- for group in conf.list_nodes('v3 group'):
- v3_group = {
- 'name': group,
- 'mode': 'ro',
- 'seclevel': 'auth',
- 'view': ''
- }
-
- if conf.exists('v3 group {0} mode'.format(group)):
- v3_group['mode'] = conf.return_value('v3 group {0} mode'.format(group))
-
- if conf.exists('v3 group {0} seclevel'.format(group)):
- v3_group['seclevel'] = conf.return_value('v3 group {0} seclevel'.format(group))
-
- if conf.exists('v3 group {0} view'.format(group)):
- v3_group['view'] = conf.return_value('v3 group {0} view'.format(group))
-
- snmp['v3_groups'].append(v3_group)
-
- # 'set service snmp v3 trap-target'
- if conf.exists('v3 trap-target'):
- for trap in conf.list_nodes('v3 trap-target'):
- trap_cfg = {
- 'ipAddr': trap,
- 'secName': '',
- 'authProtocol': 'md5',
- 'authPassword': '',
- 'authMasterKey': '',
- 'privProtocol': 'des',
- 'privPassword': '',
- 'privMasterKey': '',
- 'ipProto': 'udp',
- 'ipPort': '162',
- 'type': '',
- 'secLevel': 'noAuthNoPriv'
- }
-
- if conf.exists('v3 trap-target {0} user'.format(trap)):
- # Set the securityName used for authenticated SNMPv3 messages.
- trap_cfg['secName'] = conf.return_value('v3 trap-target {0} user'.format(trap))
-
- if conf.exists('v3 trap-target {0} auth type'.format(trap)):
- # Set the authentication protocol (MD5 or SHA) used for authenticated SNMPv3 messages
- # cmdline option '-a'
- trap_cfg['authProtocol'] = conf.return_value('v3 trap-target {0} auth type'.format(trap))
-
- if conf.exists('v3 trap-target {0} auth plaintext-password'.format(trap)):
- # Set the authentication pass phrase used for authenticated SNMPv3 messages.
- # cmdline option '-A'
- trap_cfg['authPassword'] = conf.return_value('v3 trap-target {0} auth plaintext-password'.format(trap))
-
- if conf.exists('v3 trap-target {0} auth encrypted-password'.format(trap)):
- # Sets the keys to be used for SNMPv3 transactions. These options allow you to set the master authentication keys.
- # cmdline option '-3m'
- trap_cfg['authMasterKey'] = conf.return_value('v3 trap-target {0} auth encrypted-password'.format(trap))
-
- if conf.exists('v3 trap-target {0} privacy type'.format(trap)):
- # Set the privacy protocol (DES or AES) used for encrypted SNMPv3 messages.
- # cmdline option '-x'
- trap_cfg['privProtocol'] = conf.return_value('v3 trap-target {0} privacy type'.format(trap))
-
- if conf.exists('v3 trap-target {0} privacy plaintext-password'.format(trap)):
- # Set the privacy pass phrase used for encrypted SNMPv3 messages.
- # cmdline option '-X'
- trap_cfg['privPassword'] = conf.return_value('v3 trap-target {0} privacy plaintext-password'.format(trap))
-
- if conf.exists('v3 trap-target {0} privacy encrypted-password'.format(trap)):
- # Sets the keys to be used for SNMPv3 transactions. These options allow you to set the master encryption keys.
- # cmdline option '-3M'
- trap_cfg['privMasterKey'] = conf.return_value('v3 trap-target {0} privacy encrypted-password'.format(trap))
-
- if conf.exists('v3 trap-target {0} protocol'.format(trap)):
- trap_cfg['ipProto'] = conf.return_value('v3 trap-target {0} protocol'.format(trap))
-
- if conf.exists('v3 trap-target {0} port'.format(trap)):
- trap_cfg['ipPort'] = conf.return_value('v3 trap-target {0} port'.format(trap))
-
- if conf.exists('v3 trap-target {0} type'.format(trap)):
- trap_cfg['type'] = conf.return_value('v3 trap-target {0} type'.format(trap))
-
- # Determine securityLevel used for SNMPv3 messages (noAuthNoPriv|authNoPriv|authPriv).
- # Appropriate pass phrase(s) must provided when using any level higher than noAuthNoPriv.
- if trap_cfg['authPassword'] or trap_cfg['authMasterKey']:
- if trap_cfg['privProtocol'] or trap_cfg['privPassword']:
- trap_cfg['secLevel'] = 'authPriv'
- else:
- trap_cfg['secLevel'] = 'authNoPriv'
-
- snmp['v3_traps'].append(trap_cfg)
-
- # 'set service snmp v3 user'
- if conf.exists('v3 user'):
- for user in conf.list_nodes('v3 user'):
- user_cfg = {
- 'name': user,
- 'authMasterKey': '',
- 'authPassword': '',
- 'authProtocol': 'md5',
- 'authOID': 'none',
- 'group': '',
- 'mode': 'ro',
- 'privMasterKey': '',
- 'privPassword': '',
- 'privOID': '',
- 'privProtocol': 'des'
- }
-
- # v3 user {0} auth
- if conf.exists('v3 user {0} auth encrypted-password'.format(user)):
- user_cfg['authMasterKey'] = conf.return_value('v3 user {0} auth encrypted-password'.format(user))
-
- if conf.exists('v3 user {0} auth plaintext-password'.format(user)):
- user_cfg['authPassword'] = conf.return_value('v3 user {0} auth plaintext-password'.format(user))
-
- # load default value
- type = user_cfg['authProtocol']
- if conf.exists('v3 user {0} auth type'.format(user)):
- type = conf.return_value('v3 user {0} auth type'.format(user))
-
- # (re-)update with either default value or value from CLI
- user_cfg['authProtocol'] = type
- user_cfg['authOID'] = OIDs[type]
-
- # v3 user {0} group
- if conf.exists('v3 user {0} group'.format(user)):
- user_cfg['group'] = conf.return_value('v3 user {0} group'.format(user))
-
- # v3 user {0} mode
- if conf.exists('v3 user {0} mode'.format(user)):
- user_cfg['mode'] = conf.return_value('v3 user {0} mode'.format(user))
-
- # v3 user {0} privacy
- if conf.exists('v3 user {0} privacy encrypted-password'.format(user)):
- user_cfg['privMasterKey'] = conf.return_value('v3 user {0} privacy encrypted-password'.format(user))
-
- if conf.exists('v3 user {0} privacy plaintext-password'.format(user)):
- user_cfg['privPassword'] = conf.return_value('v3 user {0} privacy plaintext-password'.format(user))
-
- # load default value
- type = user_cfg['privProtocol']
- if conf.exists('v3 user {0} privacy type'.format(user)):
- type = conf.return_value('v3 user {0} privacy type'.format(user))
-
- # (re-)update with either default value or value from CLI
- user_cfg['privProtocol'] = type
- user_cfg['privOID'] = OIDs[type]
-
- snmp['v3_users'].append(user_cfg)
-
- # 'set service snmp v3 view'
- if conf.exists('v3 view'):
- for view in conf.list_nodes('v3 view'):
- view_cfg = {
- 'name': view,
- 'oids': []
- }
-
- if conf.exists('v3 view {0} oid'.format(view)):
- for oid in conf.list_nodes('v3 view {0} oid'.format(view)):
- oid_cfg = {
- 'oid': oid
- }
- view_cfg['oids'].append(oid_cfg)
- snmp['v3_views'].append(view_cfg)
+ if '127.0.0.1' not in snmp['listen_address']:
+ tmp = {'127.0.0.1': {'port': '161'}}
+ snmp['listen_address'] = dict_merge(tmp, snmp['listen_address'])
+
+ if '::1' not in snmp['listen_address']:
+ if 'ipv6_disabled' not in snmp:
+ tmp = {'::1': {'port': '161'}}
+ snmp['listen_address'] = dict_merge(tmp, snmp['listen_address'])
+
+ if 'community' in snmp:
+ default_values = defaults(base + ['community'])
+ for community in snmp['community']:
+ snmp['community'][community] = dict_merge(
+ default_values, snmp['community'][community])
+
+ if 'trap_target' in snmp:
+ default_values = defaults(base + ['trap-target'])
+ for trap in snmp['trap_target']:
+ snmp['trap_target'][trap] = dict_merge(
+ default_values, snmp['trap_target'][trap])
+
+ if 'v3' in snmp:
+ default_values = defaults(base + ['v3'])
+ # tagNodes need to be merged in individually later on
+ for tmp in ['user', 'group', 'trap_target']:
+ del default_values[tmp]
+ snmp['v3'] = dict_merge(default_values, snmp['v3'])
+
+ for user_group in ['user', 'group']:
+ if user_group in snmp['v3']:
+ default_values = defaults(base + ['v3', user_group])
+ for tmp in snmp['v3'][user_group]:
+ snmp['v3'][user_group][tmp] = dict_merge(
+ default_values, snmp['v3'][user_group][tmp])
+
+ if 'trap_target' in snmp['v3']:
+ default_values = defaults(base + ['v3', 'trap-target'])
+ for trap in snmp['v3']['trap_target']:
+ snmp['v3']['trap_target'][trap] = dict_merge(
+ default_values, snmp['v3']['trap_target'][trap])
return snmp
def verify(snmp):
- if snmp is None:
- # we can not delete SNMP when LLDP is configured with SNMP
- conf = Config()
- if conf.exists('service lldp snmp enable'):
- raise ConfigError('Can not delete SNMP service, as LLDP still uses SNMP!')
-
+ if not snmp:
return None
+ if {'deleted', 'lldp_snmp'} <= set(snmp):
+ raise ConfigError('Can not delete SNMP service, as LLDP still uses SNMP!')
+
### check if the configured script actually exist
- if snmp['script_ext']:
- for ext in snmp['script_ext']:
- if not os.path.isfile(ext['script']):
- print ("WARNING: script: {} doesn't exist".format(ext['script']))
+ if 'script_extensions' in snmp and 'extension_name' in snmp['script_extensions']:
+ for extension, extension_opt in snmp['script_extensions']['extension_name'].items():
+ if 'script' not in extension_opt:
+ raise ConfigError(f'Script extension "{extension}" requires an actual script to be configured!')
+
+ tmp = extension_opt['script']
+ if not os.path.isfile(tmp):
+ print(f'WARNING: script "{tmp}" does not exist!')
else:
- chmod_755(ext['script'])
-
- for listen in snmp['listen_address']:
- addr = listen[0]
- port = listen[1]
-
- if is_ipv4(addr):
- # example: udp:127.0.0.1:161
- listen = 'udp:' + addr + ':' + port
- elif snmp['ipv6_enabled']:
- # example: udp6:[::1]:161
- listen = 'udp6:' + '[' + addr + ']' + ':' + port
-
- # We only wan't to configure addresses that exist on the system.
- # Hint the user if they don't exist
- if is_addr_assigned(addr):
- snmp['listen_on'].append(listen)
- else:
- print('WARNING: SNMP listen address {0} not configured!'.format(addr))
+ chmod_755(extension_opt['script'])
+
+ if 'listen_address' in snmp:
+ for address in snmp['listen_address']:
+            # We only want to configure addresses that exist on the system.
+ # Hint the user if they don't exist
+ if not is_addr_assigned(address):
+ print(f'WARNING: SNMP listen address "{address}" not configured!')
+
+ if 'trap_target' in snmp:
+ for trap, trap_config in snmp['trap_target'].items():
+ if 'community' not in trap_config:
+ raise ConfigError(f'Trap target "{trap}" requires a community to be set!')
verify_vrf(snmp)
# bail out early if SNMP v3 is not configured
- if not snmp['v3_enabled']:
+ if 'v3' not in snmp:
return None
- if 'v3_groups' in snmp.keys():
- for group in snmp['v3_groups']:
- #
- # A view must exist prior to mapping it into a group
- #
- if 'view' in group.keys():
- error = True
- if 'v3_views' in snmp.keys():
- for view in snmp['v3_views']:
- if view['name'] == group['view']:
- error = False
- if error:
- raise ConfigError('You must create view "{0}" first'.format(group['view']))
- else:
- raise ConfigError('"view" must be specified')
-
- if not 'mode' in group.keys():
- raise ConfigError('"mode" must be specified')
-
- if not 'seclevel' in group.keys():
- raise ConfigError('"seclevel" must be specified')
-
- if 'v3_traps' in snmp.keys():
- for trap in snmp['v3_traps']:
- if trap['authPassword'] and trap['authMasterKey']:
- raise ConfigError('Must specify only one of encrypted-password/plaintext-key for trap auth')
-
- if trap['authPassword'] == '' and trap['authMasterKey'] == '':
- raise ConfigError('Must specify encrypted-password or plaintext-key for trap auth')
-
- if trap['privPassword'] and trap['privMasterKey']:
- raise ConfigError('Must specify only one of encrypted-password/plaintext-key for trap privacy')
+ if 'user' in snmp['v3']:
+ for user, user_config in snmp['v3']['user'].items():
+ if 'group' not in user_config:
+ raise ConfigError(f'Group membership required for user "{user}"!')
- if trap['privPassword'] == '' and trap['privMasterKey'] == '':
- raise ConfigError('Must specify encrypted-password or plaintext-key for trap privacy')
+ if 'plaintext_password' not in user_config['auth'] and 'encrypted_password' not in user_config['auth']:
+ raise ConfigError(f'Must specify authentication encrypted-password or plaintext-password for user "{user}"!')
- if not 'type' in trap.keys():
- raise ConfigError('v3 trap: "type" must be specified')
+ if 'plaintext_password' not in user_config['privacy'] and 'encrypted_password' not in user_config['privacy']:
+ raise ConfigError(f'Must specify privacy encrypted-password or plaintext-password for user "{user}"!')
- if not 'authPassword' and 'authMasterKey' in trap.keys():
- raise ConfigError('v3 trap: "auth" must be specified')
+ if 'group' in snmp['v3']:
+ for group, group_config in snmp['v3']['group'].items():
+ if 'seclevel' not in group_config:
+ raise ConfigError(f'Must configure "seclevel" for group "{group}"!')
+ if 'view' not in group_config:
+ raise ConfigError(f'Must configure "view" for group "{group}"!')
- if not 'authProtocol' in trap.keys():
- raise ConfigError('v3 trap: "protocol" must be specified')
+ # Check if 'view' exists
+ view = group_config['view']
+ if 'view' not in snmp['v3'] or view not in snmp['v3']['view']:
+ raise ConfigError(f'You must create view "{view}" first!')
- if not 'privPassword' and 'privMasterKey' in trap.keys():
- raise ConfigError('v3 trap: "user" must be specified')
+ if 'view' in snmp['v3']:
+ for view, view_config in snmp['v3']['view'].items():
+ if 'oid' not in view_config:
+ raise ConfigError(f'Must configure an "oid" for view "{view}"!')
- if 'v3_users' in snmp.keys():
- for user in snmp['v3_users']:
- #
- # Group must exist prior to mapping it into a group
- # seclevel will be extracted from group
- #
- if user['group']:
- error = True
- if 'v3_groups' in snmp.keys():
- for group in snmp['v3_groups']:
- if group['name'] == user['group']:
- seclevel = group['seclevel']
- error = False
+ if 'trap_target' in snmp['v3']:
+ for trap, trap_config in snmp['v3']['trap_target'].items():
+ if 'plaintext_password' not in trap_config['auth'] and 'encrypted_password' not in trap_config['auth']:
+ raise ConfigError(f'Must specify one of authentication encrypted-password or plaintext-password for trap "{trap}"!')
- if error:
- raise ConfigError('You must create group "{0}" first'.format(user['group']))
+ if {'plaintext_password', 'encrypted_password'} <= set(trap_config['auth']):
+ raise ConfigError(f'Can not specify both authentication encrypted-password and plaintext-password for trap "{trap}"!')
- # Depending on the configured security level the user has to provide additional info
- if (not user['authPassword'] and not user['authMasterKey']):
- raise ConfigError('Must specify encrypted-password or plaintext-key for user auth')
+ if 'plaintext_password' not in trap_config['privacy'] and 'encrypted_password' not in trap_config['privacy']:
+ raise ConfigError(f'Must specify one of privacy encrypted-password or plaintext-password for trap "{trap}"!')
- if user['privPassword'] == '' and user['privMasterKey'] == '':
- raise ConfigError('Must specify encrypted-password or plaintext-key for user privacy')
+ if {'plaintext_password', 'encrypted_password'} <= set(trap_config['privacy']):
+ raise ConfigError(f'Can not specify both privacy encrypted-password and plaintext-password for trap "{trap}"!')
- if user['mode'] == '':
- raise ConfigError('Must specify user mode ro/rw')
-
- if 'v3_views' in snmp.keys():
- for view in snmp['v3_views']:
- if not view['oids']:
- raise ConfigError('Must configure an oid')
+ if 'type' not in trap_config:
+ raise ConfigError('SNMP v3 trap "type" must be specified!')
return None
def generate(snmp):
+
#
# As we are manipulating the snmpd user database we have to stop it first!
# This is even save if service is going to be removed
- call('systemctl stop snmpd.service')
- config_files = [config_file_client, config_file_daemon, config_file_access,
- config_file_user, systemd_override]
+ call(f'systemctl stop {systemd_service}')
+ # Clean config files
+ config_files = [config_file_client, config_file_daemon,
+ config_file_access, config_file_user, systemd_override]
for file in config_files:
- rmfile(file)
+ if os.path.isfile(file):
+ os.unlink(file)
if not snmp:
return None
- if 'v3_users' in snmp.keys():
+ if 'v3' in snmp:
# net-snmp is now regenerating the configuration file in the background
# thus we need to re-open and re-read the file as the content changed.
# After that we can no read the encrypted password from the config and
# replace the CLI plaintext password with its encrypted version.
- os.environ["vyos_libexec_dir"] = "/usr/libexec/vyos"
+ os.environ['vyos_libexec_dir'] = '/usr/libexec/vyos'
- for user in snmp['v3_users']:
- if user['authProtocol'] == 'sha':
- hash = plaintext_to_sha1
- else:
- hash = plaintext_to_md5
+ if 'user' in snmp['v3']:
+ for user, user_config in snmp['v3']['user'].items():
+ if dict_search('auth.type', user_config) == 'sha':
+ hash = plaintext_to_sha1
+ else:
+ hash = plaintext_to_md5
+
+ if dict_search('auth.plaintext_password', user_config) is not None:
+ tmp = hash(dict_search('auth.plaintext_password', user_config),
+ dict_search('v3.engineid', snmp))
+
+ snmp['v3']['user'][user]['auth']['encrypted_password'] = tmp
+ del snmp['v3']['user'][user]['auth']['plaintext_password']
- if user['authPassword']:
- user['authMasterKey'] = hash(user['authPassword'], snmp['v3_engineid'])
- user['authPassword'] = ''
+ call(f'/opt/vyatta/sbin/my_set service snmp v3 user "{user}" auth encrypted-password "{tmp}" > /dev/null')
+ call(f'/opt/vyatta/sbin/my_delete service snmp v3 user "{user}" auth plaintext-password > /dev/null')
- call('/opt/vyatta/sbin/my_set service snmp v3 user "{name}" auth encrypted-password "{authMasterKey}" > /dev/null'.format(**user))
- call('/opt/vyatta/sbin/my_delete service snmp v3 user "{name}" auth plaintext-password > /dev/null'.format(**user))
+ if dict_search('privacy.plaintext_password', user_config) is not None:
+ tmp = hash(dict_search('privacy.plaintext_password', user_config),
+ dict_search('v3.engineid', snmp))
- if user['privPassword']:
- user['privMasterKey'] = hash(user['privPassword'], snmp['v3_engineid'])
- user['privPassword'] = ''
+ snmp['v3']['user'][user]['privacy']['encrypted_password'] = tmp
+ del snmp['v3']['user'][user]['privacy']['plaintext_password']
- call('/opt/vyatta/sbin/my_set service snmp v3 user "{name}" privacy encrypted-password "{privMasterKey}" > /dev/null'.format(**user))
- call('/opt/vyatta/sbin/my_delete service snmp v3 user "{name}" privacy plaintext-password > /dev/null'.format(**user))
+ call(f'/opt/vyatta/sbin/my_set service snmp v3 user "{user}" privacy encrypted-password "{tmp}" > /dev/null')
+ call(f'/opt/vyatta/sbin/my_delete service snmp v3 user "{user}" privacy plaintext-password > /dev/null')
# Write client config file
render(config_file_client, 'snmp/etc.snmp.conf.tmpl', snmp)
@@ -568,7 +293,7 @@ def apply(snmp):
return None
# start SNMP daemon
- call('systemctl restart snmpd.service')
+ call(f'systemctl restart {systemd_service}')
# Enable AgentX in FRR
call('vtysh -c "configure terminal" -c "agentx" >/dev/null')
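
The defaults handling above - removing tag nodes from defaults(base) and blending them back in per instance - recurs throughout this series, because defaults() cannot be evaluated for a single tag-node instance. A condensed sketch with made-up values and a simplified dict_merge shows the effect:

def dict_merge(defaults, config):
    # Recursively overlay config values on top of defaults (simplified).
    result = dict(defaults)
    for key, value in config.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = dict_merge(result[key], value)
        else:
            result[key] = value
    return result

# Hypothetical CLI dict with two listen-address tag-node instances
snmp = {'listen_address': {'127.0.0.1': {'port': '1161'}, '192.0.2.1': {}}}
listen_defaults = {'port': '161'}  # stands in for defaults(base + ['listen-address'])

for address in snmp['listen_address']:
    snmp['listen_address'][address] = dict_merge(listen_defaults,
                                                 snmp['listen_address'][address])
print(snmp)
# {'listen_address': {'127.0.0.1': {'port': '1161'}, '192.0.2.1': {'port': '161'}}}
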
diff --git a/src/conf_mode/system-login-banner.py b/src/conf_mode/system-login-banner.py
index a960a4da3..a521c9834 100755
--- a/src/conf_mode/system-login-banner.py
+++ b/src/conf_mode/system-login-banner.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2020 VyOS maintainers and contributors
+# Copyright (C) 2020-2021 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -15,34 +15,33 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sys import exit
+from copy import deepcopy
+
from vyos.config import Config
+from vyos.util import write_file
from vyos import ConfigError
-
from vyos import airbag
airbag.enable()
-motd="""
-The programs included with the Debian/VyOS GNU/Linux system are free software;
-the exact distribution terms for each program are described in the
-individual files in /usr/share/doc/*/copyright.
-
-Debian/VyOS GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent
-permitted by applicable law.
-
-"""
+try:
+ with open('/usr/share/vyos/default_motd') as f:
+ motd = f.read()
+except:
+ # Use an empty banner if the default banner file cannot be read
+ motd = "\n"
PRELOGIN_FILE = r'/etc/issue'
PRELOGIN_NET_FILE = r'/etc/issue.net'
POSTLOGIN_FILE = r'/etc/motd'
default_config_data = {
- 'issue': 'Welcome to VyOS - \\n \\l\n',
- 'issue_net': 'Welcome to VyOS\n',
+ 'issue': 'Welcome to VyOS - \\n \\l\n\n',
+ 'issue_net': '',
'motd': motd
}
def get_config(config=None):
- banner = default_config_data
+ banner = deepcopy(default_config_data)
if config:
conf = config
else:
@@ -91,14 +90,9 @@ def generate(banner):
pass
def apply(banner):
- with open(PRELOGIN_FILE, 'w') as f:
- f.write(banner['issue'])
-
- with open(PRELOGIN_NET_FILE, 'w') as f:
- f.write(banner['issue_net'])
-
- with open(POSTLOGIN_FILE, 'w') as f:
- f.write(banner['motd'])
+ write_file(PRELOGIN_FILE, banner['issue'])
+ write_file(PRELOGIN_NET_FILE, banner['issue_net'])
+ write_file(POSTLOGIN_FILE, banner['motd'])
return None
diff --git a/src/conf_mode/system-logs.py b/src/conf_mode/system-logs.py
new file mode 100755
index 000000000..e6296656d
--- /dev/null
+++ b/src/conf_mode/system-logs.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from sys import exit
+
+from vyos import ConfigError
+from vyos import airbag
+from vyos.config import Config
+from vyos.configdict import dict_merge
+from vyos.logger import syslog
+from vyos.template import render
+from vyos.util import dict_search
+from vyos.xml import defaults
+airbag.enable()
+
+# path to logrotate configs
+logrotate_atop_file = '/etc/logrotate.d/vyos-atop'
+logrotate_rsyslog_file = '/etc/logrotate.d/vyos-rsyslog'
+
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['system', 'logs']
+ default_values = defaults(base)
+ logs_config = conf.get_config_dict(base,
+ key_mangling=('-', '_'),
+ get_first_key=True)
+ logs_config = dict_merge(default_values, logs_config)
+
+ return logs_config
+
+
+def verify(logs_config):
+ # Nothing to verify here
+ pass
+
+
+def generate(logs_config):
+ # get configuration for logrotate atop
+ logrotate_atop = dict_search('logrotate.atop', logs_config)
+ # generate new config file for atop
+ syslog.debug('Adding logrotate config for atop')
+ render(logrotate_atop_file, 'logs/logrotate/vyos-atop.tmpl', logrotate_atop)
+
+ # get configuration for logrotate rsyslog
+ logrotate_rsyslog = dict_search('logrotate.messages', logs_config)
+ # generate new config file for rsyslog
+ syslog.debug('Adding logrotate config for rsyslog')
+ render(logrotate_rsyslog_file, 'logs/logrotate/vyos-rsyslog.tmpl',
+ logrotate_rsyslog)
+
+
+def apply(logs_config):
+ # No further actions needed
+ pass
+
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
diff --git a/src/conf_mode/system-option.py b/src/conf_mode/system-option.py
index 55cf6b142..b1c63e316 100755
--- a/src/conf_mode/system-option.py
+++ b/src/conf_mode/system-option.py
@@ -126,6 +126,12 @@ def apply(options):
if 'keyboard_layout' in options:
cmd('loadkeys {keyboard_layout}'.format(**options))
+    # Enable/disable root-partition-auto-resize SystemD service
+ if 'root_partition_auto_resize' in options:
+ cmd('systemctl enable root-partition-auto-resize.service')
+ else:
+ cmd('systemctl disable root-partition-auto-resize.service')
+
if __name__ == '__main__':
try:
c = get_config()
diff --git a/src/conf_mode/system_console.py b/src/conf_mode/system_console.py
index 33a546bd3..19b252513 100755
--- a/src/conf_mode/system_console.py
+++ b/src/conf_mode/system_console.py
@@ -18,9 +18,14 @@ import os
import re
from vyos.config import Config
-from vyos.util import call, read_file, write_file
+from vyos.configdict import dict_merge
+from vyos.util import call
+from vyos.util import read_file
+from vyos.util import write_file
from vyos.template import render
-from vyos import ConfigError, airbag
+from vyos.xml import defaults
+from vyos import ConfigError
+from vyos import airbag
airbag.enable()
by_bus_dir = '/dev/serial/by-bus'
@@ -36,21 +41,27 @@ def get_config(config=None):
console = conf.get_config_dict(base, get_first_key=True)
# bail out early if no serial console is configured
- if 'device' not in console.keys():
+ if 'device' not in console:
return console
# convert CLI values to system values
- for device in console['device'].keys():
- # no speed setting has been configured - use default value
- if not 'speed' in console['device'][device].keys():
- tmp = { 'speed': '' }
- if device.startswith('hvc'):
- tmp['speed'] = 38400
- else:
- tmp['speed'] = 115200
+ default_values = defaults(base + ['device'])
+ for device, device_config in console['device'].items():
+ if 'speed' not in device_config and device.startswith('hvc'):
+ # XEN console has a different default console speed
+ console['device'][device]['speed'] = 38400
+ else:
+ # Merge in XML defaults - the proper way to do it
+ console['device'][device] = dict_merge(default_values,
+ console['device'][device])
+
+ return console
- console['device'][device].update(tmp)
+def verify(console):
+ if not console or 'device' not in console:
+ return None
+ for device in console['device']:
if device.startswith('usb'):
# It is much easiert to work with the native ttyUSBn name when using
# getty, but that name may change across reboots - depending on the
@@ -58,13 +69,13 @@ def get_config(config=None):
# to its dynamic device file - and create a new dict entry for it.
by_bus_device = f'{by_bus_dir}/{device}'
if os.path.isdir(by_bus_dir) and os.path.exists(by_bus_device):
- tmp = os.path.basename(os.readlink(by_bus_device))
- # updating the dict must come as last step in the loop!
- console['device'][tmp] = console['device'].pop(device)
+ device = os.path.basename(os.readlink(by_bus_device))
- return console
+ # If the device name still starts with usbXXX no matching tty was found
+ # and it can not be used as a serial interface
+ if device.startswith('usb'):
+        raise ConfigError(f'Device {device} does not support being used as tty')
-def verify(console):
return None
def generate(console):
@@ -76,20 +87,29 @@ def generate(console):
call(f'systemctl stop {basename}')
os.unlink(os.path.join(root, basename))
- if not console:
+ if not console or 'device' not in console:
return None
- for device in console['device'].keys():
+ for device, device_config in console['device'].items():
+ if device.startswith('usb'):
+            # It is much easier to work with the native ttyUSBn name when using
+ # getty, but that name may change across reboots - depending on the
+ # amount of connected devices. We will resolve the fixed device name
+ # to its dynamic device file - and create a new dict entry for it.
+ by_bus_device = f'{by_bus_dir}/{device}'
+ if os.path.isdir(by_bus_dir) and os.path.exists(by_bus_device):
+ device = os.path.basename(os.readlink(by_bus_device))
+
config_file = base_dir + f'/serial-getty@{device}.service'
getty_wants_symlink = base_dir + f'/getty.target.wants/serial-getty@{device}.service'
- render(config_file, 'getty/serial-getty.service.tmpl', console['device'][device])
+ render(config_file, 'getty/serial-getty.service.tmpl', device_config)
os.symlink(config_file, getty_wants_symlink)
# GRUB
# For existing serial line change speed (if necessary)
# Only applys to ttyS0
- if 'ttyS0' not in console['device'].keys():
+ if 'ttyS0' not in console['device']:
return None
speed = console['device']['ttyS0']['speed']
@@ -98,7 +118,6 @@ def generate(console):
return None
lines = read_file(grub_config).split('\n')
-
p = re.compile(r'^(.* console=ttyS0),[0-9]+(.*)$')
write = False
newlines = []
@@ -122,9 +141,8 @@ def generate(console):
return None
def apply(console):
- # reset screen blanking
+ # Reset screen blanking
call('/usr/bin/setterm -blank 0 -powersave off -powerdown 0 -term linux </dev/tty1 >/dev/tty1 2>&1')
-
# Reload systemd manager configuration
call('systemctl daemon-reload')
@@ -136,11 +154,11 @@ def apply(console):
call('/usr/bin/setterm -blank 15 -powersave powerdown -powerdown 60 -term linux </dev/tty1 >/dev/tty1 2>&1')
# Start getty process on configured serial interfaces
- for device in console['device'].keys():
+ for device in console['device']:
# Only start console if it exists on the running system. If a user
# detaches a USB serial console and reboots - it should not fail!
if os.path.exists(f'/dev/{device}'):
- call(f'systemctl start serial-getty@{device}.service')
+ call(f'systemctl restart serial-getty@{device}.service')
return None
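
Resolving the persistent by-bus name to the current ttyUSBn device, done in both verify() and generate() above, is plain symlink handling. A self-contained sketch; the device name below is invented for the example:

import os

def resolve_by_bus(device, by_bus_dir='/dev/serial/by-bus'):
    # Map a persistent by-bus name to its current ttyUSBn name; fall back to
    # the given name when no matching symlink exists on this system.
    by_bus_device = os.path.join(by_bus_dir, device)
    if os.path.isdir(by_bus_dir) and os.path.exists(by_bus_device):
        return os.path.basename(os.readlink(by_bus_device))
    return device

print(resolve_by_bus('usb0b1.1p1.0'))  # e.g. 'ttyUSB0' where such a link exists
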
diff --git a/src/conf_mode/tftp_server.py b/src/conf_mode/tftp_server.py
index 2409eec1f..ef726670c 100755
--- a/src/conf_mode/tftp_server.py
+++ b/src/conf_mode/tftp_server.py
@@ -24,6 +24,7 @@ from sys import exit
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configverify import verify_vrf
from vyos.template import render
from vyos.template import is_ipv4
from vyos.util import call
@@ -65,10 +66,11 @@ def verify(tftpd):
if 'listen_address' not in tftpd:
raise ConfigError('TFTP server listen address must be configured!')
- for address in tftpd['listen_address']:
+ for address, address_config in tftpd['listen_address'].items():
if not is_addr_assigned(address):
print(f'WARNING: TFTP server listen address "{address}" not ' \
'assigned to any interface!')
+ verify_vrf(address_config)
return None
@@ -83,7 +85,7 @@ def generate(tftpd):
return None
idx = 0
- for address in tftpd['listen_address']:
+ for address, address_config in tftpd['listen_address'].items():
config = deepcopy(tftpd)
port = tftpd['port']
if is_ipv4(address):
@@ -91,6 +93,9 @@ def generate(tftpd):
else:
config['listen_address'] = f'[{address}]:{port} -6'
+ if 'vrf' in address_config:
+ config['vrf'] = address_config['vrf']
+
file = config_file + str(idx)
render(file, 'tftp-server/default.tmpl', config)
idx = idx + 1
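
The generate() change above builds one daemon configuration per listen address and carries an optional per-address VRF along. A small sketch of that per-address fan-out, assuming a dict shaped like tftpd['listen_address'] with an optional 'vrf' leaf; the address check is a crude stand-in for the is_ipv4() template helper:

    # Sketch: build one config dict per listen address, carrying an optional VRF.
    # The input structure mirrors tftpd['listen_address']; the values are made up.
    from copy import deepcopy

    def per_address_configs(tftpd: dict) -> list:
        configs = []
        for address, address_config in tftpd['listen_address'].items():
            config = deepcopy(tftpd)
            port = tftpd['port']
            if ':' in address:                      # crude IPv6 check for the sketch
                config['listen_address'] = f'[{address}]:{port} -6'
            else:
                config['listen_address'] = f'{address}:{port}'
            if 'vrf' in address_config:
                config['vrf'] = address_config['vrf']
            configs.append(config)
        return configs

    example = {'port': '69',
               'listen_address': {'192.0.2.1': {},
                                  '2001:db8::1': {'vrf': 'mgmt'}}}
    print(per_address_configs(example))
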
diff --git a/src/conf_mode/vpn_l2tp.py b/src/conf_mode/vpn_l2tp.py
index 9c52f77ca..818e8fa0b 100755
--- a/src/conf_mode/vpn_l2tp.py
+++ b/src/conf_mode/vpn_l2tp.py
@@ -290,6 +290,8 @@ def get_config(config=None):
# LNS secret
if conf.exists(['lns', 'shared-secret']):
l2tp['lns_shared_secret'] = conf.return_value(['lns', 'shared-secret'])
+ if conf.exists(['lns', 'host-name']):
+ l2tp['lns_host_name'] = conf.return_value(['lns', 'host-name'])
if conf.exists(['ccp-disable']):
l2tp['ccp_disable'] = True
diff --git a/src/conf_mode/vpn_openconnect.py b/src/conf_mode/vpn_openconnect.py
index f6db196dc..51ea1f223 100755
--- a/src/conf_mode/vpn_openconnect.py
+++ b/src/conf_mode/vpn_openconnect.py
@@ -23,9 +23,11 @@ from vyos.pki import wrap_certificate
from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.util import call
+from vyos.util import is_systemd_service_running
from vyos.xml import defaults
from vyos import ConfigError
from crypt import crypt, mksalt, METHOD_SHA512
+from time import sleep
from vyos import airbag
airbag.enable()
@@ -172,6 +174,16 @@ def apply(ocserv):
os.unlink(file)
else:
call('systemctl restart ocserv.service')
+ counter = 0
+ while True:
+ # exit early when service runs
+ if is_systemd_service_running("ocserv.service"):
+ break
+ sleep(0.250)
+ if counter > 5:
+ raise ConfigError('openconnect failed to start, check the logs for details')
+ break
+ counter += 1
if __name__ == '__main__':
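
The added polling loop makes a broken ocserv configuration fail the commit instead of silently leaving the service down after the restart. A generic sketch of such a bounded wait, using systemctl directly in place of the vyos is_systemd_service_running() helper; the retry count and interval are illustrative:

    # Sketch: wait briefly for a systemd unit to come up after a restart and
    # fail loudly if it does not. Retry count and interval are illustrative.
    import subprocess
    from time import sleep

    def wait_for_service(unit: str, attempts: int = 6, interval: float = 0.25) -> None:
        for _ in range(attempts):
            # 'systemctl is-active --quiet' exits 0 when the unit is active
            if subprocess.run(['systemctl', 'is-active', '--quiet', unit]).returncode == 0:
                return
            sleep(interval)
        raise RuntimeError(f'{unit} failed to start, check the logs for details')

    # wait_for_service('ocserv.service')
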
diff --git a/src/conf_mode/vpn_sstp.py b/src/conf_mode/vpn_sstp.py
index d1a71a5ad..68980e5ab 100755
--- a/src/conf_mode/vpn_sstp.py
+++ b/src/conf_mode/vpn_sstp.py
@@ -26,6 +26,7 @@ from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.util import call
from vyos.util import dict_search
+from vyos.util import write_file
from vyos import ConfigError
from vyos import airbag
airbag.enable()
@@ -34,6 +35,10 @@ cfg_dir = '/run/accel-pppd'
sstp_conf = '/run/accel-pppd/sstp.conf'
sstp_chap_secrets = '/run/accel-pppd/sstp.chap-secrets'
+cert_file_path = os.path.join(cfg_dir, 'sstp-cert.pem')
+cert_key_path = os.path.join(cfg_dir, 'sstp-cert.key')
+ca_cert_file_path = os.path.join(cfg_dir, 'sstp-ca.pem')
+
def get_config(config=None):
if config:
conf = config
@@ -58,7 +63,7 @@ def verify(sstp):
verify_accel_ppp_base_service(sstp)
- if not sstp['client_ip_pool']:
+ if 'client_ip_pool' not in sstp and 'client_ipv6_pool' not in sstp:
raise ConfigError('Client IP subnet required')
#
@@ -72,22 +77,32 @@ def verify(sstp):
ssl = sstp['ssl']
+ # CA
if 'ca_certificate' not in ssl:
raise ConfigError('SSL CA certificate missing on SSTP config')
+ ca_name = ssl['ca_certificate']
+
+ if ca_name not in sstp['pki']['ca']:
+ raise ConfigError('Invalid CA certificate on SSTP config')
+
+ if 'certificate' not in sstp['pki']['ca'][ca_name]:
+ raise ConfigError('Missing certificate data for CA certificate on SSTP config')
+
+ # Certificate
if 'certificate' not in ssl:
raise ConfigError('SSL certificate missing on SSTP config')
cert_name = ssl['certificate']
- if ssl['ca_certificate'] not in sstp['pki']['ca']:
- raise ConfigError('Invalid CA certificate on SSTP config')
-
if cert_name not in sstp['pki']['certificate']:
raise ConfigError('Invalid certificate on SSTP config')
pki_cert = sstp['pki']['certificate'][cert_name]
+ if 'certificate' not in pki_cert:
+ raise ConfigError('Missing certificate data for certificate on SSTP config')
+
if 'private' not in pki_cert or 'key' not in pki_cert['private']:
raise ConfigError('Missing private key for certificate on SSTP config')
@@ -98,27 +113,18 @@ def generate(sstp):
if not sstp:
return None
- cert_file_path = os.path.join(cfg_dir, 'sstp-cert.pem')
- cert_key_path = os.path.join(cfg_dir, 'sstp-cert.key')
- ca_cert_file_path = os.path.join(cfg_dir, 'sstp-ca.pem')
+ # accel-cmd reload doesn't work so any change results in a restart of the daemon
+ render(sstp_conf, 'accel-ppp/sstp.config.tmpl', sstp)
cert_name = sstp['ssl']['certificate']
pki_cert = sstp['pki']['certificate'][cert_name]
- with open(cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_cert['certificate']))
-
- with open(cert_key_path, 'w') as f:
- f.write(wrap_private_key(pki_cert['private']['key']))
-
ca_cert_name = sstp['ssl']['ca_certificate']
pki_ca = sstp['pki']['ca'][ca_cert_name]
- with open(ca_cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_ca['certificate']))
-
- # accel-cmd reload doesn't work so any change results in a restart of the daemon
- render(sstp_conf, 'accel-ppp/sstp.config.tmpl', sstp)
+ write_file(cert_file_path, wrap_certificate(pki_cert['certificate']))
+ write_file(cert_key_path, wrap_private_key(pki_cert['private']['key']))
+ write_file(ca_cert_file_path, wrap_certificate(pki_ca['certificate']))
if dict_search('authentication.mode', sstp) == 'local':
render(sstp_chap_secrets, 'accel-ppp/chap-secrets.config_dict.tmpl',
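
Hoisting the PEM paths to module scope and switching to write_file() keeps generate() down to rendering plus three writes. A self-contained sketch of that write-wrapped-PEM pattern; the wrap_* stand-ins below only re-add PEM armour and are not the vyos.pki implementations, and the directory is deliberately a throwaway one:

    # Sketch: persist certificate material taken from a PKI dict.
    # wrap_certificate()/wrap_private_key() are stand-ins for the vyos.pki
    # helpers; the directory and the base64 payloads are placeholders.
    import os

    cfg_dir = '/tmp/accel-pppd-example'   # illustrative; the script uses /run/accel-pppd
    cert_file_path = os.path.join(cfg_dir, 'sstp-cert.pem')
    cert_key_path  = os.path.join(cfg_dir, 'sstp-cert.key')

    def wrap_certificate(data: str) -> str:
        return f'-----BEGIN CERTIFICATE-----\n{data}\n-----END CERTIFICATE-----\n'

    def wrap_private_key(data: str) -> str:
        return f'-----BEGIN PRIVATE KEY-----\n{data}\n-----END PRIVATE KEY-----\n'

    def write_file(path: str, content: str) -> None:
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'w') as f:
            f.write(content)

    pki_cert = {'certificate': 'MIIB...', 'private': {'key': 'MIIE...'}}
    write_file(cert_file_path, wrap_certificate(pki_cert['certificate']))
    write_file(cert_key_path, wrap_private_key(pki_cert['private']['key']))
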
diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py
index 919083ac4..38c0c4463 100755
--- a/src/conf_mode/vrf.py
+++ b/src/conf_mode/vrf.py
@@ -18,7 +18,6 @@ import os
from sys import exit
from json import loads
-from tempfile import NamedTemporaryFile
from vyos.config import Config
from vyos.configdict import node_changed
@@ -31,10 +30,12 @@ from vyos.util import get_interface_config
from vyos.util import popen
from vyos.util import run
from vyos import ConfigError
+from vyos import frr
from vyos import airbag
airbag.enable()
-config_file = r'/etc/iproute2/rt_tables.d/vyos-vrf.conf'
+config_file = '/etc/iproute2/rt_tables.d/vyos-vrf.conf'
+nft_vrf_config = '/tmp/nftables-vrf-zones'
def list_rules():
command = 'ip -j -4 rule show'
@@ -128,8 +129,8 @@ def verify(vrf):
def generate(vrf):
render(config_file, 'vrf/vrf.conf.tmpl', vrf)
# Render nftables zones config
- vrf['nft_vrf_zones'] = NamedTemporaryFile().name
- render(vrf['nft_vrf_zones'], 'firewall/nftables-vrf-zones.tmpl', vrf)
+
+ render(nft_vrf_config, 'firewall/nftables-vrf-zones.tmpl', vrf)
return None
@@ -165,8 +166,9 @@ def apply(vrf):
_, err = popen('nft list table inet vrf_zones')
# If not, create a table
if err:
- cmd(f'nft -f {vrf["nft_vrf_zones"]}')
- os.unlink(vrf['nft_vrf_zones'])
+ if os.path.exists(nft_vrf_config):
+ cmd(f'nft -f {nft_vrf_config}')
+ os.unlink(nft_vrf_config)
for name, config in vrf['name'].items():
table = config['table']
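
With the zones ruleset rendered to a fixed path, apply() only has to load it when the vrf_zones table is missing and can then discard the file. A sketch of that conditional load using plain subprocess calls instead of the vyos popen()/cmd() wrappers; the original inspects stderr, this version relies on the exit code:

    # Sketch: load a rendered nftables ruleset only when its table does not
    # exist yet, then remove the rendered file. The path is illustrative.
    import os
    import subprocess

    nft_vrf_config = '/tmp/nftables-vrf-zones'

    def ensure_vrf_zones_table() -> None:
        # 'nft list table' exits non-zero when the table is absent
        missing = subprocess.run(['nft', 'list', 'table', 'inet', 'vrf_zones'],
                                 capture_output=True).returncode != 0
        if missing and os.path.exists(nft_vrf_config):
            subprocess.run(['nft', '-f', nft_vrf_config], check=True)
            os.unlink(nft_vrf_config)

    # ensure_vrf_zones_table()
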
diff --git a/src/conf_mode/vrf_vni.py b/src/conf_mode/vrf_vni.py
index 87ee8f2d1..1a7bd1f09 100755
--- a/src/conf_mode/vrf_vni.py
+++ b/src/conf_mode/vrf_vni.py
@@ -32,37 +32,26 @@ def get_config(config=None):
else:
conf = Config()
- # This script only works with a passed VRF name
- if len(argv) < 1:
- raise NotImplementedError
- vrf = argv[1]
+ base = ['vrf']
+ vrf = conf.get_config_dict(base, get_first_key=True)
+ return vrf
- # "assemble" dict - easier here then use a full blown get_config_dict()
- # on a single leafNode
- vni = { 'vrf' : vrf }
- tmp = conf.return_value(['vrf', 'name', vrf, 'vni'])
- if tmp: vni.update({ 'vni' : tmp })
-
- return vni
-
-def verify(vni):
+def verify(vrf):
return None
-def generate(vni):
- vni['new_frr_config'] = render_to_string('frr/vrf-vni.frr.tmpl', vni)
+def generate(vrf):
+ vrf['new_frr_config'] = render_to_string('frr/vrf-vni.frr.tmpl', vrf)
return None
-def apply(vni):
+def apply(vrf):
# add configuration to FRR
frr_cfg = frr.FRRConfig()
frr_cfg.load_configuration(frr_daemon)
- frr_cfg.modify_section(f'^vrf [a-zA-Z-]*$', '')
- frr_cfg.add_before(r'(interface .*|line vty)', vni['new_frr_config'])
+ frr_cfg.modify_section(f'^vrf .+', stop_pattern='^exit-vrf', remove_stop_mark=True)
+ if 'new_frr_config' in vrf:
+ frr_cfg.add_before(frr.default_add_before, vrf['new_frr_config'])
frr_cfg.commit_configuration(frr_daemon)
- # Save configuration to /run/frr/config/frr.conf
- frr.save_configuration()
-
return None
if __name__ == '__main__':
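
The reworked apply() removes the whole 'vrf … exit-vrf' block from the daemon configuration before injecting the freshly rendered snippet. A rough regex-based sketch of that replace-between-markers idea; it only mimics what FRRConfig.modify_section()/add_before() do, and for simplicity appends the new block at the end instead of before the interface/line-vty stanzas:

    # Sketch: drop every 'vrf <name> ... exit-vrf' block from an FRR config text
    # and append a freshly rendered block. Not the vyos.frr implementation.
    import re

    def replace_vrf_sections(frr_config: str, new_block: str) -> str:
        # Remove existing vrf sections including their 'exit-vrf' stop mark
        stripped = re.sub(r'(?ms)^vrf .+?^exit-vrf\n?', '', frr_config)
        return stripped.rstrip('\n') + '\n' + new_block + '\n'

    old = 'frr version 7.5\nvrf red\n vni 1000\nexit-vrf\nline vty\n'
    print(replace_vrf_sections(old, 'vrf red\n vni 2000\nexit-vrf'))
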
diff --git a/src/conf_mode/vrrp.py b/src/conf_mode/vrrp.py
index e8f1c1f99..c72efc61f 100755
--- a/src/conf_mode/vrrp.py
+++ b/src/conf_mode/vrrp.py
@@ -28,6 +28,7 @@ from vyos.template import render
from vyos.template import is_ipv4
from vyos.template import is_ipv6
from vyos.util import call
+from vyos.util import is_systemd_service_running
from vyos.xml import defaults
from vyos import ConfigError
from vyos import airbag
@@ -139,7 +140,12 @@ def apply(vrrp):
call(f'systemctl stop {service_name}')
return None
- call(f'systemctl restart {service_name}')
+ # XXX: T3944 - reload keepalived configuration if service is already running
+ # to not cause any service disruption when applying changes.
+ if is_systemd_service_running(service_name):
+ call(f'systemctl reload {service_name}')
+ else:
+ call(f'systemctl restart {service_name}')
return None
if __name__ == '__main__':
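
Reloading keepalived in place preserves VRRP state across commits; a full restart is only needed when the daemon is not running yet. A generic sketch of that reload-or-restart decision with systemctl, independent of the vyos util wrappers:

    # Sketch: reload a unit in place when it is active, otherwise (re)start it.
    # The unit name is illustrative.
    import subprocess

    def reload_or_restart(unit: str) -> None:
        active = subprocess.run(['systemctl', 'is-active', '--quiet', unit]).returncode == 0
        action = 'reload' if active else 'restart'
        subprocess.run(['systemctl', action, unit], check=True)

    # reload_or_restart('keepalived.service')
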
diff --git a/src/conf_mode/zone_policy.py b/src/conf_mode/zone_policy.py
new file mode 100755
index 000000000..2535ea33b
--- /dev/null
+++ b/src/conf_mode/zone_policy.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from json import loads
+from sys import exit
+
+from vyos.config import Config
+from vyos.template import render
+from vyos.util import cmd
+from vyos.util import dict_search_args
+from vyos.util import run
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+nftables_conf = '/run/nftables_zone.conf'
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['zone-policy']
+ zone_policy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ if zone_policy:
+ zone_policy['firewall'] = conf.get_config_dict(['firewall'], key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ return zone_policy
+
+def verify(zone_policy):
+ # bail out early - looks like removal from running config
+ if not zone_policy:
+ return None
+
+ local_zone = False
+ interfaces = []
+
+ if 'zone' in zone_policy:
+ for zone, zone_conf in zone_policy['zone'].items():
+ if 'local_zone' not in zone_conf and 'interface' not in zone_conf:
+ raise ConfigError(f'Zone "{zone}" has no interfaces and is not the local zone')
+
+ if 'local_zone' in zone_conf:
+ if local_zone:
+ raise ConfigError('There cannot be multiple local zones')
+ if 'interface' in zone_conf:
+ raise ConfigError('Local zone cannot have interfaces assigned')
+ if 'intra_zone_filtering' in zone_conf:
+ raise ConfigError('Local zone cannot use intra-zone-filtering')
+ local_zone = True
+
+ if 'interface' in zone_conf:
+ found_duplicates = [intf for intf in zone_conf['interface'] if intf in interfaces]
+
+ if found_duplicates:
+ raise ConfigError(f'Interfaces cannot be assigned to multiple zones')
+
+ interfaces += zone_conf['interface']
+
+ if 'intra_zone_filtering' in zone_conf:
+ intra_zone = zone_conf['intra_zone_filtering']
+
+ if len(intra_zone) > 1:
+ raise ConfigError('Only one intra-zone-filtering action can be specified')
+
+ if 'firewall' in intra_zone:
+ v4_name = dict_search_args(intra_zone, 'firewall', 'name')
+ if v4_name and not dict_search_args(zone_policy, 'firewall', 'name', v4_name):
+ raise ConfigError(f'Firewall name "{v4_name}" does not exist')
+
+ v6_name = dict_search_args(intra_zone, 'firewall', 'ipv6-name')
+ if v6_name and not dict_search_args(zone_policy, 'firewall', 'ipv6-name', v6_name):
+ raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist')
+
+ if not v4_name and not v6_name:
+ raise ConfigError('No firewall names specified for intra-zone-filtering')
+
+ if 'from' in zone_conf:
+ for from_zone, from_conf in zone_conf['from'].items():
+ v4_name = dict_search_args(from_conf, 'firewall', 'name')
+ if v4_name:
+ if 'name' not in zone_policy['firewall']:
+ raise ConfigError(f'Firewall name "{v4_name}" does not exist')
+
+ if not dict_search_args(zone_policy, 'firewall', 'name', v4_name):
+ raise ConfigError(f'Firewall name "{v4_name}" does not exist')
+
+ v6_name = dict_search_args(from_conf, 'firewall', 'v6_name')
+ if v6_name:
+ if 'ipv6_name' not in zone_policy['firewall']:
+ raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist')
+
+ if not dict_search_args(zone_policy, 'firewall', 'ipv6_name', v6_name):
+ raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist')
+
+ return None
+
+def has_ipv4_fw(zone_conf):
+ if 'from' not in zone_conf:
+ return False
+ zone_from = zone_conf['from']
+ return any(dict_search_args(zone_from, fz, 'firewall', 'name') for fz in zone_from)
+
+def has_ipv6_fw(zone_conf):
+ if 'from' not in zone_conf:
+ return False
+ zone_from = zone_conf['from']
+ return any(dict_search_args(zone_from, fz, 'firewall', 'ipv6_name') for fz in zone_from)
+
+def get_local_from(zone_policy, local_zone_name):
+ # Get all zone firewall names from the local zone
+ out = {}
+ for zone, zone_conf in zone_policy['zone'].items():
+ if zone == local_zone_name:
+ continue
+ if 'from' not in zone_conf:
+ continue
+ if local_zone_name in zone_conf['from']:
+ out[zone] = zone_conf['from'][local_zone_name]
+ return out
+
+def cleanup_commands():
+ commands = []
+ for table in ['ip filter', 'ip6 filter']:
+ json_str = cmd(f'nft -j list table {table}')
+ obj = loads(json_str)
+ if 'nftables' not in obj:
+ continue
+ for item in obj['nftables']:
+ if 'rule' in item:
+ chain = item['rule']['chain']
+ handle = item['rule']['handle']
+ if 'expr' not in item['rule']:
+ continue
+ for expr in item['rule']['expr']:
+ target = dict_search_args(expr, 'jump', 'target')
+ if target and target.startswith("VZONE"):
+ commands.append(f'delete rule {table} {chain} handle {handle}')
+ for item in obj['nftables']:
+ if 'chain' in item:
+ if item['chain']['name'].startswith("VZONE"):
+ chain = item['chain']['name']
+ commands.append(f'delete chain {table} {chain}')
+ return commands
+
+def generate(zone_policy):
+ data = zone_policy or {}
+
+ if os.path.exists(nftables_conf): # Check to see if we've run before
+ data['cleanup_commands'] = cleanup_commands()
+
+ if 'zone' in data:
+ for zone, zone_conf in data['zone'].items():
+ zone_conf['ipv4'] = has_ipv4_fw(zone_conf)
+ zone_conf['ipv6'] = has_ipv6_fw(zone_conf)
+
+ if 'local_zone' in zone_conf:
+ zone_conf['from_local'] = get_local_from(data, zone)
+
+ render(nftables_conf, 'zone_policy/nftables.tmpl', data)
+ return None
+
+def apply(zone_policy):
+ install_result = run(f'nft -f {nftables_conf}')
+ if install_result == 1:
+ raise ConfigError('Failed to apply zone-policy')
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
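
For reference, cleanup_commands() above walks the JSON that 'nft -j list table …' emits and turns every VZONE rule and chain into a delete command. A condensed standalone sketch of that walk, driven by a hard-coded sample object instead of a live nft call:

    # Sketch: derive 'delete rule/chain' commands from nft's JSON listing for
    # everything whose chain or jump target starts with VZONE. The sample JSON
    # below is made up; the script feeds in 'nft -j list table ip filter'.
    sample = {'nftables': [
        {'rule': {'family': 'ip', 'table': 'filter', 'chain': 'FORWARD',
                  'handle': 7,
                  'expr': [{'jump': {'target': 'VZONE_lan'}}]}},
        {'chain': {'family': 'ip', 'table': 'filter', 'name': 'VZONE_lan',
                   'handle': 12}},
    ]}

    def vzone_cleanup(obj: dict, table: str = 'ip filter') -> list:
        commands = []
        for item in obj.get('nftables', []):
            rule = item.get('rule')
            if rule:
                for expr in rule.get('expr', []):
                    target = expr.get('jump', {}).get('target')
                    if target and target.startswith('VZONE'):
                        commands.append(f"delete rule {table} {rule['chain']} handle {rule['handle']}")
            chain = item.get('chain')
            if chain and chain['name'].startswith('VZONE'):
                commands.append(f"delete chain {table} {chain['name']}")
        return commands

    print(vzone_cleanup(sample))
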