Diffstat (limited to 'src')
-rwxr-xr-x  src/conf_mode/dns_forwarding.py | 207
-rwxr-xr-x  src/conf_mode/flow_accounting_conf.py | 359
-rwxr-xr-x  src/conf_mode/http-api.py | 54
-rwxr-xr-x  src/conf_mode/https.py | 16
-rwxr-xr-x  src/conf_mode/interfaces-vxlan.py | 31
-rwxr-xr-x  src/conf_mode/interfaces-wwan.py | 49
-rwxr-xr-x  src/conf_mode/nat.py | 10
-rwxr-xr-x  src/conf_mode/nat66.py | 10
-rwxr-xr-x  src/conf_mode/netns.py | 118
-rwxr-xr-x  src/conf_mode/protocols_bfd.py | 12
-rwxr-xr-x  src/conf_mode/protocols_bgp.py | 22
-rwxr-xr-x  src/conf_mode/protocols_mpls.py | 38
-rwxr-xr-x  src/conf_mode/protocols_ospfv3.py | 36
-rwxr-xr-x  src/conf_mode/service_monitoring_telegraf.py | 154
-rwxr-xr-x  src/conf_mode/service_pppoe-server.py | 15
-rwxr-xr-x  src/conf_mode/snmp.py | 635
-rwxr-xr-x  src/conf_mode/system-login-banner.py | 35
-rwxr-xr-x  src/conf_mode/system-logs.py | 83
-rw-r--r--  src/etc/dhcp/dhclient-enter-hooks.d/04-vyos-resolvconf | 82
-rw-r--r--  src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup | 31
-rw-r--r--  src/etc/systemd/system/keepalived.service.d/override.conf | 13
-rw-r--r--  src/etc/systemd/system/uacctd.service.d/override.conf | 14
-rwxr-xr-x  src/etc/telegraf/custom_scripts/show_interfaces_input_filter.py | 47
-rwxr-xr-x  src/etc/telegraf/custom_scripts/vyos_services_input_filter.py | 61
-rwxr-xr-x  src/helpers/vyos-boot-config-loader.py | 4
-rwxr-xr-x  src/helpers/vyos_net_name | 10
-rwxr-xr-x  src/migration-scripts/flow-accounting/0-to-1 | 69
-rwxr-xr-x  src/op_mode/conntrack_sync.py | 45
-rwxr-xr-x  src/op_mode/restart_frr.py | 2
-rwxr-xr-x  src/op_mode/show_nat_rules.py | 22
-rw-r--r--  src/services/api/graphql/README.graphql | 61
-rw-r--r--  src/services/api/graphql/bindings.py | 18
-rw-r--r--  src/services/api/graphql/graphql/directives.py | 16
-rw-r--r--  src/services/api/graphql/graphql/mutations.py | 52
-rw-r--r--  src/services/api/graphql/graphql/queries.py | 89
-rw-r--r--  src/services/api/graphql/graphql/schema/firewall_group.graphql | 48
-rw-r--r--  src/services/api/graphql/graphql/schema/schema.graphql | 14
-rw-r--r--  src/services/api/graphql/recipes/remove_firewall_address_group_members.py | 14
-rw-r--r--  src/services/api/graphql/recipes/session.py | 15
-rw-r--r--  src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl | 4
-rw-r--r--  src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl | 3
-rw-r--r--  src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl | 3
-rwxr-xr-x  src/services/vyos-configd | 3
-rwxr-xr-x  src/services/vyos-hostsd | 36
-rwxr-xr-x  src/services/vyos-http-api-server | 32
-rw-r--r--  src/systemd/keepalived.service | 13
-rw-r--r--  src/systemd/vyos-hostsd.service | 2
-rw-r--r--  src/systemd/vyos-http-api.service | 21
-rw-r--r--  src/tests/test_validate.py | 4
-rwxr-xr-x  src/validators/ipv4-multicast | 2
-rwxr-xr-x  src/validators/ipv6-link-local | 12
-rwxr-xr-x  src/validators/ipv6-multicast | 2
-rwxr-xr-x  src/validators/range | 56
-rwxr-xr-x  src/validators/script | 4
54 files changed, 1862 insertions, 946 deletions
diff --git a/src/conf_mode/dns_forwarding.py b/src/conf_mode/dns_forwarding.py
index 06366362a..23a16df63 100755
--- a/src/conf_mode/dns_forwarding.py
+++ b/src/conf_mode/dns_forwarding.py
@@ -17,6 +17,7 @@
import os
from sys import exit
+from glob import glob
from vyos.config import Config
from vyos.configdict import dict_merge
@@ -50,10 +51,12 @@ def get_config(config=None):
if not conf.exists(base):
return None
- dns = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+ dns = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True)
# We have gathered the dict representation of the CLI, but there are default
- # options which we need to update into the dictionary retrived.
+ # options which we need to update into the dictionary retrieved.
default_values = defaults(base)
+ # T2665 due to how defaults under tag nodes work, we must clear these out before we merge
+ del default_values['authoritative_domain']
dns = dict_merge(default_values, dns)
# some additions to the default dictionary
@@ -66,6 +69,183 @@ def get_config(config=None):
if conf.exists(base_nameservers_dhcp):
dns.update({'system_name_server_dhcp': conf.return_values(base_nameservers_dhcp)})
+ if 'authoritative_domain' in dns:
+ dns['authoritative_zones'] = []
+ dns['authoritative_zone_errors'] = []
+ for node in dns['authoritative_domain']:
+ zonedata = dns['authoritative_domain'][node]
+ if ('disable' in zonedata) or (not 'records' in zonedata):
+ continue
+ zone = {
+ 'name': node,
+ 'file': "{}/zone.{}.conf".format(pdns_rec_run_dir, node),
+ 'records': [],
+ }
+
+ recorddata = zonedata['records']
+
+ for rtype in [ 'a', 'aaaa', 'cname', 'mx', 'ptr', 'txt', 'spf', 'srv', 'naptr' ]:
+ if rtype not in recorddata:
+ continue
+ for subnode in recorddata[rtype]:
+ if 'disable' in recorddata[rtype][subnode]:
+ continue
+
+ rdata = recorddata[rtype][subnode]
+
+ if rtype in [ 'a', 'aaaa' ]:
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'address' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one address is required'.format(subnode, node))
+ continue
+
+ for address in rdata['address']:
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': address
+ })
+ elif rtype in ['cname', 'ptr']:
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'target' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: target is required'.format(subnode, node))
+ continue
+
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '{}.'.format(rdata['target'])
+ })
+ elif rtype == 'mx':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ del rdefaults['server']
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'server' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one server is required'.format(subnode, node))
+ continue
+
+ for servername in rdata['server']:
+ serverdata = rdata['server'][servername]
+ serverdefaults = defaults(base + ['authoritative-domain', 'records', rtype, 'server']) # T2665
+ serverdata = dict_merge(serverdefaults, serverdata)
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '{} {}.'.format(serverdata['priority'], servername)
+ })
+ elif rtype == 'txt':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'value' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one value is required'.format(subnode, node))
+ continue
+
+ for value in rdata['value']:
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': "\"{}\"".format(value.replace("\"", "\\\""))
+ })
+ elif rtype == 'spf':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'value' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: value is required'.format(subnode, node))
+ continue
+
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '"{}"'.format(rdata['value'].replace("\"", "\\\""))
+ })
+ elif rtype == 'srv':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ del rdefaults['entry']
+ rdata = dict_merge(rdefaults, rdata)
+
+ if not 'entry' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one entry is required'.format(subnode, node))
+ continue
+
+ for entryno in rdata['entry']:
+ entrydata = rdata['entry'][entryno]
+ entrydefaults = defaults(base + ['authoritative-domain', 'records', rtype, 'entry']) # T2665
+ entrydata = dict_merge(entrydefaults, entrydata)
+
+ if not 'hostname' in entrydata:
+ dns['authoritative_zone_errors'].append('{}.{}: hostname is required for entry {}'.format(subnode, node, entryno))
+ continue
+
+ if not 'port' in entrydata:
+ dns['authoritative_zone_errors'].append('{}.{}: port is required for entry {}'.format(subnode, node, entryno))
+ continue
+
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '{} {} {} {}.'.format(entrydata['priority'], entrydata['weight'], entrydata['port'], entrydata['hostname'])
+ })
+ elif rtype == 'naptr':
+ rdefaults = defaults(base + ['authoritative-domain', 'records', rtype]) # T2665
+ del rdefaults['rule']
+ rdata = dict_merge(rdefaults, rdata)
+
+
+ if not 'rule' in rdata:
+ dns['authoritative_zone_errors'].append('{}.{}: at least one rule is required'.format(subnode, node))
+ continue
+
+ for ruleno in rdata['rule']:
+ ruledata = rdata['rule'][ruleno]
+ ruledefaults = defaults(base + ['authoritative-domain', 'records', rtype, 'rule']) # T2665
+ ruledata = dict_merge(ruledefaults, ruledata)
+ flags = ""
+ if 'lookup-srv' in ruledata:
+ flags += "S"
+ if 'lookup-a' in ruledata:
+ flags += "A"
+ if 'resolve-uri' in ruledata:
+ flags += "U"
+ if 'protocol-specific' in ruledata:
+ flags += "P"
+
+ if 'order' in ruledata:
+ order = ruledata['order']
+ else:
+ order = ruleno
+
+ if 'regexp' in ruledata:
+ regexp= ruledata['regexp'].replace("\"", "\\\"")
+ else:
+ regexp = ''
+
+ if ruledata['replacement']:
+ replacement = '{}.'.format(ruledata['replacement'])
+ else:
+ replacement = ''
+
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': '{} {} "{}" "{}" "{}" {}'.format(order, ruledata['preference'], flags, ruledata['service'], regexp, replacement)
+ })
+
+ dns['authoritative_zones'].append(zone)
+
return dns
def verify(dns):
@@ -86,6 +266,11 @@ def verify(dns):
if 'server' not in dns['domain'][domain]:
raise ConfigError(f'No server configured for domain {domain}!')
+ if ('authoritative_zone_errors' in dns) and dns['authoritative_zone_errors']:
+ for error in dns['authoritative_zone_errors']:
+ print(error)
+ raise ConfigError('Invalid authoritative records have been defined')
+
if 'system' in dns:
if not ('system_name_server' in dns or 'system_name_server_dhcp' in dns):
print("Warning: No 'system name-server' or 'system " \
@@ -104,6 +289,15 @@ def generate(dns):
render(pdns_rec_lua_conf_file, 'dns-forwarding/recursor.conf.lua.tmpl',
dns, user=pdns_rec_user, group=pdns_rec_group)
+ for zone_filename in glob(f'{pdns_rec_run_dir}/zone.*.conf'):
+ os.unlink(zone_filename)
+
+ if 'authoritative_zones' in dns:
+ for zone in dns['authoritative_zones']:
+ render(zone['file'], 'dns-forwarding/recursor.zone.conf.tmpl',
+ zone, user=pdns_rec_user, group=pdns_rec_group)
+
+
# if vyos-hostsd didn't create its files yet, create them (empty)
for file in [pdns_rec_hostsd_lua_conf_file, pdns_rec_hostsd_zones_file]:
with open(file, 'a'):
@@ -119,6 +313,9 @@ def apply(dns):
if os.path.isfile(pdns_rec_config_file):
os.unlink(pdns_rec_config_file)
+
+ for zone_filename in glob(f'{pdns_rec_run_dir}/zone.*.conf'):
+ os.unlink(zone_filename)
else:
### first apply vyos-hostsd config
hc = hostsd_client()
@@ -153,6 +350,12 @@ def apply(dns):
if 'domain' in dns:
hc.add_forward_zones(dns['domain'])
+ # hostsd generates NTAs for the authoritative zones
+ # the list and keys() are required as get returns a dict, not list
+ hc.delete_authoritative_zones(list(hc.get_authoritative_zones()))
+ if 'authoritative_zones' in dns:
+ hc.add_authoritative_zones(list(map(lambda zone: zone['name'], dns['authoritative_zones'])))
+
# call hostsd to generate forward-zones and its lua-config-file
hc.apply()
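
Editor's aside: the repeated defaults()/dict_merge() calls in get_config() above are the usual workaround for T2665 - defaults() cannot be evaluated for a concrete tag-node instance, so tag-node defaults are removed from the parent default dict and merged back per record. A minimal sketch of that pattern follows; the record type, record data and the base path (assumed here to be ['service', 'dns', 'forwarding']) are illustrative only, not part of the commit.

# Sketch of the T2665 workaround used in get_config() above.
# 'a' and the record dict are example values.
from vyos.configdict import dict_merge
from vyos.xml import defaults

base = ['service', 'dns', 'forwarding']
rtype = 'a'
rdata = {'address': ['192.0.2.1']}          # as read from the CLI

# Fetch the defaults for this record type from the CLI definition and merge
# them onto the per-record data, so e.g. a default TTL is always present.
rdefaults = defaults(base + ['authoritative-domain', 'records', rtype])
rdata = dict_merge(rdefaults, rdata)
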
diff --git a/src/conf_mode/flow_accounting_conf.py b/src/conf_mode/flow_accounting_conf.py
index 1b036a53f..975f19acf 100755
--- a/src/conf_mode/flow_accounting_conf.py
+++ b/src/conf_mode/flow_accounting_conf.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2018-2020 VyOS maintainers and contributors
+# Copyright (C) 2018-2021 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -16,65 +16,30 @@
import os
import re
+
from sys import exit
import ipaddress
from ipaddress import ip_address
-from jinja2 import FileSystemLoader, Environment
+from vyos.config import Config
+from vyos.configdict import dict_merge
from vyos.ifconfig import Section
from vyos.ifconfig import Interface
-from vyos.config import Config
-from vyos import ConfigError
-from vyos.util import cmd
from vyos.template import render
-
+from vyos.util import cmd
+from vyos.validate import is_addr_assigned
+from vyos.xml import defaults
+from vyos import ConfigError
from vyos import airbag
airbag.enable()
-# default values
-default_sflow_server_port = 6343
-default_netflow_server_port = 2055
-default_plugin_pipe_size = 10
-default_captured_packet_size = 128
-default_netflow_version = '9'
-default_sflow_agentip = 'auto'
-uacctd_conf_path = '/etc/pmacct/uacctd.conf'
+uacctd_conf_path = '/run/pmacct/uacctd.conf'
nftables_nflog_table = 'raw'
nftables_nflog_chain = 'VYOS_CT_PREROUTING_HOOK'
egress_nftables_nflog_table = 'inet mangle'
egress_nftables_nflog_chain = 'FORWARD'
-# helper functions
-# check if node exists and return True if this is true
-def _node_exists(path):
- vyos_config = Config()
- if vyos_config.exists(path):
- return True
-
-# get sFlow agent-ip if agent-address is "auto" (default behaviour)
-def _sflow_default_agentip(config):
- # check if any of BGP, OSPF, OSPFv3 protocols are configured and use router-id from there
- if config.exists('protocols bgp'):
- bgp_router_id = config.return_value("protocols bgp {} parameters router-id".format(config.list_nodes('protocols bgp')[0]))
- if bgp_router_id:
- return bgp_router_id
- if config.return_value('protocols ospf parameters router-id'):
- return config.return_value('protocols ospf parameters router-id')
- if config.return_value('protocols ospfv3 parameters router-id'):
- return config.return_value('protocols ospfv3 parameters router-id')
-
- # if router-id was not found, use first available ip of any interface
- for iface in Section.interfaces():
- for address in Interface(iface).get_addr():
- # return an IP, if this is not loopback
- regex_filter = re.compile('^(?!(127)|(::1)|(fe80))(?P<ipaddr>[a-f\d\.:]+)/\d+$')
- if regex_filter.search(address):
- return regex_filter.search(address).group('ipaddr')
-
- # return nothing by default
- return None
-
# get nftables rule dict for chain in table
def _nftables_get_nflog(chain, table):
# define list with rules
@@ -87,7 +52,6 @@ def _nftables_get_nflog(chain, table):
# run nftables, save output and split it by lines
nftables_command = f'nft -a list chain {table} {chain}'
tmp = cmd(nftables_command, message='Failed to get flows list')
-
# parse each line and add information to list
for current_rule in tmp.splitlines():
if 'FLOW_ACCOUNTING_RULE' not in current_rule:
@@ -100,8 +64,7 @@ def _nftables_get_nflog(chain, table):
# return list with rules
return rules
-# modify nftables rules
-def _nftables_config(configured_ifaces, direction):
+def _nftables_config(configured_ifaces, direction, length=None):
# define list of nftables commands to modify settings
nftable_commands = []
nftables_chain = nftables_nflog_chain
@@ -141,7 +104,7 @@ def _nftables_config(configured_ifaces, direction):
for iface_extended in configured_ifaces_extended:
iface = iface_extended['iface']
iface_prefix = "o" if direction == "egress" else "i"
- rule_definition = f'{iface_prefix}ifname "{iface}" counter log group 2 snaplen {default_captured_packet_size} queue-threshold 100 comment "FLOW_ACCOUNTING_RULE"'
+ rule_definition = f'{iface_prefix}ifname "{iface}" counter log group 2 snaplen {length} queue-threshold 100 comment "FLOW_ACCOUNTING_RULE"'
nftable_commands.append(f'nft insert rule {nftables_table} {nftables_chain} {rule_definition}')
# change nftables
@@ -149,231 +112,157 @@ def _nftables_config(configured_ifaces, direction):
cmd(command, raising=ConfigError)
-def get_config():
- vc = Config()
- vc.set_level('')
- # Convert the VyOS config to an abstract internal representation
- flow_config = {
- 'flow-accounting-configured': vc.exists('system flow-accounting'),
- 'buffer-size': vc.return_value('system flow-accounting buffer-size'),
- 'enable-egress': _node_exists('system flow-accounting enable-egress'),
- 'disable-imt': _node_exists('system flow-accounting disable-imt'),
- 'syslog-facility': vc.return_value('system flow-accounting syslog-facility'),
- 'interfaces': None,
- 'sflow': {
- 'configured': vc.exists('system flow-accounting sflow'),
- 'agent-address': vc.return_value('system flow-accounting sflow agent-address'),
- 'sampling-rate': vc.return_value('system flow-accounting sflow sampling-rate'),
- 'servers': None
- },
- 'netflow': {
- 'configured': vc.exists('system flow-accounting netflow'),
- 'engine-id': vc.return_value('system flow-accounting netflow engine-id'),
- 'max-flows': vc.return_value('system flow-accounting netflow max-flows'),
- 'sampling-rate': vc.return_value('system flow-accounting netflow sampling-rate'),
- 'source-ip': vc.return_value('system flow-accounting netflow source-ip'),
- 'version': vc.return_value('system flow-accounting netflow version'),
- 'timeout': {
- 'expint': vc.return_value('system flow-accounting netflow timeout expiry-interval'),
- 'general': vc.return_value('system flow-accounting netflow timeout flow-generic'),
- 'icmp': vc.return_value('system flow-accounting netflow timeout icmp'),
- 'maxlife': vc.return_value('system flow-accounting netflow timeout max-active-life'),
- 'tcp.fin': vc.return_value('system flow-accounting netflow timeout tcp-fin'),
- 'tcp': vc.return_value('system flow-accounting netflow timeout tcp-generic'),
- 'tcp.rst': vc.return_value('system flow-accounting netflow timeout tcp-rst'),
- 'udp': vc.return_value('system flow-accounting netflow timeout udp')
- },
- 'servers': None
- }
- }
-
- # get interfaces list
- if vc.exists('system flow-accounting interface'):
- flow_config['interfaces'] = vc.return_values('system flow-accounting interface')
-
- # get sFlow collectors list
- if vc.exists('system flow-accounting sflow server'):
- flow_config['sflow']['servers'] = []
- sflow_collectors = vc.list_nodes('system flow-accounting sflow server')
- for collector in sflow_collectors:
- port = default_sflow_server_port
- if vc.return_value("system flow-accounting sflow server {} port".format(collector)):
- port = vc.return_value("system flow-accounting sflow server {} port".format(collector))
- flow_config['sflow']['servers'].append({ 'address': collector, 'port': port })
-
- # get NetFlow collectors list
- if vc.exists('system flow-accounting netflow server'):
- flow_config['netflow']['servers'] = []
- netflow_collectors = vc.list_nodes('system flow-accounting netflow server')
- for collector in netflow_collectors:
- port = default_netflow_server_port
- if vc.return_value("system flow-accounting netflow server {} port".format(collector)):
- port = vc.return_value("system flow-accounting netflow server {} port".format(collector))
- flow_config['netflow']['servers'].append({ 'address': collector, 'port': port })
-
- # get sflow agent-id
- if flow_config['sflow']['agent-address'] == None or flow_config['sflow']['agent-address'] == 'auto':
- flow_config['sflow']['agent-address'] = _sflow_default_agentip(vc)
-
- # get NetFlow version
- if not flow_config['netflow']['version']:
- flow_config['netflow']['version'] = default_netflow_version
-
- # convert NetFlow engine-id format, if this is necessary
- if flow_config['netflow']['engine-id'] and flow_config['netflow']['version'] == '5':
- regex_filter = re.compile('^\d+$')
- if regex_filter.search(flow_config['netflow']['engine-id']):
- flow_config['netflow']['engine-id'] = "{}:0".format(flow_config['netflow']['engine-id'])
-
- # return dict with flow-accounting configuration
- return flow_config
-
-def verify(config):
- # Verify that configuration is valid
- # skip all checks if flow-accounting was removed
- if not config['flow-accounting-configured']:
- return True
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['system', 'flow-accounting']
+ if not conf.exists(base):
+ return None
+
+ flow_accounting = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to update into the dictionary retrieved.
+ default_values = defaults(base)
+
+ # delete individual flow type default - should only be added if user uses
+ # this feature
+ for flow_type in ['sflow', 'netflow']:
+ if flow_type in default_values:
+ del default_values[flow_type]
+ flow_accounting = dict_merge(default_values, flow_accounting)
+
+ for flow_type in ['sflow', 'netflow']:
+ if flow_type in flow_accounting:
+ default_values = defaults(base + [flow_type])
+ # we need to merge individual server configurations
+ if 'server' in default_values:
+ del default_values['server']
+ flow_accounting[flow_type] = dict_merge(default_values, flow_accounting[flow_type])
+
+ if 'server' in flow_accounting[flow_type]:
+ default_values = defaults(base + [flow_type, 'server'])
+ for server in flow_accounting[flow_type]['server']:
+ flow_accounting[flow_type]['server'][server] = dict_merge(
+ default_values,flow_accounting[flow_type]['server'][server])
+
+ return flow_accounting
+
+def verify(flow_config):
+ if not flow_config:
+ return None
# check if at least one collector is enabled
- if not (config['sflow']['configured'] or config['netflow']['configured'] or not config['disable-imt']):
- raise ConfigError("You need to configure at least one sFlow or NetFlow protocol, or not set \"disable-imt\" for flow-accounting")
+ if 'sflow' not in flow_config and 'netflow' not in flow_config and 'disable_imt' in flow_config:
+ raise ConfigError('You need to configure at least sFlow or NetFlow, ' \
+ 'or not set "disable-imt" for flow-accounting!')
# Check if at least one interface is configured
- if not config['interfaces']:
- raise ConfigError("You need to configure at least one interface for flow-accounting")
+ if 'interface' not in flow_config:
+ raise ConfigError('Flow accounting requires at least one interface to ' \
+ 'be configured!')
# check that all configured interfaces exists in the system
- for iface in config['interfaces']:
- if not iface in Section.interfaces():
- # chnged from error to warning to allow adding dynamic interfaces and interface templates
- # raise ConfigError("The {} interface is not presented in the system".format(iface))
- print("Warning: the {} interface is not presented in the system".format(iface))
+ for interface in flow_config['interface']:
+ if interface not in Section.interfaces():
+ # Changed from error to warning to allow adding dynamic interfaces
+ # and interface templates
+ print(f'Warning: Interface "{interface}" is not present on the system')
# check sFlow configuration
- if config['sflow']['configured']:
- # check if at least one sFlow collector is configured if sFlow configuration is presented
- if not config['sflow']['servers']:
- raise ConfigError("You need to configure at least one sFlow server")
+ if 'sflow' in flow_config:
+ # check if at least one sFlow collector is configured
+ if 'server' not in flow_config['sflow']:
+ raise ConfigError('You need to configure at least one sFlow server!')
# check that all sFlow collectors use the same IP protocol version
sflow_collector_ipver = None
- for sflow_collector in config['sflow']['servers']:
+ for server in flow_config['sflow']['server']:
if sflow_collector_ipver:
- if sflow_collector_ipver != ip_address(sflow_collector['address']).version:
+ if sflow_collector_ipver != ip_address(server).version:
raise ConfigError("All sFlow servers must use the same IP protocol")
else:
- sflow_collector_ipver = ip_address(sflow_collector['address']).version
-
+ sflow_collector_ipver = ip_address(server).version
# check agent-id for sFlow: we should avoid mixing IPv4 agent-id with IPv6 collectors and vice-versa
- for sflow_collector in config['sflow']['servers']:
- if ip_address(sflow_collector['address']).version != ip_address(config['sflow']['agent-address']).version:
- raise ConfigError("Different IP address versions cannot be mixed in \"sflow agent-address\" and \"sflow server\". You need to set manually the same IP version for \"agent-address\" as for all sFlow servers")
-
- # check if configured sFlow agent-id exist in the system
- agent_id_presented = None
- for iface in Section.interfaces():
- for address in Interface(iface).get_addr():
- # check an IP, if this is not loopback
- regex_filter = re.compile('^(?!(127)|(::1)|(fe80))(?P<ipaddr>[a-f\d\.:]+)/\d+$')
- if regex_filter.search(address):
- if regex_filter.search(address).group('ipaddr') == config['sflow']['agent-address']:
- agent_id_presented = True
- break
- if not agent_id_presented:
- raise ConfigError("Your \"sflow agent-address\" does not exist in the system")
+ for server in flow_config['sflow']['server']:
+ if 'agent_address' in flow_config['sflow']:
+ if ip_address(server).version != ip_address(flow_config['sflow']['agent_address']).version:
+ raise ConfigError('IPv4 and IPv6 addresses can not be mixed in "sflow agent-address" and "sflow '\
+ 'server". You need to set the same IP version for both "agent-address" and '\
+ 'all sFlow servers')
+
+ if 'agent_address' in flow_config['sflow']:
+ tmp = flow_config['sflow']['agent_address']
+ if not is_addr_assigned(tmp):
+ print(f'Warning: Configured "sflow agent-address {tmp}" does not exist in the system!')
# check NetFlow configuration
- if config['netflow']['configured']:
+ if 'netflow' in flow_config:
# check if at least one NetFlow collector is configured if NetFlow configuration is presented
- if not config['netflow']['servers']:
- raise ConfigError("You need to configure at least one NetFlow server")
-
- # check if configured netflow source-ip exist in the system
- if config['netflow']['source-ip']:
- source_ip_presented = None
- for iface in Section.interfaces():
- for address in Interface(iface).get_addr():
- # check an IP
- regex_filter = re.compile('^(?!(127)|(::1)|(fe80))(?P<ipaddr>[a-f\d\.:]+)/\d+$')
- if regex_filter.search(address):
- if regex_filter.search(address).group('ipaddr') == config['netflow']['source-ip']:
- source_ip_presented = True
- break
- if not source_ip_presented:
- print("Warning: your \"netflow source-ip\" does not exist in the system")
-
- # check if engine-id compatible with selected protocol version
- if config['netflow']['engine-id']:
+ if 'server' not in flow_config['netflow']:
+ raise ConfigError('You need to configure at least one NetFlow server!')
+
+ # Check if configured netflow source-address exist in the system
+ if 'source_address' in flow_config['netflow']:
+ if not is_addr_assigned(flow_config['netflow']['source_address']):
+ tmp = flow_config['netflow']['source_address']
+ print(f'Warning: Configured "netflow source-address {tmp}" does not exist on the system!')
+
+ # Check if engine-id compatible with selected protocol version
+ if 'engine_id' in flow_config['netflow']:
v5_filter = '^(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]):(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$'
v9v10_filter = '^(\d|[1-9]\d{1,8}|[1-3]\d{9}|4[01]\d{8}|42[0-8]\d{7}|429[0-3]\d{6}|4294[0-8]\d{5}|42949[0-5]\d{4}|429496[0-6]\d{3}|4294967[01]\d{2}|42949672[0-8]\d|429496729[0-5])$'
- if config['netflow']['version'] == '5':
+ engine_id = flow_config['netflow']['engine_id']
+ version = flow_config['netflow']['version']
+
+ if flow_config['netflow']['version'] == '5':
regex_filter = re.compile(v5_filter)
- if not regex_filter.search(config['netflow']['engine-id']):
- raise ConfigError("You cannot use NetFlow engine-id {} together with NetFlow protocol version {}".format(config['netflow']['engine-id'], config['netflow']['version']))
+ if not regex_filter.search(engine_id):
+ raise ConfigError(f'You cannot use NetFlow engine-id "{engine_id}" '\
+ f'together with NetFlow protocol version "{version}"!')
else:
regex_filter = re.compile(v9v10_filter)
- if not regex_filter.search(config['netflow']['engine-id']):
- raise ConfigError("You cannot use NetFlow engine-id {} together with NetFlow protocol version {}".format(config['netflow']['engine-id'], config['netflow']['version']))
+ if not regex_filter.search(flow_config['netflow']['engine_id']):
+ raise ConfigError(f'Can not use NetFlow engine-id "{engine_id}" together '\
+ f'with NetFlow protocol version "{version}"!')
# return True if all checks were passed
return True
-def generate(config):
- # skip all checks if flow-accounting was removed
- if not config['flow-accounting-configured']:
- return True
+def generate(flow_config):
+ if not flow_config:
+ return None
- # Calculate all necessary values
- if config['buffer-size']:
- # circular queue size
- config['plugin_pipe_size'] = int(config['buffer-size']) * 1024**2
- else:
- config['plugin_pipe_size'] = default_plugin_pipe_size * 1024**2
- # transfer buffer size
- # recommended value from pmacct developers 1/1000 of pipe size
- config['plugin_buffer_size'] = int(config['plugin_pipe_size'] / 1000)
-
- # Prepare a timeouts string
- timeout_string = ''
- for timeout_type, timeout_value in config['netflow']['timeout'].items():
- if timeout_value:
- if timeout_string == '':
- timeout_string = "{}{}={}".format(timeout_string, timeout_type, timeout_value)
- else:
- timeout_string = "{}:{}={}".format(timeout_string, timeout_type, timeout_value)
- config['netflow']['timeout_string'] = timeout_string
+ render(uacctd_conf_path, 'netflow/uacctd.conf.tmpl', flow_config)
- render(uacctd_conf_path, 'netflow/uacctd.conf.tmpl', {
- 'templatecfg': config,
- 'snaplen': default_captured_packet_size,
- })
-
-
-def apply(config):
- # define variables
- command = None
+def apply(flow_config):
+ action = 'restart'
# Check if flow-accounting was removed and define command
- if not config['flow-accounting-configured']:
- command = 'systemctl stop uacctd.service'
- else:
- command = 'systemctl restart uacctd.service'
+ if not flow_config:
+ _nftables_config([], 'ingress')
+ _nftables_config([], 'egress')
+
+ # Stop flow-accounting daemon and remove configuration file
+ cmd('systemctl stop uacctd.service')
+ if os.path.exists(uacctd_conf_path):
+ os.unlink(uacctd_conf_path)
+ return
- # run command to start or stop flow-accounting
- cmd(command, raising=ConfigError, message='Failed to start/stop flow-accounting')
+ # Start/reload flow-accounting daemon
+ cmd(f'systemctl restart uacctd.service')
# configure nftables rules for defined interfaces
- if config['interfaces']:
- _nftables_config(config['interfaces'], 'ingress')
+ if 'interface' in flow_config:
+ _nftables_config(flow_config['interface'], 'ingress', flow_config['packet_length'])
# configure egress the same way if configured otherwise remove it
- if config['enable-egress']:
- _nftables_config(config['interfaces'], 'egress')
+ if 'enable_egress' in flow_config:
+ _nftables_config(flow_config['interface'], 'egress', flow_config['packet_length'])
else:
_nftables_config([], 'egress')
- else:
- _nftables_config([], 'ingress')
- _nftables_config([], 'egress')
if __name__ == '__main__':
try:
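
For reference, a small sketch of the nft command that _nftables_config() above assembles per ingress interface now that the snaplen comes from the new length parameter. The interface name and packet-length are example values; table and chain names are taken from this commit.

# Illustration only: the rule string built in _nftables_config() above,
# shown for one example ingress interface.
iface, length = 'eth0', '128'
iface_prefix = 'i'                                  # 'o' for egress
table, chain = 'raw', 'VYOS_CT_PREROUTING_HOOK'     # ingress table/chain

rule_definition = (f'{iface_prefix}ifname "{iface}" counter log group 2 '
                   f'snaplen {length} queue-threshold 100 '
                   'comment "FLOW_ACCOUNTING_RULE"')
print(f'nft insert rule {table} {chain} {rule_definition}')
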
diff --git a/src/conf_mode/http-api.py b/src/conf_mode/http-api.py
index 4bfcbeb47..b5f5e919f 100755
--- a/src/conf_mode/http-api.py
+++ b/src/conf_mode/http-api.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019 VyOS maintainers and contributors
+# Copyright (C) 2019-2021 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -13,25 +13,26 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-#
import sys
import os
import json
-import time
+
+from time import sleep
from copy import deepcopy
import vyos.defaults
+
from vyos.config import Config
-from vyos import ConfigError
+from vyos.template import render
from vyos.util import cmd
from vyos.util import call
-
+from vyos import ConfigError
from vyos import airbag
airbag.enable()
-config_file = '/etc/vyos/http-api.conf'
+api_conf_file = '/etc/vyos/http-api.conf'
+systemd_service = '/run/systemd/system/vyos-http-api.service'
vyos_conf_scripts_dir=vyos.defaults.directories['conf_mode']
@@ -49,21 +50,35 @@ def get_config(config=None):
else:
conf = Config()
- if not conf.exists('service https api'):
+ base = ['service', 'https', 'api']
+ if not conf.exists(base):
return None
- else:
- conf.set_level('service https api')
+ # Do we run inside a VRF context?
+ vrf_path = ['service', 'https', 'vrf']
+ if conf.exists(vrf_path):
+ http_api['vrf'] = conf.return_value(vrf_path)
+
+ conf.set_level('service https api')
if conf.exists('strict'):
- http_api['strict'] = 'true'
+ http_api['strict'] = True
if conf.exists('debug'):
- http_api['debug'] = 'true'
+ http_api['debug'] = True
+
+ if conf.exists('socket'):
+ http_api['socket'] = True
if conf.exists('port'):
port = conf.return_value('port')
http_api['port'] = port
+ if conf.exists('cors'):
+ http_api['cors'] = {}
+ if conf.exists('cors allow-origin'):
+ origins = conf.return_values('cors allow-origin')
+ http_api['cors']['origins'] = origins[:]
+
if conf.exists('keys'):
for name in conf.list_nodes('keys id'):
if conf.exists('keys id {0} key'.format(name)):
@@ -83,24 +98,31 @@ def verify(http_api):
def generate(http_api):
if http_api is None:
+ if os.path.exists(systemd_service):
+ os.unlink(systemd_service)
return None
if not os.path.exists('/etc/vyos'):
os.mkdir('/etc/vyos')
- with open(config_file, 'w') as f:
+ with open(api_conf_file, 'w') as f:
json.dump(http_api, f, indent=2)
+ render(systemd_service, 'https/vyos-http-api.service.tmpl', http_api)
return None
def apply(http_api):
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+ service_name = 'vyos-http-api.service'
+
if http_api is not None:
- call('systemctl restart vyos-http-api.service')
+ call(f'systemctl restart {service_name}')
else:
- call('systemctl stop vyos-http-api.service')
+ call(f'systemctl stop {service_name}')
# Let uvicorn settle before restarting Nginx
- time.sleep(1)
+ sleep(1)
cmd(f'{vyos_conf_scripts_dir}/https.py', raising=ConfigError)
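
The generate()/apply() split above follows the common VyOS pattern of rendering a systemd unit into /run and reloading the manager before (re)starting the service. A condensed sketch; the content of the http_api dict is an example only, while the unit path and template name are taken from this commit.

# Condensed sketch of the flow above; example data, not the actual config.
from vyos.template import render
from vyos.util import call

systemd_service = '/run/systemd/system/vyos-http-api.service'
http_api = {'port': '8080', 'socket': True, 'debug': True}   # example data

render(systemd_service, 'https/vyos-http-api.service.tmpl', http_api)
call('systemctl daemon-reload')                  # pick up the new unit file
call('systemctl restart vyos-http-api.service')
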
diff --git a/src/conf_mode/https.py b/src/conf_mode/https.py
index 92dc4a410..37fa36797 100755
--- a/src/conf_mode/https.py
+++ b/src/conf_mode/https.py
@@ -23,6 +23,7 @@ import vyos.defaults
import vyos.certbot_util
from vyos.config import Config
+from vyos.configverify import verify_vrf
from vyos import ConfigError
from vyos.pki import wrap_certificate
from vyos.pki import wrap_private_key
@@ -34,6 +35,7 @@ from vyos import airbag
airbag.enable()
config_file = '/etc/nginx/sites-available/default'
+systemd_override = r'/etc/systemd/system/nginx.service.d/override.conf'
cert_dir = '/etc/ssl/certs'
key_dir = '/etc/ssl/private'
certbot_dir = vyos.defaults.directories['certbot']
@@ -59,10 +61,11 @@ def get_config(config=None):
else:
conf = Config()
- if not conf.exists('service https'):
+ base = ['service', 'https']
+ if not conf.exists(base):
return None
- https = conf.get_config_dict('service https', get_first_key=True)
+ https = conf.get_config_dict(base, get_first_key=True)
if https:
https['pki'] = conf.get_config_dict(['pki'], key_mangling=('-', '_'),
@@ -103,6 +106,8 @@ def verify(https):
if not domains_found:
raise ConfigError("At least one 'virtual-host <id> server-name' "
"matching the 'certbot domain-name' is required.")
+
+ verify_vrf(https)
return None
def generate(https):
@@ -143,7 +148,6 @@ def generate(https):
server_cert = str(wrap_certificate(pki_cert['certificate']))
if 'ca-certificate' in cert_dict:
ca_cert = cert_dict['ca-certificate']
- print(ca_cert)
server_cert += '\n' + str(wrap_certificate(https['pki']['ca'][ca_cert]['certificate']))
write_file(cert_path, server_cert)
@@ -188,6 +192,8 @@ def generate(https):
vhosts = https.get('api-restrict', {}).get('virtual-host', [])
if vhosts:
api_data['vhost'] = vhosts[:]
+ if 'socket' in list(api_settings):
+ api_data['socket'] = True
if api_data:
vhost_list = api_data.get('vhost', [])
@@ -209,10 +215,12 @@ def generate(https):
}
render(config_file, 'https/nginx.default.tmpl', data)
-
+ render(systemd_override, 'https/override.conf.tmpl', https)
return None
def apply(https):
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
if https is not None:
call('systemctl restart nginx.service')
else:
diff --git a/src/conf_mode/interfaces-vxlan.py b/src/conf_mode/interfaces-vxlan.py
index 804f2d14f..1f097c4e3 100755
--- a/src/conf_mode/interfaces-vxlan.py
+++ b/src/conf_mode/interfaces-vxlan.py
@@ -44,6 +44,20 @@ def get_config(config=None):
base = ['interfaces', 'vxlan']
vxlan = get_interface_dict(conf, base)
+ # We need to verify that no other VXLAN tunnel is configured when external
+ # mode is in use - Linux Kernel limitation
+ conf.set_level(base)
+ vxlan['other_tunnels'] = conf.get_config_dict([], key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # This if-clause is just to be sure - it will always evaluate to true
+ ifname = vxlan['ifname']
+ if ifname in vxlan['other_tunnels']:
+ del vxlan['other_tunnels'][ifname]
+ if len(vxlan['other_tunnels']) == 0:
+ del vxlan['other_tunnels']
+
return vxlan
def verify(vxlan):
@@ -63,8 +77,21 @@ def verify(vxlan):
if not any(tmp in ['group', 'remote', 'source_address'] for tmp in vxlan):
raise ConfigError('Group, remote or source-address must be configured')
- if 'vni' not in vxlan:
- raise ConfigError('Must configure VNI for VXLAN')
+ if 'vni' not in vxlan and 'external' not in vxlan:
+ raise ConfigError(
+ 'Must either configure VXLAN "vni" or use "external" CLI option!')
+
+ if {'external', 'vni'} <= set(vxlan):
+ raise ConfigError('Can not specify both "external" and "VNI"!')
+
+ if {'external', 'other_tunnels'} <= set(vxlan):
+ other_tunnels = ', '.join(vxlan['other_tunnels'])
+ raise ConfigError(f'Only one VXLAN tunnel is supported when "external" '\
+ f'CLI option is used. Additional tunnels: {other_tunnels}')
+
+ if 'gpe' in vxlan and 'external' not in vxlan:
+ raise ConfigError(f'VXLAN-GPE is only supported when "external" '\
+ f'CLI option is used.')
if 'source_interface' in vxlan:
# VXLAN adds at least an overhead of 50 byte - we need to check the
diff --git a/src/conf_mode/interfaces-wwan.py b/src/conf_mode/interfaces-wwan.py
index f013e5411..a4b033374 100755
--- a/src/conf_mode/interfaces-wwan.py
+++ b/src/conf_mode/interfaces-wwan.py
@@ -17,6 +17,7 @@
import os
from sys import exit
+from time import sleep
from vyos.config import Config
from vyos.configdict import get_interface_dict
@@ -28,10 +29,15 @@ from vyos.util import cmd
from vyos.util import call
from vyos.util import dict_search
from vyos.util import DEVNULL
+from vyos.util import is_systemd_service_active
+from vyos.util import write_file
from vyos import ConfigError
from vyos import airbag
airbag.enable()
+service_name = 'ModemManager.service'
+cron_script = '/etc/cron.d/wwan'
+
def get_config(config=None):
"""
Retrive CLI config as dictionary. Dictionary can never be empty, as at least the
@@ -44,6 +50,20 @@ def get_config(config=None):
base = ['interfaces', 'wwan']
wwan = get_interface_dict(conf, base)
+ # We need to know the number of other WWAN interfaces as ModemManager needs
+ # to be started or stopped.
+ conf.set_level(base)
+ wwan['other_interfaces'] = conf.get_config_dict([], key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # This if-clause is just to be sure - it will always evaluate to true
+ ifname = wwan['ifname']
+ if ifname in wwan['other_interfaces']:
+ del wwan['other_interfaces'][ifname]
+ if len(wwan['other_interfaces']) == 0:
+ del wwan['other_interfaces']
+
return wwan
def verify(wwan):
@@ -61,9 +81,26 @@ def verify(wwan):
return None
def generate(wwan):
+ if 'deleted' in wwan:
+ return None
+
+ if not os.path.exists(cron_script):
+ write_file(cron_script, '*/5 * * * * root /usr/libexec/vyos/vyos-check-wwan.py')
return None
def apply(wwan):
+ if not is_systemd_service_active(service_name):
+ cmd(f'systemctl start {service_name}')
+
+ counter = 100
+ # Wait until a modem is detected and then we can continue
+ while counter > 0:
+ counter -= 1
+ tmp = cmd('mmcli -L')
+ if tmp != 'No modems were found':
+ break
+ sleep(0.250)
+
# we only need the modem number. wwan0 -> 0, wwan1 -> 1
modem = wwan['ifname'].lstrip('wwan')
base_cmd = f'mmcli --modem {modem}'
@@ -73,6 +110,15 @@ def apply(wwan):
w = WWANIf(wwan['ifname'])
if 'deleted' in wwan or 'disable' in wwan:
w.remove()
+
+ # There are no other WWAN interfaces - stop the daemon
+ if 'other_interfaces' not in wwan:
+ cmd(f'systemctl stop {service_name}')
+ # Clean CRON helper script which is used to re-connect when
+ # RF signal is lost
+ if os.path.exists(cron_script):
+ os.unlink(cron_script)
+
return None
ip_type = 'ipv4'
@@ -93,6 +139,9 @@ def apply(wwan):
call(command, stdout=DEVNULL)
w.update(wwan)
+ if 'other_interfaces' not in wwan and 'deleted' in wwan:
+ cmd(f'systemctl start {service_name}')
+
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py
index 62fb9abad..96f8f6fb6 100755
--- a/src/conf_mode/nat.py
+++ b/src/conf_mode/nat.py
@@ -42,7 +42,7 @@ if LooseVersion(kernel_version()) > LooseVersion('5.1'):
else:
k_mod = ['nft_nat', 'nft_chain_nat_ipv4']
-iptables_nat_config = '/tmp/vyos-nat-rules.nft'
+nftables_nat_config = '/tmp/vyos-nat-rules.nft'
def get_handler(json, chain, target):
""" Get nftable rule handler number of given chain/target combination.
@@ -179,14 +179,14 @@ def verify(nat):
return None
def generate(nat):
- render(iptables_nat_config, 'firewall/nftables-nat.tmpl', nat,
+ render(nftables_nat_config, 'firewall/nftables-nat.tmpl', nat,
permission=0o755)
return None
def apply(nat):
- cmd(f'{iptables_nat_config}')
- if os.path.isfile(iptables_nat_config):
- os.unlink(iptables_nat_config)
+ cmd(f'{nftables_nat_config}')
+ if os.path.isfile(nftables_nat_config):
+ os.unlink(nftables_nat_config)
return None
diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py
index d0b6d27ac..8bf2e8073 100755
--- a/src/conf_mode/nat66.py
+++ b/src/conf_mode/nat66.py
@@ -35,7 +35,7 @@ airbag.enable()
k_mod = ['nft_nat', 'nft_chain_nat']
-iptables_nat_config = '/tmp/vyos-nat66-rules.nft'
+nftables_nat66_config = '/tmp/vyos-nat66-rules.nft'
ndppd_config = '/run/ndppd/ndppd.conf'
def get_handler(json, chain, target):
@@ -145,22 +145,22 @@ def verify(nat):
return None
def generate(nat):
- render(iptables_nat_config, 'firewall/nftables-nat66.tmpl', nat, permission=0o755)
+ render(nftables_nat66_config, 'firewall/nftables-nat66.tmpl', nat, permission=0o755)
render(ndppd_config, 'ndppd/ndppd.conf.tmpl', nat, permission=0o755)
return None
def apply(nat):
if not nat:
return None
- cmd(f'{iptables_nat_config}')
+ cmd(f'{nftables_nat66_config}')
if 'deleted' in nat or not dict_search('source.rule', nat):
cmd('systemctl stop ndppd')
if os.path.isfile(ndppd_config):
os.unlink(ndppd_config)
else:
cmd('systemctl restart ndppd')
- if os.path.isfile(iptables_nat_config):
- os.unlink(iptables_nat_config)
+ if os.path.isfile(nftables_nat66_config):
+ os.unlink(nftables_nat66_config)
return None
diff --git a/src/conf_mode/netns.py b/src/conf_mode/netns.py
new file mode 100755
index 000000000..0924eb616
--- /dev/null
+++ b/src/conf_mode/netns.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+from tempfile import NamedTemporaryFile
+
+from vyos.config import Config
+from vyos.configdict import node_changed
+from vyos.ifconfig import Interface
+from vyos.util import call
+from vyos.util import dict_search
+from vyos.util import get_interface_config
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+
+def netns_interfaces(c, match):
+ """
+ get NETNS bound interfaces
+ """
+ matched = []
+ old_level = c.get_level()
+ c.set_level(['interfaces'])
+ section = c.get_config_dict([], get_first_key=True)
+ for type in section:
+ interfaces = section[type]
+ for name in interfaces:
+ interface = interfaces[name]
+ if 'netns' in interface:
+ v = interface.get('netns', '')
+ if v == match:
+ matched.append(name)
+
+ c.set_level(old_level)
+ return matched
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['netns']
+ netns = conf.get_config_dict(base, get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # determine which NETNS has been removed
+ for name in node_changed(conf, base + ['name']):
+ if 'netns_remove' not in netns:
+ netns.update({'netns_remove' : {}})
+
+ netns['netns_remove'][name] = {}
+ # get NETNS bound interfaces
+ interfaces = netns_interfaces(conf, name)
+ if interfaces: netns['netns_remove'][name]['interface'] = interfaces
+
+ return netns
+
+def verify(netns):
+ # ensure NETNS is not assigned to any interface
+ if 'netns_remove' in netns:
+ for name, config in netns['netns_remove'].items():
+ if 'interface' in config:
+ raise ConfigError(f'Can not remove NETNS "{name}", it still has '\
+ f'member interfaces!')
+
+ if 'name' in netns:
+ for name, config in netns['name'].items():
+ print(name)
+
+ return None
+
+
+def generate(netns):
+ if not netns:
+ return None
+
+ return None
+
+
+def apply(netns):
+
+ for tmp in (dict_search('netns_remove', netns) or []):
+ if os.path.isfile(f'/run/netns/{tmp}'):
+ call(f'ip netns del {tmp}')
+
+ if 'name' in netns:
+ for name, config in netns['name'].items():
+ if not os.path.isfile(f'/run/netns/{name}'):
+ call(f'ip netns add {name}')
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
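
A quick sketch of the situation verify() above guards against: a network namespace removed from the configuration while an interface still references it. The dictionary is an illustrative example of what get_config() would produce in that case; the namespace and interface names are made up.

# Example data only - not part of the commit. Shows the shape of the dict
# verify() receives when a still-referenced namespace is being deleted.
netns = {
    'name': {'blue': {}},
    'netns_remove': {'red': {'interface': ['eth1']}},   # 'red' was removed
}

for name, config in netns.get('netns_remove', {}).items():
    if 'interface' in config:
        # verify() raises ConfigError here; printed for illustration
        print(f'Can not remove NETNS "{name}", it still has member interfaces!')
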
diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py
index 6981d0db1..4ebc0989c 100755
--- a/src/conf_mode/protocols_bfd.py
+++ b/src/conf_mode/protocols_bfd.py
@@ -35,7 +35,8 @@ def get_config(config=None):
conf = Config()
base = ['protocols', 'bfd']
bfd = conf.get_config_dict(base, key_mangling=('-', '_'),
- get_first_key=True)
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
# Bail out early if configuration tree does not exist
if not conf.exists(base):
return bfd
@@ -78,11 +79,16 @@ def verify(bfd):
# multihop and echo-mode cannot be used together
if 'echo_mode' in peer_config:
- raise ConfigError('Multihop and echo-mode cannot be used together')
+ raise ConfigError('BFD multihop and echo-mode cannot be used together')
# multihop doesn't accept interface names
if 'source' in peer_config and 'interface' in peer_config['source']:
- raise ConfigError('Multihop and source interface cannot be used together')
+ raise ConfigError('BFD multihop and source interface cannot be used together')
+
+ if 'profile' in peer_config:
+ profile_name = peer_config['profile']
+ if 'profile' not in bfd or profile_name not in bfd['profile']:
+ raise ConfigError(f'BFD profile "{profile_name}" does not exist!')
if 'vrf' in peer_config:
verify_vrf(peer_config)
diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py
index 03fb17ba7..d8704727c 100755
--- a/src/conf_mode/protocols_bgp.py
+++ b/src/conf_mode/protocols_bgp.py
@@ -183,6 +183,28 @@ def verify(bgp):
raise ConfigError(f'Neighbor "{peer}" cannot have both ipv6-unicast and ipv6-labeled-unicast configured at the same time!')
afi_config = peer_config['address_family'][afi]
+
+ if 'conditionally_advertise' in afi_config:
+ if 'advertise_map' not in afi_config['conditionally_advertise']:
+ raise ConfigError('Must specify advertise-map when conditionally-advertise is in use!')
+ # Verify advertise-map (which is a route-map) exists
+ verify_route_map(afi_config['conditionally_advertise']['advertise_map'], bgp)
+
+ if ('exist_map' not in afi_config['conditionally_advertise'] and
+ 'non_exist_map' not in afi_config['conditionally_advertise']):
+ raise ConfigError('Must either specify exist-map or non-exist-map when ' \
+ 'conditionally-advertise is in use!')
+
+ if {'exist_map', 'non_exist_map'} <= set(afi_config['conditionally_advertise']):
+ raise ConfigError('Can not specify both exist-map and non-exist-map for ' \
+ 'conditionally-advertise!')
+
+ if 'exist_map' in afi_config['conditionally_advertise']:
+ verify_route_map(afi_config['conditionally_advertise']['exist_map'], bgp)
+
+ if 'non_exist_map' in afi_config['conditionally_advertise']:
+ verify_route_map(afi_config['conditionally_advertise']['non_exist_map'], bgp)
+
# Validate if configured Prefix list exists
if 'prefix_list' in afi_config:
for tmp in ['import', 'export']:
diff --git a/src/conf_mode/protocols_mpls.py b/src/conf_mode/protocols_mpls.py
index 3b27608da..0b0c7d07b 100755
--- a/src/conf_mode/protocols_mpls.py
+++ b/src/conf_mode/protocols_mpls.py
@@ -66,36 +66,24 @@ def verify(mpls):
def generate(mpls):
# If there's no MPLS config generated, create dictionary key with no value.
- if not mpls:
- mpls['new_frr_config'] = ''
+ if not mpls or 'deleted' in mpls:
return None
- mpls['new_frr_config'] = render_to_string('frr/ldpd.frr.tmpl', mpls)
+ mpls['frr_ldpd_config'] = render_to_string('frr/ldpd.frr.tmpl', mpls)
return None
def apply(mpls):
- # Define dictionary that will load FRR config
- frr_cfg = {}
+ ldpd_damon = 'ldpd'
+
# Save original configuration prior to starting any commit actions
- frr_cfg['original_config'] = frr.get_configuration(daemon='ldpd')
- frr_cfg['modified_config'] = frr.replace_section(frr_cfg['original_config'], mpls['new_frr_config'], from_re='mpls.*')
-
- # If FRR config is blank, rerun the blank commit three times due to frr-reload
- # behavior/bug not properly clearing out on one commit.
- if mpls['new_frr_config'] == '':
- for x in range(3):
- frr.reload_configuration(frr_cfg['modified_config'], daemon='ldpd')
- elif not 'ldp' in mpls:
- for x in range(3):
- frr.reload_configuration(frr_cfg['modified_config'], daemon='ldpd')
- else:
- # FRR mark configuration will test for syntax errors and throws an
- # exception if any syntax errors is detected
- frr.mark_configuration(frr_cfg['modified_config'])
+ frr_cfg = frr.FRRConfig()
+
+ frr_cfg.load_configuration(ldpd_damon)
+ frr_cfg.modify_section(f'^mpls ldp', stop_pattern='^exit', remove_stop_mark=True)
- # Commit resulting configuration to FRR, this will throw CommitError
- # on failure
- frr.reload_configuration(frr_cfg['modified_config'], daemon='ldpd')
+ if 'frr_ldpd_config' in mpls:
+ frr_cfg.add_before(frr.default_add_before, mpls['frr_ldpd_config'])
+ frr_cfg.commit_configuration(ldpd_damon)
# Set number of entries in the platform label tables
labels = '0'
@@ -122,7 +110,7 @@ def apply(mpls):
system_interfaces = []
# Populate system interfaces list with local MPLS capable interfaces
for interface in glob('/proc/sys/net/mpls/conf/*'):
- system_interfaces.append(os.path.basename(interface))
+ system_interfaces.append(os.path.basename(interface))
# This is where the comparison is done on if an interface needs to be enabled/disabled.
for system_interface in system_interfaces:
interface_state = read_file(f'/proc/sys/net/mpls/conf/{system_interface}/input')
@@ -138,7 +126,7 @@ def apply(mpls):
system_interfaces = []
# If MPLS interfaces are not configured, set MPLS processing disabled
for interface in glob('/proc/sys/net/mpls/conf/*'):
- system_interfaces.append(os.path.basename(interface))
+ system_interfaces.append(os.path.basename(interface))
for system_interface in system_interfaces:
system_interface = system_interface.replace('.', '/')
call(f'sysctl -wq net.mpls.conf.{system_interface}.input=0')
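
The rewritten apply() above switches from hand-rolled frr.reload_configuration() calls to the FRRConfig helper. A minimal sketch of that lifecycle, assuming the module is imported as "from vyos import frr" and the rendered ldpd snippet is already available as a string; the snippet content is a placeholder, the daemon name and section patterns are taken from this commit.

# Sketch of the FRRConfig lifecycle used in apply() above.
from vyos import frr

rendered = 'mpls ldp\n router-id 192.0.2.1\nexit'    # placeholder example

frr_cfg = frr.FRRConfig()
frr_cfg.load_configuration('ldpd')                   # read running ldpd config
# Drop the existing 'mpls ldp' ... 'exit' block, then insert the new one.
frr_cfg.modify_section('^mpls ldp', stop_pattern='^exit', remove_stop_mark=True)
frr_cfg.add_before(frr.default_add_before, rendered)
frr_cfg.commit_configuration('ldpd')
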
diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py
index d0460b830..f8e733ba5 100755
--- a/src/conf_mode/protocols_ospfv3.py
+++ b/src/conf_mode/protocols_ospfv3.py
@@ -23,8 +23,11 @@ from vyos.config import Config
from vyos.configdict import dict_merge
from vyos.configdict import node_changed
from vyos.configverify import verify_common_route_maps
+from vyos.configverify import verify_route_map
+from vyos.configverify import verify_interface_exists
from vyos.template import render_to_string
from vyos.ifconfig import Interface
+from vyos.util import dict_search
from vyos.util import get_interface_config
from vyos.xml import defaults
from vyos import ConfigError
@@ -66,6 +69,28 @@ def get_config(config=None):
ospfv3.update({'deleted' : ''})
return ospfv3
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to update into the dictionary retrieved.
+ # XXX: Note that we can not call defaults(base), as defaults does not work
+ # on an instance of a tag node. As we use the exact same CLI definition for
+ # both the non-vrf and vrf version this is absolutely safe!
+ default_values = defaults(base_path)
+
+ # We have to cleanup the default dict, as default values could enable features
+ # which are not explicitly enabled on the CLI. Example: default-information
+ # originate comes with a default metric-type of 2, which will enable the
+ # entire default-information originate tree, even when not set via CLI so we
+ # need to check this first and probably drop that key.
+ if dict_search('default_information.originate', ospfv3) is None:
+ del default_values['default_information']
+
+ # XXX: T2665: we currently have no nice way for defaults under tag nodes,
+ # clean them out and add them manually :(
+ del default_values['interface']
+
+ # merge in remaining default values
+ ospfv3 = dict_merge(default_values, ospfv3)
+
# We also need some additional information from the config, prefix-lists
# and route-maps for instance. They will be used in verify().
#
@@ -83,8 +108,19 @@ def verify(ospfv3):
verify_common_route_maps(ospfv3)
+ # As we can have a default-information route-map, we need to validate it!
+ route_map_name = dict_search('default_information.originate.route_map', ospfv3)
+ if route_map_name: verify_route_map(route_map_name, ospfv3)
+
+ if 'area' in ospfv3:
+ for area, area_config in ospfv3['area'].items():
+ if 'area_type' in area_config:
+ if len(area_config['area_type']) > 1:
+ raise ConfigError(f'Can only configure one area-type for OSPFv3 area "{area}"!')
+
if 'interface' in ospfv3:
for interface, interface_config in ospfv3['interface'].items():
+ verify_interface_exists(interface)
if 'ifmtu' in interface_config:
mtu = Interface(interface).get_mtu()
if int(interface_config['ifmtu']) > int(mtu):
diff --git a/src/conf_mode/service_monitoring_telegraf.py b/src/conf_mode/service_monitoring_telegraf.py
new file mode 100755
index 000000000..a1e7a7286
--- /dev/null
+++ b/src/conf_mode/service_monitoring_telegraf.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import json
+
+from sys import exit
+from shutil import rmtree
+
+from vyos.config import Config
+from vyos.configdict import dict_merge
+from vyos.template import render
+from vyos.util import call
+from vyos.util import chown
+from vyos.util import cmd
+from vyos.xml import defaults
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+
+base_dir = '/run/telegraf'
+cache_dir = f'/etc/telegraf/.cache'
+config_telegraf = f'{base_dir}/vyos-telegraf.conf'
+custom_scripts_dir = '/etc/telegraf/custom_scripts'
+syslog_telegraf = '/etc/rsyslog.d/50-telegraf.conf'
+systemd_telegraf_service = '/etc/systemd/system/vyos-telegraf.service'
+systemd_telegraf_override_dir = '/etc/systemd/system/vyos-telegraf.service.d'
+systemd_override = f'{systemd_telegraf_override_dir}/10-override.conf'
+
+
+def get_nft_filter_chains():
+ """
+ Get nft chains for table filter
+ """
+ nft = cmd('nft --json list table ip filter')
+ nft = json.loads(nft)
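+ # the parsed structure is a list of objects under the 'nftables' key, roughly:
+ # {"nftables": [{"metainfo": {...}}, {"table": {...}},
+ #               {"chain": {"family": "ip", "table": "filter", "name": ...}}, ...]}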
+ chain_list = []
+
+ for output in nft['nftables']:
+ if 'chain' in output:
+ chain = output['chain']['name']
+ chain_list.append(chain)
+
+ return chain_list
+
+def get_config(config=None):
+
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['service', 'monitoring', 'telegraf']
+ if not conf.exists(base):
+ return None
+
+ monitoring = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to update into the dictionary retrieved.
+ default_values = defaults(base)
+ monitoring = dict_merge(default_values, monitoring)
+
+ monitoring['nft_chains'] = get_nft_filter_chains()
+ monitoring['custom_scripts_dir'] = custom_scripts_dir
+
+ return monitoring
+
+def verify(monitoring):
+ # bail out early - looks like removal from running config
+ if not monitoring:
+ return None
+
+ if 'authentication' not in monitoring or \
+ 'organization' not in monitoring['authentication'] or \
+ 'token' not in monitoring['authentication']:
+ raise ConfigError('Authentication "organization" and "token" are mandatory!')
+
+ if 'url' not in monitoring:
+ raise ConfigError('Monitoring "url" is mandatory!')
+
+ return None
+
+def generate(monitoring):
+ if not monitoring:
+ # Delete config and systemd files
+ config_files = [config_telegraf, systemd_telegraf_service, systemd_override, syslog_telegraf]
+ for file in config_files:
+ if os.path.isfile(file):
+ os.unlink(file)
+
+ # Delete old directories
+ if os.path.isdir(cache_dir):
+ rmtree(cache_dir, ignore_errors=True)
+
+ return None
+
+ # Create telegraf cache dir
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+
+ chown(cache_dir, 'telegraf', 'telegraf')
+
+ # Create systemd override dir
+ if not os.path.exists(systemd_telegraf_override_dir):
+ os.mkdir(systemd_telegraf_override_dir)
+
+ # Create custom scripts dir
+ if not os.path.exists(custom_scripts_dir):
+ os.mkdir(custom_scripts_dir)
+
+ # Render telegraf configuration and systemd override
+ render(config_telegraf, 'monitoring/telegraf.tmpl', monitoring)
+ render(systemd_telegraf_service, 'monitoring/systemd_vyos_telegraf_service.tmpl', monitoring)
+ render(systemd_override, 'monitoring/override.conf.tmpl', monitoring, permission=0o640)
+ render(syslog_telegraf, 'monitoring/syslog_telegraf.tmpl', monitoring)
+
+ chown(base_dir, 'telegraf', 'telegraf')
+
+ return None
+
+def apply(monitoring):
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+ if monitoring:
+ call('systemctl restart vyos-telegraf.service')
+ else:
+ call('systemctl stop vyos-telegraf.service')
+ # Telegraf includes custom rsyslog config changes - restart rsyslog to pick them up
+ call('systemctl restart rsyslog')
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
diff --git a/src/conf_mode/service_pppoe-server.py b/src/conf_mode/service_pppoe-server.py
index 9fbd531da..1f31d132d 100755
--- a/src/conf_mode/service_pppoe-server.py
+++ b/src/conf_mode/service_pppoe-server.py
@@ -24,8 +24,11 @@ from vyos.configverify import verify_accel_ppp_base_service
from vyos.template import render
from vyos.util import call
from vyos.util import dict_search
+from vyos.util import get_interface_config
from vyos import ConfigError
from vyos import airbag
+from vyos.range_regex import range_to_regex
+
airbag.enable()
pppoe_conf = r'/run/accel-pppd/pppoe.conf'
@@ -56,6 +59,11 @@ def verify(pppoe):
if 'interface' not in pppoe:
raise ConfigError('At least one listen interface must be defined!')
+ # Check if the interface exists in the system
+ for iface in pppoe['interface']:
+ if not get_interface_config(iface):
+ raise ConfigError(f'Interface {iface} does not exist!')
+
# local ippool and gateway settings config checks
if not (dict_search('client_ip_pool.subnet', pppoe) or
(dict_search('client_ip_pool.start', pppoe) and
@@ -73,6 +81,13 @@ def generate(pppoe):
if not pppoe:
return None
+ # Generate special regex for dynamic interfaces
+ for iface in pppoe['interface']:
+ if 'vlan_range' in pppoe['interface'][iface]:
+ pppoe['interface'][iface]['regex'] = []
+ for vlan_range in pppoe['interface'][iface]['vlan_range']:
+ pppoe['interface'][iface]['regex'].append(range_to_regex(vlan_range))
+
render(pppoe_conf, 'accel-ppp/pppoe.config.tmpl', pppoe)
if dict_search('authentication.mode', pppoe) == 'local':
diff --git a/src/conf_mode/snmp.py b/src/conf_mode/snmp.py
index e1852f2ce..8ce48780b 100755
--- a/src/conf_mode/snmp.py
+++ b/src/conf_mode/snmp.py
@@ -19,16 +19,18 @@ import os
from sys import exit
from vyos.config import Config
+from vyos.configdict import dict_merge
from vyos.configverify import verify_vrf
from vyos.snmpv3_hashgen import plaintext_to_md5
from vyos.snmpv3_hashgen import plaintext_to_sha1
from vyos.snmpv3_hashgen import random
from vyos.template import render
-from vyos.template import is_ipv4
from vyos.util import call
from vyos.util import chmod_755
+from vyos.util import dict_search
from vyos.validate import is_addr_assigned
from vyos.version import get_version_data
+from vyos.xml import defaults
from vyos import ConfigError
from vyos import airbag
airbag.enable()
@@ -37,57 +39,29 @@ config_file_client = r'/etc/snmp/snmp.conf'
config_file_daemon = r'/etc/snmp/snmpd.conf'
config_file_access = r'/usr/share/snmp/snmpd.conf'
config_file_user = r'/var/lib/snmp/snmpd.conf'
-default_script_dir = r'/config/user-data/'
systemd_override = r'/etc/systemd/system/snmpd.service.d/override.conf'
+systemd_service = 'snmpd.service'
-# SNMP OIDs used to mark auth/priv type
-OIDs = {
- 'md5' : '.1.3.6.1.6.3.10.1.1.2',
- 'sha' : '.1.3.6.1.6.3.10.1.1.3',
- 'aes' : '.1.3.6.1.6.3.10.1.2.4',
- 'des' : '.1.3.6.1.6.3.10.1.2.2',
- 'none': '.1.3.6.1.6.3.10.1.2.1'
-}
-
-default_config_data = {
- 'listen_on': [],
- 'listen_address': [],
- 'ipv6_enabled': 'True',
- 'communities': [],
- 'smux_peers': [],
- 'location' : '',
- 'protocol' : 'udp',
- 'description' : '',
- 'contact' : '',
- 'route_table': 'False',
- 'trap_source': '',
- 'trap_targets': [],
- 'vyos_user': '',
- 'vyos_user_pass': '',
- 'version': '',
- 'v3_enabled': 'False',
- 'v3_engineid': '',
- 'v3_groups': [],
- 'v3_traps': [],
- 'v3_users': [],
- 'v3_views': [],
- 'script_ext': []
-}
-
-def rmfile(file):
- if os.path.isfile(file):
- os.unlink(file)
-
-def get_config():
- snmp = default_config_data
- conf = Config()
- if not conf.exists('service snmp'):
- return None
+def get_config(config=None):
+ if config:
+ conf = config
else:
- if conf.exists('system ipv6 disable'):
- snmp['ipv6_enabled'] = False
+ conf = Config()
+ base = ['service', 'snmp']
+
+ snmp = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True, no_tag_node_value_mangle=True)
+ if not conf.exists(base):
+ snmp.update({'deleted' : ''})
+
+ if conf.exists(['service', 'lldp', 'snmp', 'enable']):
+ snmp.update({'lldp_snmp' : ''})
- conf.set_level('service snmp')
+ if conf.exists(['system', 'ipv6', 'disable']):
+ snmp.update({'ipv6_disabled' : ''})
+
+ if 'deleted' in snmp:
+ return snmp
version_data = get_version_data()
snmp['version'] = version_data['version']
@@ -96,466 +70,207 @@ def get_config():
snmp['vyos_user'] = 'vyos' + random(8)
snmp['vyos_user_pass'] = random(16)
- if conf.exists('community'):
- for name in conf.list_nodes('community'):
- community = {
- 'name': name,
- 'authorization': 'ro',
- 'network_v4': [],
- 'network_v6': [],
- 'has_source' : False
- }
-
- if conf.exists('community {0} authorization'.format(name)):
- community['authorization'] = conf.return_value('community {0} authorization'.format(name))
-
- # Subnet of SNMP client(s) allowed to contact system
- if conf.exists('community {0} network'.format(name)):
- for addr in conf.return_values('community {0} network'.format(name)):
- if is_ipv4(addr):
- community['network_v4'].append(addr)
- else:
- community['network_v6'].append(addr)
-
- # IP address of SNMP client allowed to contact system
- if conf.exists('community {0} client'.format(name)):
- for addr in conf.return_values('community {0} client'.format(name)):
- if is_ipv4(addr):
- community['network_v4'].append(addr)
- else:
- community['network_v6'].append(addr)
-
- if (len(community['network_v4']) > 0) or (len(community['network_v6']) > 0):
- community['has_source'] = True
-
- snmp['communities'].append(community)
-
- if conf.exists('contact'):
- snmp['contact'] = conf.return_value('contact')
-
- if conf.exists('description'):
- snmp['description'] = conf.return_value('description')
-
- if conf.exists('listen-address'):
- for addr in conf.list_nodes('listen-address'):
- port = '161'
- if conf.exists('listen-address {0} port'.format(addr)):
- port = conf.return_value('listen-address {0} port'.format(addr))
-
- snmp['listen_address'].append((addr, port))
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to update into the dictionary retrived.
+ default_values = defaults(base)
+
+ # We can not merge defaults for tagNodes - those need to be blended in
+ # per tagNode instance
+ if 'listen_address' in default_values:
+ del default_values['listen_address']
+ if 'community' in default_values:
+ del default_values['community']
+ if 'trap_target' in default_values:
+ del default_values['trap_target']
+ if 'v3' in default_values:
+ del default_values['v3']
+ snmp = dict_merge(default_values, snmp)
+
+ if 'listen_address' in snmp:
+ default_values = defaults(base + ['listen-address'])
+ for address in snmp['listen_address']:
+ snmp['listen_address'][address] = dict_merge(
+ default_values, snmp['listen_address'][address])
# Always listen on localhost if an explicit address has been configured
# This is a safety measure to not end up with invalid listen addresses
# that are not configured on this system. See https://phabricator.vyos.net/T850
- if not '127.0.0.1' in conf.list_nodes('listen-address'):
- snmp['listen_address'].append(('127.0.0.1', '161'))
-
- if not '::1' in conf.list_nodes('listen-address'):
- snmp['listen_address'].append(('::1', '161'))
-
- if conf.exists('location'):
- snmp['location'] = conf.return_value('location')
-
- if conf.exists('protocol'):
- snmp['protocol'] = conf.return_value('protocol')
-
- if conf.exists('smux-peer'):
- snmp['smux_peers'] = conf.return_values('smux-peer')
-
- if conf.exists('trap-source'):
- snmp['trap_source'] = conf.return_value('trap-source')
-
- if conf.exists('trap-target'):
- for target in conf.list_nodes('trap-target'):
- trap_tgt = {
- 'target': target,
- 'community': '',
- 'port': ''
- }
-
- if conf.exists('trap-target {0} community'.format(target)):
- trap_tgt['community'] = conf.return_value('trap-target {0} community'.format(target))
-
- if conf.exists('trap-target {0} port'.format(target)):
- trap_tgt['port'] = conf.return_value('trap-target {0} port'.format(target))
-
- snmp['trap_targets'].append(trap_tgt)
-
- if conf.exists('script-extensions'):
- for extname in conf.list_nodes('script-extensions extension-name'):
- conf_script = conf.return_value('script-extensions extension-name {} script'.format(extname))
- # if script has not absolute path, use pre configured path
- if "/" not in conf_script:
- conf_script = default_script_dir + conf_script
-
- extension = {
- 'name': extname,
- 'script' : conf_script
- }
-
- snmp['script_ext'].append(extension)
-
- if conf.exists('oid-enable route-table'):
- snmp['route_table'] = True
-
- if conf.exists('vrf'):
- # Append key to dict but don't place it in the default dictionary.
- # This is required to make the override.conf.tmpl work until we
- # migrate to get_config_dict().
- snmp['vrf'] = conf.return_value('vrf')
-
-
- #########################################################################
- # ____ _ _ __ __ ____ _____ #
- # / ___|| \ | | \/ | _ \ __ _|___ / #
- # \___ \| \| | |\/| | |_) | \ \ / / |_ \ #
- # ___) | |\ | | | | __/ \ V / ___) | #
- # |____/|_| \_|_| |_|_| \_/ |____/ #
- # #
- # now take care about the fancy SNMP v3 stuff, or bail out eraly #
- #########################################################################
- if not conf.exists('v3'):
- return snmp
- else:
- snmp['v3_enabled'] = True
-
- # 'set service snmp v3 engineid'
- if conf.exists('v3 engineid'):
- snmp['v3_engineid'] = conf.return_value('v3 engineid')
-
- # 'set service snmp v3 group'
- if conf.exists('v3 group'):
- for group in conf.list_nodes('v3 group'):
- v3_group = {
- 'name': group,
- 'mode': 'ro',
- 'seclevel': 'auth',
- 'view': ''
- }
-
- if conf.exists('v3 group {0} mode'.format(group)):
- v3_group['mode'] = conf.return_value('v3 group {0} mode'.format(group))
-
- if conf.exists('v3 group {0} seclevel'.format(group)):
- v3_group['seclevel'] = conf.return_value('v3 group {0} seclevel'.format(group))
-
- if conf.exists('v3 group {0} view'.format(group)):
- v3_group['view'] = conf.return_value('v3 group {0} view'.format(group))
-
- snmp['v3_groups'].append(v3_group)
-
- # 'set service snmp v3 trap-target'
- if conf.exists('v3 trap-target'):
- for trap in conf.list_nodes('v3 trap-target'):
- trap_cfg = {
- 'ipAddr': trap,
- 'secName': '',
- 'authProtocol': 'md5',
- 'authPassword': '',
- 'authMasterKey': '',
- 'privProtocol': 'des',
- 'privPassword': '',
- 'privMasterKey': '',
- 'ipProto': 'udp',
- 'ipPort': '162',
- 'type': '',
- 'secLevel': 'noAuthNoPriv'
- }
-
- if conf.exists('v3 trap-target {0} user'.format(trap)):
- # Set the securityName used for authenticated SNMPv3 messages.
- trap_cfg['secName'] = conf.return_value('v3 trap-target {0} user'.format(trap))
-
- if conf.exists('v3 trap-target {0} auth type'.format(trap)):
- # Set the authentication protocol (MD5 or SHA) used for authenticated SNMPv3 messages
- # cmdline option '-a'
- trap_cfg['authProtocol'] = conf.return_value('v3 trap-target {0} auth type'.format(trap))
-
- if conf.exists('v3 trap-target {0} auth plaintext-password'.format(trap)):
- # Set the authentication pass phrase used for authenticated SNMPv3 messages.
- # cmdline option '-A'
- trap_cfg['authPassword'] = conf.return_value('v3 trap-target {0} auth plaintext-password'.format(trap))
-
- if conf.exists('v3 trap-target {0} auth encrypted-password'.format(trap)):
- # Sets the keys to be used for SNMPv3 transactions. These options allow you to set the master authentication keys.
- # cmdline option '-3m'
- trap_cfg['authMasterKey'] = conf.return_value('v3 trap-target {0} auth encrypted-password'.format(trap))
-
- if conf.exists('v3 trap-target {0} privacy type'.format(trap)):
- # Set the privacy protocol (DES or AES) used for encrypted SNMPv3 messages.
- # cmdline option '-x'
- trap_cfg['privProtocol'] = conf.return_value('v3 trap-target {0} privacy type'.format(trap))
-
- if conf.exists('v3 trap-target {0} privacy plaintext-password'.format(trap)):
- # Set the privacy pass phrase used for encrypted SNMPv3 messages.
- # cmdline option '-X'
- trap_cfg['privPassword'] = conf.return_value('v3 trap-target {0} privacy plaintext-password'.format(trap))
-
- if conf.exists('v3 trap-target {0} privacy encrypted-password'.format(trap)):
- # Sets the keys to be used for SNMPv3 transactions. These options allow you to set the master encryption keys.
- # cmdline option '-3M'
- trap_cfg['privMasterKey'] = conf.return_value('v3 trap-target {0} privacy encrypted-password'.format(trap))
-
- if conf.exists('v3 trap-target {0} protocol'.format(trap)):
- trap_cfg['ipProto'] = conf.return_value('v3 trap-target {0} protocol'.format(trap))
-
- if conf.exists('v3 trap-target {0} port'.format(trap)):
- trap_cfg['ipPort'] = conf.return_value('v3 trap-target {0} port'.format(trap))
-
- if conf.exists('v3 trap-target {0} type'.format(trap)):
- trap_cfg['type'] = conf.return_value('v3 trap-target {0} type'.format(trap))
-
- # Determine securityLevel used for SNMPv3 messages (noAuthNoPriv|authNoPriv|authPriv).
- # Appropriate pass phrase(s) must provided when using any level higher than noAuthNoPriv.
- if trap_cfg['authPassword'] or trap_cfg['authMasterKey']:
- if trap_cfg['privProtocol'] or trap_cfg['privPassword']:
- trap_cfg['secLevel'] = 'authPriv'
- else:
- trap_cfg['secLevel'] = 'authNoPriv'
-
- snmp['v3_traps'].append(trap_cfg)
-
- # 'set service snmp v3 user'
- if conf.exists('v3 user'):
- for user in conf.list_nodes('v3 user'):
- user_cfg = {
- 'name': user,
- 'authMasterKey': '',
- 'authPassword': '',
- 'authProtocol': 'md5',
- 'authOID': 'none',
- 'group': '',
- 'mode': 'ro',
- 'privMasterKey': '',
- 'privPassword': '',
- 'privOID': '',
- 'privProtocol': 'des'
- }
-
- # v3 user {0} auth
- if conf.exists('v3 user {0} auth encrypted-password'.format(user)):
- user_cfg['authMasterKey'] = conf.return_value('v3 user {0} auth encrypted-password'.format(user))
-
- if conf.exists('v3 user {0} auth plaintext-password'.format(user)):
- user_cfg['authPassword'] = conf.return_value('v3 user {0} auth plaintext-password'.format(user))
-
- # load default value
- type = user_cfg['authProtocol']
- if conf.exists('v3 user {0} auth type'.format(user)):
- type = conf.return_value('v3 user {0} auth type'.format(user))
-
- # (re-)update with either default value or value from CLI
- user_cfg['authProtocol'] = type
- user_cfg['authOID'] = OIDs[type]
-
- # v3 user {0} group
- if conf.exists('v3 user {0} group'.format(user)):
- user_cfg['group'] = conf.return_value('v3 user {0} group'.format(user))
-
- # v3 user {0} mode
- if conf.exists('v3 user {0} mode'.format(user)):
- user_cfg['mode'] = conf.return_value('v3 user {0} mode'.format(user))
-
- # v3 user {0} privacy
- if conf.exists('v3 user {0} privacy encrypted-password'.format(user)):
- user_cfg['privMasterKey'] = conf.return_value('v3 user {0} privacy encrypted-password'.format(user))
-
- if conf.exists('v3 user {0} privacy plaintext-password'.format(user)):
- user_cfg['privPassword'] = conf.return_value('v3 user {0} privacy plaintext-password'.format(user))
-
- # load default value
- type = user_cfg['privProtocol']
- if conf.exists('v3 user {0} privacy type'.format(user)):
- type = conf.return_value('v3 user {0} privacy type'.format(user))
-
- # (re-)update with either default value or value from CLI
- user_cfg['privProtocol'] = type
- user_cfg['privOID'] = OIDs[type]
-
- snmp['v3_users'].append(user_cfg)
-
- # 'set service snmp v3 view'
- if conf.exists('v3 view'):
- for view in conf.list_nodes('v3 view'):
- view_cfg = {
- 'name': view,
- 'oids': []
- }
-
- if conf.exists('v3 view {0} oid'.format(view)):
- for oid in conf.list_nodes('v3 view {0} oid'.format(view)):
- oid_cfg = {
- 'oid': oid
- }
- view_cfg['oids'].append(oid_cfg)
- snmp['v3_views'].append(view_cfg)
+ if '127.0.0.1' not in snmp['listen_address']:
+ tmp = {'127.0.0.1': {'port': '161'}}
+ snmp['listen_address'] = dict_merge(tmp, snmp['listen_address'])
+
+ if '::1' not in snmp['listen_address']:
+ if 'ipv6_disabled' not in snmp:
+ tmp = {'::1': {'port': '161'}}
+ snmp['listen_address'] = dict_merge(tmp, snmp['listen_address'])
+
+ if 'community' in snmp:
+ default_values = defaults(base + ['community'])
+ for community in snmp['community']:
+ snmp['community'][community] = dict_merge(
+ default_values, snmp['community'][community])
+
+ if 'trap_target' in snmp:
+ default_values = defaults(base + ['trap-target'])
+ for trap in snmp['trap_target']:
+ snmp['trap_target'][trap] = dict_merge(
+ default_values, snmp['trap_target'][trap])
+
+ if 'v3' in snmp:
+ default_values = defaults(base + ['v3'])
+ # tagNodes need to be merged in individually later on
+ for tmp in ['user', 'group', 'trap_target']:
+ del default_values[tmp]
+ snmp['v3'] = dict_merge(default_values, snmp['v3'])
+
+ for user_group in ['user', 'group']:
+ if user_group in snmp['v3']:
+ default_values = defaults(base + ['v3', user_group])
+ for tmp in snmp['v3'][user_group]:
+ snmp['v3'][user_group][tmp] = dict_merge(
+ default_values, snmp['v3'][user_group][tmp])
+
+ if 'trap_target' in snmp['v3']:
+ default_values = defaults(base + ['v3', 'trap-target'])
+ for trap in snmp['v3']['trap_target']:
+ snmp['v3']['trap_target'][trap] = dict_merge(
+ default_values, snmp['v3']['trap_target'][trap])
return snmp
def verify(snmp):
- if snmp is None:
- # we can not delete SNMP when LLDP is configured with SNMP
- conf = Config()
- if conf.exists('service lldp snmp enable'):
- raise ConfigError('Can not delete SNMP service, as LLDP still uses SNMP!')
-
+ if not snmp:
return None
+ if {'deleted', 'lldp_snmp'} <= set(snmp):
+ raise ConfigError('Can not delete SNMP service, as LLDP still uses SNMP!')
+
### check if the configured script actually exist
- if snmp['script_ext']:
- for ext in snmp['script_ext']:
- if not os.path.isfile(ext['script']):
- print ("WARNING: script: {} doesn't exist".format(ext['script']))
+ if 'script_extensions' in snmp and 'extension_name' in snmp['script_extensions']:
+ for extension, extension_opt in snmp['script_extensions']['extension_name'].items():
+ if 'script' not in extension_opt:
+ raise ConfigError(f'Script extension "{extension}" requires an actual script to be configured!')
+
+ tmp = extension_opt['script']
+ if not os.path.isfile(tmp):
+ print(f'WARNING: script "{tmp}" does not exist!')
else:
- chmod_755(ext['script'])
-
- for listen in snmp['listen_address']:
- addr = listen[0]
- port = listen[1]
- protocol = snmp['protocol']
-
- tmp = None
- if is_ipv4(addr):
- # example: udp:127.0.0.1:161
- tmp = f'{protocol}:{addr}:{port}'
- elif snmp['ipv6_enabled']:
- # example: udp6:[::1]:161
- tmp = f'{protocol}6:[{addr}]:{port}'
-
- # We only wan't to configure addresses that exist on the system.
- # Hint the user if they don't exist
- if is_addr_assigned(addr):
- if tmp: snmp['listen_on'].append(tmp)
- else:
- print(f'WARNING: SNMP listen address {addr} not configured!')
+ chmod_755(extension_opt['script'])
+
+ if 'listen_address' in snmp:
+ for address in snmp['listen_address']:
+ # We only want to configure addresses that exist on the system.
+ # Hint the user if they don't exist
+ if not is_addr_assigned(address):
+ print(f'WARNING: SNMP listen address "{address}" not configured!')
+
+ if 'trap_target' in snmp:
+ for trap, trap_config in snmp['trap_target'].items():
+ if 'community' not in trap_config:
+ raise ConfigError(f'Trap target "{trap}" requires a community to be set!')
verify_vrf(snmp)
# bail out early if SNMP v3 is not configured
- if not snmp['v3_enabled']:
+ if 'v3' not in snmp:
return None
- if 'v3_groups' in snmp.keys():
- for group in snmp['v3_groups']:
- #
- # A view must exist prior to mapping it into a group
- #
- if 'view' in group.keys():
- error = True
- if 'v3_views' in snmp.keys():
- for view in snmp['v3_views']:
- if view['name'] == group['view']:
- error = False
- if error:
- raise ConfigError('You must create view "{0}" first'.format(group['view']))
- else:
- raise ConfigError('"view" must be specified')
-
- if not 'mode' in group.keys():
- raise ConfigError('"mode" must be specified')
-
- if not 'seclevel' in group.keys():
- raise ConfigError('"seclevel" must be specified')
-
- if 'v3_traps' in snmp.keys():
- for trap in snmp['v3_traps']:
- if trap['authPassword'] and trap['authMasterKey']:
- raise ConfigError('Must specify only one of encrypted-password/plaintext-key for trap auth')
-
- if trap['authPassword'] == '' and trap['authMasterKey'] == '':
- raise ConfigError('Must specify encrypted-password or plaintext-key for trap auth')
-
- if trap['privPassword'] and trap['privMasterKey']:
- raise ConfigError('Must specify only one of encrypted-password/plaintext-key for trap privacy')
+ if 'user' in snmp['v3']:
+ for user, user_config in snmp['v3']['user'].items():
+ if 'group' not in user_config:
+ raise ConfigError(f'Group membership required for user "{user}"!')
- if trap['privPassword'] == '' and trap['privMasterKey'] == '':
- raise ConfigError('Must specify encrypted-password or plaintext-key for trap privacy')
+ if 'plaintext_password' not in user_config['auth'] and 'encrypted_password' not in user_config['auth']:
+ raise ConfigError(f'Must specify authentication encrypted-password or plaintext-password for user "{user}"!')
- if not 'type' in trap.keys():
- raise ConfigError('v3 trap: "type" must be specified')
+ if 'plaintext_password' not in user_config['privacy'] and 'encrypted_password' not in user_config['privacy']:
+ raise ConfigError(f'Must specify privacy encrypted-password or plaintext-password for user "{user}"!')
- if not 'authPassword' and 'authMasterKey' in trap.keys():
- raise ConfigError('v3 trap: "auth" must be specified')
+ if 'group' in snmp['v3']:
+ for group, group_config in snmp['v3']['group'].items():
+ if 'seclevel' not in group_config:
+ raise ConfigError(f'Must configure "seclevel" for group "{group}"!')
+ if 'view' not in group_config:
+ raise ConfigError(f'Must configure "view" for group "{group}"!')
- if not 'authProtocol' in trap.keys():
- raise ConfigError('v3 trap: "protocol" must be specified')
+ # Check if 'view' exists
+ view = group_config['view']
+ if 'view' not in snmp['v3'] or view not in snmp['v3']['view']:
+ raise ConfigError(f'You must create view "{view}" first!')
- if not 'privPassword' and 'privMasterKey' in trap.keys():
- raise ConfigError('v3 trap: "user" must be specified')
+ if 'view' in snmp['v3']:
+ for view, view_config in snmp['v3']['view'].items():
+ if 'oid' not in view_config:
+ raise ConfigError(f'Must configure an "oid" for view "{view}"!')
- if 'v3_users' in snmp.keys():
- for user in snmp['v3_users']:
- #
- # Group must exist prior to mapping it into a group
- # seclevel will be extracted from group
- #
- if user['group']:
- error = True
- if 'v3_groups' in snmp.keys():
- for group in snmp['v3_groups']:
- if group['name'] == user['group']:
- seclevel = group['seclevel']
- error = False
+ if 'trap_target' in snmp['v3']:
+ for trap, trap_config in snmp['v3']['trap_target'].items():
+ if 'plaintext_password' not in trap_config['auth'] and 'encrypted_password' not in trap_config['auth']:
+ raise ConfigError(f'Must specify one of authentication encrypted-password or plaintext-password for trap "{trap}"!')
- if error:
- raise ConfigError('You must create group "{0}" first'.format(user['group']))
+ if {'plaintext_password', 'encrypted_password'} <= set(trap_config['auth']):
+ raise ConfigError(f'Can not specify both authentication encrypted-password and plaintext-password for trap "{trap}"!')
- # Depending on the configured security level the user has to provide additional info
- if (not user['authPassword'] and not user['authMasterKey']):
- raise ConfigError('Must specify encrypted-password or plaintext-key for user auth')
+ if 'plaintext_password' not in trap_config['privacy'] and 'encrypted_password' not in trap_config['privacy']:
+ raise ConfigError(f'Must specify one of privacy encrypted-password or plaintext-password for trap "{trap}"!')
- if user['privPassword'] == '' and user['privMasterKey'] == '':
- raise ConfigError('Must specify encrypted-password or plaintext-key for user privacy')
+ if {'plaintext_password', 'encrypted_password'} <= set(trap_config['privacy']):
+ raise ConfigError(f'Can not specify both privacy encrypted-password and plaintext-password for trap "{trap}"!')
- if user['mode'] == '':
- raise ConfigError('Must specify user mode ro/rw')
-
- if 'v3_views' in snmp.keys():
- for view in snmp['v3_views']:
- if not view['oids']:
- raise ConfigError('Must configure an oid')
+ if 'type' not in trap_config:
+ raise ConfigError('SNMP v3 trap "type" must be specified!')
return None
def generate(snmp):
+
#
# As we are manipulating the snmpd user database we have to stop it first!
# This is even save if service is going to be removed
- call('systemctl stop snmpd.service')
- config_files = [config_file_client, config_file_daemon, config_file_access,
- config_file_user, systemd_override]
+ call(f'systemctl stop {systemd_service}')
+ # Clean config files
+ config_files = [config_file_client, config_file_daemon,
+ config_file_access, config_file_user, systemd_override]
for file in config_files:
- rmfile(file)
+ if os.path.isfile(file):
+ os.unlink(file)
if not snmp:
return None
- if 'v3_users' in snmp.keys():
+ if 'v3' in snmp:
# net-snmp is now regenerating the configuration file in the background
# thus we need to re-open and re-read the file as the content changed.
# After that we can no read the encrypted password from the config and
# replace the CLI plaintext password with its encrypted version.
- os.environ["vyos_libexec_dir"] = "/usr/libexec/vyos"
+ os.environ['vyos_libexec_dir'] = '/usr/libexec/vyos'
- for user in snmp['v3_users']:
- if user['authProtocol'] == 'sha':
- hash = plaintext_to_sha1
- else:
- hash = plaintext_to_md5
+ if 'user' in snmp['v3']:
+ for user, user_config in snmp['v3']['user'].items():
+ if dict_search('auth.type', user_config) == 'sha':
+ hash = plaintext_to_sha1
+ else:
+ hash = plaintext_to_md5
+
+ if dict_search('auth.plaintext_password', user_config) is not None:
+ tmp = hash(dict_search('auth.plaintext_password', user_config),
+ dict_search('v3.engineid', snmp))
+
+ snmp['v3']['user'][user]['auth']['encrypted_password'] = tmp
+ del snmp['v3']['user'][user]['auth']['plaintext_password']
- if user['authPassword']:
- user['authMasterKey'] = hash(user['authPassword'], snmp['v3_engineid'])
- user['authPassword'] = ''
+ call(f'/opt/vyatta/sbin/my_set service snmp v3 user "{user}" auth encrypted-password "{tmp}" > /dev/null')
+ call(f'/opt/vyatta/sbin/my_delete service snmp v3 user "{user}" auth plaintext-password > /dev/null')
- call('/opt/vyatta/sbin/my_set service snmp v3 user "{name}" auth encrypted-password "{authMasterKey}" > /dev/null'.format(**user))
- call('/opt/vyatta/sbin/my_delete service snmp v3 user "{name}" auth plaintext-password > /dev/null'.format(**user))
+ if dict_search('privacy.plaintext_password', user_config) is not None:
+ tmp = hash(dict_search('privacy.plaintext_password', user_config),
+ dict_search('v3.engineid', snmp))
- if user['privPassword']:
- user['privMasterKey'] = hash(user['privPassword'], snmp['v3_engineid'])
- user['privPassword'] = ''
+ snmp['v3']['user'][user]['privacy']['encrypted_password'] = tmp
+ del snmp['v3']['user'][user]['privacy']['plaintext_password']
- call('/opt/vyatta/sbin/my_set service snmp v3 user "{name}" privacy encrypted-password "{privMasterKey}" > /dev/null'.format(**user))
- call('/opt/vyatta/sbin/my_delete service snmp v3 user "{name}" privacy plaintext-password > /dev/null'.format(**user))
+ call(f'/opt/vyatta/sbin/my_set service snmp v3 user "{user}" privacy encrypted-password "{tmp}" > /dev/null')
+ call(f'/opt/vyatta/sbin/my_delete service snmp v3 user "{user}" privacy plaintext-password > /dev/null')
# Write client config file
render(config_file_client, 'snmp/etc.snmp.conf.tmpl', snmp)
@@ -578,7 +293,7 @@ def apply(snmp):
return None
# start SNMP daemon
- call('systemctl restart snmpd.service')
+ call(f'systemctl restart {systemd_service}')
# Enable AgentX in FRR
call('vtysh -c "configure terminal" -c "agentx" >/dev/null')
diff --git a/src/conf_mode/system-login-banner.py b/src/conf_mode/system-login-banner.py
index 2220d7b66..a521c9834 100755
--- a/src/conf_mode/system-login-banner.py
+++ b/src/conf_mode/system-login-banner.py
@@ -15,22 +15,20 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sys import exit
+from copy import deepcopy
+
from vyos.config import Config
+from vyos.util import write_file
from vyos import ConfigError
-
from vyos import airbag
airbag.enable()
-motd="""
-Check out project news at https://blog.vyos.io
-and feel free to report bugs at https://phabricator.vyos.net
-
-You can change this banner using "set system login banner post-login" command.
-
-VyOS is a free software distribution that includes multiple components,
-you can check individual component licenses under /usr/share/doc/*/copyright
-
-"""
+try:
+ with open('/usr/share/vyos/default_motd') as f:
+ motd = f.read()
+except Exception:
+ # Use an empty banner if the default banner file cannot be read
+ motd = "\n"
PRELOGIN_FILE = r'/etc/issue'
PRELOGIN_NET_FILE = r'/etc/issue.net'
@@ -38,12 +36,12 @@ POSTLOGIN_FILE = r'/etc/motd'
default_config_data = {
'issue': 'Welcome to VyOS - \\n \\l\n\n',
- 'issue_net': 'Welcome to VyOS\n',
+ 'issue_net': '',
'motd': motd
}
def get_config(config=None):
- banner = default_config_data
+ banner = deepcopy(default_config_data)
if config:
conf = config
else:
@@ -92,14 +90,9 @@ def generate(banner):
pass
def apply(banner):
- with open(PRELOGIN_FILE, 'w') as f:
- f.write(banner['issue'])
-
- with open(PRELOGIN_NET_FILE, 'w') as f:
- f.write(banner['issue_net'])
-
- with open(POSTLOGIN_FILE, 'w') as f:
- f.write(banner['motd'])
+ write_file(PRELOGIN_FILE, banner['issue'])
+ write_file(PRELOGIN_NET_FILE, banner['issue_net'])
+ write_file(POSTLOGIN_FILE, banner['motd'])
return None
diff --git a/src/conf_mode/system-logs.py b/src/conf_mode/system-logs.py
new file mode 100755
index 000000000..e6296656d
--- /dev/null
+++ b/src/conf_mode/system-logs.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from sys import exit
+
+from vyos import ConfigError
+from vyos import airbag
+from vyos.config import Config
+from vyos.configdict import dict_merge
+from vyos.logger import syslog
+from vyos.template import render
+from vyos.util import dict_search
+from vyos.xml import defaults
+airbag.enable()
+
+# path to logrotate configs
+logrotate_atop_file = '/etc/logrotate.d/vyos-atop'
+logrotate_rsyslog_file = '/etc/logrotate.d/vyos-rsyslog'
+
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+
+ base = ['system', 'logs']
+ default_values = defaults(base)
+ logs_config = conf.get_config_dict(base,
+ key_mangling=('-', '_'),
+ get_first_key=True)
+ logs_config = dict_merge(default_values, logs_config)
+
+ return logs_config
+
+
+def verify(logs_config):
+ # Nothing to verify here
+ pass
+
+
+def generate(logs_config):
+ # get configuration for logrotate atop
+ logrotate_atop = dict_search('logrotate.atop', logs_config)
+ # generate new config file for atop
+ syslog.debug('Adding logrotate config for atop')
+ render(logrotate_atop_file, 'logs/logrotate/vyos-atop.tmpl', logrotate_atop)
+
+ # get configuration for logrotate rsyslog
+ logrotate_rsyslog = dict_search('logrotate.messages', logs_config)
+ # generate new config file for rsyslog
+ syslog.debug('Adding logrotate config for rsyslog')
+ render(logrotate_rsyslog_file, 'logs/logrotate/vyos-rsyslog.tmpl',
+ logrotate_rsyslog)
+
+
+def apply(logs_config):
+ # No further actions needed
+ pass
+
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
diff --git a/src/etc/dhcp/dhclient-enter-hooks.d/04-vyos-resolvconf b/src/etc/dhcp/dhclient-enter-hooks.d/04-vyos-resolvconf
index 24090e2a8..b1902b585 100644
--- a/src/etc/dhcp/dhclient-enter-hooks.d/04-vyos-resolvconf
+++ b/src/etc/dhcp/dhclient-enter-hooks.d/04-vyos-resolvconf
@@ -1,44 +1,48 @@
-# modified make_resolv_conf () for VyOS
-make_resolv_conf() {
- hostsd_client="/usr/bin/vyos-hostsd-client"
- hostsd_changes=
+# modified make_resolv_conf() for VyOS
+# should be used only if vyos-hostsd is running
- if [ -n "$new_domain_name" ]; then
- logmsg info "Deleting search domains with tag \"dhcp-$interface\" via vyos-hostsd-client"
- $hostsd_client --delete-search-domains --tag "dhcp-$interface"
- logmsg info "Adding domain name \"$new_domain_name\" as search domain with tag \"dhcp-$interface\" via vyos-hostsd-client"
- $hostsd_client --add-search-domains "$new_domain_name" --tag "dhcp-$interface"
- hostsd_changes=y
- fi
+if /usr/bin/systemctl -q is-active vyos-hostsd; then
+ make_resolv_conf() {
+ hostsd_client="/usr/bin/vyos-hostsd-client"
+ hostsd_changes=
- if [ -n "$new_dhcp6_domain_search" ]; then
- logmsg info "Deleting search domains with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
- $hostsd_client --delete-search-domains --tag "dhcpv6-$interface"
- logmsg info "Adding search domain \"$new_dhcp6_domain_search\" with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
- $hostsd_client --add-search-domains "$new_dhcp6_domain_search" --tag "dhcpv6-$interface"
- hostsd_changes=y
- fi
+ if [ -n "$new_domain_name" ]; then
+ logmsg info "Deleting search domains with tag \"dhcp-$interface\" via vyos-hostsd-client"
+ $hostsd_client --delete-search-domains --tag "dhcp-$interface"
+ logmsg info "Adding domain name \"$new_domain_name\" as search domain with tag \"dhcp-$interface\" via vyos-hostsd-client"
+ $hostsd_client --add-search-domains "$new_domain_name" --tag "dhcp-$interface"
+ hostsd_changes=y
+ fi
- if [ -n "$new_domain_name_servers" ]; then
- logmsg info "Deleting nameservers with tag \"dhcp-$interface\" via vyos-hostsd-client"
- $hostsd_client --delete-name-servers --tag "dhcp-$interface"
- logmsg info "Adding nameservers \"$new_domain_name_servers\" with tag \"dhcp-$interface\" via vyos-hostsd-client"
- $hostsd_client --add-name-servers $new_domain_name_servers --tag "dhcp-$interface"
- hostsd_changes=y
- fi
+ if [ -n "$new_dhcp6_domain_search" ]; then
+ logmsg info "Deleting search domains with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
+ $hostsd_client --delete-search-domains --tag "dhcpv6-$interface"
+ logmsg info "Adding search domain \"$new_dhcp6_domain_search\" with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
+ $hostsd_client --add-search-domains "$new_dhcp6_domain_search" --tag "dhcpv6-$interface"
+ hostsd_changes=y
+ fi
- if [ -n "$new_dhcp6_name_servers" ]; then
- logmsg info "Deleting nameservers with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
- $hostsd_client --delete-name-servers --tag "dhcpv6-$interface"
- logmsg info "Adding nameservers \"$new_dhcpv6_name_servers\" with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
- $hostsd_client --add-name-servers $new_dhcpv6_name_servers --tag "dhcpv6-$interface"
- hostsd_changes=y
- fi
+ if [ -n "$new_domain_name_servers" ]; then
+ logmsg info "Deleting nameservers with tag \"dhcp-$interface\" via vyos-hostsd-client"
+ $hostsd_client --delete-name-servers --tag "dhcp-$interface"
+ logmsg info "Adding nameservers \"$new_domain_name_servers\" with tag \"dhcp-$interface\" via vyos-hostsd-client"
+ $hostsd_client --add-name-servers $new_domain_name_servers --tag "dhcp-$interface"
+ hostsd_changes=y
+ fi
- if [ $hostsd_changes ]; then
- logmsg info "Applying changes via vyos-hostsd-client"
- $hostsd_client --apply
- else
- logmsg info "No changes to apply via vyos-hostsd-client"
- fi
-}
+ if [ -n "$new_dhcp6_name_servers" ]; then
+ logmsg info "Deleting nameservers with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
+ $hostsd_client --delete-name-servers --tag "dhcpv6-$interface"
+ logmsg info "Adding nameservers \"$new_dhcpv6_name_servers\" with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
+ $hostsd_client --add-name-servers $new_dhcpv6_name_servers --tag "dhcpv6-$interface"
+ hostsd_changes=y
+ fi
+
+ if [ $hostsd_changes ]; then
+ logmsg info "Applying changes via vyos-hostsd-client"
+ $hostsd_client --apply
+ else
+ logmsg info "No changes to apply via vyos-hostsd-client"
+ fi
+ }
+fi
diff --git a/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup b/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup
index fec792b64..ad6a1d5eb 100644
--- a/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup
+++ b/src/etc/dhcp/dhclient-exit-hooks.d/01-vyos-cleanup
@@ -4,14 +4,19 @@
# NOTE: here we use 'ip' wrapper, therefore a route will be actually deleted via /usr/sbin/ip or vtysh, according to the system state
hostsd_client="/usr/bin/vyos-hostsd-client"
hostsd_changes=
+# check vyos-hostsd status
+/usr/bin/systemctl -q is-active vyos-hostsd
+hostsd_status=$?
if [[ $reason =~ (EXPIRE|FAIL|RELEASE|STOP) ]]; then
- # delete search domains and nameservers via vyos-hostsd
- logmsg info "Deleting search domains with tag \"dhcp-$interface\" via vyos-hostsd-client"
- $hostsd_client --delete-search-domains --tag "dhcp-$interface"
- logmsg info "Deleting nameservers with tag \"dhcp-${interface}\" via vyos-hostsd-client"
- $hostsd_client --delete-name-servers --tag "dhcp-${interface}"
- hostsd_changes=y
+ if [[ $hostsd_status -eq 0 ]]; then
+ # delete search domains and nameservers via vyos-hostsd
+ logmsg info "Deleting search domains with tag \"dhcp-$interface\" via vyos-hostsd-client"
+ $hostsd_client --delete-search-domains --tag "dhcp-$interface"
+ logmsg info "Deleting nameservers with tag \"dhcp-${interface}\" via vyos-hostsd-client"
+ $hostsd_client --delete-name-servers --tag "dhcp-${interface}"
+ hostsd_changes=y
+ fi
if_metric="$IF_METRIC"
@@ -92,12 +97,14 @@ if [[ $reason =~ (EXPIRE|FAIL|RELEASE|STOP) ]]; then
fi
if [[ $reason =~ (EXPIRE6|RELEASE6|STOP6) ]]; then
- # delete search domains and nameservers via vyos-hostsd
- logmsg info "Deleting search domains with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
- $hostsd_client --delete-search-domains --tag "dhcpv6-$interface"
- logmsg info "Deleting nameservers with tag \"dhcpv6-${interface}\" via vyos-hostsd-client"
- $hostsd_client --delete-name-servers --tag "dhcpv6-${interface}"
- hostsd_changes=y
+ if [[ $hostsd_status -eq 0 ]]; then
+ # delete search domains and nameservers via vyos-hostsd
+ logmsg info "Deleting search domains with tag \"dhcpv6-$interface\" via vyos-hostsd-client"
+ $hostsd_client --delete-search-domains --tag "dhcpv6-$interface"
+ logmsg info "Deleting nameservers with tag \"dhcpv6-${interface}\" via vyos-hostsd-client"
+ $hostsd_client --delete-name-servers --tag "dhcpv6-${interface}"
+ hostsd_changes=y
+ fi
fi
if [ $hostsd_changes ]; then
diff --git a/src/etc/systemd/system/keepalived.service.d/override.conf b/src/etc/systemd/system/keepalived.service.d/override.conf
deleted file mode 100644
index 1c68913f2..000000000
--- a/src/etc/systemd/system/keepalived.service.d/override.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-ConditionPathExists=
-ConditionPathExists=/run/keepalived/keepalived.conf
-After=
-After=vyos-router.service
-
-[Service]
-KillMode=process
-EnvironmentFile=
-ExecStart=
-ExecStart=/usr/sbin/keepalived --use-file /run/keepalived/keepalived.conf --pid /run/keepalived/keepalived.pid --dont-fork --snmp
-PIDFile=
-PIDFile=/run/keepalived/keepalived.pid
diff --git a/src/etc/systemd/system/uacctd.service.d/override.conf b/src/etc/systemd/system/uacctd.service.d/override.conf
new file mode 100644
index 000000000..38bcce515
--- /dev/null
+++ b/src/etc/systemd/system/uacctd.service.d/override.conf
@@ -0,0 +1,14 @@
+[Unit]
+After=
+After=vyos-router.service
+ConditionPathExists=
+ConditionPathExists=/run/pmacct/uacctd.conf
+
+[Service]
+EnvironmentFile=
+ExecStart=
+ExecStart=/usr/sbin/uacctd -f /run/pmacct/uacctd.conf
+WorkingDirectory=
+WorkingDirectory=/run/pmacct
+PIDFile=
+PIDFile=/run/pmacct/uacctd.pid
diff --git a/src/etc/telegraf/custom_scripts/show_interfaces_input_filter.py b/src/etc/telegraf/custom_scripts/show_interfaces_input_filter.py
new file mode 100755
index 000000000..0f5e366cd
--- /dev/null
+++ b/src/etc/telegraf/custom_scripts/show_interfaces_input_filter.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+import subprocess
+import time
+
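+# Custom Telegraf input script: parse the output of
+# "show_interfaces.py --action=show-brief" and print one InfluxDB
+# line protocol record per interface (intended, presumably, for a
+# Telegraf exec-style input plugin).
+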
+def status_to_int(status):
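+ # map the state/link flag from 'show interfaces' ('u' = up, 'D' = down,
+ # 'A' = admin down) to a numeric value for the time series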
+ switcher={
+ 'u':'0',
+ 'D':'1',
+ 'A':'2'
+ }
+ return switcher.get(status,"")
+
+def description_check(line):
+ desc=" ".join(line[3:])
+ if desc == "":
+ return "empty"
+ else:
+ return desc
+
+def gen_ip_list(index,interfaces):
+ line=interfaces[index].split()
+ ip_list=line[1]
+ # addresses may continue on the following lines of the "show-brief" output;
+ # collect them until the next full interface row or the end of the output
+ index += 1
+ while index < len(interfaces) and len(interfaces[index].split())==1:
+ ip = interfaces[index].split()
+ ip_list = ip_list + " " + ip[0]
+ index += 1
+ return ip_list
+
+interfaces = subprocess.check_output("/usr/libexec/vyos/op_mode/show_interfaces.py --action=show-brief", shell=True).decode('utf-8').splitlines()
+del interfaces[:3]
+lines_count=len(interfaces)
+index=0
+while index<lines_count:
+ line=interfaces[index].split()
+ if len(line)>1:
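+ # emit one InfluxDB line protocol record per interface (integer fields,
+ # nanosecond timestamp)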
+ print(f'show_interfaces,interface={line[0]} '
+ f'ip_addresses="{gen_ip_list(index,interfaces)}",'
+ f'state={status_to_int(line[2][0])}i,'
+ f'link={status_to_int(line[2][2])}i,'
+ f'description="{description_check(line)}" '
+ f'{str(int(time.time()))}000000000')
+ index += 1
diff --git a/src/etc/telegraf/custom_scripts/vyos_services_input_filter.py b/src/etc/telegraf/custom_scripts/vyos_services_input_filter.py
new file mode 100755
index 000000000..df4eed131
--- /dev/null
+++ b/src/etc/telegraf/custom_scripts/vyos_services_input_filter.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import time
+from vyos.configquery import ConfigTreeQuery
+from vyos.util import is_systemd_service_running, process_named_running
+
+# Available services and processes
+# key   - VyOS CLI configuration path of the service
+# value - process name or systemd unit used to check whether it is running
+services = {
+ "protocols bgp" : "bgpd",
+ "protocols ospf" : "ospfd",
+ "protocols ospfv3" : "ospf6d",
+ "protocols rip" : "ripd",
+ "protocols ripng" : "ripngd",
+ "protocols isis" : "isisd",
+ "service pppoe" : "accel-ppp@pppoe.service",
+ "vpn l2tp remote-access" : "accel-ppp@l2tp.service",
+ "vpn pptp remote-access" : "accel-ppp@pptp.service",
+ "vpn sstp" : "accel-ppp@sstp.service",
+ "vpn ipsec" : "charon"
+}
+
+# Configured services
+conf_services = {
+ 'zebra' : 0,
+ 'staticd' : 0,
+}
+# Get configured services and build the list of processes/units to check
+config = ConfigTreeQuery()
+for service in services:
+ if config.exists(service):
+ conf_services[services[service]] = 0
+
+for conf_service in conf_services:
+ status = 0
+ if ".service" in conf_service:
+ # Check systemd service
+ if is_systemd_service_running(conf_service):
+ status = 1
+ else:
+ # Check process
+ if process_named_running(conf_service):
+ status = 1
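+ # emit the result in InfluxDB line protocol (nanosecond timestamp)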
+ print(f'vyos_services,service="{conf_service}" '
+ f'status={str(status)}i {str(int(time.time()))}000000000')
diff --git a/src/helpers/vyos-boot-config-loader.py b/src/helpers/vyos-boot-config-loader.py
index c5bf22f10..b9cc87bfa 100755
--- a/src/helpers/vyos-boot-config-loader.py
+++ b/src/helpers/vyos-boot-config-loader.py
@@ -23,12 +23,12 @@ import grp
import traceback
from datetime import datetime
-from vyos.defaults import directories
+from vyos.defaults import directories, config_status
from vyos.configsession import ConfigSession, ConfigSessionError
from vyos.configtree import ConfigTree
from vyos.util import cmd
-STATUS_FILE = '/tmp/vyos-config-status'
+STATUS_FILE = config_status
TRACE_FILE = '/tmp/boot-config-trace'
CFG_GROUP = 'vyattacfg'
diff --git a/src/helpers/vyos_net_name b/src/helpers/vyos_net_name
index e21d8c9ff..afeef8f2d 100755
--- a/src/helpers/vyos_net_name
+++ b/src/helpers/vyos_net_name
@@ -25,14 +25,13 @@ from sys import argv
from vyos.configtree import ConfigTree
from vyos.defaults import directories
-from vyos.util import cmd
+from vyos.util import cmd, boot_configuration_complete
vyos_udev_dir = directories['vyos_udev_dir']
vyos_log_dir = '/run/udev/log'
vyos_log_file = os.path.join(vyos_log_dir, 'vyos-net-name')
config_path = '/opt/vyatta/etc/config/config.boot'
-config_status = '/tmp/vyos-config-status'
lock = threading.Lock()
@@ -43,13 +42,6 @@ except FileExistsError:
logging.basicConfig(filename=vyos_log_file, level=logging.DEBUG)
-def boot_configuration_complete() -> bool:
- """ Check if vyos-router has completed, hence hotplug event
- """
- if os.path.isfile(config_status):
- return True
- return False
-
def is_available(intfs: dict, intf_name: str) -> bool:
""" Check if interface name is already assigned
"""
diff --git a/src/migration-scripts/flow-accounting/0-to-1 b/src/migration-scripts/flow-accounting/0-to-1
new file mode 100755
index 000000000..72cce77b0
--- /dev/null
+++ b/src/migration-scripts/flow-accounting/0-to-1
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# T4099: flow-accounting: sync "source-ip" and "source-address" between netflow
+# and sflow in the CLI
+# T4105: flow-accounting: drop "sflow agent-address auto"
+
+from sys import argv
+from vyos.configtree import ConfigTree
+
+if len(argv) < 2:
+ print("Must specify file name!")
+ exit(1)
+
+file_name = argv[1]
+
+with open(file_name, 'r') as f:
+ config_file = f.read()
+
+base = ['system', 'flow-accounting']
+config = ConfigTree(config_file)
+
+if not config.exists(base):
+ # Nothing to do
+ exit(0)
+
+# T4099
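+# rename "netflow source-ip" to "source-address" so that netflow and sflow
+# use the same node name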
+tmp = base + ['netflow', 'source-ip']
+if config.exists(tmp):
+ config.rename(tmp, 'source-address')
+
+# T4105
+tmp = base + ['sflow', 'agent-address']
+if config.exists(tmp):
+ value = config.return_value(tmp)
+ if value == 'auto':
+ # delete the "auto"
+ config.delete(tmp)
+
+ # 1) check if BGP router-id is set
+ # 2) check if OSPF router-id is set
+ # 3) check if OSPFv3 router-id is set
+ router_id = None
+ for protocol in ['bgp', 'ospf', 'ospfv3']:
+ if config.exists(['protocols', protocol, 'parameters', 'router-id']):
+ router_id = config.return_value(['protocols', protocol, 'parameters', 'router-id'])
+ break
+ if router_id:
+ config.set(tmp, value=router_id)
+
+try:
+ with open(file_name, 'w') as f:
+ f.write(config.to_string())
+except OSError as e:
+ print("Failed to save the modified config: {}".format(e))
+ exit(1)
diff --git a/src/op_mode/conntrack_sync.py b/src/op_mode/conntrack_sync.py
index 66ecf8439..89f6df4b9 100755
--- a/src/op_mode/conntrack_sync.py
+++ b/src/op_mode/conntrack_sync.py
@@ -20,12 +20,15 @@ import xmltodict
from argparse import ArgumentParser
from vyos.configquery import CliShellApiConfigQuery
+from vyos.configquery import ConfigTreeQuery
+from vyos.util import call
from vyos.util import cmd
from vyos.util import run
from vyos.template import render_to_string
conntrackd_bin = '/usr/sbin/conntrackd'
conntrackd_config = '/run/conntrackd/conntrackd.conf'
+failover_state_file = '/var/run/vyatta-conntrackd-failover-state'
parser = ArgumentParser(description='Conntrack Sync')
group = parser.add_mutually_exclusive_group()
@@ -36,6 +39,8 @@ group.add_argument('--show-internal', help='Show internal (main) tracking cache'
group.add_argument('--show-external', help='Show external (main) tracking cache', action='store_true')
group.add_argument('--show-internal-expect', help='Show internal (expect) tracking cache', action='store_true')
group.add_argument('--show-external-expect', help='Show external (expect) tracking cache', action='store_true')
+group.add_argument('--show-statistics', help='Show connection syncing statistics', action='store_true')
+group.add_argument('--show-status', help='Show conntrack-sync status', action='store_true')
def is_configured():
""" Check if conntrack-sync service is configured """
@@ -131,6 +136,46 @@ if __name__ == '__main__':
out = cmd(f'sudo {conntrackd_bin} -C {conntrackd_config} {opt} -x')
xml_to_stdout(out)
+ elif args.show_statistics:
+ is_configured()
+ config = ConfigTreeQuery()
+ print('\nMain Table Statistics:\n')
+ call(f'sudo {conntrackd_bin} -C {conntrackd_config} -s')
+ print()
+ if config.exists(['service', 'conntrack-sync', 'expect-sync']):
+ print('\nExpect Table Statistics:\n')
+ call(f'sudo {conntrackd_bin} -C {conntrackd_config} -s exp')
+ print()
+
+ elif args.show_status:
+ is_configured()
+ config = ConfigTreeQuery()
+ ct_sync_intf = config.list_nodes(['service', 'conntrack-sync', 'interface'])
+ ct_sync_intf = ', '.join(ct_sync_intf)
+ failover_state = "no transition yet!"
+ expect_sync_protocols = "disabled"
+
+ if config.exists(['service', 'conntrack-sync', 'failover-mechanism', 'vrrp']):
+ failover_mechanism = "vrrp"
+ vrrp_sync_grp = config.value(['service', 'conntrack-sync', 'failover-mechanism', 'vrrp', 'sync-group'])
+
+ if os.path.isfile(failover_state_file):
+ with open(failover_state_file, "r") as f:
+ failover_state = f.readline().rstrip()
+
+ if config.exists(['service', 'conntrack-sync', 'expect-sync']):
+ expect_sync_protocols = config.values(['service', 'conntrack-sync', 'expect-sync'])
+ if 'all' in expect_sync_protocols:
+ expect_sync_protocols = ["ftp", "sip", "h323", "nfs", "sqlnet"]
+ expect_sync_protocols = ', '.join(expect_sync_protocols)
+
+ show_status = (f'\nsync-interface : {ct_sync_intf}\n'
+ f'failover-mechanism : {failover_mechanism} [sync-group {vrrp_sync_grp}]\n'
+ f'last state transition : {failover_state}\n'
+ f'ExpectationSync : {expect_sync_protocols}')
+
+ print(show_status)
+
else:
parser.print_help()
exit(1)
diff --git a/src/op_mode/restart_frr.py b/src/op_mode/restart_frr.py
index 109c8dd7b..e5014452f 100755
--- a/src/op_mode/restart_frr.py
+++ b/src/op_mode/restart_frr.py
@@ -138,7 +138,7 @@ def _reload_config(daemon):
# define program arguments
cmd_args_parser = argparse.ArgumentParser(description='restart frr daemons')
cmd_args_parser.add_argument('--action', choices=['restart'], required=True, help='action to frr daemons')
-cmd_args_parser.add_argument('--daemon', choices=['bfdd', 'bgpd', 'ospfd', 'ospf6d', 'isisd', 'ripd', 'ripngd', 'staticd', 'zebra'], required=False, nargs='*', help='select single or multiple daemons')
+cmd_args_parser.add_argument('--daemon', choices=['bfdd', 'bgpd', 'ldpd', 'ospfd', 'ospf6d', 'isisd', 'ripd', 'ripngd', 'staticd', 'zebra'], required=False, nargs='*', help='select single or multiple daemons')
# parse arguments
cmd_args = cmd_args_parser.parse_args()
diff --git a/src/op_mode/show_nat_rules.py b/src/op_mode/show_nat_rules.py
index d68def26a..98adb31dd 100755
--- a/src/op_mode/show_nat_rules.py
+++ b/src/op_mode/show_nat_rules.py
@@ -32,7 +32,7 @@ args = parser.parse_args()
if args.source or args.destination:
tmp = cmd('sudo nft -j list table ip nat')
tmp = json.loads(tmp)
-
+
format_nat_rule = '{0: <10} {1: <50} {2: <50} {3: <10}'
print(format_nat_rule.format("Rule", "Source" if args.source else "Destination", "Translation", "Outbound Interface" if args.source else "Inbound Interface"))
print(format_nat_rule.format("----", "------" if args.source else "-----------", "-----------", "------------------" if args.source else "-----------------"))
@@ -40,7 +40,7 @@ if args.source or args.destination:
data_json = jmespath.search('nftables[?rule].rule[?chain]', tmp)
for idx in range(0, len(data_json)):
data = data_json[idx]
-
+
# The following key values must exist
# When the rule JSON does not have some keys, this is not a rule we can work with
continue_rule = False
@@ -50,9 +50,9 @@ if args.source or args.destination:
continue
if continue_rule:
continue
-
+
comment = data['comment']
-
+
# Check the annotation to see if the annotation format is created by VYOS
continue_rule = True
for comment_prefix in ['SRC-NAT-', 'DST-NAT-']:
@@ -60,7 +60,7 @@ if args.source or args.destination:
continue_rule = False
if continue_rule:
continue
-
+
rule = int(''.join(list(filter(str.isdigit, comment))))
chain = data['chain']
if not ((args.source and chain == 'POSTROUTING') or (not args.source and chain == 'PREROUTING')):
@@ -88,7 +88,7 @@ if args.source or args.destination:
else:
port_range = srcdest_json['set'][0]['range']
srcdest += 'port ' + str(port_range[0]) + '-' + str(port_range[1]) + ' '
-
+
tran_addr_json = dict_search('snat' if args.source else 'dnat', data['expr'][i])
if tran_addr_json:
if isinstance(tran_addr_json['addr'],str):
@@ -98,10 +98,10 @@ if args.source or args.destination:
len_tmp = dict_search('snat.addr.prefix.len' if args.source else 'dnat.addr.prefix.len', data['expr'][3])
if addr_tmp and len_tmp:
tran_addr += addr_tmp + '/' + str(len_tmp) + ' '
-
+
if isinstance(tran_addr_json['port'],int):
- tran_addr += 'port ' + tran_addr_json['port']
-
+ tran_addr += 'port ' + str(tran_addr_json['port'])
+
else:
if 'masquerade' in data['expr'][i]:
tran_addr = 'masquerade'
@@ -112,10 +112,10 @@ if args.source or args.destination:
srcdests.append(srcdest)
srcdest = ''
print(format_nat_rule.format(rule, srcdests[0], tran_addr, interface))
-
+
for i in range(1, len(srcdests)):
print(format_nat_rule.format(' ', srcdests[i], ' ', ' '))
-
+
exit(0)
else:
parser.print_help()
diff --git a/src/services/api/graphql/README.graphql b/src/services/api/graphql/README.graphql
index a3c30b005..1133d79ed 100644
--- a/src/services/api/graphql/README.graphql
+++ b/src/services/api/graphql/README.graphql
@@ -1,7 +1,12 @@
+The following examples are given in the form entered in the GraphQL
+'playground', which is found at:
+
+https://{{ host_address }}/graphql
+
Example using GraphQL mutations to configure a DHCP server:
-This assumes that the http-api is running:
+All examples assume that the http-api is running:
'set service https api'
@@ -58,8 +63,8 @@ N.B. fileName can be empty (fileName: "") or data can be empty (data: {}) to
save to /config/config.boot; to save to an alternative path, specify
fileName.
-Similarly, using the same 'endpoint' (meaning the form of the request and
-resolver; the actual enpoint for all GraphQL requests is
+Similarly, using an analogous 'endpoint' (meaning the form of the request
+and resolver; the actual endpoint for all GraphQL requests is
https://hostname/graphql), one can load an arbitrary config file from a
path.
@@ -75,7 +80,7 @@ mutation {
Op-mode 'show' commands may be requested by path, e.g.:
-mutation {
+query {
Show (data: {path: ["interfaces", "ethernet", "detail"]}) {
success
errors
@@ -88,16 +93,58 @@ mutation {
N.B. to see the output the 'data' field 'result' must be present in the
request.
-The GraphQL playground will be found at:
+Mutations to manipulate firewall address groups:
-https://{{ host_address }}/graphql
+mutation {
+ CreateFirewallAddressGroup (data: {name: "ADDR-GRP", address: "10.0.0.1"}) {
+ success
+ errors
+ }
+}
+
+mutation {
+ UpdateFirewallAddressGroupMembers (data: {name: "ADDR-GRP",
+ address: ["10.0.0.1-10.0.0.8", "192.168.0.1"]}) {
+ success
+ errors
+ }
+}
+
+mutation {
+ RemoveFirewallAddressGroupMembers (data: {name: "ADDR-GRP",
+ address: "192.168.0.1"}) {
+ success
+ errors
+ }
+}
+
+N.B. The schemas for the above specify that 'address' be of the form 'list of
+strings' (SDL type [String!]! for UpdateFirewallAddressGroupMembers, where
+the ! indicates that the input is required; SDL type [String] in
+CreateFirewallAddressGroup, since a group may be created without any
+addresses). However, notice that a single string may be passed without being
+a member of a list, in which case the specification allows for 'input
+coercion':
+
+http://spec.graphql.org/October2021/#sec-Scalars.Input-Coercion
+
+Similarly, IPv6 versions of the above:
-An equivalent curl command to the first example above would be:
+CreateFirewallAddressIpv6Group
+UpdateFirewallAddressIpv6GroupMembers
+RemoveFirewallAddressIpv6GroupMembers
+
+
+Instead of using the GraphQL playground, an equivalent curl command to the
+first example above would be:
curl -k 'https://192.168.100.168/graphql' -H 'Content-Type: application/json' --data-binary '{"query": "mutation {createInterfaceEthernet (data: {interface: \"eth1\", address: \"192.168.0.1/24\", description: \"BOB\"}) {success errors data {address}}}"}'
Note that the 'mutation' term is prefaced by 'query' in the curl command.
+Curl equivalents may be copied from within the GraphQL playground using the
+'copy curl' button.
+
What's here:
services
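
For reference, a rough Python equivalent of the curl example in the README
above, posting the op-mode 'Show' query instead of a mutation (illustrative
only: the host address is the placeholder used in the README, and the
'requests' package is assumed to be available):

#!/usr/bin/env python3
# Illustrative sketch: POST a GraphQL 'Show' query to the /graphql endpoint,
# mirroring the curl example in README.graphql. verify=False corresponds to
# curl's -k for a self-signed certificate.
import requests

query = '''
query {
  Show (data: {path: ["interfaces", "ethernet", "detail"]}) {
    success
    errors
    data { result }
  }
}
'''

resp = requests.post('https://192.168.100.168/graphql',
                     json={'query': query}, verify=False)
print(resp.json())
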
diff --git a/src/services/api/graphql/bindings.py b/src/services/api/graphql/bindings.py
index 1fbe13d0c..84d719fda 100644
--- a/src/services/api/graphql/bindings.py
+++ b/src/services/api/graphql/bindings.py
@@ -1,4 +1,20 @@
+# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
import vyos.defaults
+from . graphql.queries import query
from . graphql.mutations import mutation
from . graphql.directives import directives_dict
from ariadne import make_executable_schema, load_schema_from_path, snake_case_fallback_resolvers
@@ -8,6 +24,6 @@ def generate_schema():
type_defs = load_schema_from_path(api_schema_dir)
- schema = make_executable_schema(type_defs, mutation, snake_case_fallback_resolvers, directives=directives_dict)
+ schema = make_executable_schema(type_defs, query, mutation, snake_case_fallback_resolvers, directives=directives_dict)
return schema
diff --git a/src/services/api/graphql/graphql/directives.py b/src/services/api/graphql/graphql/directives.py
index 10bc522db..0a9298f55 100644
--- a/src/services/api/graphql/graphql/directives.py
+++ b/src/services/api/graphql/graphql/directives.py
@@ -1,4 +1,20 @@
+# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
from ariadne import SchemaDirectiveVisitor, ObjectType
+from . queries import *
from . mutations import *
def non(arg):
diff --git a/src/services/api/graphql/graphql/mutations.py b/src/services/api/graphql/graphql/mutations.py
index 8e5aab56d..0c3eb702a 100644
--- a/src/services/api/graphql/graphql/mutations.py
+++ b/src/services/api/graphql/graphql/mutations.py
@@ -1,3 +1,17 @@
+# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
from importlib import import_module
from typing import Any, Dict
@@ -10,7 +24,7 @@ from api.graphql.recipes.session import Session
mutation = ObjectType("Mutation")
-def make_resolver(mutation_name, class_name, session_func):
+def make_mutation_resolver(mutation_name, class_name, session_func):
"""Dynamically generate a resolver for the mutation named in the
schema by 'mutation_name'.
@@ -66,34 +80,20 @@ def make_resolver(mutation_name, class_name, session_func):
return func_impl
-def make_configure_resolver(mutation_name):
- class_name = mutation_name
- return make_resolver(mutation_name, class_name, 'configure')
+def make_prefix_resolver(mutation_name, prefix=[]):
+ for pre in prefix:
+ Pre = pre.capitalize()
+ if Pre in mutation_name:
+ class_name = mutation_name.replace(Pre, '', 1)
+ return make_mutation_resolver(mutation_name, class_name, pre)
+ raise Exception
-def make_show_config_resolver(mutation_name):
+def make_configure_resolver(mutation_name):
class_name = mutation_name
- return make_resolver(mutation_name, class_name, 'show_config')
+ return make_mutation_resolver(mutation_name, class_name, 'configure')
def make_config_file_resolver(mutation_name):
- if 'Save' in mutation_name:
- class_name = mutation_name.replace('Save', '', 1)
- return make_resolver(mutation_name, class_name, 'save')
- elif 'Load' in mutation_name:
- class_name = mutation_name.replace('Load', '', 1)
- return make_resolver(mutation_name, class_name, 'load')
- else:
- raise Exception
-
-def make_show_resolver(mutation_name):
- class_name = mutation_name
- return make_resolver(mutation_name, class_name, 'show')
+ return make_prefix_resolver(mutation_name, prefix=['save', 'load'])
def make_image_resolver(mutation_name):
- if 'Add' in mutation_name:
- class_name = mutation_name.replace('Add', '', 1)
- return make_resolver(mutation_name, class_name, 'add')
- elif 'Delete' in mutation_name:
- class_name = mutation_name.replace('Delete', '', 1)
- return make_resolver(mutation_name, class_name, 'delete')
- else:
- raise Exception
+ return make_prefix_resolver(mutation_name, prefix=['add', 'delete'])
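
To make the refactoring above easier to follow, a standalone sketch (not part
of the change itself) of the name mapping performed by make_prefix_resolver:
the capitalized prefix is stripped from the mutation name to obtain the recipe
class name, and the lower-case prefix selects the Session method.

# Standalone sketch of the prefix-to-class mapping used by make_prefix_resolver().
def split_prefix(mutation_name, prefixes):
    for pre in prefixes:
        Pre = pre.capitalize()
        if Pre in mutation_name:
            return mutation_name.replace(Pre, '', 1), pre
    raise ValueError(f'no known prefix in {mutation_name}')

print(split_prefix('SaveConfigFile', ['save', 'load']))    # ('ConfigFile', 'save')
print(split_prefix('AddSystemImage', ['add', 'delete']))   # ('SystemImage', 'add')
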
diff --git a/src/services/api/graphql/graphql/queries.py b/src/services/api/graphql/graphql/queries.py
new file mode 100644
index 000000000..e1868091e
--- /dev/null
+++ b/src/services/api/graphql/graphql/queries.py
@@ -0,0 +1,89 @@
+# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from importlib import import_module
+from typing import Any, Dict
+from ariadne import ObjectType, convert_kwargs_to_snake_case, convert_camel_case_to_snake
+from graphql import GraphQLResolveInfo
+from makefun import with_signature
+
+from .. import state
+from api.graphql.recipes.session import Session
+
+query = ObjectType("Query")
+
+def make_query_resolver(query_name, class_name, session_func):
+ """Dynamically generate a resolver for the query named in the
+ schema by 'query_name'.
+
+ Dynamic generation is provided using the package 'makefun' (via the
+ decorator 'with_signature'), which provides signature-preserving
+ function wrappers; it provides several improvements over, say,
+ functools.wraps.
+
+ :raise Exception:
+ raising ConfigErrors, or internal errors
+ """
+
+ func_base_name = convert_camel_case_to_snake(class_name)
+ resolver_name = f'resolve_{func_base_name}'
+ func_sig = '(obj: Any, info: GraphQLResolveInfo, data: Dict)'
+
+ @query.field(query_name)
+ @convert_kwargs_to_snake_case
+ @with_signature(func_sig, func_name=resolver_name)
+ async def func_impl(*args, **kwargs):
+ try:
+ if 'data' not in kwargs:
+ return {
+ "success": False,
+ "errors": ['missing data']
+ }
+
+ data = kwargs['data']
+ session = state.settings['app'].state.vyos_session
+
+ # one may override the session functions with a local subclass
+ try:
+ mod = import_module(f'api.graphql.recipes.{func_base_name}')
+ klass = getattr(mod, class_name)
+ except ImportError:
+            # otherwise, dynamically generate a subclass to invoke the
+            # class-name-based templates
+ klass = type(class_name, (Session,), {})
+ k = klass(session, data)
+ method = getattr(k, session_func)
+ result = method()
+ data['result'] = result
+
+ return {
+ "success": True,
+ "data": data
+ }
+ except Exception as error:
+ return {
+ "success": False,
+ "errors": [str(error)]
+ }
+
+ return func_impl
+
+def make_show_config_resolver(query_name):
+ class_name = query_name
+ return make_query_resolver(query_name, class_name, 'show_config')
+
+def make_show_resolver(query_name):
+ class_name = query_name
+ return make_query_resolver(query_name, class_name, 'show')
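
A minimal, self-contained sketch of the signature-preserving wrapping
described in the docstring above; it assumes only that the 'makefun' package
is installed, and the names are illustrative.

import inspect
from makefun import with_signature

# Give a generic *args/**kwargs body a fixed public name and signature,
# as make_query_resolver() does for each generated resolver.
@with_signature('(obj, info, data)', func_name='resolve_example')
def impl(*args, **kwargs):
    # whatever the wrapper receives is forwarded to this generic body
    return args, kwargs

print(impl.__name__)             # resolve_example
print(inspect.signature(impl))   # (obj, info, data)
print(impl(None, None, data={'path': []}))
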
diff --git a/src/services/api/graphql/graphql/schema/firewall_group.graphql b/src/services/api/graphql/graphql/schema/firewall_group.graphql
index efe7de632..d89904b9e 100644
--- a/src/services/api/graphql/graphql/schema/firewall_group.graphql
+++ b/src/services/api/graphql/graphql/schema/firewall_group.graphql
@@ -45,3 +45,51 @@ type RemoveFirewallAddressGroupMembersResult {
success: Boolean!
errors: [String]
}
+
+input CreateFirewallAddressIpv6GroupInput {
+ name: String!
+ address: [String]
+}
+
+type CreateFirewallAddressIpv6Group {
+ name: String!
+ address: [String]
+}
+
+type CreateFirewallAddressIpv6GroupResult {
+ data: CreateFirewallAddressIpv6Group
+ success: Boolean!
+ errors: [String]
+}
+
+input UpdateFirewallAddressIpv6GroupMembersInput {
+ name: String!
+ address: [String!]!
+}
+
+type UpdateFirewallAddressIpv6GroupMembers {
+ name: String!
+ address: [String!]!
+}
+
+type UpdateFirewallAddressIpv6GroupMembersResult {
+ data: UpdateFirewallAddressIpv6GroupMembers
+ success: Boolean!
+ errors: [String]
+}
+
+input RemoveFirewallAddressIpv6GroupMembersInput {
+ name: String!
+ address: [String!]!
+}
+
+type RemoveFirewallAddressIpv6GroupMembers {
+ name: String!
+ address: [String!]!
+}
+
+type RemoveFirewallAddressIpv6GroupMembersResult {
+ data: RemoveFirewallAddressIpv6GroupMembers
+ success: Boolean!
+ errors: [String]
+}
diff --git a/src/services/api/graphql/graphql/schema/schema.graphql b/src/services/api/graphql/graphql/schema/schema.graphql
index c6899bee6..952e46f34 100644
--- a/src/services/api/graphql/graphql/schema/schema.graphql
+++ b/src/services/api/graphql/graphql/schema/schema.graphql
@@ -3,26 +3,28 @@ schema {
mutation: Mutation
}
-type Query {
- _dummy: String
-}
-
directive @configure on FIELD_DEFINITION
directive @configfile on FIELD_DEFINITION
directive @show on FIELD_DEFINITION
directive @showconfig on FIELD_DEFINITION
directive @image on FIELD_DEFINITION
+type Query {
+ Show(data: ShowInput) : ShowResult @show
+ ShowConfig(data: ShowConfigInput) : ShowConfigResult @showconfig
+}
+
type Mutation {
CreateDhcpServer(data: DhcpServerConfigInput) : CreateDhcpServerResult @configure
CreateInterfaceEthernet(data: InterfaceEthernetConfigInput) : CreateInterfaceEthernetResult @configure
CreateFirewallAddressGroup(data: CreateFirewallAddressGroupInput) : CreateFirewallAddressGroupResult @configure
UpdateFirewallAddressGroupMembers(data: UpdateFirewallAddressGroupMembersInput) : UpdateFirewallAddressGroupMembersResult @configure
RemoveFirewallAddressGroupMembers(data: RemoveFirewallAddressGroupMembersInput) : RemoveFirewallAddressGroupMembersResult @configure
+ CreateFirewallAddressIpv6Group(data: CreateFirewallAddressIpv6GroupInput) : CreateFirewallAddressIpv6GroupResult @configure
+ UpdateFirewallAddressIpv6GroupMembers(data: UpdateFirewallAddressIpv6GroupMembersInput) : UpdateFirewallAddressIpv6GroupMembersResult @configure
+ RemoveFirewallAddressIpv6GroupMembers(data: RemoveFirewallAddressIpv6GroupMembersInput) : RemoveFirewallAddressIpv6GroupMembersResult @configure
SaveConfigFile(data: SaveConfigFileInput) : SaveConfigFileResult @configfile
LoadConfigFile(data: LoadConfigFileInput) : LoadConfigFileResult @configfile
- Show(data: ShowInput) : ShowResult @show
- ShowConfig(data: ShowConfigInput) : ShowConfigResult @showconfig
AddSystemImage(data: AddSystemImageInput) : AddSystemImageResult @image
DeleteSystemImage(data: DeleteSystemImageInput) : DeleteSystemImageResult @image
}
diff --git a/src/services/api/graphql/recipes/remove_firewall_address_group_members.py b/src/services/api/graphql/recipes/remove_firewall_address_group_members.py
index cde30c27a..b91932e14 100644
--- a/src/services/api/graphql/recipes/remove_firewall_address_group_members.py
+++ b/src/services/api/graphql/recipes/remove_firewall_address_group_members.py
@@ -1,3 +1,17 @@
+# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
from . session import Session
diff --git a/src/services/api/graphql/recipes/session.py b/src/services/api/graphql/recipes/session.py
index 5ece78ee6..1f844ff70 100644
--- a/src/services/api/graphql/recipes/session.py
+++ b/src/services/api/graphql/recipes/session.py
@@ -1,3 +1,18 @@
+# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
import json
from ariadne import convert_camel_case_to_snake
diff --git a/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl b/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl
new file mode 100644
index 000000000..e9b660722
--- /dev/null
+++ b/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl
@@ -0,0 +1,4 @@
+set firewall group ipv6-address-group {{ name }}
+{% for add in address %}
+set firewall group ipv6-address-group {{ name }} address {{ add }}
+{% endfor %}
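
A condensed jinja2 rendering sketch of the template above, showing the
configuration commands it produces (whitespace control aside; the group name
and addresses are placeholder values):

from jinja2 import Template

# Inline, condensed copy of create_firewall_address_ipv_6_group.tmpl
tmpl = Template(
    'set firewall group ipv6-address-group {{ name }}\n'
    '{% for add in address %}'
    'set firewall group ipv6-address-group {{ name }} address {{ add }}\n'
    '{% endfor %}'
)
print(tmpl.render(name='ADDR-GRP', address=['2001:db8::1', '2001:db8::/64']))
# set firewall group ipv6-address-group ADDR-GRP
# set firewall group ipv6-address-group ADDR-GRP address 2001:db8::1
# set firewall group ipv6-address-group ADDR-GRP address 2001:db8::/64
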
diff --git a/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl
new file mode 100644
index 000000000..0efa0b226
--- /dev/null
+++ b/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl
@@ -0,0 +1,3 @@
+{% for add in address %}
+delete firewall group ipv6-address-group {{ name }} address {{ add }}
+{% endfor %}
diff --git a/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl
new file mode 100644
index 000000000..f98a5517c
--- /dev/null
+++ b/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl
@@ -0,0 +1,3 @@
+{% for add in address %}
+set firewall group ipv6-address-group {{ name }} address {{ add }}
+{% endfor %}
diff --git a/src/services/vyos-configd b/src/services/vyos-configd
index 670b6e66a..48c9135e2 100755
--- a/src/services/vyos-configd
+++ b/src/services/vyos-configd
@@ -28,6 +28,7 @@ import zmq
from contextlib import contextmanager
from vyos.defaults import directories
+from vyos.util import boot_configuration_complete
from vyos.configsource import ConfigSourceString, ConfigSourceError
from vyos.config import Config
from vyos import ConfigError
@@ -186,7 +187,7 @@ def initialization(socket):
session_out = None
# if not a 'live' session, for example on boot, write to file
- if not session_out or not os.path.isfile('/tmp/vyos-config-status'):
+ if not session_out or not boot_configuration_complete():
session_out = script_stdout_log
session_mode = 'a'
diff --git a/src/services/vyos-hostsd b/src/services/vyos-hostsd
index f4b1d0fc2..df9f18d2d 100755
--- a/src/services/vyos-hostsd
+++ b/src/services/vyos-hostsd
@@ -139,6 +139,27 @@
# }
#
#
+### authoritative_zones
+## Additional zones hosted authoritatively by pdns-recursor.
+## We add NTAs (negative trust anchors) for these zones but do not do much else here.
+#
+# { 'type': 'authoritative_zones',
+# 'op': 'add',
+# 'data': ['<str zone>', ...]
+# }
+#
+# { 'type': 'authoritative_zones',
+# 'op': 'delete',
+# 'data': ['<str zone>', ...]
+# }
+#
+# { 'type': 'authoritative_zones',
+# 'op': 'get',
+# }
+# response:
+# { 'data': ['<str zone>', ...] }
+#
+#
### search_domains
#
# { 'type': 'search_domains',
@@ -255,6 +276,7 @@ STATE = {
"name_server_tags_recursor": [],
"name_server_tags_system": [],
"forward_zones": {},
+ "authoritative_zones": [],
"hosts": {},
"host_name": "vyos",
"domain_name": "",
@@ -267,7 +289,8 @@ base_schema = Schema({
Required('op'): Any('add', 'delete', 'set', 'get', 'apply'),
'type': Any('name_servers',
'name_server_tags_recursor', 'name_server_tags_system',
- 'forward_zones', 'search_domains', 'hosts', 'host_name'),
+ 'forward_zones', 'authoritative_zones', 'search_domains',
+ 'hosts', 'host_name'),
'data': Any(list, dict),
'tag': str,
'tag_regex': str
@@ -347,6 +370,11 @@ msg_schema_map = {
'delete': data_list_schema,
'get': op_type_schema
},
+ 'authoritative_zones': {
+ 'add': data_list_schema,
+ 'delete': data_list_schema,
+ 'get': op_type_schema
+ },
'search_domains': {
'add': data_dict_list_schema,
'delete': data_list_schema,
@@ -522,7 +550,7 @@ def handle_message(msg):
data = get_option(msg, 'data')
if _type in ['name_servers', 'forward_zones', 'search_domains', 'hosts']:
delete_items_from_dict(STATE[_type], data)
- elif _type in ['name_server_tags_recursor', 'name_server_tags_system']:
+ elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'authoritative_zones']:
delete_items_from_list(STATE[_type], data)
else:
raise ValueError(f'Operation "{op}" unknown data type "{_type}"')
@@ -534,7 +562,7 @@ def handle_message(msg):
elif _type in ['forward_zones', 'hosts']:
add_items_to_dict(STATE[_type], data)
# maybe we need to rec_control clear-nta each domain that was removed here?
- elif _type in ['name_server_tags_recursor', 'name_server_tags_system']:
+ elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'authoritative_zones']:
add_items_to_list(STATE[_type], data)
else:
raise ValueError(f'Operation "{op}" unknown data type "{_type}"')
@@ -550,7 +578,7 @@ def handle_message(msg):
if _type in ['name_servers', 'search_domains', 'hosts']:
tag_regex = get_option(msg, 'tag_regex')
result = get_items_from_dict_regex(STATE[_type], tag_regex)
- elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'forward_zones']:
+ elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'forward_zones', 'authoritative_zones']:
result = STATE[_type]
else:
raise ValueError(f'Operation "{op}" unknown data type "{_type}"')
diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server
index aa7ac6708..06871f1d6 100755
--- a/src/services/vyos-http-api-server
+++ b/src/services/vyos-http-api-server
@@ -32,6 +32,7 @@ from fastapi.responses import HTMLResponse
from fastapi.exceptions import RequestValidationError
from fastapi.routing import APIRoute
from pydantic import BaseModel, StrictStr, validator
+from starlette.middleware.cors import CORSMiddleware
from starlette.datastructures import FormData
from starlette.formparsers import FormParser, MultiPartParser
from multipart.multipart import parse_options_header
@@ -610,13 +611,19 @@ def show_op(data: ShowModel):
# GraphQL integration
###
-from api.graphql.bindings import generate_schema
+def graphql_init(fast_api_app):
+ from api.graphql.bindings import generate_schema
-api.graphql.state.init()
+ api.graphql.state.init()
+ api.graphql.state.settings['app'] = app
-schema = generate_schema()
+ schema = generate_schema()
-app.add_route('/graphql', GraphQL(schema, debug=True))
+ if app.state.vyos_origins:
+ origins = app.state.vyos_origins
+ app.add_route('/graphql', CORSMiddleware(GraphQL(schema, debug=True), allow_origins=origins, allow_methods=("GET", "POST", "OPTIONS")))
+ else:
+ app.add_route('/graphql', GraphQL(schema, debug=True))
###
@@ -640,15 +647,20 @@ if __name__ == '__main__':
app.state.vyos_session = config_session
app.state.vyos_keys = server_config['api_keys']
- app.state.vyos_debug = bool(server_config['debug'] == 'true')
- app.state.vyos_strict = bool(server_config['strict'] == 'true')
+ app.state.vyos_debug = server_config['debug']
+ app.state.vyos_strict = server_config['strict']
+ app.state.vyos_origins = server_config.get('cors', {}).get('origins', [])
- api.graphql.state.settings['app'] = app
+ graphql_init(app)
try:
- uvicorn.run(app, host=server_config["listen_address"],
- port=int(server_config["port"]),
- proxy_headers=True)
+ if not server_config['socket']:
+ uvicorn.run(app, host=server_config["listen_address"],
+ port=int(server_config["port"]),
+ proxy_headers=True)
+ else:
+ uvicorn.run(app, uds="/run/api.sock",
+ proxy_headers=True)
except OSError as err:
logger.critical(f"OSError {err}")
sys.exit(1)
diff --git a/src/systemd/keepalived.service b/src/systemd/keepalived.service
new file mode 100644
index 000000000..a462d8614
--- /dev/null
+++ b/src/systemd/keepalived.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Keepalive Daemon (LVS and VRRP)
+After=vyos-router.service
+# Only start if there is a configuration file
+ConditionFileNotEmpty=/run/keepalived/keepalived.conf
+
+[Service]
+KillMode=process
+Type=simple
+# Read configuration variable file if it is present
+ExecStart=/usr/sbin/keepalived --use-file /run/keepalived/keepalived.conf --pid /run/keepalived/keepalived.pid --dont-fork --snmp
+ExecReload=/bin/kill -HUP $MAINPID
+PIDFile=/run/keepalived/keepalived.pid
diff --git a/src/systemd/vyos-hostsd.service b/src/systemd/vyos-hostsd.service
index b77335778..4da55f518 100644
--- a/src/systemd/vyos-hostsd.service
+++ b/src/systemd/vyos-hostsd.service
@@ -7,7 +7,7 @@ DefaultDependencies=no
# Seemingly sensible way to say "as early as the system is ready"
# All vyos-hostsd needs is read/write mounted root
-After=systemd-remount-fs.service
+After=systemd-remount-fs.service cloud-init.service
[Service]
WorkingDirectory=/run/vyos-hostsd
diff --git a/src/systemd/vyos-http-api.service b/src/systemd/vyos-http-api.service
deleted file mode 100644
index 55370b356..000000000
--- a/src/systemd/vyos-http-api.service
+++ /dev/null
@@ -1,21 +0,0 @@
-[Unit]
-Description=VyOS HTTP API service
-After=vyos-router.service
-Requires=vyos-router.service
-
-[Service]
-ExecStart=/usr/libexec/vyos/services/vyos-http-api-server
-Type=idle
-
-SyslogIdentifier=vyos-http-api
-SyslogFacility=daemon
-
-Restart=on-failure
-
-# Does't work but leave it here
-User=root
-Group=vyattacfg
-
-[Install]
-WantedBy=vyos.target
-
diff --git a/src/tests/test_validate.py b/src/tests/test_validate.py
index b43dbd97e..68a257d25 100644
--- a/src/tests/test_validate.py
+++ b/src/tests/test_validate.py
@@ -30,8 +30,12 @@ class TestVyOSValidate(TestCase):
self.assertFalse(vyos.validate.is_ipv6_link_local('169.254.0.1'))
self.assertTrue(vyos.validate.is_ipv6_link_local('fe80::'))
self.assertTrue(vyos.validate.is_ipv6_link_local('fe80::affe:1'))
+ self.assertTrue(vyos.validate.is_ipv6_link_local('fe80::affe:1%eth0'))
self.assertFalse(vyos.validate.is_ipv6_link_local('2001:db8::'))
+ self.assertFalse(vyos.validate.is_ipv6_link_local('2001:db8::%eth0'))
self.assertFalse(vyos.validate.is_ipv6_link_local('VyOS'))
+ self.assertFalse(vyos.validate.is_ipv6_link_local('::1'))
+ self.assertFalse(vyos.validate.is_ipv6_link_local('::1%lo'))
def test_is_ipv6_link_local(self):
self.assertTrue(vyos.validate.is_loopback_addr('127.0.0.1'))
diff --git a/src/validators/ipv4-multicast b/src/validators/ipv4-multicast
index e5cbc9532..5465c728d 100755
--- a/src/validators/ipv4-multicast
+++ b/src/validators/ipv4-multicast
@@ -1,3 +1,3 @@
#!/bin/sh
-ipaddrcheck --is-ipv4-multicast $1
+ipaddrcheck --is-ipv4-multicast $1 && ipaddrcheck --is-ipv4-single $1
diff --git a/src/validators/ipv6-link-local b/src/validators/ipv6-link-local
new file mode 100755
index 000000000..05e693b77
--- /dev/null
+++ b/src/validators/ipv6-link-local
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+
+import sys
+from vyos.validate import is_ipv6_link_local
+
+if __name__ == '__main__':
+ if len(sys.argv)>1:
+ addr = sys.argv[1]
+ if not is_ipv6_link_local(addr):
+ sys.exit(1)
+
+ sys.exit(0)
diff --git a/src/validators/ipv6-multicast b/src/validators/ipv6-multicast
index 66cd90c9c..5afc437e5 100755
--- a/src/validators/ipv6-multicast
+++ b/src/validators/ipv6-multicast
@@ -1,3 +1,3 @@
#!/bin/sh
-ipaddrcheck --is-ipv6-multicast $1
+ipaddrcheck --is-ipv6-multicast $1 && ipaddrcheck --is-ipv6-single $1
diff --git a/src/validators/range b/src/validators/range
new file mode 100755
index 000000000..d4c25f3c4
--- /dev/null
+++ b/src/validators/range
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2021 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import sys
+import argparse
+
+class MalformedRange(Exception):
+ pass
+
+def validate_range(value, min=None, max=None):
+ try:
+ lower, upper = re.match(r'^(\d+)-(\d+)$', value).groups()
+
+ lower, upper = int(lower), int(upper)
+
+        if lower > upper:
+            raise MalformedRange("the lower bound exceeds the upper bound")
+
+ if min is not None:
+ if lower < min:
+ raise MalformedRange("the lower bound must not be less than {}".format(min))
+
+ if max is not None:
+ if upper > max:
+ raise MalformedRange("the upper bound must not be greater than {}".format(max))
+
+ except (AttributeError, ValueError):
+ raise MalformedRange("range syntax error")
+
+parser = argparse.ArgumentParser(description='Range validator.')
+parser.add_argument('--min', type=int, action='store')
+parser.add_argument('--max', type=int, action='store')
+parser.add_argument('value', action='store')
+
+if __name__ == '__main__':
+ args = parser.parse_args()
+
+ try:
+ validate_range(args.value, min=args.min, max=args.max)
+ except MalformedRange as e:
+ print("Incorrect range '{}': {}".format(args.value, e))
+ sys.exit(1)
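
A brief usage sketch for the validate_range() helper above (it assumes the
definitions from this validator are in scope; the values are illustrative):

try:
    validate_range('1000-2000', min=1, max=65535)   # accepted, returns None
    validate_range('2000-1000', min=1, max=65535)   # raises MalformedRange
except MalformedRange as e:
    print(e)   # the lower bound exceeds the upper bound
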
diff --git a/src/validators/script b/src/validators/script
index 1d8a27e5c..4ffdeb2a0 100755
--- a/src/validators/script
+++ b/src/validators/script
@@ -36,7 +36,7 @@ if __name__ == '__main__':
# File outside the config dir is just a warning
if not vyos.util.file_is_persistent(script):
- sys.exit(
- f'Warning: file {path} is outside the / config directory\n'
+        print(
+            f'Warning: file {script} is outside the "/config" directory\n'
'It will not be automatically migrated to a new image on system update'
)