Diffstat (limited to 'python')
-rw-r--r--  python/vyos/configdict.py               495
-rw-r--r--  python/vyos/configsession.py              4
-rw-r--r--  python/vyos/configverify.py              18
-rw-r--r--  python/vyos/defaults.py                   1
-rw-r--r--  python/vyos/frr.py                      551
-rw-r--r--  python/vyos/frrender.py                 176
-rw-r--r--  python/vyos/utils/misc.py                 2
-rwxr-xr-x  python/vyos/xml_ref/generate_cache.py     6
8 files changed, 694 insertions(+), 559 deletions(-)
diff --git a/python/vyos/configdict.py b/python/vyos/configdict.py
index 5a353b110..cbcbf9f72 100644
--- a/python/vyos/configdict.py
+++ b/python/vyos/configdict.py
@@ -19,6 +19,7 @@ A library for retrieving value dicts from VyOS configs in a declarative fashion.
import os
import json
+from vyos.defaults import frr_debug_enable
from vyos.utils.dict import dict_search
from vyos.utils.process import cmd
@@ -664,3 +665,497 @@ def get_accel_dict(config, base, chap_secrets, with_pki=False):
dict['authentication']['radius']['server'][server]['acct_port'] = '0'
return dict
+
+def get_frrender_dict(conf, argv=None) -> dict:
+ from copy import deepcopy
+ from vyos.config import config_dict_merge
+ from vyos.frrender import frr_protocols
+
+ # Create an empty dictionary which is filled step by step below and then
+ # returned to the caller
+ dict = {}
+
+ if argv and len(argv) > 1:
+ dict['vrf_context'] = argv[1]
+
+ def dict_helper_ospf_defaults(ospf, path):
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to merge into the retrieved dictionary.
+ default_values = conf.get_config_defaults(path, key_mangling=('-', '_'),
+ get_first_key=True, recursive=True)
+
+ # We have to cleanup the default dict, as default values could enable features
+ # which are not explicitly enabled on the CLI. Example: default-information
+ # originate comes with a default metric-type of 2, which will enable the
+ # entire default-information originate tree, even when not set via CLI, so we
+ # need to check this first and drop that key if necessary.
+ if dict_search('default_information.originate', ospf) is None:
+ del default_values['default_information']
+ if 'mpls_te' not in ospf:
+ del default_values['mpls_te']
+ if 'graceful_restart' not in ospf:
+ del default_values['graceful_restart']
+ for area_num in default_values.get('area', []):
+ if dict_search(f'area.{area_num}.area_type.nssa', ospf) is None:
+ del default_values['area'][area_num]['area_type']['nssa']
+
+ for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'rip', 'static']:
+ if dict_search(f'redistribute.{protocol}', ospf) is None:
+ del default_values['redistribute'][protocol]
+ if not bool(default_values['redistribute']):
+ del default_values['redistribute']
+
+ for interface in ospf.get('interface', []):
+ # We need to reload the defaults on every pass b/c of
+ # hello-multiplier dependency on dead-interval
+ # If hello-multiplier is set, we need to remove the default from
+ # dead-interval.
+ if 'hello_multiplier' in ospf['interface'][interface]:
+ del default_values['interface'][interface]['dead_interval']
+
+ ospf = config_dict_merge(default_values, ospf)
+ return ospf
+
+ def dict_helper_ospfv3_defaults(ospfv3, path):
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to merge into the retrieved dictionary.
+ default_values = conf.get_config_defaults(path, key_mangling=('-', '_'),
+ get_first_key=True, recursive=True)
+
+ # We have to cleanup the default dict, as default values could enable features
+ # which are not explicitly enabled on the CLI. Example: default-information
+ # originate comes with a default metric-type of 2, which will enable the
+ # entire default-information originate tree, even when not set via CLI, so we
+ # need to check this first and drop that key if necessary.
+ if dict_search('default_information.originate', ospfv3) is None:
+ del default_values['default_information']
+ if 'graceful_restart' not in ospfv3:
+ del default_values['graceful_restart']
+
+ for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'ripng', 'static']:
+ if dict_search(f'redistribute.{protocol}', ospfv3) is None:
+ del default_values['redistribute'][protocol]
+ if not bool(default_values['redistribute']):
+ del default_values['redistribute']
+
+ default_values.pop('interface', {})
+
+ # merge in remaining default values
+ ospfv3 = config_dict_merge(default_values, ospfv3)
+ return ospfv3
+
+ def dict_helper_pim_defaults(pim, path):
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to merge into the retrieved dictionary.
+ default_values = conf.get_config_defaults(path, key_mangling=('-', '_'),
+ get_first_key=True, recursive=True)
+
+ # We have to cleanup the default dict, as default values could enable features
+ # which are not explicitly enabled on the CLI.
+ for interface in pim.get('interface', []):
+ if 'igmp' not in pim['interface'][interface]:
+ del default_values['interface'][interface]['igmp']
+
+ pim = config_dict_merge(default_values, pim)
+ return pim
+
+ # Ethernet and bonding interfaces can participate in EVPN, which is configured via FRR
+ tmp = {}
+ for if_type in ['ethernet', 'bonding']:
+ interface_path = ['interfaces', if_type]
+ if not conf.exists(interface_path):
+ continue
+ for interface in conf.list_nodes(interface_path):
+ evpn_path = interface_path + [interface, 'evpn']
+ if not conf.exists(evpn_path):
+ continue
+
+ evpn = conf.get_config_dict(evpn_path, key_mangling=('-', '_'))
+ tmp.update({interface : evpn})
+ # At least one participating EVPN interface found, add to result dict
+ if tmp: dict['interfaces'] = tmp
+
+ # Zebra prefix exchange for Kernel IP/IPv6 and routing protocols
+ for ip_version in ['ip', 'ipv6']:
+ ip_cli_path = ['system', ip_version]
+ ip_dict = conf.get_config_dict(ip_cli_path, key_mangling=('-', '_'),
+ get_first_key=True, with_recursive_defaults=True)
+ if ip_dict:
+ ip_dict['afi'] = ip_version
+ dict.update({ip_version : ip_dict})
+
+ # Enable SNMP agentx support
+ # SNMP AgentX support cannot be disabled once enabled
+ if conf.exists(['service', 'snmp']):
+ dict['snmp'] = {}
+
+ # We will always need the policy key
+ dict['policy'] = conf.get_config_dict(['policy'], key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # We need to check the CLI if the BABEL node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ babel_cli_path = ['protocols', 'babel']
+ if conf.exists(babel_cli_path):
+ babel = conf.get_config_dict(babel_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ with_recursive_defaults=True)
+ dict.update({'babel' : babel})
+
+ # We need to check the CLI if the BFD node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ bfd_cli_path = ['protocols', 'bfd']
+ if conf.exists(bfd_cli_path):
+ bfd = conf.get_config_dict(bfd_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True)
+ dict.update({'bfd' : bfd})
+
+ # We need to check the CLI if the BGP node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ bgp_cli_path = ['protocols', 'bgp']
+ if conf.exists(bgp_cli_path):
+ bgp = conf.get_config_dict(bgp_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True)
+ bgp['dependent_vrfs'] = {}
+ dict.update({'bgp' : bgp})
+ elif conf.exists_effective(bgp_cli_path):
+ dict.update({'bgp' : {'deleted' : '', 'dependent_vrfs' : {}}})
+
+ # We need to check the CLI if the EIGRP node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ eigrp_cli_path = ['protocols', 'eigrp']
+ if conf.exists(eigrp_cli_path):
+ eigrp = conf.get_config_dict(eigrp_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True)
+ dict.update({'eigrp' : eigrp})
+ elif conf.exists_effective(eigrp_cli_path):
+ dict.update({'eigrp' : {'deleted' : ''}})
+
+ # We need to check the CLI if the ISIS node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ isis_cli_path = ['protocols', 'isis']
+ if conf.exists(isis_cli_path):
+ isis = conf.get_config_dict(isis_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True)
+ dict.update({'isis' : isis})
+ elif conf.exists_effective(isis_cli_path):
+ dict.update({'isis' : {'deleted' : ''}})
+
+ # We need to check the CLI if the MPLS node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ mpls_cli_path = ['protocols', 'mpls']
+ if conf.exists(mpls_cli_path):
+ mpls = conf.get_config_dict(mpls_cli_path, key_mangling=('-', '_'),
+ get_first_key=True)
+ dict.update({'mpls' : mpls})
+ elif conf.exists_effective(mpls_cli_path):
+ dict.update({'mpls' : {'deleted' : ''}})
+
+ # We need to check the CLI if the OPENFABRIC node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ openfabric_cli_path = ['protocols', 'openfabric']
+ if conf.exists(openfabric_cli_path):
+ openfabric = conf.get_config_dict(openfabric_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+ dict.update({'openfabric' : openfabric})
+ elif conf.exists_effective(openfabric_cli_path):
+ dict.update({'openfabric' : {'deleted' : ''}})
+
+ # We need to check the CLI if the OSPF node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ ospf_cli_path = ['protocols', 'ospf']
+ if conf.exists(ospf_cli_path):
+ ospf = conf.get_config_dict(ospf_cli_path, key_mangling=('-', '_'),
+ get_first_key=True)
+ ospf = dict_helper_ospf_defaults(ospf, ospf_cli_path)
+ dict.update({'ospf' : ospf})
+ elif conf.exists_effective(ospf_cli_path):
+ dict.update({'ospf' : {'deleted' : ''}})
+
+ # We need to check the CLI if the OSPFv3 node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ ospfv3_cli_path = ['protocols', 'ospfv3']
+ if conf.exists(ospfv3_cli_path):
+ ospfv3 = conf.get_config_dict(ospfv3_cli_path, key_mangling=('-', '_'),
+ get_first_key=True)
+ ospfv3 = dict_helper_ospfv3_defaults(ospfv3, ospfv3_cli_path)
+ dict.update({'ospfv3' : ospfv3})
+ elif conf.exists_effective(ospfv3_cli_path):
+ dict.update({'ospfv3' : {'deleted' : ''}})
+
+ # We need to check the CLI if the PIM node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ pim_cli_path = ['protocols', 'pim']
+ if conf.exists(pim_cli_path):
+ pim = conf.get_config_dict(pim_cli_path, key_mangling=('-', '_'),
+ get_first_key=True)
+ pim = dict_helper_pim_defaults(pim, pim_cli_path)
+ dict.update({'pim' : pim})
+ elif conf.exists_effective(pim_cli_path):
+ dict.update({'pim' : {'deleted' : ''}})
+
+ # We need to check the CLI if the PIM6 node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ pim6_cli_path = ['protocols', 'pim6']
+ if conf.exists(pim6_cli_path):
+ pim6 = conf.get_config_dict(pim6_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ with_recursive_defaults=True)
+ dict.update({'pim6' : pim6})
+ elif conf.exists_effective(pim6_cli_path):
+ dict.update({'pim6' : {'deleted' : ''}})
+
+ # We need to check the CLI if the RIP node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ rip_cli_path = ['protocols', 'rip']
+ if conf.exists(rip_cli_path):
+ rip = conf.get_config_dict(rip_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ with_recursive_defaults=True)
+ dict.update({'rip' : rip})
+ elif conf.exists_effective(rip_cli_path):
+ dict.update({'rip' : {'deleted' : ''}})
+
+ # We need to check the CLI if the RIPng node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ ripng_cli_path = ['protocols', 'ripng']
+ if conf.exists(ripng_cli_path):
+ ripng = conf.get_config_dict(ripng_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ with_recursive_defaults=True)
+ dict.update({'ripng' : ripng})
+ elif conf.exists_effective(ripng_cli_path):
+ dict.update({'ripng' : {'deleted' : ''}})
+
+ # We need to check the CLI if the RPKI node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ rpki_cli_path = ['protocols', 'rpki']
+ if conf.exists(rpki_cli_path):
+ rpki = conf.get_config_dict(rpki_cli_path, key_mangling=('-', '_'),
+ get_first_key=True, with_pki=True,
+ with_recursive_defaults=True)
+ rpki_ssh_key_base = '/run/frr/id_rpki'
+ for cache, cache_config in rpki.get('cache',{}).items():
+ if 'ssh' in cache_config:
+ cache_config['ssh']['public_key_file'] = f'{rpki_ssh_key_base}_{cache}.pub'
+ cache_config['ssh']['private_key_file'] = f'{rpki_ssh_key_base}_{cache}'
+ dict.update({'rpki' : rpki})
+ elif conf.exists_effective(rpki_cli_path):
+ dict.update({'rpki' : {'deleted' : ''}})
+
+ # We need to check the CLI if the Segment Routing node is present and thus load in
+ # all the default values present on the CLI - that's why we have if conf.exists()
+ sr_cli_path = ['protocols', 'segment-routing']
+ if conf.exists(sr_cli_path):
+ sr = conf.get_config_dict(sr_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True,
+ with_recursive_defaults=True)
+ dict.update({'segment_routing' : sr})
+ elif conf.exists_effective(sr_cli_path):
+ dict.update({'segment_routing' : {'deleted' : ''}})
+
+ # We need to check the CLI if the static node is present and thus load in
+ # all the default values present on the CLI - that's why we have if conf.exists()
+ static_cli_path = ['protocols', 'static']
+ if conf.exists(static_cli_path):
+ static = conf.get_config_dict(static_cli_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # T3680 - get a list of all interfaces currently configured to use DHCP
+ tmp = get_dhcp_interfaces(conf)
+ if tmp: static.update({'dhcp' : tmp})
+ tmp = get_pppoe_interfaces(conf)
+ if tmp: static.update({'pppoe' : tmp})
+
+ dict.update({'static' : static})
+ elif conf.exists_effective(static_cli_path):
+ dict.update({'static' : {'deleted' : ''}})
+
+ # keep a reusable copy of the default VRF BGP configuration, used below as a dependency for the per-VRF instances
+ dependent_vrfs_default = {}
+ if 'bgp' in dict:
+ dependent_vrfs_default = deepcopy(dict['bgp'])
+ # we do not need to nest the 'dependent_vrfs' key - simply remove it
+ if 'dependent_vrfs' in dependent_vrfs_default:
+ del dependent_vrfs_default['dependent_vrfs']
+
+ vrf_cli_path = ['vrf', 'name']
+ if conf.exists(vrf_cli_path):
+ vrf = conf.get_config_dict(vrf_cli_path, key_mangling=('-', '_'),
+ get_first_key=False,
+ no_tag_node_value_mangle=True)
+ # We do not have any VRF related default values on the CLI. The defaults will only
+ # come into play under the protocols tree, thus we can safely merge them with the
+ # appropriate routing protocols
+ for vrf_name, vrf_config in vrf['name'].items():
+ bgp_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'bgp']
+ if 'bgp' in vrf_config.get('protocols', []):
+ # We have gathered the dict representation of the CLI, but there are default
+ # options which we need to merge into the retrieved dictionary.
+ default_values = conf.get_config_defaults(bgp_vrf_path, key_mangling=('-', '_'),
+ get_first_key=True, recursive=True)
+
+ # merge in remaining default values
+ vrf_config['protocols']['bgp'] = config_dict_merge(default_values,
+ vrf_config['protocols']['bgp'])
+
+ # Add this BGP VRF instance as dependency into the default VRF
+ if 'bgp' in dict:
+ dict['bgp']['dependent_vrfs'].update({vrf_name : deepcopy(vrf_config)})
+
+ vrf_config['protocols']['bgp']['dependent_vrfs'] = conf.get_config_dict(
+ vrf_cli_path, key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ # We can safely delete ourselves from the dependent VRF list
+ if vrf_name in vrf_config['protocols']['bgp']['dependent_vrfs']:
+ del vrf_config['protocols']['bgp']['dependent_vrfs'][vrf_name]
+
+ # Add dependency on possible existing default VRF to this VRF
+ if 'bgp' in dict:
+ vrf_config['protocols']['bgp']['dependent_vrfs'].update({'default': {'protocols': {
+ 'bgp': dependent_vrfs_default}}})
+ elif conf.exists_effective(bgp_vrf_path):
+ # Add this BGP VRF instance as dependency into the default VRF
+ tmp = {'deleted' : '', 'dependent_vrfs': deepcopy(vrf['name'])}
+ # We can safely delete ourselves from the dependent VRF list
+ if vrf_name in tmp['dependent_vrfs']:
+ del tmp['dependent_vrfs'][vrf_name]
+
+ # Add dependency on possible existing default VRF to this VRF
+ if 'bgp' in dict:
+ tmp['dependent_vrfs'].update({'default': {'protocols': {
+ 'bgp': dependent_vrfs_default}}})
+
+ if 'bgp' in dict:
+ dict['bgp']['dependent_vrfs'].update({vrf_name : {'protocols': tmp} })
+
+ if 'protocols' not in vrf['name'][vrf_name]:
+ vrf['name'][vrf_name].update({'protocols': {'bgp' : tmp}})
+ else:
+ vrf['name'][vrf_name]['protocols'].update({'bgp' : tmp})
+
+ # We need to check the CLI if the EIGRP node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ eigrp_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'eigrp']
+ if 'eigrp' in vrf_config.get('protocols', []):
+ eigrp = conf.get_config_dict(eigrp_vrf_path, key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True)
+ vrf['name'][vrf_name]['protocols'].update({'eigrp' : eigrp})
+ elif conf.exists_effective(eigrp_vrf_path):
+ vrf['name'][vrf_name]['protocols'].update({'eigrp' : {'deleted' : ''}})
+
+ # We need to check the CLI if the ISIS node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ isis_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'isis']
+ if 'isis' in vrf_config.get('protocols', []):
+ isis = conf.get_config_dict(isis_vrf_path, key_mangling=('-', '_'), get_first_key=True,
+ no_tag_node_value_mangle=True, with_recursive_defaults=True)
+ vrf['name'][vrf_name]['protocols'].update({'isis' : isis})
+ elif conf.exists_effective(isis_vrf_path):
+ vrf['name'][vrf_name]['protocols'].update({'isis' : {'deleted' : ''}})
+
+ # We need to check the CLI if the OSPF node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ ospf_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'ospf']
+ if 'ospf' in vrf_config.get('protocols', []):
+ ospf = conf.get_config_dict(ospf_vrf_path, key_mangling=('-', '_'), get_first_key=True)
+ ospf = dict_helper_ospf_defaults(vrf_config['protocols']['ospf'], ospf_vrf_path)
+ vrf['name'][vrf_name]['protocols'].update({'ospf' : ospf})
+ elif conf.exists_effective(ospf_vrf_path):
+ vrf['name'][vrf_name]['protocols'].update({'ospf' : {'deleted' : ''}})
+
+ # We need to check the CLI if the OSPFv3 node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ ospfv3_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'ospfv3']
+ if 'ospfv3' in vrf_config.get('protocols', []):
+ ospfv3 = conf.get_config_dict(ospfv3_vrf_path, key_mangling=('-', '_'), get_first_key=True)
+ ospfv3 = dict_helper_ospfv3_defaults(vrf_config['protocols']['ospfv3'], ospfv3_vrf_path)
+ vrf['name'][vrf_name]['protocols'].update({'ospfv3' : ospfv3})
+ elif conf.exists_effective(ospfv3_vrf_path):
+ vrf['name'][vrf_name]['protocols'].update({'ospfv3' : {'deleted' : ''}})
+
+ # We need to check the CLI if the static node is present and thus load in all the default
+ # values present on the CLI - that's why we have if conf.exists()
+ static_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'static']
+ if 'static' in vrf_config.get('protocols', []):
+ static = conf.get_config_dict(static_vrf_path, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+ # T3680 - get a list of all interfaces currently configured to use DHCP
+ tmp = get_dhcp_interfaces(conf, vrf_name)
+ if tmp: static.update({'dhcp' : tmp})
+ tmp = get_pppoe_interfaces(conf, vrf_name)
+ if tmp: static.update({'pppoe' : tmp})
+
+ vrf['name'][vrf_name]['protocols'].update({'static': static})
+ elif conf.exists_effective(static_vrf_path):
+ vrf['name'][vrf_name]['protocols'].update({'static': {'deleted' : ''}})
+
+ vrf_vni_path = ['vrf', 'name', vrf_name, 'vni']
+ if conf.exists(vrf_vni_path):
+ vrf_config.update({'vni': conf.return_value(vrf_vni_path)})
+
+ dict.update({'vrf' : vrf})
+ elif conf.exists_effective(vrf_cli_path):
+ effective_vrf = conf.get_config_dict(vrf_cli_path, key_mangling=('-', '_'),
+ get_first_key=False,
+ no_tag_node_value_mangle=True,
+ effective=True)
+ vrf = {'name' : {}}
+ for vrf_name, vrf_config in effective_vrf.get('name', {}).items():
+ vrf['name'].update({vrf_name : {}})
+ for protocol in frr_protocols:
+ if protocol in vrf_config.get('protocols', []):
+ # Create initial protocols key if not present
+ if 'protocols' not in vrf['name'][vrf_name]:
+ vrf['name'][vrf_name].update({'protocols' : {}})
+ # All routing protocols are deleted when we pass this point
+ tmp = {'deleted' : ''}
+
+ # Special treatment for BGP routing protocol
+ if protocol == 'bgp':
+ tmp['dependent_vrfs'] = {}
+ if 'name' in vrf:
+ tmp['dependent_vrfs'] = conf.get_config_dict(
+ vrf_cli_path, key_mangling=('-', '_'),
+ get_first_key=True, no_tag_node_value_mangle=True,
+ effective=True)
+ # Add dependency on possible existing default VRF to this VRF
+ if 'bgp' in dict:
+ tmp['dependent_vrfs'].update({'default': {'protocols': {
+ 'bgp': dependent_vrfs_default}}})
+ # We can safely delete ourselves from the dependent VRF list
+ if vrf_name in tmp['dependent_vrfs']:
+ del tmp['dependent_vrfs'][vrf_name]
+
+ # Update VRF related dict
+ vrf['name'][vrf_name]['protocols'].update({protocol : tmp})
+
+ dict.update({'vrf' : vrf})
+
+ if os.path.exists(frr_debug_enable):
+ print('======== < BEGIN > ==========')
+ import pprint
+ pprint.pprint(dict)
+ print('========= < END > ===========')
+
+ # Use singleton instance of the FRR render class
+ if hasattr(conf, 'frrender_cls'):
+ frrender = getattr(conf, 'frrender_cls')
+ dict.update({'frrender_cls' : frrender})
+ frrender.generate(dict)
+
+ return dict
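
Taken together with the new FRRender class (added in python/vyos/frrender.py below), a protocol conf-mode script could consume get_frrender_dict() roughly as follows. This is an illustrative sketch only - the get_config()/apply() function names merely follow the usual VyOS conf-mode script layout and are not part of this diff:

```
from vyos.config import Config
from vyos.configdict import get_frrender_dict
from vyos.frrender import FRRender

def get_config(config=None):
    conf = config if config else Config()
    # Share a single FRRender instance via the Config object so that
    # get_frrender_dict() hands the assembled dict to generate()
    if not hasattr(conf, 'frrender_cls'):
        setattr(conf, 'frrender_cls', FRRender())
    return get_frrender_dict(conf)

def apply(config_dict):
    # Reload the rendered /run/frr/config/frr.conf through frr-reload.py
    if 'frrender_cls' in config_dict:
        config_dict['frrender_cls'].apply()
    return None
```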
diff --git a/python/vyos/configsession.py b/python/vyos/configsession.py
index 90b96b88c..dd3ad1e3d 100644
--- a/python/vyos/configsession.py
+++ b/python/vyos/configsession.py
@@ -23,8 +23,8 @@ from vyos.utils.process import is_systemd_service_running
from vyos.utils.dict import dict_to_paths
CLI_SHELL_API = '/bin/cli-shell-api'
-SET = '/opt/vyatta/sbin/my_set'
-DELETE = '/opt/vyatta/sbin/my_delete'
+SET = '/usr/libexec/vyos/vyconf/vy_set'
+DELETE = '/usr/libexec/vyos/vyconf/vy_delete'
COMMENT = '/opt/vyatta/sbin/my_comment'
COMMIT = '/opt/vyatta/sbin/my_commit'
DISCARD = '/opt/vyatta/sbin/my_discard'
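
The public ConfigSession API is untouched by this swap from the legacy my_set/my_delete helpers to the vyconf-based vy_set/vy_delete binaries; existing callers keep working unchanged. A minimal usage sketch for context (not part of this diff):

```
import os
from vyos.configsession import ConfigSession

# Path arguments end up being passed to the (now vyconf-based) set/delete backends
session = ConfigSession(os.getpid())
session.set(['interfaces', 'dummy', 'dum0', 'address'], value='192.0.2.1/32')
session.commit()
session.delete(['interfaces', 'dummy', 'dum0'])
session.commit()
```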
diff --git a/python/vyos/configverify.py b/python/vyos/configverify.py
index 92996f2ee..4084425b1 100644
--- a/python/vyos/configverify.py
+++ b/python/vyos/configverify.py
@@ -420,7 +420,7 @@ def verify_common_route_maps(config):
continue
tmp = config[route_map]
# Check if the specified route-map exists, if not error out
- if dict_search(f'policy.route-map.{tmp}', config) == None:
+ if dict_search(f'policy.route_map.{tmp}', config) == None:
raise ConfigError(f'Specified route-map "{tmp}" does not exist!')
if 'redistribute' in config:
@@ -434,7 +434,7 @@ def verify_route_map(route_map_name, config):
recurring validation if a specified route-map exists!
"""
# Check if the specified route-map exists, if not error out
- if dict_search(f'policy.route-map.{route_map_name}', config) == None:
+ if dict_search(f'policy.route_map.{route_map_name}', config) == None:
raise ConfigError(f'Specified route-map "{route_map_name}" does not exist!')
def verify_prefix_list(prefix_list, config, version=''):
@@ -443,7 +443,7 @@ def verify_prefix_list(prefix_list, config, version=''):
recurring validation if a specified prefix-list exists!
"""
# Check if the specified prefix-list exists, if not error out
- if dict_search(f'policy.prefix-list{version}.{prefix_list}', config) == None:
+ if dict_search(f'policy.prefix_list{version}.{prefix_list}', config) == None:
raise ConfigError(f'Specified prefix-list{version} "{prefix_list}" does not exist!')
def verify_access_list(access_list, config, version=''):
@@ -452,7 +452,7 @@ def verify_access_list(access_list, config, version=''):
recurring validation if a specified prefix-list exists!
"""
# Check if the specified ACL exists, if not error out
- if dict_search(f'policy.access-list{version}.{access_list}', config) == None:
+ if dict_search(f'policy.access_list{version}.{access_list}', config) == None:
raise ConfigError(f'Specified access-list{version} "{access_list}" does not exist!')
def verify_pki_certificate(config: dict, cert_name: str, no_password_protected: bool=False):
@@ -537,3 +537,13 @@ def verify_eapol(config: dict):
if 'ca_certificate' in config['eapol']:
for ca_cert in config['eapol']['ca_certificate']:
verify_pki_ca_certificate(config, ca_cert)
+
+def has_frr_protocol_in_dict(config_dict: dict, protocol: str) -> bool:
+ vrf = None
+ if config_dict and 'vrf_context' in config_dict:
+ vrf = config_dict['vrf_context']
+ if vrf and protocol in (dict_search(f'vrf.name.{vrf}.protocols', config_dict) or []):
+ return True
+ if config_dict and protocol in config_dict:
+ return True
+ return False
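
A sketch of how a conf-mode script might combine the new helper with the 'vrf_context' key that get_frrender_dict() stores when a VRF name is passed via argv (the verify() function shown is illustrative, not part of this diff):

```
from vyos.configverify import has_frr_protocol_in_dict

def verify(config_dict):
    if not has_frr_protocol_in_dict(config_dict, 'ospf'):
        return None
    # OSPF is present either in the default VRF or in the VRF currently being
    # processed (config_dict['vrf_context']) - protocol specific checks go here
    return None
```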
diff --git a/python/vyos/defaults.py b/python/vyos/defaults.py
index 425990967..9757a34df 100644
--- a/python/vyos/defaults.py
+++ b/python/vyos/defaults.py
@@ -42,6 +42,7 @@ directories = {
config_status = '/tmp/vyos-config-status'
api_config_state = '/run/http-api-state'
+frr_debug_enable = '/tmp/vyos.frr.debug'
cfg_group = 'vyattacfg'
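
The new frr_debug_enable marker file is what both get_frrender_dict() and the FRRender class check before printing the assembled dict and the rendered FRR configuration. Toggling it only requires creating or removing the file (illustrative sketch):

```
from pathlib import Path
from vyos.defaults import frr_debug_enable

Path(frr_debug_enable).touch()    # enable FRR debug output for subsequent commits
Path(frr_debug_enable).unlink()   # disable it again
```

Note that frrender.py evaluates DEBUG_ON once at import time, so the toggle takes effect on the next script invocation rather than mid-run.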
diff --git a/python/vyos/frr.py b/python/vyos/frr.py
deleted file mode 100644
index 6fb81803f..000000000
--- a/python/vyos/frr.py
+++ /dev/null
@@ -1,551 +0,0 @@
-# Copyright 2020-2024 VyOS maintainers and contributors <maintainers@vyos.io>
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with this library. If not, see <http://www.gnu.org/licenses/>.
-
-r"""
-A Library for interracting with the FRR daemon suite.
-It supports simple configuration manipulation and loading using the official tools
-supplied with FRR (vtysh and frr-reload)
-
-All configuration management and manipulation is done using strings and regex.
-
-
-Example Usage
-#####
-
-# Reading configuration from frr:
-```
->>> original_config = get_configuration()
->>> repr(original_config)
-'!\nfrr version 7.3.1\nfrr defaults traditional\nhostname debian\n......
-```
-
-
-# Modify a configuration section:
-```
->>> new_bgp_section = 'router bgp 65000\n neighbor 192.0.2.1 remote-as 65000\n'
->>> modified_config = replace_section(original_config, new_bgp_section, replace_re=r'router bgp \d+')
->>> repr(modified_config)
-'............router bgp 65000\n neighbor 192.0.2.1 remote-as 65000\n...........'
-```
-
-Remove a configuration section:
-```
->>> modified_config = remove_section(original_config, r'router ospf')
-```
-
-Test the new configuration:
-```
->>> try:
->>> mark_configuration(modified configuration)
->>> except ConfigurationNotValid as e:
->>> print('resulting configuration is not valid')
->>> sys.exit(1)
-```
-
-Apply the new configuration:
-```
->>> try:
->>> replace_configuration(modified_config)
->>> except CommitError as e:
->>> print('Exception while commiting the supplied configuration')
->>> print(e)
->>> exit(1)
-```
-"""
-
-import tempfile
-import re
-
-from vyos import ConfigError
-from vyos.utils.process import cmd
-from vyos.utils.process import popen
-from vyos.utils.process import STDOUT
-
-import logging
-from logging.handlers import SysLogHandler
-import os
-import sys
-
-LOG = logging.getLogger(__name__)
-DEBUG = False
-
-ch = SysLogHandler(address='/dev/log')
-ch2 = logging.StreamHandler(stream=sys.stdout)
-LOG.addHandler(ch)
-LOG.addHandler(ch2)
-
-_frr_daemons = ['zebra', 'staticd', 'bgpd', 'ospfd', 'ospf6d', 'ripd', 'ripngd',
- 'isisd', 'pimd', 'pim6d', 'ldpd', 'eigrpd', 'babeld', 'bfdd', 'fabricd']
-
-path_vtysh = '/usr/bin/vtysh'
-path_frr_reload = '/usr/lib/frr/frr-reload.py'
-path_config = '/run/frr'
-
-default_add_before = r'(ip prefix-list .*|route-map .*|line vty|end)'
-
-
-class FrrError(Exception):
- pass
-
-
-class ConfigurationNotValid(FrrError):
- """
- The configuratioin supplied to vtysh is not valid
- """
- pass
-
-
-class CommitError(FrrError):
- """
- Commiting the supplied configuration failed to commit by a unknown reason
- see commit error and/or run mark_configuration on the specified configuration
- to se error generated
-
- used by: reload_configuration()
- """
- pass
-
-
-class ConfigSectionNotFound(FrrError):
- """
- Removal of configuration failed because it is not existing in the supplied configuration
- """
- pass
-
-def init_debugging():
- global DEBUG
-
- DEBUG = os.path.exists('/tmp/vyos.frr.debug')
- if DEBUG:
- LOG.setLevel(logging.DEBUG)
-
-def get_configuration(daemon=None, marked=False):
- """ Get current running FRR configuration
- daemon: Collect only configuration for the specified FRR daemon,
- supplying daemon=None retrieves the complete configuration
- marked: Mark the configuration with "end" tags
-
- return: string containing the running configuration from frr
-
- """
- if daemon and daemon not in _frr_daemons:
- raise ValueError(f'The specified daemon type is not supported {repr(daemon)}')
-
- cmd = f"{path_vtysh} -c 'show run'"
- if daemon:
- cmd += f' -d {daemon}'
-
- output, code = popen(cmd, stderr=STDOUT)
- if code:
- raise OSError(code, output)
-
- config = output.replace('\r', '')
- # Remove first header lines from FRR config
- config = config.split("\n", 3)[-1]
- # Mark the configuration with end tags
- if marked:
- config = mark_configuration(config)
-
- return config
-
-
-def mark_configuration(config):
- """ Add end marks and Test the configuration for syntax faults
- If the configuration is valid a marked version of the configuration is returned,
- or else it failes with a ConfigurationNotValid Exception
-
- config: The configuration string to mark/test
- return: The marked configuration from FRR
- """
- output, code = popen(f"{path_vtysh} -m -f -", stderr=STDOUT, input=config)
-
- if code == 2:
- raise ConfigurationNotValid(str(output))
- elif code:
- raise OSError(code, output)
-
- config = output.replace('\r', '')
- return config
-
-
-def reload_configuration(config, daemon=None):
- """ Execute frr-reload with the new configuration
- This will try to reapply the supplied configuration inside FRR.
- The configuration needs to be a complete configuration from the integrated config or
- from a daemon.
-
- config: The configuration to apply
- daemon: Apply the conigutaion to the specified FRR daemon,
- supplying daemon=None applies to the integrated configuration
- return: None
- """
- if daemon and daemon not in _frr_daemons:
- raise ValueError(f'The specified daemon type is not supported {repr(daemon)}')
-
- f = tempfile.NamedTemporaryFile('w')
- f.write(config)
- f.flush()
-
- LOG.debug(f'reload_configuration: Reloading config using temporary file: {f.name}')
- cmd = f'{path_frr_reload} --reload'
- if daemon:
- cmd += f' --daemon {daemon}'
-
- if DEBUG:
- cmd += f' --debug --stdout'
-
- cmd += f' {f.name}'
-
- LOG.debug(f'reload_configuration: Executing command against frr-reload: "{cmd}"')
- output, code = popen(cmd, stderr=STDOUT)
- f.close()
-
- for i, e in enumerate(output.split('\n')):
- LOG.debug(f'frr-reload output: {i:3} {e}')
-
- if code == 1:
- raise ConfigError(output)
- elif code:
- raise OSError(code, output)
-
- return output
-
-
-def save_configuration():
- """ T3217: Save FRR configuration to /run/frr/config/frr.conf """
- return cmd(f'{path_vtysh} -n -w')
-
-
-def execute(command):
- """ Run commands inside vtysh
- command: str containing commands to execute inside a vtysh session
- """
- if not isinstance(command, str):
- raise ValueError(f'command needs to be a string: {repr(command)}')
-
- cmd = f"{path_vtysh} -c '{command}'"
-
- output, code = popen(cmd, stderr=STDOUT)
- if code:
- raise OSError(code, output)
-
- config = output.replace('\r', '')
- return config
-
-
-def configure(lines, daemon=False):
- """ run commands inside config mode vtysh
- lines: list or str conaining commands to execute inside a configure session
- only one command executed on each configure()
- Executing commands inside a subcontext uses the list to describe the context
- ex: ['router bgp 6500', 'neighbor 192.0.2.1 remote-as 65000']
- return: None
- """
- if isinstance(lines, str):
- lines = [lines]
- elif not isinstance(lines, list):
- raise ValueError('lines needs to be string or list of commands')
-
- if daemon and daemon not in _frr_daemons:
- raise ValueError(f'The specified daemon type is not supported {repr(daemon)}')
-
- cmd = f'{path_vtysh}'
- if daemon:
- cmd += f' -d {daemon}'
-
- cmd += " -c 'configure terminal'"
- for x in lines:
- cmd += f" -c '{x}'"
-
- output, code = popen(cmd, stderr=STDOUT)
- if code == 1:
- raise ConfigurationNotValid(f'Configuration FRR failed: {repr(output)}')
- elif code:
- raise OSError(code, output)
-
- config = output.replace('\r', '')
- return config
-
-
-def _replace_section(config, replacement, replace_re, before_re):
- r"""Replace a section of FRR config
- config: full original configuration
- replacement: replacement configuration section
- replace_re: The regex to replace
- example: ^router bgp \d+$.?*^!$
- this will replace everything between ^router bgp X$ and ^!$
- before_re: When replace_re is not existant, the config will be added before this tag
- example: ^line vty$
-
- return: modified configuration as a text file
- """
- # DEPRECATED, this is replaced by a new implementation
- # Check if block is configured, remove the existing instance else add a new one
- if re.findall(replace_re, config, flags=re.MULTILINE | re.DOTALL):
- # Section is in the configration, replace it
- return re.sub(replace_re, replacement, config, count=1,
- flags=re.MULTILINE | re.DOTALL)
- if before_re:
- if not re.findall(before_re, config, flags=re.MULTILINE | re.DOTALL):
- raise ConfigSectionNotFound(f"Config section {before_re} not found in config")
-
- # If no section is in the configuration, add it before the line vty line
- return re.sub(before_re, rf'{replacement}\n\g<1>', config, count=1,
- flags=re.MULTILINE | re.DOTALL)
-
- raise ConfigSectionNotFound(f"Config section {replacement} not found in config")
-
-
-def replace_section(config, replacement, from_re, to_re=r'!', before_re=r'line vty'):
- r"""Replace a section of FRR config
- config: full original configuration
- replacement: replacement configuration section
- from_re: Regex for the start of section matching
- example: 'router bgp \d+'
- to_re: Regex for stop of section matching
- default: '!'
- example: '!' or 'end'
- before_re: When from_re/to_re does not return a match, the config will
- be added before this tag
- default: ^line vty$
-
- startline and endline tags will be automatically added to the resulting from_re/to_re and before_re regex'es
- """
- # DEPRECATED, this is replaced by a new implementation
- return _replace_section(config, replacement, replace_re=rf'^{from_re}$.*?^{to_re}$', before_re=rf'^({before_re})$')
-
-
-def remove_section(config, from_re, to_re='!'):
- # DEPRECATED, this is replaced by a new implementation
- return _replace_section(config, '', replace_re=rf'^{from_re}$.*?^{to_re}$', before_re=None)
-
-
-def _find_first_block(config, start_pattern, stop_pattern, start_at=0):
- '''Find start and stop line numbers for a config block
- config: (list) A list conaining the configuration that is searched
- start_pattern: (raw-str) The pattern searched for a a start of block tag
- stop_pattern: (raw-str) The pattern searched for to signify the end of the block
- start_at: (int) The index to start searching at in the <config>
-
- Returns:
- None: No complete block could be found
- set(int, int): A complete block found between the line numbers returned in the set
-
- The object <config> is searched from the start for the regex <start_pattern> until the first match is found.
- On a successful match it continues the search for the regex <stop_pattern> until it is found.
- After a successful run a set is returned containing the start and stop line numbers.
- '''
- LOG.debug(f'_find_first_block: find start={repr(start_pattern)} stop={repr(stop_pattern)} start_at={start_at}')
- _start = None
- for i, element in enumerate(config[start_at:], start=start_at):
- # LOG.debug(f'_find_first_block: running line {i:3} "{element}"')
- if not _start:
- if not re.match(start_pattern, element):
- LOG.debug(f'_find_first_block: no match {i:3} "{element}"')
- continue
- _start = i
- LOG.debug(f'_find_first_block: Found start {i:3} "{element}"')
- continue
-
- if not re.match(stop_pattern, element):
- LOG.debug(f'_find_first_block: no match {i:3} "{element}"')
- continue
-
- LOG.debug(f'_find_first_block: Found stop {i:3} "{element}"')
- return (_start, i)
-
- LOG.debug('_find_first_block: exit start={repr(start_pattern)} stop={repr(stop_pattern)} start_at={start_at}')
- return None
-
-
-def _find_first_element(config, pattern, start_at=0):
- '''Find the first element that matches the current pattern in config
- config: (list) A list containing the configuration that is searched
- start_pattern: (raw-str) The pattern searched for
- start_at: (int) The index to start searching at in the <config>
-
- return: Line index of the line containing the searched pattern
-
- TODO: for now it returns -1 on a no-match because 0 also returns as False
- TODO: that means that we can not use False matching to tell if its
- '''
- LOG.debug(f'_find_first_element: find start="{pattern}" start_at={start_at}')
- for i, element in enumerate(config[start_at:], start=0):
- if re.match(pattern + '$', element):
- LOG.debug(f'_find_first_element: Found stop {i:3} "{element}"')
- return i
- LOG.debug(f'_find_first_element: no match {i:3} "{element}"')
- LOG.debug(f'_find_first_element: Did not find any match, exiting')
- return -1
-
-
-def _find_elements(config, pattern, start_at=0):
- '''Find all instances of pattern and return a list containing all element indexes
- config: (list) A list containing the configuration that is searched
- start_pattern: (raw-str) The pattern searched for
- start_at: (int) The index to start searching at in the <config>
-
- return: A list of line indexes containing the searched pattern
- TODO: refactor this to return a generator instead
- '''
- return [i for i, element in enumerate(config[start_at:], start=0) if re.match(pattern + '$', element)]
-
-
-class FRRConfig:
- '''Main FRR Configuration manipulation object
- Using this object the user could load, manipulate and commit the configuration to FRR
- '''
- def __init__(self, config=[]):
- self.imported_config = ''
-
- if isinstance(config, list):
- self.config = config.copy()
- self.original_config = config.copy()
- elif isinstance(config, str):
- self.config = config.split('\n')
- self.original_config = self.config.copy()
- else:
- raise ValueError(
- 'The config element needs to be a string or list type object')
-
- if config:
- LOG.debug(f'__init__: frr library initiated with initial config')
- for i, e in enumerate(self.config):
- LOG.debug(f'__init__: initial {i:3} {e}')
-
- def load_configuration(self, daemon=None):
- '''Load the running configuration from FRR into the config object
- daemon: str with name of the FRR Daemon to load configuration from or
- None to load the consolidated config
-
- Using this overwrites the current loaded config objects and replaces the original loaded config
- '''
- init_debugging()
-
- self.imported_config = get_configuration(daemon=daemon)
- if daemon:
- LOG.debug(f'load_configuration: Configuration loaded from FRR daemon {daemon}')
- else:
- LOG.debug(f'load_configuration: Configuration loaded from FRR integrated config')
-
- self.original_config = self.imported_config.split('\n')
- self.config = self.original_config.copy()
-
- for i, e in enumerate(self.imported_config.split('\n')):
- LOG.debug(f'load_configuration: loaded {i:3} {e}')
- return
-
- def test_configuration(self):
- '''Test the current configuration against FRR
- This will exception if FRR failes to load the current configuration object
- '''
- LOG.debug('test_configation: Testing configuration')
- mark_configuration('\n'.join(self.config))
-
- def commit_configuration(self, daemon=None):
- '''
- Commit the current configuration to FRR daemon: str with name of the
- FRR daemon to commit to or None to use the consolidated config.
-
- Configuration is automatically saved after apply
- '''
- LOG.debug('commit_configuration: Commiting configuration')
- for i, e in enumerate(self.config):
- LOG.debug(f'commit_configuration: new_config {i:3} {e}')
-
- # https://github.com/FRRouting/frr/issues/10132
- # https://github.com/FRRouting/frr/issues/10133
- count = 0
- count_max = 5
- emsg = ''
- while count < count_max:
- count += 1
- try:
- reload_configuration('\n'.join(self.config), daemon=daemon)
- break
- except ConfigError as e:
- emsg = str(e)
- except:
- # we just need to re-try the commit of the configuration
- # for the listed FRR issues above
- pass
- if count >= count_max:
- if emsg:
- raise ConfigError(emsg)
- raise ConfigurationNotValid(f'Config commit retry counter ({count_max}) exceeded for {daemon} daemon!')
-
- # Save configuration to /run/frr/config/frr.conf
- save_configuration()
-
-
- def modify_section(self, start_pattern, replacement='!', stop_pattern=r'\S+', remove_stop_mark=False, count=0):
- if isinstance(replacement, str):
- replacement = replacement.split('\n')
- elif not isinstance(replacement, list):
- return ValueError("The replacement element needs to be a string or list type object")
- LOG.debug(f'modify_section: starting search for {repr(start_pattern)} until {repr(stop_pattern)}')
-
- _count = 0
- _next_start = 0
- while True:
- if count and count <= _count:
- # Break out of the loop after specified amount of matches
- LOG.debug(f'modify_section: reached limit ({_count}), exiting loop at line {_next_start}')
- break
- # While searching, always assume that the user wants to search for the exact pattern he entered
- # To be more specific the user needs a override, eg. a "pattern.*"
- _w = _find_first_block(
- self.config, start_pattern+'$', stop_pattern, start_at=_next_start)
- if not _w:
- # Reached the end, no more elements to remove
- LOG.debug(f'modify_section: No more config sections found, exiting')
- break
- start_element, end_element = _w
- LOG.debug(f'modify_section: found match between {start_element} and {end_element}')
- for i, e in enumerate(self.config[start_element:end_element+1 if remove_stop_mark else end_element],
- start=start_element):
- LOG.debug(f'modify_section: remove {i:3} {e}')
- del self.config[start_element:end_element +
- 1 if remove_stop_mark else end_element]
- if replacement:
- # Append the replacement config at the current position
- for i, e in enumerate(replacement, start=start_element):
- LOG.debug(f'modify_section: add {i:3} {e}')
- self.config[start_element:start_element] = replacement
- _count += 1
- _next_start = start_element + len(replacement)
-
- return _count
-
- def add_before(self, before_pattern, addition):
- '''Add config block before this element in the configuration'''
- if isinstance(addition, str):
- addition = addition.split('\n')
- elif not isinstance(addition, list):
- return ValueError("The replacement element needs to be a string or list type object")
-
- start = _find_first_element(self.config, before_pattern)
- if start < 0:
- return False
- for i, e in enumerate(addition, start=start):
- LOG.debug(f'add_before: add {i:3} {e}')
- self.config[start:start] = addition
- return True
-
- def __str__(self):
- return '\n'.join(self.config)
-
- def __repr__(self):
- return f'frr({repr(str(self))})'
diff --git a/python/vyos/frrender.py b/python/vyos/frrender.py
new file mode 100644
index 000000000..95d6c7243
--- /dev/null
+++ b/python/vyos/frrender.py
@@ -0,0 +1,176 @@
+# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Library used to interface with FRR's mgmtd, introduced in FRR version 10.0
+"""
+
+import os
+
+from vyos.defaults import frr_debug_enable
+from vyos.utils.file import write_file
+from vyos.utils.process import cmd
+from vyos.utils.process import rc_cmd
+from vyos.template import render_to_string
+from vyos import ConfigError
+
+DEBUG_ON = os.path.exists(frr_debug_enable)
+
+def debug(message):
+ if not DEBUG_ON:
+ return
+ print(message)
+
+frr_protocols = ['babel', 'bfd', 'bgp', 'eigrp', 'isis', 'mpls', 'nhrp',
+ 'openfabric', 'ospf', 'ospfv3', 'pim', 'pim6', 'rip',
+ 'ripng', 'rpki', 'segment_routing', 'static']
+
+babel_daemon = 'babeld'
+bfd_daemon = 'bfdd'
+bgp_daemon = 'bgpd'
+isis_daemon = 'isisd'
+ldpd_daemon = 'ldpd'
+mgmt_daemon = 'mgmtd'
+openfabric_daemon = 'fabricd'
+ospf_daemon = 'ospfd'
+ospf6_daemon = 'ospf6d'
+pim_daemon = 'pimd'
+pim6_daemon = 'pim6d'
+rip_daemon = 'ripd'
+ripng_daemon = 'ripngd'
+zebra_daemon = 'zebra'
+
+class FRRender:
+ def __init__(self):
+ self._frr_conf = '/run/frr/config/frr.conf'
+
+ def generate(self, config):
+ if not isinstance(config, dict):
+ tmp = type(config)
+ raise ValueError(f'Config must be of type "dict" and not "{tmp}"!')
+
+ def inline_helper(config_dict) -> str:
+ output = '!\n'
+ if 'babel' in config_dict and 'deleted' not in config_dict['babel']:
+ output += render_to_string('frr/babeld.frr.j2', config_dict['babel'])
+ output += '\n'
+ if 'bfd' in config_dict and 'deleted' not in config_dict['bfd']:
+ output += render_to_string('frr/bfdd.frr.j2', config_dict['bfd'])
+ output += '\n'
+ if 'bgp' in config_dict and 'deleted' not in config_dict['bgp']:
+ output += render_to_string('frr/bgpd.frr.j2', config_dict['bgp'])
+ output += '\n'
+ if 'eigrp' in config_dict and 'deleted' not in config_dict['eigrp']:
+ output += render_to_string('frr/eigrpd.frr.j2', config_dict['eigrp'])
+ output += '\n'
+ if 'isis' in config_dict and 'deleted' not in config_dict['isis']:
+ output += render_to_string('frr/isisd.frr.j2', config_dict['isis'])
+ output += '\n'
+ if 'mpls' in config_dict and 'deleted' not in config_dict['mpls']:
+ output += render_to_string('frr/ldpd.frr.j2', config_dict['mpls'])
+ output += '\n'
+ if 'openfabric' in config_dict and 'deleted' not in config_dict['openfabric']:
+ output += render_to_string('frr/fabricd.frr.j2', config_dict['openfabric'])
+ output += '\n'
+ if 'ospf' in config_dict and 'deleted' not in config_dict['ospf']:
+ output += render_to_string('frr/ospfd.frr.j2', config_dict['ospf'])
+ output += '\n'
+ if 'ospfv3' in config_dict and 'deleted' not in config_dict['ospfv3']:
+ output += render_to_string('frr/ospf6d.frr.j2', config_dict['ospfv3'])
+ output += '\n'
+ if 'pim' in config_dict and 'deleted' not in config_dict['pim']:
+ output += render_to_string('frr/pimd.frr.j2', config_dict['pim'])
+ output += '\n'
+ if 'pim6' in config_dict and 'deleted' not in config_dict['pim6']:
+ output += render_to_string('frr/pim6d.frr.j2', config_dict['pim6'])
+ output += '\n'
+ if 'policy' in config_dict and len(config_dict['policy']) > 0:
+ output += render_to_string('frr/policy.frr.j2', config_dict['policy'])
+ output += '\n'
+ if 'rip' in config_dict and 'deleted' not in config_dict['rip']:
+ output += render_to_string('frr/ripd.frr.j2', config_dict['rip'])
+ output += '\n'
+ if 'ripng' in config_dict and 'deleted' not in config_dict['ripng']:
+ output += render_to_string('frr/ripngd.frr.j2', config_dict['ripng'])
+ output += '\n'
+ if 'rpki' in config_dict and 'deleted' not in config_dict['rpki']:
+ output += render_to_string('frr/rpki.frr.j2', config_dict['rpki'])
+ output += '\n'
+ if 'segment_routing' in config_dict and 'deleted' not in config_dict['segment_routing']:
+ output += render_to_string('frr/zebra.segment_routing.frr.j2', config_dict['segment_routing'])
+ output += '\n'
+ if 'static' in config_dict and 'deleted' not in config_dict['static']:
+ output += render_to_string('frr/staticd.frr.j2', config_dict['static'])
+ output += '\n'
+ if 'ip' in config_dict and 'deleted' not in config_dict['ip']:
+ output += render_to_string('frr/zebra.route-map.frr.j2', config_dict['ip'])
+ output += '\n'
+ if 'ipv6' in config_dict and 'deleted' not in config_dict['ipv6']:
+ output += render_to_string('frr/zebra.route-map.frr.j2', config_dict['ipv6'])
+ output += '\n'
+ return output
+
+ debug('======< RENDERING CONFIG >======')
+ # we can not reload an empty file, thus we always embed the marker
+ output = '!\n'
+ # Enable SNMP agentx support
+ # SNMP AgentX support cannot be disabled once enabled
+ if 'snmp' in config:
+ output += 'agentx\n'
+ # Add routing protocols in global VRF
+ output += inline_helper(config)
+ # Interface configuration for EVPN is not VRF related
+ if 'interfaces' in config:
+ output += render_to_string('frr/evpn.mh.frr.j2', {'interfaces' : config['interfaces']})
+ output += '\n'
+
+ if 'vrf' in config and 'name' in config['vrf']:
+ output += render_to_string('frr/zebra.vrf.route-map.frr.j2', config['vrf']) + '\n'
+ for vrf, vrf_config in config['vrf']['name'].items():
+ if 'protocols' not in vrf_config:
+ continue
+ for protocol in vrf_config['protocols']:
+ vrf_config['protocols'][protocol]['vrf'] = vrf
+
+ output += inline_helper(vrf_config['protocols'])
+
+ debug(output)
+ debug('======< RENDERING CONFIG COMPLETE >======')
+ write_file(self._frr_conf, output)
+ if DEBUG_ON: write_file('/tmp/frr.conf.debug', output)
+
+ def apply(self):
+ count = 0
+ count_max = 5
+ emsg = ''
+ while count < count_max:
+ count += 1
+ debug(f'FRR: Reloading configuration - tries: {count} | Python class ID: {id(self)}')
+
+ cmdline = '/usr/lib/frr/frr-reload.py --reload'
+ if DEBUG_ON:
+ cmdline += ' --debug'
+ rc, emsg = rc_cmd(f'{cmdline} {self._frr_conf}')
+ if rc != 0:
+ debug('FRR configuration reload failed, retrying')
+ continue
+ debug(emsg)
+ debug('======< DONE APPLYING CONFIG >======')
+ break
+ if count >= count_max:
+ raise ConfigError(emsg)
+
+ def save_configuration(self):
+ """ T3217: Save FRR configuration to /run/frr/config/frr.conf """
+ return cmd('/usr/bin/vtysh -n --writeconfig')
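
A minimal sketch of the render/apply cycle this class provides. In practice get_frrender_dict() assembles the dict and calls generate() itself; the dict literal below only mirrors that structure for illustration:

```
from vyos.frrender import FRRender

frrender = FRRender()
frrender.generate({
    'snmp'  : {},   # renders the 'agentx' line
    'static': {'route': {'192.0.2.0/24': {'next_hop': {'203.0.113.1': {}}}}},
})
frrender.apply()    # frr-reload.py against /run/frr/config/frr.conf, up to 5 attempts
```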
diff --git a/python/vyos/utils/misc.py b/python/vyos/utils/misc.py
index d82655914..ac8011b8d 100644
--- a/python/vyos/utils/misc.py
+++ b/python/vyos/utils/misc.py
@@ -52,7 +52,7 @@ def install_into_config(conf, config_paths, override_prompt=True):
continue
try:
- cmd(f'/opt/vyatta/sbin/my_set {path}')
+ cmd(f'/usr/libexec/vyos/vyconf/vy_set {path}')
count += 1
except:
failed.append(path)
diff --git a/python/vyos/xml_ref/generate_cache.py b/python/vyos/xml_ref/generate_cache.py
index 5f3f84dee..093697993 100755
--- a/python/vyos/xml_ref/generate_cache.py
+++ b/python/vyos/xml_ref/generate_cache.py
@@ -55,6 +55,8 @@ def main():
parser = ArgumentParser(description='generate and save dict from xml defintions')
parser.add_argument('--xml-dir', type=str, required=True,
help='transcluded xml interface-definition directory')
+ parser.add_argument('--internal-cache', type=str, required=True,
+ help='cache as unrendered json data for loading by vyconfd')
parser.add_argument('--package-name', type=non_trivial, default='vyos-1x',
help='name of current package')
parser.add_argument('--output-path', help='path to generated cache')
@@ -66,9 +68,11 @@ def main():
out_path = args['output_path']
path = out_path if out_path is not None else pkg_cache
xml_cache = abspath(join(path, cache_name))
+ internal_cache = args['internal_cache']
try:
- reference_tree_to_json(xml_dir, xml_tmp)
+ reference_tree_to_json(xml_dir, xml_tmp,
+ internal_cache=internal_cache)
except ConfigTreeError as e:
print(e)
sys.exit(1)