Diffstat (limited to 'src')
-rwxr-xr-x  src/conf_mode/https.py | 29
-rwxr-xr-x  src/conf_mode/interfaces-macsec.py | 8
-rwxr-xr-x  src/conf_mode/interfaces-pseudo-ethernet.py | 7
-rwxr-xr-x  src/conf_mode/ntp.py | 21
-rwxr-xr-x  src/conf_mode/service_monitoring_telegraf.py | 60
-rwxr-xr-x  src/conf_mode/ssh.py | 23
-rwxr-xr-x  src/conf_mode/vpn_ipsec.py | 8
-rwxr-xr-x  src/conf_mode/vpn_sstp.py | 8
-rwxr-xr-x  src/etc/opennhrp/opennhrp-script.py | 315
-rwxr-xr-x  src/op_mode/conntrack.py | 23
-rwxr-xr-x  src/op_mode/ipsec.py | 116
-rwxr-xr-x  src/op_mode/nat.py | 136
-rwxr-xr-x  src/op_mode/show_nat66_rules.py | 102
-rw-r--r--  src/services/api/graphql/graphql/mutations.py | 4
-rw-r--r--  src/services/api/graphql/graphql/queries.py | 4
-rw-r--r--  src/services/api/graphql/session/__init__.py (renamed from src/services/api/graphql/recipes/__init__.py) | 0
-rwxr-xr-x  src/services/api/graphql/session/composite/system_status.py (renamed from src/services/api/graphql/recipes/queries/system_status.py) | 0
-rw-r--r--  src/services/api/graphql/session/override/remove_firewall_address_group_members.py (renamed from src/services/api/graphql/recipes/remove_firewall_address_group_members.py) | 0
-rw-r--r--  src/services/api/graphql/session/session.py (renamed from src/services/api/graphql/recipes/session.py) | 2
-rw-r--r--  src/services/api/graphql/session/templates/create_dhcp_server.tmpl (renamed from src/services/api/graphql/recipes/templates/create_dhcp_server.tmpl) | 0
-rw-r--r--  src/services/api/graphql/session/templates/create_firewall_address_group.tmpl (renamed from src/services/api/graphql/recipes/templates/create_firewall_address_group.tmpl) | 0
-rw-r--r--  src/services/api/graphql/session/templates/create_firewall_address_ipv_6_group.tmpl (renamed from src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl) | 0
-rw-r--r--  src/services/api/graphql/session/templates/create_interface_ethernet.tmpl (renamed from src/services/api/graphql/recipes/templates/create_interface_ethernet.tmpl) | 0
-rw-r--r--  src/services/api/graphql/session/templates/remove_firewall_address_group_members.tmpl (renamed from src/services/api/graphql/recipes/templates/remove_firewall_address_group_members.tmpl) | 0
-rw-r--r--  src/services/api/graphql/session/templates/remove_firewall_address_ipv_6_group_members.tmpl (renamed from src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl) | 0
-rw-r--r--  src/services/api/graphql/session/templates/update_firewall_address_group_members.tmpl (renamed from src/services/api/graphql/recipes/templates/update_firewall_address_group_members.tmpl) | 0
-rw-r--r--  src/services/api/graphql/session/templates/update_firewall_address_ipv_6_group_members.tmpl (renamed from src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl) | 0
-rwxr-xr-x  src/services/api/graphql/utils/schema_from_op_mode.py | 5
-rwxr-xr-x  src/services/vyos-http-api-server | 1
-rwxr-xr-x  src/system/keepalived-fifo.py | 12
-rw-r--r--  src/systemd/telegraf.service | 15
31 files changed, 660 insertions, 239 deletions
diff --git a/src/conf_mode/https.py b/src/conf_mode/https.py
index 3057357fc..7cd7ea42e 100755
--- a/src/conf_mode/https.py
+++ b/src/conf_mode/https.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2021 VyOS maintainers and contributors
+# Copyright (C) 2019-2022 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -29,6 +29,8 @@ from vyos.pki import wrap_certificate
from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.util import call
+from vyos.util import check_port_availability
+from vyos.util import is_listen_port_bind_service
from vyos.util import write_file
from vyos import airbag
@@ -107,6 +109,31 @@ def verify(https):
raise ConfigError("At least one 'virtual-host <id> server-name' "
"matching the 'certbot domain-name' is required.")
+ server_block_list = []
+
+ # organize by vhosts
+ vhost_dict = https.get('virtual-host', {})
+
+ if not vhost_dict:
+ # no specified virtual hosts (server blocks); use default
+ server_block_list.append(default_server_block)
+ else:
+ for vhost in list(vhost_dict):
+ server_block = deepcopy(default_server_block)
+ data = vhost_dict.get(vhost, {})
+ server_block['address'] = data.get('listen-address', '*')
+ server_block['port'] = data.get('listen-port', '443')
+ server_block_list.append(server_block)
+
+ for entry in server_block_list:
+ _address = entry.get('address')
+ _address = '0.0.0.0' if _address == '*' else _address
+ _port = entry.get('port')
+ proto = 'tcp'
+ if check_port_availability(_address, int(_port), proto) is not True and \
+ not is_listen_port_bind_service(int(_port), 'nginx'):
+ raise ConfigError(f'"{proto}" port "{_port}" is used by another service')
+
verify_vrf(https)
return None
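
The new verify() step rejects a listen address/port combination that is already bound by a service other than nginx. As a rough, hypothetical stand-in for vyos.util.check_port_availability() (the real helper may behave differently), a plain bind test with the standard library looks like this:

import socket

def port_is_free(address: str, port: int, proto: str = 'tcp') -> bool:
    """Return True if address:port can still be bound locally.
    Assumption: a simple bind test approximates the real availability check."""
    sock_type = socket.SOCK_STREAM if proto == 'tcp' else socket.SOCK_DGRAM
    family = socket.AF_INET6 if ':' in address else socket.AF_INET
    with socket.socket(family, sock_type) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind((address, port))
            return True
        except OSError:
            return False
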
diff --git a/src/conf_mode/interfaces-macsec.py b/src/conf_mode/interfaces-macsec.py
index 870049a88..649ea8d50 100755
--- a/src/conf_mode/interfaces-macsec.py
+++ b/src/conf_mode/interfaces-macsec.py
@@ -67,7 +67,7 @@ def get_config(config=None):
macsec.update({'shutdown_required': {}})
if 'source_interface' in macsec:
- tmp = is_source_interface(conf, macsec['source_interface'], 'macsec')
+ tmp = is_source_interface(conf, macsec['source_interface'], ['macsec', 'pseudo-ethernet'])
if tmp and tmp != ifname: macsec.update({'is_source_interface' : tmp})
return macsec
@@ -102,12 +102,6 @@ def verify(macsec):
# gcm-aes-128 requires a 128bit long key - 64 characters (string) = 32byte = 256bit
raise ConfigError('gcm-aes-128 requires a 256bit long key!')
- if 'is_source_interface' in macsec:
- tmp = macsec['is_source_interface']
- src_ifname = macsec['source_interface']
- raise ConfigError(f'Can not use source-interface "{src_ifname}", it already ' \
- f'belongs to interface "{tmp}"!')
-
if 'source_interface' in macsec:
# MACsec adds a 40 byte overhead (32 byte MACsec + 8 bytes VLAN 802.1ad
# and 802.1q) - we need to check the underlaying MTU if our configured
diff --git a/src/conf_mode/interfaces-pseudo-ethernet.py b/src/conf_mode/interfaces-pseudo-ethernet.py
index f26a50a0e..20f2b1975 100755
--- a/src/conf_mode/interfaces-pseudo-ethernet.py
+++ b/src/conf_mode/interfaces-pseudo-ethernet.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2020 VyOS maintainers and contributors
+# Copyright (C) 2019-2022 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -19,6 +19,7 @@ from sys import exit
from vyos.config import Config
from vyos.configdict import get_interface_dict
from vyos.configdict import is_node_changed
+from vyos.configdict import is_source_interface
from vyos.configverify import verify_vrf
from vyos.configverify import verify_address
from vyos.configverify import verify_bridge_delete
@@ -51,6 +52,10 @@ def get_config(config=None):
if 'source_interface' in peth:
_, peth['parent'] = get_interface_dict(conf, ['interfaces', 'ethernet'],
peth['source_interface'])
+ # test if source-interface is maybe already used by another interface
+ tmp = is_source_interface(conf, peth['source_interface'], ['macsec'])
+ if tmp and tmp != ifname: peth.update({'is_source_interface' : tmp})
+
return peth
def verify(peth):
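
The pseudo-ethernet script now reuses the same duplicate source-interface guard as MACsec, with is_source_interface() taking a list of interface types to inspect. A simplified sketch of what such a lookup can look like on top of a vyos.config.Config instance; the real vyos.configdict.is_source_interface may differ:

def find_source_interface_consumer(conf, source_if: str, intf_types: list) -> str:
    """Return the first interface of the given types that already references
    source_if as its source-interface, or an empty string (sketch only)."""
    for intf_type in intf_types:
        base = ['interfaces', intf_type]
        for intf in conf.list_nodes(base):
            if conf.return_value(base + [intf, 'source-interface']) == source_if:
                return intf
    return ''
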
diff --git a/src/conf_mode/ntp.py b/src/conf_mode/ntp.py
index 5490a794d..0ecb4d736 100755
--- a/src/conf_mode/ntp.py
+++ b/src/conf_mode/ntp.py
@@ -17,6 +17,7 @@
import os
from vyos.config import Config
+from vyos.configdict import is_node_changed
from vyos.configverify import verify_vrf
from vyos.configverify import verify_interface_exists
from vyos.util import call
@@ -40,6 +41,10 @@ def get_config(config=None):
ntp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
ntp['config_file'] = config_file
+
+ tmp = is_node_changed(conf, base + ['vrf'])
+ if tmp: ntp.update({'restart_required': {}})
+
return ntp
def verify(ntp):
@@ -78,19 +83,25 @@ def generate(ntp):
return None
def apply(ntp):
+ systemd_service = 'ntp.service'
+ # Reload systemd manager configuration
+ call('systemctl daemon-reload')
+
if not ntp:
# NTP support is removed in the commit
- call('systemctl stop ntp.service')
+ call(f'systemctl stop {systemd_service}')
if os.path.exists(config_file):
os.unlink(config_file)
if os.path.isfile(systemd_override):
os.unlink(systemd_override)
+ return
- # Reload systemd manager configuration
- call('systemctl daemon-reload')
- if ntp:
- call('systemctl restart ntp.service')
+ # we need to restart the service if e.g. the VRF name changed
+ systemd_action = 'reload-or-restart'
+ if 'restart_required' in ntp:
+ systemd_action = 'restart'
+ call(f'systemctl {systemd_action} {systemd_service}')
return None
if __name__ == '__main__':
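
ntp.py, ssh.py and service_monitoring_telegraf.py all switch to the same pattern here: flag the config dict in get_config() when the VRF node changed, then let apply() decide between a reload and a full restart. A generic sketch of that pattern (the helper name is hypothetical; call() is the vyos.util wrapper already used above):

from vyos.util import call

def apply_systemd_unit(config: dict, unit: str) -> None:
    """Stop the unit when the config was deleted; otherwise reload it, or
    restart it when get_config() set the 'restart_required' marker."""
    call('systemctl daemon-reload')
    if not config:
        call(f'systemctl stop {unit}')
        return
    action = 'restart' if 'restart_required' in config else 'reload-or-restart'
    call(f'systemctl {action} {unit}')
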
diff --git a/src/conf_mode/service_monitoring_telegraf.py b/src/conf_mode/service_monitoring_telegraf.py
index 62f5e1ddf..18b32edab 100755
--- a/src/conf_mode/service_monitoring_telegraf.py
+++ b/src/conf_mode/service_monitoring_telegraf.py
@@ -22,6 +22,8 @@ from shutil import rmtree
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configdict import is_node_changed
+from vyos.configverify import verify_vrf
from vyos.ifconfig import Section
from vyos.template import render
from vyos.util import call
@@ -32,20 +34,14 @@ from vyos import ConfigError
from vyos import airbag
airbag.enable()
-
-base_dir = '/run/telegraf'
cache_dir = f'/etc/telegraf/.cache'
-config_telegraf = f'{base_dir}/vyos-telegraf.conf'
+config_telegraf = f'/run/telegraf/telegraf.conf'
custom_scripts_dir = '/etc/telegraf/custom_scripts'
syslog_telegraf = '/etc/rsyslog.d/50-telegraf.conf'
-systemd_telegraf_service = '/etc/systemd/system/vyos-telegraf.service'
-systemd_telegraf_override_dir = '/etc/systemd/system/vyos-telegraf.service.d'
-systemd_override = f'{systemd_telegraf_override_dir}/10-override.conf'
-
+systemd_override = '/etc/systemd/system/telegraf.service.d/10-override.conf'
def get_interfaces(type='', vlan=True):
"""
- Get interfaces
get_interfaces()
['dum0', 'eth0', 'eth1', 'eth1.5', 'lo', 'tun0']
@@ -62,9 +58,7 @@ def get_interfaces(type='', vlan=True):
return interfaces
def get_nft_filter_chains():
- """
- Get nft chains for table filter
- """
+ """ Get nft chains for table filter """
nft = cmd('nft --json list table ip filter')
nft = json.loads(nft)
chain_list = []
@@ -78,7 +72,6 @@ def get_nft_filter_chains():
def get_config(config=None):
-
if config:
conf = config
else:
@@ -87,8 +80,12 @@ def get_config(config=None):
if not conf.exists(base):
return None
- monitoring = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True,
- no_tag_node_value_mangle=True)
+ monitoring = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True,
+ no_tag_node_value_mangle=True)
+
+ tmp = is_node_changed(conf, base + ['vrf'])
+ if tmp: monitoring.update({'restart_required': {}})
# We have gathered the dict representation of the CLI, but there are default
# options which we need to update into the dictionary retrived.
@@ -131,6 +128,8 @@ def verify(monitoring):
if not monitoring:
return None
+ verify_vrf(monitoring)
+
# Verify influxdb
if 'influxdb' in monitoring:
if 'authentication' not in monitoring['influxdb'] or \
@@ -173,7 +172,7 @@ def verify(monitoring):
def generate(monitoring):
if not monitoring:
# Delete config and systemd files
- config_files = [config_telegraf, systemd_telegraf_service, systemd_override, syslog_telegraf]
+ config_files = [config_telegraf, systemd_override, syslog_telegraf]
for file in config_files:
if os.path.isfile(file):
os.unlink(file)
@@ -190,33 +189,34 @@ def generate(monitoring):
chown(cache_dir, 'telegraf', 'telegraf')
- # Create systemd override dir
- if not os.path.exists(systemd_telegraf_override_dir):
- os.mkdir(systemd_telegraf_override_dir)
-
# Create custome scripts dir
if not os.path.exists(custom_scripts_dir):
os.mkdir(custom_scripts_dir)
# Render telegraf configuration and systemd override
- render(config_telegraf, 'monitoring/telegraf.j2', monitoring)
- render(systemd_telegraf_service, 'monitoring/systemd_vyos_telegraf_service.j2', monitoring)
- render(systemd_override, 'monitoring/override.conf.j2', monitoring, permission=0o640)
- render(syslog_telegraf, 'monitoring/syslog_telegraf.j2', monitoring)
-
- chown(base_dir, 'telegraf', 'telegraf')
+ render(config_telegraf, 'telegraf/telegraf.j2', monitoring, user='telegraf', group='telegraf')
+ render(systemd_override, 'telegraf/override.conf.j2', monitoring)
+ render(syslog_telegraf, 'telegraf/syslog_telegraf.j2', monitoring)
return None
def apply(monitoring):
# Reload systemd manager configuration
+ systemd_service = 'telegraf.service'
call('systemctl daemon-reload')
- if monitoring:
- call('systemctl restart vyos-telegraf.service')
- else:
- call('systemctl stop vyos-telegraf.service')
+ if not monitoring:
+ call(f'systemctl stop {systemd_service}')
+ return
+
+ # we need to restart the service if e.g. the VRF name changed
+ systemd_action = 'reload-or-restart'
+ if 'restart_required' in monitoring:
+ systemd_action = 'restart'
+
+ call(f'systemctl {systemd_action} {systemd_service}')
+
# Telegraf include custom rsyslog config changes
- call('systemctl restart rsyslog')
+ call('systemctl reload-or-restart rsyslog')
if __name__ == '__main__':
try:
diff --git a/src/conf_mode/ssh.py b/src/conf_mode/ssh.py
index 28669694b..2bbd7142a 100755
--- a/src/conf_mode/ssh.py
+++ b/src/conf_mode/ssh.py
@@ -22,6 +22,7 @@ from syslog import LOG_INFO
from vyos.config import Config
from vyos.configdict import dict_merge
+from vyos.configdict import is_node_changed
from vyos.configverify import verify_vrf
from vyos.util import call
from vyos.template import render
@@ -50,6 +51,10 @@ def get_config(config=None):
return None
ssh = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+
+ tmp = is_node_changed(conf, base + ['vrf'])
+ if tmp: ssh.update({'restart_required': {}})
+
# We have gathered the dict representation of the CLI, but there are default
# options which we need to update into the dictionary retrived.
default_values = defaults(base)
@@ -104,17 +109,25 @@ def generate(ssh):
return None
def apply(ssh):
+ systemd_service_ssh = 'ssh.service'
+ systemd_service_sshguard = 'sshguard.service'
if not ssh:
# SSH access is removed in the commit
- call('systemctl stop ssh.service')
- call('systemctl stop sshguard.service')
+ call(f'systemctl stop {systemd_service_ssh}')
+ call(f'systemctl stop {systemd_service_sshguard}')
return None
+
if 'dynamic_protection' not in ssh:
- call('systemctl stop sshguard.service')
+ call(f'systemctl stop {systemd_service_sshguard}')
else:
- call('systemctl restart sshguard.service')
+ call(f'systemctl reload-or-restart {systemd_service_sshguard}')
+
+ # we need to restart the service if e.g. the VRF name changed
+ systemd_action = 'reload-or-restart'
+ if 'restart_required' in ssh:
+ systemd_action = 'restart'
- call('systemctl restart ssh.service')
+ call(f'systemctl {systemd_action} {systemd_service_ssh}')
return None
if __name__ == '__main__':
diff --git a/src/conf_mode/vpn_ipsec.py b/src/conf_mode/vpn_ipsec.py
index bad9cfbd8..5ca32d23e 100755
--- a/src/conf_mode/vpn_ipsec.py
+++ b/src/conf_mode/vpn_ipsec.py
@@ -595,13 +595,11 @@ def wait_for_vici_socket(timeout=5, sleep_interval=0.1):
sleep(sleep_interval)
def apply(ipsec):
+ systemd_service = 'strongswan-starter.service'
if not ipsec:
- call('sudo ipsec stop')
+ call(f'systemctl stop {systemd_service}')
else:
- call('sudo ipsec restart')
- call('sudo ipsec rereadall')
- call('sudo ipsec reload')
-
+ call(f'systemctl reload-or-restart {systemd_service}')
if wait_for_vici_socket():
call('sudo swanctl -q')
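
apply() now lets systemd manage strongswan and only runs swanctl once wait_for_vici_socket() reports the daemon as reachable. As a minimal illustration of probing charon over VICI with the python-vici package (an assumption for illustration; the commit itself relies on the script's own wait_for_vici_socket() helper):

import vici

def charon_is_ready() -> bool:
    """Best-effort probe of the charon VICI socket at its default path."""
    try:
        return bool(vici.Session().version())
    except Exception:
        return False
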
diff --git a/src/conf_mode/vpn_sstp.py b/src/conf_mode/vpn_sstp.py
index 23e5162ba..2949ab290 100755
--- a/src/conf_mode/vpn_sstp.py
+++ b/src/conf_mode/vpn_sstp.py
@@ -26,7 +26,9 @@ from vyos.pki import wrap_certificate
from vyos.pki import wrap_private_key
from vyos.template import render
from vyos.util import call
+from vyos.util import check_port_availability
from vyos.util import dict_search
+from vyos.util import is_listen_port_bind_service
from vyos.util import write_file
from vyos import ConfigError
from vyos import airbag
@@ -62,6 +64,12 @@ def verify(sstp):
if not sstp:
return None
+ port = sstp.get('port')
+ proto = 'tcp'
+ if check_port_availability('0.0.0.0', int(port), proto) is not True and \
+ not is_listen_port_bind_service(int(port), 'accel-pppd'):
+ raise ConfigError(f'"{proto}" port "{port}" is used by another service')
+
verify_accel_ppp_base_service(sstp)
if 'client_ip_pool' not in sstp and 'client_ipv6_pool' not in sstp:
diff --git a/src/etc/opennhrp/opennhrp-script.py b/src/etc/opennhrp/opennhrp-script.py
index 8274e6564..a5293c97e 100755
--- a/src/etc/opennhrp/opennhrp-script.py
+++ b/src/etc/opennhrp/opennhrp-script.py
@@ -18,44 +18,120 @@ import os
import re
import sys
import vici
+
from json import loads
+from pathlib import Path
+from vyos.logger import getLogger
from vyos.util import cmd
from vyos.util import process_named_running
-NHRP_CONFIG = "/run/opennhrp/opennhrp.conf"
+NHRP_CONFIG: str = '/run/opennhrp/opennhrp.conf'
+
+
+def vici_get_ipsec_uniqueid(conn: str, src_nbma: str,
+ dst_nbma: str) -> list[str]:
+ """ Find and return IKE SAs by src nbma and dst nbma
+
+ Args:
+ conn (str): a connection name
+ src_nbma (str): an IP address of NBMA source
+ dst_nbma (str): an IP address of NBMA destination
+
+ Returns:
+ list: a list of IKE connections that match a criteria
+ """
+ if not conn or not src_nbma or not dst_nbma:
+ logger.error(
+ f'Incomplete input data for resolving IKE unique ids: '
+ f'conn: {conn}, src_nbma: {src_nbma}, dst_nbma: {dst_nbma}')
+ return []
+
+ try:
+ logger.info(
+ f'Resolving IKE unique ids for: conn: {conn}, '
+ f'src_nbma: {src_nbma}, dst_nbma: {dst_nbma}')
+ session: vici.Session = vici.Session()
+ list_ikeid: list[str] = []
+ list_sa = session.list_sas({'ike': conn})
+ for sa in list_sa:
+ if sa[conn]['local-host'].decode('ascii') == src_nbma \
+ and sa[conn]['remote-host'].decode('ascii') == dst_nbma:
+ list_ikeid.append(sa[conn]['uniqueid'].decode('ascii'))
+ return list_ikeid
+ except Exception as err:
+ logger.error(f'Unable to find unique ids for IKE: {err}')
+ return []
+
+
+def vici_ike_terminate(list_ikeid: list[str]) -> bool:
+ """Terminating IKE SAs by list of IKE IDs
+
+ Args:
+ list_ikeid (list[str]): a list of IKE ids to terminate
+
+ Returns:
+ bool: result of termination action
+ """
+    if not list_ikeid:
+ logger.warning('An empty list for termination was provided')
+ return False
+
+ try:
+ session = vici.Session()
+ for ikeid in list_ikeid:
+ logger.info(f'Terminating IKE SA with id {ikeid}')
+ session.terminate({'ike-id': ikeid, 'timeout': '-1'})
+ return True
+ except Exception as err:
+ logger.error(f'Failed to terminate SA for IKE ids {list_ikeid}: {err}')
+ return False
+
+def parse_type_ipsec(interface: str) -> tuple[str, str]:
+ """Get DMVPN Type and NHRP Profile from the configuration
-def parse_type_ipsec(interface):
- with open(NHRP_CONFIG, 'r') as f:
- lines = f.readlines()
- match = rf'^interface {interface} #(hub|spoke)(?:\s([\w-]+))?$'
- for line in lines:
- m = re.match(match, line)
- if m:
- return m[1], m[2]
- return None, None
+ Args:
+ interface (str): a name of interface
+
+ Returns:
+ tuple[str, str]: `peer_type` and `profile_name`
+ """
+ if not interface:
+ logger.error('Cannot find peer type - no input provided')
+ return '', ''
+
+ config_file: str = Path(NHRP_CONFIG).read_text()
+ regex: str = rf'^interface {interface} #(?P<peer_type>hub|spoke) ?(?P<profile_name>[^\n]*)$'
+ match = re.search(regex, config_file, re.M)
+ if match:
+ return match.groupdict()['peer_type'], match.groupdict()[
+ 'profile_name']
+ return '', ''
def add_peer_route(nbma_src: str, nbma_dst: str, mtu: str) -> None:
"""Add a route to a NBMA peer
Args:
- nmba_src (str): a local IP address
+ nbma_src (str): a local IP address
nbma_dst (str): a remote IP address
mtu (str): a MTU for a route
"""
+ logger.info(f'Adding route from {nbma_src} to {nbma_dst} with MTU {mtu}')
# Find routes to a peer
- route_get_cmd = f'sudo ip -j route get {nbma_dst} from {nbma_src}'
+ route_get_cmd: str = f'sudo ip --json route get {nbma_dst} from {nbma_src}'
try:
route_info_data = loads(cmd(route_get_cmd))
except Exception as err:
- print(f'Unable to find a route to {nbma_dst}: {err}')
+ logger.error(f'Unable to find a route to {nbma_dst}: {err}')
+ return
# Check if an output has an expected format
if not isinstance(route_info_data, list):
- print(f'Garbage returned from the "{route_get_cmd}" command: \
- {route_info_data}')
+ logger.error(
+ f'Garbage returned from the "{route_get_cmd}" '
+ f'command: {route_info_data}')
return
# Add static routes to a peer
@@ -76,104 +152,217 @@ def add_peer_route(nbma_src: str, nbma_dst: str, mtu: str) -> None:
try:
cmd(route_add_cmd)
except Exception as err:
- print(f'Unable to add a route using command "{route_add_cmd}": \
- {err}')
+ logger.error(
+ f'Unable to add a route using command "{route_add_cmd}": '
+ f'{err}')
-def vici_initiate(conn, child_sa, src_addr, dest_addr):
- try:
- session = vici.Session()
- logs = session.initiate({
- 'ike': conn,
- 'child': child_sa,
- 'timeout': '-1',
- 'my-host': src_addr,
- 'other-host': dest_addr
- })
- for log in logs:
- message = log['msg'].decode('ascii')
- print('INIT LOG:', message)
- return True
- except:
- return None
+def vici_initiate(conn: str, child_sa: str, src_addr: str,
+ dest_addr: str) -> bool:
+ """Initiate IKE SA connection with specific peer
+ Args:
+ conn (str): an IKE connection name
+ child_sa (str): a child SA profile name
+ src_addr (str): NBMA local address
+ dest_addr (str): NBMA address of a peer
-def vici_terminate(conn, child_sa, src_addr, dest_addr):
+ Returns:
+ bool: a result of initiation command
+ """
+ logger.info(
+ f'Trying to initiate connection. Name: {conn}, child sa: {child_sa}, '
+ f'src_addr: {src_addr}, dst_addr: {dest_addr}')
try:
session = vici.Session()
- logs = session.terminate({
+ session.initiate({
'ike': conn,
'child': child_sa,
'timeout': '-1',
'my-host': src_addr,
'other-host': dest_addr
})
- for log in logs:
- message = log['msg'].decode('ascii')
- print('TERM LOG:', message)
return True
- except:
- return None
+ except Exception as err:
+ logger.error(f'Unable to initiate connection {err}')
+ return False
+
+
+def vici_terminate(conn: str, src_addr: str, dest_addr: str) -> None:
+ """Find and terminate IKE SAs by local NBMA and remote NBMA addresses
+
+ Args:
+ conn (str): IKE connection name
+ src_addr (str): NBMA local address
+ dest_addr (str): NBMA address of a peer
+ """
+ logger.info(
+ f'Terminating IKE connection {conn} between {src_addr} '
+ f'and {dest_addr}')
+ ikeid_list: list[str] = vici_get_ipsec_uniqueid(conn, src_addr, dest_addr)
-def iface_up(interface):
- cmd(f'sudo ip route flush proto 42 dev {interface}')
- cmd(f'sudo ip neigh flush dev {interface}')
+ if not ikeid_list:
+ logger.warning(
+ f'No active sessions found for IKE profile {conn}, '
+ f'local NBMA {src_addr}, remote NBMA {dest_addr}')
+ else:
+ vici_ike_terminate(ikeid_list)
-def peer_up(dmvpn_type, conn):
- # src_addr = os.getenv('NHRP_SRCADDR')
+def iface_up(interface: str) -> None:
+ """Proceed tunnel interface UP event
+
+ Args:
+ interface (str): an interface name
+ """
+ if not interface:
+ logger.warning('No interface name provided for UP event')
+
+ logger.info(f'Turning up interface {interface}')
+ try:
+ cmd(f'sudo ip route flush proto 42 dev {interface}')
+ cmd(f'sudo ip neigh flush dev {interface}')
+ except Exception as err:
+ logger.error(
+ f'Unable to flush route on interface "{interface}": {err}')
+
+
+def peer_up(dmvpn_type: str, conn: str) -> None:
+ """Proceed NHRP peer UP event
+
+ Args:
+ dmvpn_type (str): a type of peer
+ conn (str): an IKE profile name
+ """
+ logger.info(f'Peer UP event for {dmvpn_type} using IKE profile {conn}')
src_nbma = os.getenv('NHRP_SRCNBMA')
- # dest_addr = os.getenv('NHRP_DESTADDR')
dest_nbma = os.getenv('NHRP_DESTNBMA')
dest_mtu = os.getenv('NHRP_DESTMTU')
+ if not src_nbma or not dest_nbma:
+ logger.error(
+ f'Can not get NHRP NBMA addresses: local {src_nbma}, '
+ f'remote {dest_nbma}')
+ return
+
+ logger.info(f'NBMA addresses: local {src_nbma}, remote {dest_nbma}')
if dest_mtu:
add_peer_route(src_nbma, dest_nbma, dest_mtu)
-
if conn and dmvpn_type == 'spoke' and process_named_running('charon'):
- vici_terminate(conn, 'dmvpn', src_nbma, dest_nbma)
+ vici_terminate(conn, src_nbma, dest_nbma)
vici_initiate(conn, 'dmvpn', src_nbma, dest_nbma)
-def peer_down(dmvpn_type, conn):
+def peer_down(dmvpn_type: str, conn: str) -> None:
+ """Proceed NHRP peer DOWN event
+
+ Args:
+ dmvpn_type (str): a type of peer
+ conn (str): an IKE profile name
+ """
+ logger.info(f'Peer DOWN event for {dmvpn_type} using IKE profile {conn}')
+
src_nbma = os.getenv('NHRP_SRCNBMA')
dest_nbma = os.getenv('NHRP_DESTNBMA')
+ if not src_nbma or not dest_nbma:
+ logger.error(
+ f'Can not get NHRP NBMA addresses: local {src_nbma}, '
+ f'remote {dest_nbma}')
+ return
+
+ logger.info(f'NBMA addresses: local {src_nbma}, remote {dest_nbma}')
if conn and dmvpn_type == 'spoke' and process_named_running('charon'):
- vici_terminate(conn, 'dmvpn', src_nbma, dest_nbma)
+ vici_terminate(conn, src_nbma, dest_nbma)
+ try:
+ cmd(f'sudo ip route del {dest_nbma} src {src_nbma} proto 42')
+ except Exception as err:
+ logger.error(
+ f'Unable to del route from {src_nbma} to {dest_nbma}: {err}')
- cmd(f'sudo ip route del {dest_nbma} src {src_nbma} proto 42')
+def route_up(interface: str) -> None:
+ """Proceed NHRP route UP event
+
+ Args:
+ interface (str): an interface name
+ """
+ logger.info(f'Route UP event for interface {interface}')
-def route_up(interface):
dest_addr = os.getenv('NHRP_DESTADDR')
dest_prefix = os.getenv('NHRP_DESTPREFIX')
next_hop = os.getenv('NHRP_NEXTHOP')
- cmd(f'sudo ip route replace {dest_addr}/{dest_prefix} proto 42 \
- via {next_hop} dev {interface}')
- cmd('sudo ip route flush cache')
+ if not dest_addr or not dest_prefix or not next_hop:
+ logger.error(
+ f'Can not get route details: dest_addr {dest_addr}, '
+ f'dest_prefix {dest_prefix}, next_hop {next_hop}')
+ return
+
+ logger.info(
+ f'Route details: dest_addr {dest_addr}, dest_prefix {dest_prefix}, '
+ f'next_hop {next_hop}')
+ try:
+ cmd(f'sudo ip route replace {dest_addr}/{dest_prefix} proto 42 \
+ via {next_hop} dev {interface}')
+ cmd('sudo ip route flush cache')
+ except Exception as err:
+ logger.error(
+            f'Unable to replace or flush route to {dest_addr}/{dest_prefix} '
+ f'via {next_hop} dev {interface}: {err}')
+
+
+def route_down(interface: str) -> None:
+ """Proceed NHRP route DOWN event
+
+ Args:
+ interface (str): an interface name
+ """
+ logger.info(f'Route DOWN event for interface {interface}')
-def route_down(interface):
dest_addr = os.getenv('NHRP_DESTADDR')
dest_prefix = os.getenv('NHRP_DESTPREFIX')
- cmd(f'sudo ip route del {dest_addr}/{dest_prefix} proto 42')
- cmd('sudo ip route flush cache')
+ if not dest_addr or not dest_prefix:
+ logger.error(
+ f'Can not get route details: dest_addr {dest_addr}, '
+ f'dest_prefix {dest_prefix}')
+ return
+
+ logger.info(
+ f'Route details: dest_addr {dest_addr}, dest_prefix {dest_prefix}')
+ try:
+ cmd(f'sudo ip route del {dest_addr}/{dest_prefix} proto 42')
+ cmd('sudo ip route flush cache')
+ except Exception as err:
+ logger.error(
+            f'Unable to delete or flush route to {dest_addr}/{dest_prefix}: '
+ f'{err}')
if __name__ == '__main__':
+ logger = getLogger('opennhrp-script', syslog=True)
+ logger.debug(
+ f'Running script with arguments: {sys.argv}, '
+ f'environment: {os.environ}')
+
action = sys.argv[1]
interface = os.getenv('NHRP_INTERFACE')
- dmvpn_type, profile_name = parse_type_ipsec(interface)
- dmvpn_conn = None
+ if not interface:
+ logger.error('Can not get NHRP interface name')
+ sys.exit(1)
- if profile_name:
- dmvpn_conn = f'dmvpn-{profile_name}-{interface}'
+ dmvpn_type, profile_name = parse_type_ipsec(interface)
+ if not dmvpn_type:
+ logger.info(f'Interface {interface} is not NHRP tunnel')
+ sys.exit()
+ dmvpn_conn: str = ''
+ if profile_name:
+ dmvpn_conn: str = f'dmvpn-{profile_name}-{interface}'
if action == 'interface-up':
iface_up(interface)
elif action == 'peer-register':
@@ -186,3 +375,5 @@ if __name__ == '__main__':
route_up(interface)
elif action == 'route-down':
route_down(interface)
+
+ sys.exit()
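
The rewritten parse_type_ipsec() reads the peer type and profile name from named regex groups instead of positional ones. A quick demonstration against a hypothetical opennhrp.conf line:

import re

line = 'interface tun100 #spoke NHRPVPN'
regex = r'^interface tun100 #(?P<peer_type>hub|spoke) ?(?P<profile_name>[^\n]*)$'
match = re.search(regex, line, re.M)
if match:
    print(match.group('peer_type'), match.group('profile_name'))  # spoke NHRPVPN
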
diff --git a/src/op_mode/conntrack.py b/src/op_mode/conntrack.py
index 036226418..b27aa6060 100755
--- a/src/op_mode/conntrack.py
+++ b/src/op_mode/conntrack.py
@@ -51,6 +51,21 @@ def _get_raw_data(family):
return _xml_to_dict(xml)
+def _get_raw_statistics():
+ entries = []
+ data = cmd('sudo conntrack -S')
+ data = data.replace(' \t', '').split('\n')
+ for entry in data:
+ entries.append(entry.split())
+ return entries
+
+
+def get_formatted_statistics(entries):
+ headers = ["CPU", "Found", "Invalid", "Insert", "Insert fail", "Drop", "Early drop", "Errors", "Search restart"]
+ output = tabulate(entries, headers, numalign="left")
+ return output
+
+
def get_formatted_output(dict_data):
"""
:param xml:
@@ -111,6 +126,14 @@ def show(raw: bool, family: str):
return get_formatted_output(conntrack_data)
+def show_statistics(raw: bool):
+ conntrack_statistics = _get_raw_statistics()
+ if raw:
+ return conntrack_statistics
+ else:
+ return get_formatted_statistics(conntrack_statistics)
+
+
if __name__ == '__main__':
try:
res = vyos.opmode.run(sys.modules[__name__])
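
get_formatted_statistics() expects one list of fields per CPU line, matching the table headers. A worked example of the split in _get_raw_statistics(), fed with a single illustrative line of `conntrack -S` output:

line = 'cpu=0   \tfound=0 invalid=5 insert=0 insert_failed=0 drop=0 early_drop=0 error=0 search_restart=22'
print(line.replace(' \t', '').split())
# ['cpu=0', 'found=0', 'invalid=5', 'insert=0', 'insert_failed=0',
#  'drop=0', 'early_drop=0', 'error=0', 'search_restart=22']
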
diff --git a/src/op_mode/ipsec.py b/src/op_mode/ipsec.py
index 49c8e6142..a4d1b4cb1 100755
--- a/src/op_mode/ipsec.py
+++ b/src/op_mode/ipsec.py
@@ -16,13 +16,122 @@
import re
import sys
+
+from collections import OrderedDict
+from hurry import filesize
+from re import split as re_split
+from tabulate import tabulate
+
from vyos.util import call
+from vyos.util import convert_data
+from vyos.util import seconds_to_human
+
import vyos.opmode
SWANCTL_CONF = '/etc/swanctl/swanctl.conf'
+def _convert(text):
+ return int(text) if text.isdigit() else text.lower()
+
+
+def _alphanum_key(key):
+ return [_convert(c) for c in re_split('([0-9]+)', str(key))]
+
+
+def _get_vici_sas():
+ from vici import Session as vici_session
+
+ session = vici_session()
+ sas = list(session.list_sas())
+ return sas
+
+
+def _get_raw_data_sas():
+ get_sas = _get_vici_sas()
+ sas = convert_data(get_sas)
+ return sas
+
+
+def _get_formatted_output_sas(sas):
+ sa_data = []
+ for sa in sas:
+ for parent_sa in sa.values():
+ # create an item for each child-sa
+ for child_sa in parent_sa.get('child-sas', {}).values():
+ # prepare a list for output data
+ sa_out_name = sa_out_state = sa_out_uptime = sa_out_bytes = sa_out_packets = sa_out_remote_addr = sa_out_remote_id = sa_out_proposal = 'N/A'
+
+ # collect raw data
+ sa_name = child_sa.get('name')
+ sa_state = child_sa.get('state')
+ sa_uptime = child_sa.get('install-time')
+ sa_bytes_in = child_sa.get('bytes-in')
+ sa_bytes_out = child_sa.get('bytes-out')
+ sa_packets_in = child_sa.get('packets-in')
+ sa_packets_out = child_sa.get('packets-out')
+ sa_remote_addr = parent_sa.get('remote-host')
+ sa_remote_id = parent_sa.get('remote-id')
+ sa_proposal_encr_alg = child_sa.get('encr-alg')
+ sa_proposal_integ_alg = child_sa.get('integ-alg')
+ sa_proposal_encr_keysize = child_sa.get('encr-keysize')
+ sa_proposal_dh_group = child_sa.get('dh-group')
+
+ # format data to display
+ if sa_name:
+ sa_out_name = sa_name
+ if sa_state:
+ if sa_state == 'INSTALLED':
+ sa_out_state = 'up'
+ else:
+ sa_out_state = 'down'
+ if sa_uptime:
+ sa_out_uptime = seconds_to_human(sa_uptime)
+ if sa_bytes_in and sa_bytes_out:
+ bytes_in = filesize.size(int(sa_bytes_in))
+ bytes_out = filesize.size(int(sa_bytes_out))
+ sa_out_bytes = f'{bytes_in}/{bytes_out}'
+ if sa_packets_in and sa_packets_out:
+ packets_in = filesize.size(int(sa_packets_in),
+ system=filesize.si)
+ packets_out = filesize.size(int(sa_packets_out),
+ system=filesize.si)
+ packets_str = f'{packets_in}/{packets_out}'
+ sa_out_packets = re.sub(r'B', r'', packets_str)
+ if sa_remote_addr:
+ sa_out_remote_addr = sa_remote_addr
+ if sa_remote_id:
+ sa_out_remote_id = sa_remote_id
+ # format proposal
+ if sa_proposal_encr_alg:
+ sa_out_proposal = sa_proposal_encr_alg
+ if sa_proposal_encr_keysize:
+ sa_proposal_encr_keysize_str = sa_proposal_encr_keysize
+ sa_out_proposal = f'{sa_out_proposal}_{sa_proposal_encr_keysize_str}'
+ if sa_proposal_integ_alg:
+ sa_proposal_integ_alg_str = sa_proposal_integ_alg
+ sa_out_proposal = f'{sa_out_proposal}/{sa_proposal_integ_alg_str}'
+ if sa_proposal_dh_group:
+ sa_proposal_dh_group_str = sa_proposal_dh_group
+ sa_out_proposal = f'{sa_out_proposal}/{sa_proposal_dh_group_str}'
+
+ # add a new item to output data
+ sa_data.append([
+ sa_out_name, sa_out_state, sa_out_uptime, sa_out_bytes,
+ sa_out_packets, sa_out_remote_addr, sa_out_remote_id,
+ sa_out_proposal
+ ])
+
+ headers = [
+ "Connection", "State", "Uptime", "Bytes In/Out", "Packets In/Out",
+ "Remote address", "Remote ID", "Proposal"
+ ]
+ sa_data = sorted(sa_data, key=_alphanum_key)
+ output = tabulate(sa_data, headers)
+ return output
+
+
def get_peer_connections(peer, tunnel, return_all = False):
peer = peer.replace(':', '-')
search = rf'^[\s]*(peer_{peer}_(tunnel_[\d]+|vti)).*'
@@ -61,6 +170,13 @@ def reset_peer(peer: str, tunnel:str):
print('Peer reset result: ' + ('success' if result else 'failed'))
+def show_sa(raw: bool):
+ sa_data = _get_raw_data_sas()
+ if raw:
+ return sa_data
+ return _get_formatted_output_sas(sa_data)
+
+
if __name__ == '__main__':
try:
res = vyos.opmode.run(sys.modules[__name__])
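
The _convert() and _alphanum_key() helpers give the SA table a natural sort, so numbered connection names order the way a human expects. For example:

from re import split as re_split

def _convert(text):
    return int(text) if text.isdigit() else text.lower()

def _alphanum_key(key):
    return [_convert(c) for c in re_split('([0-9]+)', str(key))]

print(sorted(['peer_10', 'peer_2', 'peer_1'], key=_alphanum_key))
# ['peer_1', 'peer_2', 'peer_10']
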
diff --git a/src/op_mode/nat.py b/src/op_mode/nat.py
index 12fc4c782..1339d5b92 100755
--- a/src/op_mode/nat.py
+++ b/src/op_mode/nat.py
@@ -17,6 +17,7 @@
import jmespath
import json
import sys
+import xmltodict
from sys import exit
from tabulate import tabulate
@@ -27,7 +28,30 @@ from vyos.util import dict_search
import vyos.opmode
-def _get_json_data(direction):
+def _get_xml_translation(direction, family):
+ """
+ Get conntrack XML output --src-nat|--dst-nat
+ """
+ if direction == 'source':
+ opt = '--src-nat'
+ if direction == 'destination':
+ opt = '--dst-nat'
+ return cmd(f'sudo conntrack --dump --family {family} {opt} --output xml')
+
+
+def _xml_to_dict(xml):
+ """
+ Convert XML to dictionary
+ Return: dictionary
+ """
+ parse = xmltodict.parse(xml, attr_prefix='')
+ # If only one conntrack entry we must change dict
+ if 'meta' in parse['conntrack']['flow']:
+ return dict(conntrack={'flow': [parse['conntrack']['flow']]})
+ return parse
+
+
+def _get_json_data(direction, family):
"""
Get NAT format JSON
"""
@@ -35,14 +59,15 @@ def _get_json_data(direction):
chain = 'POSTROUTING'
if direction == 'destination':
chain = 'PREROUTING'
- return cmd(f'sudo nft --json list chain ip nat {chain}')
+ family = 'ip6' if family == 'inet6' else 'ip'
+ return cmd(f'sudo nft --json list chain {family} nat {chain}')
-def _get_raw_data_rules(direction):
+def _get_raw_data_rules(direction, family):
"""Get interested rules
:returns dict
"""
- data = _get_json_data(direction)
+ data = _get_json_data(direction, family)
data_dict = json.loads(data)
rules = []
for rule in data_dict['nftables']:
@@ -51,10 +76,28 @@ def _get_raw_data_rules(direction):
return rules
-def _get_formatted_output_rules(data, direction):
+def _get_raw_translation(direction, family):
+ """
+ Return: dictionary
+ """
+ xml = _get_xml_translation(direction, family)
+ if len(xml) == 0:
+ output = {'conntrack':
+ {
+ 'error': True,
+ 'reason': 'entries not found'
+ }
+ }
+ return output
+ return _xml_to_dict(xml)
+
+
+def _get_formatted_output_rules(data, direction, family):
# Add default values before loop
sport, dport, proto = 'any', 'any', 'any'
- saddr, daddr = '0.0.0.0/0', '0.0.0.0/0'
+ saddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
+ daddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
+
data_entries = []
for rule in data:
if 'comment' in rule['rule']:
@@ -69,11 +112,13 @@ def _get_formatted_output_rules(data, direction):
if 'prefix' in match['right'] or 'set' in match['right']:
# Merge dict src/dst l3_l4 parameters
my_dict = {**match['left']['payload'], **match['right']}
+ my_dict['op'] = match['op']
+ op = '!' if my_dict.get('op') == '!=' else ''
proto = my_dict.get('protocol').upper()
if my_dict['field'] == 'saddr':
- saddr = f'{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
+ saddr = f'{op}{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
elif my_dict['field'] == 'daddr':
- daddr = f'{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
+ daddr = f'{op}{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
elif my_dict['field'] == 'sport':
# Port range or single port
if jmespath.search('set[*].range', my_dict):
@@ -96,8 +141,8 @@ def _get_formatted_output_rules(data, direction):
if jmespath.search('left.payload.field', match) == 'daddr':
daddr = match.get('right')
else:
- saddr = '0.0.0.0/0'
- daddr = '0.0.0.0/0'
+ saddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
+ daddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
sport = 'any'
dport = 'any'
proto = 'any'
@@ -175,22 +220,83 @@ def _get_formatted_output_statistics(data, direction):
return output
-def show_rules(raw: bool, direction: str):
- nat_rules = _get_raw_data_rules(direction)
+def _get_formatted_translation(dict_data, nat_direction, family):
+ data_entries = []
+ if 'error' in dict_data['conntrack']:
+ return 'Entries not found'
+ for entry in dict_data['conntrack']['flow']:
+ orig_src, orig_dst, orig_sport, orig_dport = {}, {}, {}, {}
+ reply_src, reply_dst, reply_sport, reply_dport = {}, {}, {}, {}
+ proto = {}
+ for meta in entry['meta']:
+ direction = meta['direction']
+ if direction in ['original']:
+ if 'layer3' in meta:
+ orig_src = meta['layer3']['src']
+ orig_dst = meta['layer3']['dst']
+ if 'layer4' in meta:
+ if meta.get('layer4').get('sport'):
+ orig_sport = meta['layer4']['sport']
+ if meta.get('layer4').get('dport'):
+ orig_dport = meta['layer4']['dport']
+ proto = meta['layer4']['protoname']
+ if direction in ['reply']:
+ if 'layer3' in meta:
+ reply_src = meta['layer3']['src']
+ reply_dst = meta['layer3']['dst']
+ if 'layer4' in meta:
+ if meta.get('layer4').get('sport'):
+ reply_sport = meta['layer4']['sport']
+ if meta.get('layer4').get('dport'):
+ reply_dport = meta['layer4']['dport']
+ proto = meta['layer4']['protoname']
+ if direction == 'independent':
+ conn_id = meta['id']
+ timeout = meta['timeout']
+ orig_src = f'{orig_src}:{orig_sport}' if orig_sport else orig_src
+ orig_dst = f'{orig_dst}:{orig_dport}' if orig_dport else orig_dst
+ reply_src = f'{reply_src}:{reply_sport}' if reply_sport else reply_src
+ reply_dst = f'{reply_dst}:{reply_dport}' if reply_dport else reply_dst
+ state = meta['state'] if 'state' in meta else ''
+ mark = meta['mark']
+ zone = meta['zone'] if 'zone' in meta else ''
+ if nat_direction == 'source':
+ data_entries.append(
+ [orig_src, reply_dst, proto, timeout, mark, zone])
+ elif nat_direction == 'destination':
+ data_entries.append(
+ [orig_dst, reply_src, proto, timeout, mark, zone])
+
+ headers = ["Pre-NAT", "Post-NAT", "Proto", "Timeout", "Mark", "Zone"]
+ output = tabulate(data_entries, headers, numalign="left")
+ return output
+
+
+def show_rules(raw: bool, direction: str, family: str):
+ nat_rules = _get_raw_data_rules(direction, family)
if raw:
return nat_rules
else:
- return _get_formatted_output_rules(nat_rules, direction)
+ return _get_formatted_output_rules(nat_rules, direction, family)
-def show_statistics(raw: bool, direction: str):
- nat_statistics = _get_raw_data_rules(direction)
+def show_statistics(raw: bool, direction: str, family: str):
+ nat_statistics = _get_raw_data_rules(direction, family)
if raw:
return nat_statistics
else:
return _get_formatted_output_statistics(nat_statistics, direction)
+def show_translations(raw: bool, direction: str, family: str):
+ family = 'ipv6' if family == 'inet6' else 'ipv4'
+ nat_translation = _get_raw_translation(direction, family)
+ if raw:
+ return nat_translation
+ else:
+ return _get_formatted_translation(nat_translation, direction, family)
+
+
if __name__ == '__main__':
try:
res = vyos.opmode.run(sys.modules[__name__])
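
_xml_to_dict() wraps a lone conntrack flow in a list because xmltodict only produces a list when an element repeats. A small illustration (the sample XML is made up):

import xmltodict

single = '<conntrack><flow><meta direction="original"/></flow></conntrack>'
parsed = xmltodict.parse(single, attr_prefix='')
print(isinstance(parsed['conntrack']['flow'], list))  # False -> wrap it in a list
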
diff --git a/src/op_mode/show_nat66_rules.py b/src/op_mode/show_nat66_rules.py
deleted file mode 100755
index 967ec9d37..000000000
--- a/src/op_mode/show_nat66_rules.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2021 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import jmespath
-import json
-
-from argparse import ArgumentParser
-from jinja2 import Template
-from sys import exit
-from vyos.util import cmd
-from vyos.util import dict_search
-
-parser = ArgumentParser()
-group = parser.add_mutually_exclusive_group()
-group.add_argument("--source", help="Show statistics for configured source NAT rules", action="store_true")
-group.add_argument("--destination", help="Show statistics for configured destination NAT rules", action="store_true")
-args = parser.parse_args()
-
-if args.source or args.destination:
- tmp = cmd('sudo nft -j list table ip6 nat')
- tmp = json.loads(tmp)
-
- format_nat66_rule = '{0: <10} {1: <50} {2: <50} {3: <10}'
- print(format_nat66_rule.format("Rule", "Source" if args.source else "Destination", "Translation", "Outbound Interface" if args.source else "Inbound Interface"))
- print(format_nat66_rule.format("----", "------" if args.source else "-----------", "-----------", "------------------" if args.source else "-----------------"))
-
- data_json = jmespath.search('nftables[?rule].rule[?chain]', tmp)
- for idx in range(0, len(data_json)):
- data = data_json[idx]
-
- # The following key values must exist
- # When the rule JSON does not have some keys, this is not a rule we can work with
- continue_rule = False
- for key in ['comment', 'chain', 'expr']:
- if key not in data:
- continue_rule = True
- continue
- if continue_rule:
- continue
-
- comment = data['comment']
-
- # Check the annotation to see if the annotation format is created by VYOS
- continue_rule = True
- for comment_prefix in ['SRC-NAT66-', 'DST-NAT66-']:
- if comment_prefix in comment:
- continue_rule = False
- if continue_rule:
- continue
-
- # When log is detected from the second index of expr, then this rule should be ignored
- if 'log' in data['expr'][2]:
- continue
-
- rule = comment.replace('SRC-NAT66-','')
- rule = rule.replace('DST-NAT66-','')
- chain = data['chain']
- if not ((args.source and chain == 'POSTROUTING') or (not args.source and chain == 'PREROUTING')):
- continue
- interface = dict_search('match.right', data['expr'][0])
- srcdest = dict_search('match.right.prefix.addr', data['expr'][2])
- if srcdest:
- addr_tmp = dict_search('match.right.prefix.len', data['expr'][2])
- if addr_tmp:
- srcdest = srcdest + '/' + str(addr_tmp)
- else:
- srcdest = dict_search('match.right', data['expr'][2])
-
- tran_addr_json = dict_search('snat.addr' if args.source else 'dnat.addr', data['expr'][3])
- if tran_addr_json:
- if isinstance(srcdest_json,str):
- tran_addr = tran_addr_json
-
- if 'prefix' in tran_addr_json:
- addr_tmp = dict_search('snat.addr.prefix.addr' if args.source else 'dnat.addr.prefix.addr', data['expr'][3])
- len_tmp = dict_search('snat.addr.prefix.len' if args.source else 'dnat.addr.prefix.len', data['expr'][3])
- if addr_tmp:
- tran_addr = addr_tmp + '/' + str(len_tmp)
- else:
- if 'masquerade' in data['expr'][3]:
- tran_addr = 'masquerade'
-
- print(format_nat66_rule.format(rule, srcdest, tran_addr, interface))
-
- exit(0)
-else:
- parser.print_help()
- exit(1)
-
diff --git a/src/services/api/graphql/graphql/mutations.py b/src/services/api/graphql/graphql/mutations.py
index 3e89fb239..c8ae0f516 100644
--- a/src/services/api/graphql/graphql/mutations.py
+++ b/src/services/api/graphql/graphql/mutations.py
@@ -21,7 +21,7 @@ from makefun import with_signature
from .. import state
from .. import key_auth
-from api.graphql.recipes.session import Session
+from api.graphql.session.session import Session
mutation = ObjectType("Mutation")
@@ -71,7 +71,7 @@ def make_mutation_resolver(mutation_name, class_name, session_func):
# one may override the session functions with a local subclass
try:
- mod = import_module(f'api.graphql.recipes.{func_base_name}')
+ mod = import_module(f'api.graphql.session.override.{func_base_name}')
klass = getattr(mod, class_name)
except ImportError:
# otherwise, dynamically generate subclass to invoke subclass
diff --git a/src/services/api/graphql/graphql/queries.py b/src/services/api/graphql/graphql/queries.py
index f6544709e..921a66274 100644
--- a/src/services/api/graphql/graphql/queries.py
+++ b/src/services/api/graphql/graphql/queries.py
@@ -21,7 +21,7 @@ from makefun import with_signature
from .. import state
from .. import key_auth
-from api.graphql.recipes.session import Session
+from api.graphql.session.session import Session
query = ObjectType("Query")
@@ -71,7 +71,7 @@ def make_query_resolver(query_name, class_name, session_func):
# one may override the session functions with a local subclass
try:
- mod = import_module(f'api.graphql.recipes.{func_base_name}')
+ mod = import_module(f'api.graphql.session.override.{func_base_name}')
klass = getattr(mod, class_name)
except ImportError:
# otherwise, dynamically generate subclass to invoke subclass
diff --git a/src/services/api/graphql/recipes/__init__.py b/src/services/api/graphql/session/__init__.py
index e69de29bb..e69de29bb 100644
--- a/src/services/api/graphql/recipes/__init__.py
+++ b/src/services/api/graphql/session/__init__.py
diff --git a/src/services/api/graphql/recipes/queries/system_status.py b/src/services/api/graphql/session/composite/system_status.py
index 8dadcc9f3..8dadcc9f3 100755
--- a/src/services/api/graphql/recipes/queries/system_status.py
+++ b/src/services/api/graphql/session/composite/system_status.py
diff --git a/src/services/api/graphql/recipes/remove_firewall_address_group_members.py b/src/services/api/graphql/session/override/remove_firewall_address_group_members.py
index b91932e14..b91932e14 100644
--- a/src/services/api/graphql/recipes/remove_firewall_address_group_members.py
+++ b/src/services/api/graphql/session/override/remove_firewall_address_group_members.py
diff --git a/src/services/api/graphql/recipes/session.py b/src/services/api/graphql/session/session.py
index ac185beb7..23bc7154c 100644
--- a/src/services/api/graphql/recipes/session.py
+++ b/src/services/api/graphql/session/session.py
@@ -149,7 +149,7 @@ class Session:
return res
def system_status(self):
- import api.graphql.recipes.queries.system_status as system_status
+ import api.graphql.session.composite.system_status as system_status
session = self._session
data = self._data
diff --git a/src/services/api/graphql/recipes/templates/create_dhcp_server.tmpl b/src/services/api/graphql/session/templates/create_dhcp_server.tmpl
index 70de43183..70de43183 100644
--- a/src/services/api/graphql/recipes/templates/create_dhcp_server.tmpl
+++ b/src/services/api/graphql/session/templates/create_dhcp_server.tmpl
diff --git a/src/services/api/graphql/recipes/templates/create_firewall_address_group.tmpl b/src/services/api/graphql/session/templates/create_firewall_address_group.tmpl
index a890d0086..a890d0086 100644
--- a/src/services/api/graphql/recipes/templates/create_firewall_address_group.tmpl
+++ b/src/services/api/graphql/session/templates/create_firewall_address_group.tmpl
diff --git a/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl b/src/services/api/graphql/session/templates/create_firewall_address_ipv_6_group.tmpl
index e9b660722..e9b660722 100644
--- a/src/services/api/graphql/recipes/templates/create_firewall_address_ipv_6_group.tmpl
+++ b/src/services/api/graphql/session/templates/create_firewall_address_ipv_6_group.tmpl
diff --git a/src/services/api/graphql/recipes/templates/create_interface_ethernet.tmpl b/src/services/api/graphql/session/templates/create_interface_ethernet.tmpl
index d9d7ed691..d9d7ed691 100644
--- a/src/services/api/graphql/recipes/templates/create_interface_ethernet.tmpl
+++ b/src/services/api/graphql/session/templates/create_interface_ethernet.tmpl
diff --git a/src/services/api/graphql/recipes/templates/remove_firewall_address_group_members.tmpl b/src/services/api/graphql/session/templates/remove_firewall_address_group_members.tmpl
index 458f3e5fc..458f3e5fc 100644
--- a/src/services/api/graphql/recipes/templates/remove_firewall_address_group_members.tmpl
+++ b/src/services/api/graphql/session/templates/remove_firewall_address_group_members.tmpl
diff --git a/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/session/templates/remove_firewall_address_ipv_6_group_members.tmpl
index 0efa0b226..0efa0b226 100644
--- a/src/services/api/graphql/recipes/templates/remove_firewall_address_ipv_6_group_members.tmpl
+++ b/src/services/api/graphql/session/templates/remove_firewall_address_ipv_6_group_members.tmpl
diff --git a/src/services/api/graphql/recipes/templates/update_firewall_address_group_members.tmpl b/src/services/api/graphql/session/templates/update_firewall_address_group_members.tmpl
index f56c61231..f56c61231 100644
--- a/src/services/api/graphql/recipes/templates/update_firewall_address_group_members.tmpl
+++ b/src/services/api/graphql/session/templates/update_firewall_address_group_members.tmpl
diff --git a/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/session/templates/update_firewall_address_ipv_6_group_members.tmpl
index f98a5517c..f98a5517c 100644
--- a/src/services/api/graphql/recipes/templates/update_firewall_address_ipv_6_group_members.tmpl
+++ b/src/services/api/graphql/session/templates/update_firewall_address_ipv_6_group_members.tmpl
diff --git a/src/services/api/graphql/utils/schema_from_op_mode.py b/src/services/api/graphql/utils/schema_from_op_mode.py
index d27586747..f990aae52 100755
--- a/src/services/api/graphql/utils/schema_from_op_mode.py
+++ b/src/services/api/graphql/utils/schema_from_op_mode.py
@@ -25,7 +25,10 @@ from inspect import signature, getmembers, isfunction
from jinja2 import Template
from vyos.defaults import directories
-from . util import load_as_module, is_op_mode_function_name, is_show_function_name
+if __package__ is None or __package__ == '':
+ from util import load_as_module, is_op_mode_function_name, is_show_function_name
+else:
+ from . util import load_as_module, is_op_mode_function_name, is_show_function_name
OP_MODE_PATH = directories['op_mode']
SCHEMA_PATH = directories['api_schema']
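
The conditional import keeps schema_from_op_mode.py usable both as a package submodule and as a standalone script. A stripped-down sketch of the same idiom:

if __package__ in (None, ''):
    import util                # run directly: util.py resolved via sys.path
else:
    from . import util         # imported as part of the package
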
diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server
index af8837e1e..190f3409d 100755
--- a/src/services/vyos-http-api-server
+++ b/src/services/vyos-http-api-server
@@ -678,6 +678,7 @@ if __name__ == '__main__':
server_config = load_server_config()
except Exception as err:
logger.critical(f"Failed to load the HTTP API server config: {err}")
+ sys.exit(1)
config_session = ConfigSession(os.getpid())
diff --git a/src/system/keepalived-fifo.py b/src/system/keepalived-fifo.py
index a8df232ae..a0fccd1d0 100755
--- a/src/system/keepalived-fifo.py
+++ b/src/system/keepalived-fifo.py
@@ -30,6 +30,7 @@ from vyos.ifconfig.vrrp import VRRP
from vyos.configquery import ConfigTreeQuery
from vyos.util import cmd
from vyos.util import dict_search
+from vyos.util import commit_in_progress
# configure logging
logger = logging.getLogger(__name__)
@@ -63,6 +64,17 @@ class KeepalivedFifo:
# load configuration
def _config_load(self):
+ # For VRRP configuration to be read, the commit must be finished
+ count = 1
+ while commit_in_progress():
+ if ( count <= 40 ):
+ logger.debug(f'commit in progress try: {count}')
+ else:
+                logger.error(f'commit still in progress after {count} tries, continuing anyway')
+ break
+ count += 1
+ time.sleep(0.5)
+
try:
base = ['high-availability', 'vrrp']
conf = ConfigTreeQuery()
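
The FIFO listener now waits for any in-flight commit to finish before reading the VRRP configuration. The same bounded wait expressed as a standalone helper (timing values mirror the loop above; commit_in_progress() is the vyos.util function imported in this change):

import time
from vyos.util import commit_in_progress

def wait_for_commit(max_tries: int = 40, interval: float = 0.5) -> bool:
    """Return True as soon as no commit is running, False after giving up."""
    for _ in range(max_tries):
        if not commit_in_progress():
            return True
        time.sleep(interval)
    return False
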
diff --git a/src/systemd/telegraf.service b/src/systemd/telegraf.service
new file mode 100644
index 000000000..553942ac6
--- /dev/null
+++ b/src/systemd/telegraf.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=The plugin-driven server agent for reporting metrics into InfluxDB
+Documentation=https://github.com/influxdata/telegraf
+After=network.target
+
+[Service]
+EnvironmentFile=-/etc/default/telegraf
+ExecStart=/usr/bin/telegraf --config /run/telegraf/vyos-telegraf.conf --config-directory /etc/telegraf/telegraf.d
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=on-failure
+RestartForceExitStatus=SIGPIPE
+KillMode=control-group
+
+[Install]
+WantedBy=multi-user.target