61 files changed, 1876 insertions, 254 deletions
diff --git a/data/templates/container/containers.conf.j2 b/data/templates/container/containers.conf.j2 index c8b54dfbb..65436801e 100644 --- a/data/templates/container/containers.conf.j2 +++ b/data/templates/container/containers.conf.j2 @@ -172,7 +172,11 @@ default_sysctls = [ # Logging driver for the container. Available options: k8s-file and journald. # +{% if log_driver is vyos_defined %} +log_driver = "{{ log_driver }}" +{% else %} #log_driver = "k8s-file" +{% endif %} # Maximum size allowed for the container log file. Negative numbers indicate # that no size limit is imposed. If positive, it must be >= 8192 to match or diff --git a/data/templates/firewall/nftables-defines.j2 b/data/templates/firewall/nftables-defines.j2 index 3147b4c37..a1d1fa4f6 100644 --- a/data/templates/firewall/nftables-defines.j2 +++ b/data/templates/firewall/nftables-defines.j2 @@ -44,6 +44,15 @@ } {% endfor %} {% endif %} +{% if group.remote_group is vyos_defined and is_l3 and is_ipv6 %} +{% for name, name_config in group.remote_group.items() %} + set R6_{{ name }} { + type {{ ip_type }} + flags interval + auto-merge + } +{% endfor %} +{% endif %} {% if group.mac_group is vyos_defined %} {% for group_name, group_conf in group.mac_group.items() %} {% set includes = group_conf.include if group_conf.include is vyos_defined else [] %} diff --git a/data/templates/firewall/nftables.j2 b/data/templates/firewall/nftables.j2 index 67473da8e..a78119a80 100755 --- a/data/templates/firewall/nftables.j2 +++ b/data/templates/firewall/nftables.j2 @@ -47,7 +47,7 @@ table ip vyos_filter { chain VYOS_FORWARD_{{ prior }} { type filter hook forward priority {{ prior }}; policy accept; {% if global_options.state_policy is vyos_defined %} - jump VYOS_STATE_POLICY + jump VYOS_STATE_POLICY_FORWARD {% endif %} {% if conf.rule is vyos_defined %} {% for rule_id, rule_conf in conf.rule.items() if rule_conf.disable is not vyos_defined %} @@ -180,6 +180,22 @@ table ip vyos_filter { {% endif %} return } + + chain VYOS_STATE_POLICY_FORWARD { +{% if global_options.state_policy.offload is vyos_defined %} + counter flow add @VYOS_FLOWTABLE_{{ global_options.state_policy.offload.offload_target }} +{% endif %} +{% if global_options.state_policy.established is vyos_defined %} + {{ global_options.state_policy.established | nft_state_policy('established') }} +{% endif %} +{% if global_options.state_policy.invalid is vyos_defined %} + {{ global_options.state_policy.invalid | nft_state_policy('invalid') }} +{% endif %} +{% if global_options.state_policy.related is vyos_defined %} + {{ global_options.state_policy.related | nft_state_policy('related') }} +{% endif %} + return + } {% endif %} } @@ -200,7 +216,7 @@ table ip6 vyos_filter { chain VYOS_IPV6_FORWARD_{{ prior }} { type filter hook forward priority {{ prior }}; policy accept; {% if global_options.state_policy is vyos_defined %} - jump VYOS_STATE_POLICY6 + jump VYOS_STATE_POLICY6_FORWARD {% endif %} {% if conf.rule is vyos_defined %} {% for rule_id, rule_conf in conf.rule.items() if rule_conf.disable is not vyos_defined %} @@ -331,6 +347,22 @@ table ip6 vyos_filter { {% endif %} return } + + chain VYOS_STATE_POLICY6_FORWARD { +{% if global_options.state_policy.offload is vyos_defined %} + counter flow add @VYOS_FLOWTABLE_{{ global_options.state_policy.offload.offload_target }} +{% endif %} +{% if global_options.state_policy.established is vyos_defined %} + {{ global_options.state_policy.established | nft_state_policy('established') }} +{% endif %} +{% if global_options.state_policy.invalid is 
vyos_defined %} + {{ global_options.state_policy.invalid | nft_state_policy('invalid') }} +{% endif %} +{% if global_options.state_policy.related is vyos_defined %} + {{ global_options.state_policy.related | nft_state_policy('related') }} +{% endif %} + return + } {% endif %} } diff --git a/data/templates/load-balancing/haproxy.cfg.j2 b/data/templates/load-balancing/haproxy.cfg.j2 index 70ea5d2b0..62934c612 100644 --- a/data/templates/load-balancing/haproxy.cfg.j2 +++ b/data/templates/load-balancing/haproxy.cfg.j2 @@ -50,9 +50,29 @@ defaults errorfile 503 /etc/haproxy/errors/503.http errorfile 504 /etc/haproxy/errors/504.http +# Default ACME backend +backend buildin_acme_certbot + server localhost 127.0.0.1:{{ get_default_port('certbot_haproxy') }} + # Frontend {% if service is vyos_defined %} {% for front, front_config in service.items() %} +{% if front_config.redirect_http_to_https is vyos_defined %} +{% set certbot_backend_name = 'certbot_' ~ front ~ '_backend' %} +frontend {{ front }}-http + mode http +{% if front_config.listen_address is vyos_defined %} +{% for address in front_config.listen_address %} + bind {{ address | bracketize_ipv6 }}:80 +{% endfor %} +{% else %} + bind [::]:80 v4v6 +{% endif %} + acl acme_acl path_beg /.well-known/acme-challenge/ + use_backend buildin_acme_certbot if acme_acl + redirect scheme https code 301 if !acme_acl +{% endif %} + frontend {{ front }} {% set ssl_front = [] %} {% if front_config.ssl.certificate is vyos_defined and front_config.ssl.certificate is iterable %} @@ -68,9 +88,6 @@ frontend {{ front }} {% else %} bind [::]:{{ front_config.port }} v4v6 {{ ssl_directive }} {{ ssl_front | join(' ') }} {% endif %} -{% if front_config.redirect_http_to_https is vyos_defined %} - http-request redirect scheme https unless { ssl_fc } -{% endif %} {% if front_config.logging is vyos_defined %} {% for facility, facility_config in front_config.logging.facility.items() %} log /dev/log {{ facility }} {{ facility_config.level }} @@ -237,6 +254,5 @@ backend {{ back }} {% if back_config.timeout.server is vyos_defined %} timeout server {{ back_config.timeout.server }}s {% endif %} - {% endfor %} {% endif %} diff --git a/data/templates/prometheus/node_exporter.service.j2 b/data/templates/prometheus/node_exporter.service.j2 index 135439bd6..9a943cd75 100644 --- a/data/templates/prometheus/node_exporter.service.j2 +++ b/data/templates/prometheus/node_exporter.service.j2 @@ -9,6 +9,11 @@ After=network.target User=node_exporter {% endif %} ExecStart={{ vrf_command }}/usr/sbin/node_exporter \ +{% if collectors is vyos_defined %} +{% if collectors.textfile is vyos_defined %} + --collector.textfile.directory=/run/node_exporter/collector \ +{% endif %} +{% endif %} {% if listen_address is vyos_defined %} {% for address in listen_address %} --web.listen-address={{ address }}:{{ port }} @@ -16,10 +21,6 @@ ExecStart={{ vrf_command }}/usr/sbin/node_exporter \ {% else %} --web.listen-address=:{{ port }} {% endif %} -{% if collectors is vyos_defined %} -{% if collectors.textfile is vyos_defined %} - --collector.textfile.directory=/run/node_exporter/collector -{% endif %} -{% endif %} + [Install] WantedBy=multi-user.target diff --git a/data/templates/router-advert/radvd.conf.j2 b/data/templates/router-advert/radvd.conf.j2 index e37cfde6c..34f8e1f6d 100644 --- a/data/templates/router-advert/radvd.conf.j2 +++ b/data/templates/router-advert/radvd.conf.j2 @@ -57,12 +57,20 @@ interface {{ iface }} { }; {% endfor %} {% endif %} -{% if iface_config.auto_ignore is vyos_defined %} +{% if 
iface_config.prefix is vyos_defined and "::/64" in iface_config.prefix %} +{% if iface_config.auto_ignore is vyos_defined or iface_config.prefix | count > 1 %} autoignoreprefixes { -{% for auto_ignore_prefix in iface_config.auto_ignore %} +{% if iface_config.auto_ignore is vyos_defined %} +{% for auto_ignore_prefix in (iface_config.auto_ignore + iface_config.prefix | list) | reject("eq", "::/64") | unique %} {{ auto_ignore_prefix }}; -{% endfor %} +{% endfor %} +{% else %} +{% for auto_ignore_prefix in iface_config.prefix | reject("eq", "::/64") %} + {{ auto_ignore_prefix }}; +{% endfor %} +{% endif %} }; +{% endif %} {% endif %} {% if iface_config.prefix is vyos_defined %} {% for prefix, prefix_options in iface_config.prefix.items() %} diff --git a/debian/control b/debian/control index ec3147820..9b798eb97 100644 --- a/debian/control +++ b/debian/control @@ -120,7 +120,7 @@ Depends: dosfstools, grub-efi-amd64-signed [amd64], grub-efi-arm64-bin [arm64], - mokutil [amd64], + mokutil, shim-signed [amd64], sbsigntool [amd64], # Image signature verification tool diff --git a/interface-definitions/container.xml.in b/interface-definitions/container.xml.in index 3a5cfbaa6..434bf7528 100644 --- a/interface-definitions/container.xml.in +++ b/interface-definitions/container.xml.in @@ -75,6 +75,12 @@ <multi/> </properties> </leafNode> + <leafNode name="privileged"> + <properties> + <help>Grant root capabilities to the container</help> + <valueless/> + </properties> + </leafNode> <node name="sysctl"> <properties> <help>Configure namespaced kernel parameters of the container</help> @@ -621,6 +627,25 @@ </node> </children> </tagNode> + <leafNode name="log-driver"> + <properties> + <help>Configure container log driver</help> + <completionHelp> + <list>k8s-file journald</list> + </completionHelp> + <valueHelp> + <format>k8s-file</format> + <description>Logs to plain-text json file</description> + </valueHelp> + <valueHelp> + <format>journald</format> + <description>Logs to systemd's journal</description> + </valueHelp> + <constraint> + <regex>(k8s-file|journald)</regex> + </constraint> + </properties> + </leafNode> </children> </node> </interfaceDefinition> diff --git a/interface-definitions/include/firewall/common-rule-ipv6.xml.i b/interface-definitions/include/firewall/common-rule-ipv6.xml.i index bb176fe71..65ec415fb 100644 --- a/interface-definitions/include/firewall/common-rule-ipv6.xml.i +++ b/interface-definitions/include/firewall/common-rule-ipv6.xml.i @@ -16,6 +16,7 @@ #include <include/firewall/port.xml.i> #include <include/firewall/source-destination-group-ipv6.xml.i> #include <include/firewall/source-destination-dynamic-group-ipv6.xml.i> + #include <include/firewall/source-destination-remote-group.xml.i> </children> </node> <leafNode name="jump-target"> @@ -39,6 +40,7 @@ #include <include/firewall/port.xml.i> #include <include/firewall/source-destination-group-ipv6.xml.i> #include <include/firewall/source-destination-dynamic-group-ipv6.xml.i> + #include <include/firewall/source-destination-remote-group.xml.i> </children> </node> -<!-- include end -->
\ No newline at end of file +<!-- include end --> diff --git a/interface-definitions/include/firewall/global-options.xml.i b/interface-definitions/include/firewall/global-options.xml.i index 355b41fde..7393ff5c9 100644 --- a/interface-definitions/include/firewall/global-options.xml.i +++ b/interface-definitions/include/firewall/global-options.xml.i @@ -217,6 +217,14 @@ <help>Global firewall state-policy</help> </properties> <children> + <node name="offload"> + <properties> + <help>All stateful forward traffic is offloaded to a flowtable</help> + </properties> + <children> + #include <include/firewall/offload-target.xml.i> + </children> + </node> <node name="established"> <properties> <help>Global firewall policy for packets part of an established connection</help> diff --git a/interface-definitions/include/haproxy/logging.xml.i b/interface-definitions/include/haproxy/logging.xml.i index e0af54fa4..315c959bf 100644 --- a/interface-definitions/include/haproxy/logging.xml.i +++ b/interface-definitions/include/haproxy/logging.xml.i @@ -4,7 +4,137 @@ <help>Logging parameters</help> </properties> <children> - #include <include/syslog-facility.xml.i> + <tagNode name="facility"> + <properties> + <help>Facility for logging</help> + <completionHelp> + <list>auth cron daemon kern lpr mail news syslog user uucp local0 local1 local2 local3 local4 local5 local6 local7</list> + </completionHelp> + <constraint> + <regex>(auth|cron|daemon|kern|lpr|mail|news|syslog|user|uucp|local0|local1|local2|local3|local4|local5|local6|local7)</regex> + </constraint> + <constraintErrorMessage>Invalid facility type</constraintErrorMessage> + <valueHelp> + <format>auth</format> + <description>Authentication and authorization</description> + </valueHelp> + <valueHelp> + <format>cron</format> + <description>Cron daemon</description> + </valueHelp> + <valueHelp> + <format>daemon</format> + <description>System daemons</description> + </valueHelp> + <valueHelp> + <format>kern</format> + <description>Kernel</description> + </valueHelp> + <valueHelp> + <format>lpr</format> + <description>Line printer spooler</description> + </valueHelp> + <valueHelp> + <format>mail</format> + <description>Mail subsystem</description> + </valueHelp> + <valueHelp> + <format>news</format> + <description>USENET subsystem</description> + </valueHelp> + <valueHelp> + <format>syslog</format> + <description>Authentication and authorization</description> + </valueHelp> + <valueHelp> + <format>user</format> + <description>Application processes</description> + </valueHelp> + <valueHelp> + <format>uucp</format> + <description>UUCP subsystem</description> + </valueHelp> + <valueHelp> + <format>local0</format> + <description>Local facility 0</description> + </valueHelp> + <valueHelp> + <format>local1</format> + <description>Local facility 1</description> + </valueHelp> + <valueHelp> + <format>local2</format> + <description>Local facility 2</description> + </valueHelp> + <valueHelp> + <format>local3</format> + <description>Local facility 3</description> + </valueHelp> + <valueHelp> + <format>local4</format> + <description>Local facility 4</description> + </valueHelp> + <valueHelp> + <format>local5</format> + <description>Local facility 5</description> + </valueHelp> + <valueHelp> + <format>local6</format> + <description>Local facility 6</description> + </valueHelp> + <valueHelp> + <format>local7</format> + <description>Local facility 7</description> + </valueHelp> + </properties> + <children> + <leafNode name="level"> + <properties> + <help>Logging 
level</help> + <completionHelp> + <list>emerg alert crit err warning notice info debug</list> + </completionHelp> + <valueHelp> + <format>emerg</format> + <description>Emergency messages</description> + </valueHelp> + <valueHelp> + <format>alert</format> + <description>Urgent messages</description> + </valueHelp> + <valueHelp> + <format>crit</format> + <description>Critical messages</description> + </valueHelp> + <valueHelp> + <format>err</format> + <description>Error messages</description> + </valueHelp> + <valueHelp> + <format>warning</format> + <description>Warning messages</description> + </valueHelp> + <valueHelp> + <format>notice</format> + <description>Messages for further investigation</description> + </valueHelp> + <valueHelp> + <format>info</format> + <description>Informational messages</description> + </valueHelp> + <valueHelp> + <format>debug</format> + <description>Debug messages</description> + </valueHelp> + <constraint> + <regex>(emerg|alert|crit|err|warning|notice|info|debug)</regex> + </constraint> + <constraintErrorMessage>Invalid loglevel</constraintErrorMessage> + </properties> + <defaultValue>err</defaultValue> + </leafNode> + </children> + </tagNode> </children> </node> <!-- include end --> diff --git a/interface-definitions/include/version/reverseproxy-version.xml.i b/interface-definitions/include/version/reverseproxy-version.xml.i index 4f09f2848..71f7def1a 100644 --- a/interface-definitions/include/version/reverseproxy-version.xml.i +++ b/interface-definitions/include/version/reverseproxy-version.xml.i @@ -1,3 +1,3 @@ <!-- include start from include/version/reverseproxy-version.xml.i --> -<syntaxVersion component='reverse-proxy' version='2'></syntaxVersion> +<syntaxVersion component='reverse-proxy' version='3'></syntaxVersion> <!-- include end --> diff --git a/interface-definitions/load-balancing_haproxy.xml.in b/interface-definitions/load-balancing_haproxy.xml.in index b95e02337..f0f64e75a 100644 --- a/interface-definitions/load-balancing_haproxy.xml.in +++ b/interface-definitions/load-balancing_haproxy.xml.in @@ -4,7 +4,7 @@ <children> <node name="haproxy" owner="${vyos_conf_scripts_dir}/load-balancing_haproxy.py"> <properties> - <help>Configure haproxy</help> + <help>HAProxy TCP/HTTP Load Balancer</help> <priority>900</priority> </properties> <children> @@ -26,7 +26,7 @@ <constraintErrorMessage>Backend name must be alphanumeric and can contain hyphen and underscores</constraintErrorMessage> <valueHelp> <format>txt</format> - <description>Name of haproxy backend system</description> + <description>HAProxy backend system name</description> </valueHelp> <completionHelp> <path>load-balancing haproxy backend</path> diff --git a/interface-definitions/load-balancing_wan.xml.in b/interface-definitions/load-balancing_wan.xml.in index 310aa0343..f80440411 100644 --- a/interface-definitions/load-balancing_wan.xml.in +++ b/interface-definitions/load-balancing_wan.xml.in @@ -7,7 +7,7 @@ <children> <node name="wan" owner="${vyos_conf_scripts_dir}/load-balancing_wan.py"> <properties> - <help>Configure Wide Area Network (WAN) load-balancing</help> + <help>Wide Area Network (WAN) load-balancing</help> <priority>900</priority> </properties> <children> diff --git a/interface-definitions/nat66.xml.in b/interface-definitions/nat66.xml.in index c59725c53..2c1babd5a 100644 --- a/interface-definitions/nat66.xml.in +++ b/interface-definitions/nat66.xml.in @@ -53,6 +53,7 @@ </properties> </leafNode> #include <include/nat-port.xml.i> + #include 
<include/firewall/source-destination-group-ipv6.xml.i> </children> </node> <node name="source"> diff --git a/interface-definitions/system_option.xml.in b/interface-definitions/system_option.xml.in index 638ac1a3d..c0ea958a2 100644 --- a/interface-definitions/system_option.xml.in +++ b/interface-definitions/system_option.xml.in @@ -37,7 +37,145 @@ <help>Kernel boot parameters</help> </properties> <children> - <leafNode name="disable-mitigations"> + <node name="cpu"> + <properties> + <help>CPU settings</help> + </properties> + <children> + <leafNode name="disable-nmi-watchdog"> + <properties> + <help>Disable the NMI watchdog for detecting hard CPU lockups</help> + <valueless/> + </properties> + </leafNode> + <leafNode name="isolate-cpus"> + <properties> + <help>Isolate specified CPUs from the scheduler</help> + <valueHelp> + <format>u32:0-511</format> + <description>CPU core</description> + </valueHelp> + <valueHelp> + <format><start-end></format> + <description>CPU core range (examples: "1", "4-7", "1,2-5,7")</description> + </valueHelp> + <constraint> + <validator name="cpu"/> + </constraint> + </properties> + </leafNode> + <leafNode name="nohz-full"> + <properties> + <help>Enable full tickless mode for specified CPUs</help> + <valueHelp> + <format>u32:0-511</format> + <description>CPU core</description> + </valueHelp> + <valueHelp> + <format><start-end></format> + <description>CPU core range (examples: "1", "4-7", "1,2-5,7")</description> + </valueHelp> + <constraint> + <validator name="cpu"/> + </constraint> + </properties> + </leafNode> + <leafNode name="rcu-no-cbs"> + <properties> + <help>Offload Read-Copy-Update (RCU) callback processing to specified CPUs</help> + <valueHelp> + <format>u32:0-511</format> + <description>CPU core</description> + </valueHelp> + <valueHelp> + <format><start-end></format> + <description>CPU core range (examples: "1", "4-7", "1,2-5,7")</description> + </valueHelp> + <constraint> + <validator name="cpu"/> + </constraint> + </properties> + </leafNode> + </children> + </node> + <node name="memory"> + <properties> + <help>Memory settings</help> + </properties> + <children> + <leafNode name="disable-numa-balancing"> + <properties> + <help>Disable automatic NUMA memory balancing</help> + <valueless/> + </properties> + </leafNode> + <leafNode name="default-hugepage-size"> + <properties> + <help>Set default hugepage size (e.g., 2M, 1G)</help> + <completionHelp> + <list>2M 1G</list> + </completionHelp> + <valueHelp> + <format>2M</format> + <description>2 megabytes</description> + </valueHelp> + <valueHelp> + <format>1G</format> + <description>1 gigabyte</description> + </valueHelp> + <constraint> + <regex>(2M|1G)</regex> + </constraint> + </properties> + </leafNode> + <tagNode name="hugepage-size"> + <properties> + <help>Set hugepage size for allocation (e.g., 2M, 1G)</help> + <completionHelp> + <list>2M 1G</list> + </completionHelp> + <valueHelp> + <format>2M</format> + <description>2 megabytes</description> + </valueHelp> + <valueHelp> + <format>1G</format> + <description>1 gigabyte</description> + </valueHelp> + <constraint> + <regex>(2M|1G)</regex> + </constraint> + </properties> + <children> + <leafNode name="hugepage-count"> + <properties> + <help>Allocate number of hugepages for system use</help> + <valueHelp> + <format>u32</format> + <description>Number of hugepages</description> + </valueHelp> + <constraint> + <validator name="numeric" argument="--range 1-100000"/> + </constraint> + </properties> + </leafNode> + </children> + </tagNode> + </children> + 
</node> + <leafNode name="disable-hpet"> + <properties> + <help>Disable High Precision Event Timer (HPET)</help> + <valueless/> + </properties> + </leafNode> + <leafNode name="disable-mce"> + <properties> + <help>Disable Machine Check Exceptions (MCE) reporting and handling</help> + <valueless/> + </properties> + </leafNode> + <leafNode name="disable-mitigations"> <properties> <help>Disable all optional CPU mitigations</help> <valueless/> @@ -69,6 +207,18 @@ </valueHelp> </properties> </leafNode> + <leafNode name="disable-softlockup"> + <properties> + <help>Disable soft lockup detector for kernel threads</help> + <valueless/> + </properties> + </leafNode> + <leafNode name="quiet"> + <properties> + <help>Disable most log messages</help> + <valueless/> + </properties> + </leafNode> <node name="debug"> <properties> <help>Dynamic debugging for kernel module</help> diff --git a/op-mode-definitions/generate_tech-support_archive.xml.in b/op-mode-definitions/generate_tech-support_archive.xml.in index fc664eb90..65c93541e 100644 --- a/op-mode-definitions/generate_tech-support_archive.xml.in +++ b/op-mode-definitions/generate_tech-support_archive.xml.in @@ -11,12 +11,27 @@ <properties> <help>Generate tech support archive</help> </properties> - <command>sudo ${vyos_op_scripts_dir}/tech_support.py show --raw | gzip> $4.json.gz</command> + <command>sudo ${vyos_op_scripts_dir}/generate_tech-support_archive.py</command> </node> <tagNode name="archive"> <properties> <help>Generate tech support archive to defined location</help> <completionHelp> + <list> <file> <scp://user:passwd@host> <ftp://user:passwd@host></list> + </completionHelp> + </properties> + <command>sudo ${vyos_op_scripts_dir}/generate_tech-support_archive.py $4</command> + </tagNode> + <node name="machine-readable-archive"> + <properties> + <help>Generate tech support archive</help> + </properties> + <command>sudo ${vyos_op_scripts_dir}/tech_support.py show --raw | gzip> $4.json.gz</command> + </node> + <tagNode name="machine-readable-archive"> + <properties> + <help>Generate tech support archive to defined location</help> + <completionHelp> <list> <file> </list> </completionHelp> </properties> diff --git a/op-mode-definitions/monitor-log.xml.in b/op-mode-definitions/monitor-log.xml.in index 91e1c93ef..a6aa6f05e 100644 --- a/op-mode-definitions/monitor-log.xml.in +++ b/op-mode-definitions/monitor-log.xml.in @@ -107,6 +107,12 @@ </properties> <command>journalctl --no-hostname --boot --follow --unit frr.service</command> </leafNode> + <leafNode name="haproxy"> + <properties> + <help>Monitor last lines of HAProxy log</help> + </properties> + <command>journalctl --no-hostname --boot --follow --unit haproxy.service</command> + </leafNode> <leafNode name="ipoe-server"> <properties> <help>Monitor last lines of IP over Ethernet server log</help> diff --git a/op-mode-definitions/clear-session.xml.in b/op-mode-definitions/reset-session.xml.in index bfafe6312..1e52e278b 100644 --- a/op-mode-definitions/clear-session.xml.in +++ b/op-mode-definitions/reset-session.xml.in @@ -1,6 +1,6 @@ <?xml version="1.0"?> <interfaceDefinition> - <node name="clear"> + <node name="reset"> <children> <tagNode name="session"> <properties> diff --git a/op-mode-definitions/show-interfaces.xml.in b/op-mode-definitions/show-interfaces.xml.in index 09466647d..2d94080c7 100644 --- a/op-mode-definitions/show-interfaces.xml.in +++ b/op-mode-definitions/show-interfaces.xml.in @@ -26,6 +26,48 @@ </properties> <command>${vyos_op_scripts_dir}/interfaces.py show_summary</command> 
</leafNode> + <tagNode name="kernel"> + <properties> + <completionHelp> + <script>ip -j link show | jq -r '.[].ifname'</script> + </completionHelp> + </properties> + <command>${vyos_op_scripts_dir}/interfaces.py show_kernel --intf-name=$4</command> + <children> + <leafNode name="detail"> + <properties> + <help>Show system interface in JSON format</help> + </properties> + <command>${vyos_op_scripts_dir}/interfaces.py show_kernel --intf-name=$4 --detail</command> + </leafNode> + <leafNode name="json"> + <properties> + <help>Show system interface in JSON format</help> + </properties> + <command>${vyos_op_scripts_dir}/interfaces.py show_kernel --intf-name=$4 --raw</command> + </leafNode> + </children> + </tagNode> + <node name="kernel"> + <properties> + <help>Show all interfaces on this system</help> + </properties> + <command>${vyos_op_scripts_dir}/interfaces.py show_kernel</command> + <children> + <leafNode name="detail"> + <properties> + <help>Show system interface in JSON format</help> + </properties> + <command>${vyos_op_scripts_dir}/interfaces.py show_kernel --detail</command> + </leafNode> + <leafNode name="json"> + <properties> + <help>Show all interfaces in JSON format</help> + </properties> + <command>${vyos_op_scripts_dir}/interfaces.py show_kernel --raw</command> + </leafNode> + </children> + </node> </children> </node> </children> diff --git a/op-mode-definitions/show-log.xml.in b/op-mode-definitions/show-log.xml.in index ee2e2bf70..c2bc03910 100755 --- a/op-mode-definitions/show-log.xml.in +++ b/op-mode-definitions/show-log.xml.in @@ -559,6 +559,12 @@ </properties> <command>journalctl --no-hostname --boot --unit frr.service</command> </leafNode> + <leafNode name="haproxy"> + <properties> + <help>Show log for HAProxy</help> + </properties> + <command>journalctl --no-hostname --boot --unit haproxy.service</command> + </leafNode> <leafNode name="https"> <properties> <help>Show log for HTTPs</help> diff --git a/python/vyos/base.py b/python/vyos/base.py index ca96d96ce..3173ddc20 100644 --- a/python/vyos/base.py +++ b/python/vyos/base.py @@ -1,4 +1,4 @@ -# Copyright 2018-2022 VyOS maintainers and contributors <maintainers@vyos.io> +# Copyright 2018-2025 VyOS maintainers and contributors <maintainers@vyos.io> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -15,8 +15,7 @@ from textwrap import fill - -class BaseWarning: +class UserMessage: def __init__(self, header, message, **kwargs): self.message = message self.kwargs = kwargs @@ -33,7 +32,6 @@ class BaseWarning: messages = self.message.split('\n') isfirstmessage = True initial_indent = self.textinitindent - print('') for mes in messages: mes = fill(mes, initial_indent=initial_indent, subsequent_indent=self.standardindent, **self.kwargs) @@ -44,17 +42,24 @@ class BaseWarning: print('', flush=True) +class Message(): + def __init__(self, message, **kwargs): + self.Message = UserMessage('', message, **kwargs) + self.Message.print() + class Warning(): def __init__(self, message, **kwargs): - self.BaseWarn = BaseWarning('WARNING: ', message, **kwargs) - self.BaseWarn.print() + print('') + self.UserMessage = UserMessage('WARNING: ', message, **kwargs) + self.UserMessage.print() class DeprecationWarning(): def __init__(self, message, **kwargs): # Reformat the message and trim it to 72 characters in length - self.BaseWarn = BaseWarning('DEPRECATION WARNING: ', message, **kwargs) - self.BaseWarn.print() + print('') + self.UserMessage = UserMessage('DEPRECATION 
WARNING: ', message, **kwargs) + self.UserMessage.print() class ConfigError(Exception): diff --git a/python/vyos/defaults.py b/python/vyos/defaults.py index 7efccded6..c1e5ddc04 100644 --- a/python/vyos/defaults.py +++ b/python/vyos/defaults.py @@ -43,10 +43,15 @@ directories = { } systemd_services = { + 'haproxy' : 'haproxy.service', 'syslog' : 'syslog.service', 'snmpd' : 'snmpd.service', } +internal_ports = { + 'certbot_haproxy' : 65080, # Certbot running behing haproxy +} + config_status = '/tmp/vyos-config-status' api_config_state = '/run/http-api-state' frr_debug_enable = '/tmp/vyos.frr.debug' diff --git a/python/vyos/firewall.py b/python/vyos/firewall.py index 9c320c82d..64022db84 100755 --- a/python/vyos/firewall.py +++ b/python/vyos/firewall.py @@ -319,7 +319,10 @@ def parse_rule(rule_conf, hook, fw_name, rule_id, ip_name): if group_name[0] == '!': operator = '!=' group_name = group_name[1:] - output.append(f'{ip_name} {prefix}addr {operator} @R_{group_name}') + if ip_name == 'ip': + output.append(f'{ip_name} {prefix}addr {operator} @R_{group_name}') + elif ip_name == 'ip6': + output.append(f'{ip_name} {prefix}addr {operator} @R6_{group_name}') if 'mac_group' in group: group_name = group['mac_group'] operator = '' @@ -471,14 +474,14 @@ def parse_rule(rule_conf, hook, fw_name, rule_id, ip_name): output.append('gre version 1') if gre_key: - # The offset of the key within the packet shifts depending on the C-flag. - # nftables cannot handle complex enough expressions to match multiple + # The offset of the key within the packet shifts depending on the C-flag. + # nftables cannot handle complex enough expressions to match multiple # offsets based on bitfields elsewhere. - # We enforce a specific match for the checksum flag in validation, so the - # gre_flags dict will always have a 'checksum' key when gre_key is populated. - if not gre_flags['checksum']: + # We enforce a specific match for the checksum flag in validation, so the + # gre_flags dict will always have a 'checksum' key when gre_key is populated. + if not gre_flags['checksum']: # No "unset" child node means C is set, we offset key lookup +32 bits - output.append(f'@th,64,32 == {gre_key}') + output.append(f'@th,64,32 == {gre_key}') else: output.append(f'@th,32,32 == {gre_key}') @@ -637,7 +640,7 @@ def parse_rule(rule_conf, hook, fw_name, rule_id, ip_name): return " ".join(output) def parse_gre_flags(flags, force_keyed=False): - flag_map = { # nft does not have symbolic names for these. + flag_map = { # nft does not have symbolic names for these. 
'checksum': 1<<0, 'routing': 1<<1, 'key': 1<<2, @@ -648,7 +651,7 @@ def parse_gre_flags(flags, force_keyed=False): include = 0 exclude = 0 for fl_name, fl_state in flags.items(): - if not fl_state: + if not fl_state: include |= flag_map[fl_name] else: # 'unset' child tag exclude |= flag_map[fl_name] diff --git a/python/vyos/frrender.py b/python/vyos/frrender.py index 524167d8b..73d6dd5f0 100644 --- a/python/vyos/frrender.py +++ b/python/vyos/frrender.py @@ -697,6 +697,9 @@ class FRRender: debug('FRR: START CONFIGURATION RENDERING') # we can not reload an empty file, thus we always embed the marker output = '!\n' + # Enable FRR logging + output += 'log syslog\n' + output += 'log facility local7\n' # Enable SNMP agentx support # SNMP AgentX support cannot be disabled once enabled if 'snmp' in config_dict: diff --git a/python/vyos/template.py b/python/vyos/template.py index d79e1183f..11e1cc50f 100755 --- a/python/vyos/template.py +++ b/python/vyos/template.py @@ -36,6 +36,7 @@ DEFAULT_TEMPLATE_DIR = directories["templates"] # Holds template filters registered via register_filter() _FILTERS = {} _TESTS = {} +_CLEVER_FUNCTIONS = {} # reuse Environments with identical settings to improve performance @functools.lru_cache(maxsize=2) @@ -58,6 +59,7 @@ def _get_environment(location=None): ) env.filters.update(_FILTERS) env.tests.update(_TESTS) + env.globals.update(_CLEVER_FUNCTIONS) return env @@ -77,7 +79,7 @@ def register_filter(name, func=None): "Filters can only be registered before rendering the first template" ) if name in _FILTERS: - raise ValueError(f"A filter with name {name!r} was registered already") + raise ValueError(f"A filter with name {name!r} was already registered") _FILTERS[name] = func return func @@ -97,10 +99,30 @@ def register_test(name, func=None): "Tests can only be registered before rendering the first template" ) if name in _TESTS: - raise ValueError(f"A test with name {name!r} was registered already") + raise ValueError(f"A test with name {name!r} was already registered") _TESTS[name] = func return func +def register_clever_function(name, func=None): + """Register a function to be available as test in templates under given name. + + It can also be used as a decorator, see below in this module for examples. + + :raise RuntimeError: + when trying to register a test after a template has been rendered already + :raise ValueError: when trying to register a name which was taken already + """ + if func is None: + return functools.partial(register_clever_function, name) + if _get_environment.cache_info().currsize: + raise RuntimeError( + "Clever functions can only be registered before rendering the" \ + "first template") + if name in _CLEVER_FUNCTIONS: + raise ValueError(f"A clever function with name {name!r} was already "\ + "registered") + _CLEVER_FUNCTIONS[name] = func + return func def render_to_string(template, content, formater=None, location=None): """Render a template from the template directory, raise on any errors. 
@@ -150,6 +172,8 @@ def render( # As we are opening the file with 'w', we are performing the rendering before # calling open() to not accidentally erase the file if rendering fails rendered = render_to_string(template, content, formater, location) + # Remove any trailing character and always add a new line at the end + rendered = rendered.rstrip() + "\n" # Write to file with open(destination, "w") as file: @@ -1050,3 +1074,21 @@ def vyos_defined(value, test_value=None, var_type=None): else: # Valid value and is matching optional argument if provided - return true return True + +@register_clever_function('get_default_port') +def get_default_port(service): + """ + Jinja2 plugin to retrieve common service port number from vyos.defaults + class form a Jinja2 template. This removes the need to hardcode, or pass in + the data using the general dictionary. + + Added to remove code complexity and make it easier to read. + + Example: + {{ get_default_port('certbot_haproxy') }} + """ + from vyos.defaults import internal_ports + if service not in internal_ports: + raise RuntimeError(f'Service "{service}" not found in internal ' \ + 'vyos.defaults.internal_ports dict!') + return internal_ports[service] diff --git a/python/vyos/utils/network.py b/python/vyos/utils/network.py index 2f666f0ee..20b6a3c9e 100644 --- a/python/vyos/utils/network.py +++ b/python/vyos/utils/network.py @@ -256,40 +256,60 @@ def mac2eui64(mac, prefix=None): except: # pylint: disable=bare-except return -def check_port_availability(ipaddress, port, protocol): +def check_port_availability(address: str=None, port: int=0, protocol: str='tcp') -> bool: """ - Check if port is available and not used by any service - Return False if a port is busy or IP address does not exists + Check if given port is available and not used by any service. + Should be used carefully for services that can start listening dynamically, because IP address may be dynamic too + + Args: + address: IPv4 or IPv6 address - if None, checks on all interfaces + port: TCP/UDP port number. + + + Returns: + False if a port is busy or IP address does not exists + True if a port is free and IP address exists """ - from socketserver import TCPServer, UDPServer + import socket from ipaddress import ip_address + # treat None as "any address" + address = address or '::' + # verify arguments try: - ipaddress = ip_address(ipaddress).compressed - except: - raise ValueError(f'The {ipaddress} is not a valid IPv4 or IPv6 address') + address = ip_address(address).compressed + except ValueError: + raise ValueError(f'{address} is not a valid IPv4 or IPv6 address') if port not in range(1, 65536): - raise ValueError(f'The port number {port} is not in the 1-65535 range') + raise ValueError(f'Port {port} is not in range 1-65535') if protocol not in ['tcp', 'udp']: - raise ValueError(f'The protocol {protocol} is not supported. 
Only tcp and udp are allowed') + raise ValueError(f'{protocol} is not supported - only tcp and udp are allowed') - # check port availability + protocol = socket.SOCK_STREAM if protocol == 'tcp' else socket.SOCK_DGRAM try: - if protocol == 'tcp': - server = TCPServer((ipaddress, port), None, bind_and_activate=True) - if protocol == 'udp': - server = UDPServer((ipaddress, port), None, bind_and_activate=True) - server.server_close() - except Exception as e: - # errno.h: - #define EADDRINUSE 98 /* Address already in use */ - if e.errno == 98: + addr_info = socket.getaddrinfo(address, port, socket.AF_UNSPEC, protocol) + except socket.gaierror as e: + print(f'Invalid address: {address}') + return False + + for family, socktype, proto, canonname, sockaddr in addr_info: + try: + with socket.socket(family, socktype, proto) as s: + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + s.bind(sockaddr) + # port is free to use + return True + except OSError: + # port is already in use return False - return True + # if we reach this point, no socket was tested and we assume the port is + # already in use - better safe then sorry + return False + def is_listen_port_bind_service(port: int, service: str) -> bool: """Check if listen port bound to expected program name @@ -615,3 +635,19 @@ def is_valid_ipv4_address_or_range(addr: str) -> bool: return ip_network(addr).version == 4 except: return False + +def is_valid_ipv6_address_or_range(addr: str) -> bool: + """ + Validates if the provided address is a valid IPv4, CIDR or IPv4 range + :param addr: address to test + :return: bool: True if provided address is valid + """ + from ipaddress import ip_network + try: + if '-' in addr: # If we are checking a range, validate both address's individually + split = addr.split('-') + return is_valid_ipv6_address_or_range(split[0]) and is_valid_ipv6_address_or_range(split[1]) + else: + return ip_network(addr).version == 6 + except: + return False diff --git a/python/vyos/utils/process.py b/python/vyos/utils/process.py index 121b6e240..21335e6b3 100644 --- a/python/vyos/utils/process.py +++ b/python/vyos/utils/process.py @@ -14,6 +14,7 @@ # License along with this library. If not, see <http://www.gnu.org/licenses/>. 
import os +import shlex from subprocess import Popen from subprocess import PIPE @@ -21,20 +22,17 @@ from subprocess import STDOUT from subprocess import DEVNULL -def get_wrapper(vrf, netns, auth): - wrapper = '' +def get_wrapper(vrf, netns): + wrapper = None if vrf: - wrapper = f'ip vrf exec {vrf} ' + wrapper = ['ip', 'vrf', 'exec', vrf] elif netns: - wrapper = f'ip netns exec {netns} ' - if auth: - wrapper = f'{auth} {wrapper}' + wrapper = ['ip', 'netns', 'exec', netns] return wrapper def popen(command, flag='', shell=None, input=None, timeout=None, env=None, - stdout=PIPE, stderr=PIPE, decode='utf-8', auth='', vrf=None, - netns=None): + stdout=PIPE, stderr=PIPE, decode='utf-8', vrf=None, netns=None): """ popen is a wrapper helper around subprocess.Popen with it default setting it will return a tuple (out, err) @@ -75,28 +73,33 @@ def popen(command, flag='', shell=None, input=None, timeout=None, env=None, if not debug.enabled(flag): flag = 'command' + use_shell = shell + stdin = None + if shell is None: + use_shell = False + if ' ' in command: + use_shell = True + if env: + use_shell = True + # Must be run as root to execute command in VRF or network namespace + wrapper = get_wrapper(vrf, netns) if vrf or netns: if os.getuid() != 0: raise OSError( 'Permission denied: cannot execute commands in VRF and netns contexts as an unprivileged user' ) - wrapper = get_wrapper(vrf, netns, auth) - command = f'{wrapper} {command}' if wrapper else command + if use_shell: + command = f'{shlex.join(wrapper)} {command}' + else: + if type(command) is not list: + command = [command] + command = wrapper + command - cmd_msg = f"cmd '{command}'" + cmd_msg = f"cmd '{command}'" if use_shell else f"cmd '{shlex.join(command)}'" debug.message(cmd_msg, flag) - use_shell = shell - stdin = None - if shell is None: - use_shell = False - if ' ' in command: - use_shell = True - if env: - use_shell = True - if input: stdin = PIPE input = input.encode() if type(input) is str else input @@ -155,7 +158,7 @@ def run(command, flag='', shell=None, input=None, timeout=None, env=None, def cmd(command, flag='', shell=None, input=None, timeout=None, env=None, stdout=PIPE, stderr=PIPE, decode='utf-8', raising=None, message='', - expect=[0], auth='', vrf=None, netns=None): + expect=[0], vrf=None, netns=None): """ A wrapper around popen, which returns the stdout and will raise the error code of a command @@ -171,12 +174,11 @@ def cmd(command, flag='', shell=None, input=None, timeout=None, env=None, input=input, timeout=timeout, env=env, shell=shell, decode=decode, - auth=auth, vrf=vrf, netns=netns, ) if code not in expect: - wrapper = get_wrapper(vrf, netns, auth='') + wrapper = get_wrapper(vrf, netns) command = f'{wrapper} {command}' feedback = message + '\n' if message else '' feedback += f'failed to run command: {command}\n' diff --git a/smoketest/config-tests/basic-haproxy b/smoketest/config-tests/basic-haproxy new file mode 100644 index 000000000..7755fc4ea --- /dev/null +++ b/smoketest/config-tests/basic-haproxy @@ -0,0 +1,46 @@ +set interfaces dummy dum0 address '172.18.254.203/32' +set interfaces ethernet eth0 duplex 'auto' +set interfaces ethernet eth0 speed 'auto' +set interfaces ethernet eth0 vif 203 address '172.18.203.10/24' +set interfaces ethernet eth1 duplex 'auto' +set interfaces ethernet eth1 speed 'auto' +set interfaces ethernet eth2 duplex 'auto' +set interfaces ethernet eth2 speed 'auto' +set load-balancing haproxy backend webserver logging facility daemon +set load-balancing haproxy backend webserver logging 
facility user level 'info' +set load-balancing haproxy backend webserver server web01 address '192.0.2.1' +set load-balancing haproxy backend webserver server web01 port '443' +set load-balancing haproxy backend webserver ssl no-verify +set load-balancing haproxy global-parameters logging facility daemon +set load-balancing haproxy global-parameters logging facility user level 'info' +set load-balancing haproxy service frontend backend 'webserver' +set load-balancing haproxy service frontend logging facility daemon +set load-balancing haproxy service frontend logging facility user level 'info' +set load-balancing haproxy service frontend port '443' +set load-balancing haproxy service frontend ssl certificate 'dummy' +set pki certificate dummy certificate 'MIIDsTCCApmgAwIBAgIUegVgO1wIN2v44trXZ+Kb1t48uL0wDQYJKoZIhvcNAQELBQAwVzELMAkGA1UEBhMCR0IxEzARBgNVBAgMClNvbWUtU3RhdGUxEjAQBgNVBAcMCVNvbWUtQ2l0eTENMAsGA1UECgwEVnlPUzEQMA4GA1UEAwwHdnlvcy5pbzAeFw0yNTA1MDUxODIzMTdaFw0yNjA1MDUxODIzMTdaMFcxCzAJBgNVBAYTAkdCMRMwEQYDVQQIDApTb21lLVN0YXRlMRIwEAYDVQQHDAlTb21lLUNpdHkxDTALBgNVBAoMBFZ5T1MxEDAOBgNVBAMMB3Z5b3MuaW8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDEfMAwYLKKVhGlUXr9gkVC7uBi+0O9yyEgd5QzPByePXYw0FrSLWmLRfQuByFDPIVANcEa3FgIXIAeKmxItw7IhFRsG5soSOXXgBxdAH/qzEbWhwzgafnxZKJkmrQr8YA3IFtkFPr2+5s26WdjtwEM0tzIFkq6hmWSX1axUgvYlF2uCxjututMZ6I5JCa0uR3gBRuNONuGPH3Ko9zUEATffv53j9DbYVEM0lfVNewefPoVJmWz+oT0wP/kNx6tREf+aUAF4m+eBsqnggITftW2fyeFnoBPCcPp3HUgSwZhesunqz+YeW6Pk+WWb5vl+2QbMKKtz5qK6dI3q0z9yp4FAgMBAAGjdTBzMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBSr4OYIWkb8UGuQnEFnjSbmvR+4vDAfBgNVHSMEGDAWgBSr4OYIWkb8UGuQnEFnjSbmvR+4vDANBgkqhkiG9w0BAQsFAAOCAQEAUmRWRPGXsvfuRT+53id3EufH1IJAdowrt6yBZsHobvqCXO2+YhG7oG6/UqUYiv5bHN5xEMQyWd7nyrLOUeFo2bpcMIOlpl6AoUIY65Gm2BqQ7FuPxLLO25RdpZ5WkMGX5kJsKY0/PcpamRKNz1khgFcRyxf9WGhCAIjDCWIWs8lkvPN3m75SFCW7MTuzzQOrzvI6nqqcHO4k8hRBznp26WLUW1rQKpNN09nZGOkeNYK5QbzKN/RUmtEHQZhlgLAIr09jUaA4RDLI1SdD6LR5nvpa9RJBTyS/kISF8BXKMgvUbDHN2nP+VUUrut2ZwoU+pxV4RVT2pS760HuYj4+sYQ==' +set pki certificate dummy private key 
'MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDEfMAwYLKKVhGlUXr9gkVC7uBi+0O9yyEgd5QzPByePXYw0FrSLWmLRfQuByFDPIVANcEa3FgIXIAeKmxItw7IhFRsG5soSOXXgBxdAH/qzEbWhwzgafnxZKJkmrQr8YA3IFtkFPr2+5s26WdjtwEM0tzIFkq6hmWSX1axUgvYlF2uCxjututMZ6I5JCa0uR3gBRuNONuGPH3Ko9zUEATffv53j9DbYVEM0lfVNewefPoVJmWz+oT0wP/kNx6tREf+aUAF4m+eBsqnggITftW2fyeFnoBPCcPp3HUgSwZhesunqz+YeW6Pk+WWb5vl+2QbMKKtz5qK6dI3q0z9yp4FAgMBAAECgf9plqCMg2pKEWRFl183bqWAm7lnLnsUOfABFNPYa3U+uKQUKZpboTBfzDfZqNak3XNQV0mTAR8pFfoMhQjQU9hUxH7ivjw1RUCHjixCF0vLBkTB34gL7FUbiEIFhR1NW3pCJY73OXOkIZG1Obh6Syb8KubDeu4bTmb90/TnDDAs6OYXJ5yo7ZDZLvLu5a3Dli+H4K5Qb5VJ74o/vtodBo3wmKBgy2Ey6JqF+y7/3HeE66rVhYNft5pURgemWnNYqh3oDTJASqpA/8n90o8ceYPVJugQ7029UiyTp0xgBRXFszgiYPkBlsNWB+9+ospopOmYU0owBykH+RtD/bQ0mwECgYEA4gxPdYbHg/GLihHrkv0t5V0pSEhBeBeTBdWG+P/6K90vXofpp7qISdOeYMGkh8mY+PfZNHksdu67d2ks5on5/dNf5YXWCm+LMMRiSsfOo11NISNdNHS6afqs18Wq1aKawv/rwotfxrakM4Gar692+jgz/l/X+FdOQwmE6uEus8ECgYEA3oW4eGtzGq28TiqyhTHduTkas0ckPWX8ulasyPnLxBKDNNohbXXpFIJIcrnl24QFJw3MJbo+R+OxZHgzPK8r64gIGVa7vLCR2fiU/RFoUa1Jo1pPOqXXWqf/Mvdokm2p0atrjRUX9VhjoFsDLcqTgAmfSCsVSqGucX6ER7Gy60UCgYAvmhoNjNFtFquk6rsqHAjTOTgdUaH/0S8T1nBy9SzQmeaEyKhKuvxCV78NbxnfwnNlUoQ6CZ50eTefINXkwn+TlTSnl/SIBA9SuLheOQ9p1ZcNeG4DQuWStcg6NBUSoghnMg+Ky2Di7slLU2qovpGWhcllMve/A1umwFVuRPdZwQKBgHS7mYYyd/Oq6HnpFDWjbzlXp5Yc3/oFooruJT5ZLHfzbjkvpRGTJW7I2dC1jMuXekx+hHXWOg3keI7IL7jJ/DRW7Ei+o0XdKuY57Y7ErwEJ8vNq0N1nWo4IS2wlNgp61PdVAdrFEgh3EexxUj2XY8FrSs/FKio4nxaS1Dn4EnAxAoGBAKqLvuPpmCMVwlIu57WGxL5d9i7EjIGY65l6HTKQYoHCzE51rkowH1La2fuUYz0IpExq2lcrLbOUtSyhXH7Zlktiz//Gu/P90SUfR/ZGcLeZi+EDyK2OctpnWBjs2Dmfg4D6vxk39yV8AB97pYG073GcJ/P54qRUuEitbpJwH+fB' +set service ntp allow-client address '0.0.0.0/0' +set service ntp allow-client address '::/0' +set service ntp server 172.16.100.10 +set service ntp server 172.16.100.20 +set service ntp server 172.16.110.30 +set service ssh disable-host-validation +set service ssh port '22' +set system config-management commit-revisions '200' +set system conntrack modules ftp +set system conntrack modules h323 +set system conntrack modules nfs +set system conntrack modules pptp +set system conntrack modules sip +set system conntrack modules sqlnet +set system conntrack modules tftp +set system console device ttyS0 speed '115200' +set system host-name 'vyos' +set system login user vyos authentication encrypted-password '$6$O5gJRlDYQpj$MtrCV9lxMnZPMbcxlU7.FI793MImNHznxGoMFgm3Q6QP3vfKJyOSRCt3Ka/GzFQyW1yZS4NS616NLHaIPPFHc0' +set system login user vyos authentication plaintext-password '' +set system name-server '172.16.254.30' +set system option kernel disable-mitigations +set system syslog local facility all level 'info' +set system syslog local facility local7 level 'debug' +set system time-zone 'Europe/Berlin' diff --git a/smoketest/configs/basic-haproxy b/smoketest/configs/basic-haproxy new file mode 100644 index 000000000..83fffbac6 --- /dev/null +++ b/smoketest/configs/basic-haproxy @@ -0,0 +1,153 @@ +interfaces { + dummy dum0 { + address "172.18.254.203/32" + } + ethernet eth0 { + duplex "auto" + speed "auto" + vif 203 { + address "172.18.203.10/24" + } + } + ethernet eth1 { + duplex "auto" + speed "auto" + } + ethernet eth2 { + duplex "auto" + speed "auto" + } +} +load-balancing { + reverse-proxy { + backend webserver { + logging { + facility all { + level "all" + } + facility daemon { + level "all" + } + facility user { + level "info" + } + } + server web01 { + address "192.0.2.1" + port "443" + } + ssl { + no-verify + } + } + global-parameters { + logging { + facility all { + level "all" + } + facility 
daemon { + level "all" + } + facility user { + level "info" + } + } + } + service frontend { + backend "webserver" + logging { + facility all { + level "all" + } + facility daemon { + level "all" + } + facility user { + level "info" + } + } + port "443" + ssl { + certificate "dummy" + } + } + } +} +pki { + certificate dummy { + certificate "MIIDsTCCApmgAwIBAgIUegVgO1wIN2v44trXZ+Kb1t48uL0wDQYJKoZIhvcNAQELBQAwVzELMAkGA1UEBhMCR0IxEzARBgNVBAgMClNvbWUtU3RhdGUxEjAQBgNVBAcMCVNvbWUtQ2l0eTENMAsGA1UECgwEVnlPUzEQMA4GA1UEAwwHdnlvcy5pbzAeFw0yNTA1MDUxODIzMTdaFw0yNjA1MDUxODIzMTdaMFcxCzAJBgNVBAYTAkdCMRMwEQYDVQQIDApTb21lLVN0YXRlMRIwEAYDVQQHDAlTb21lLUNpdHkxDTALBgNVBAoMBFZ5T1MxEDAOBgNVBAMMB3Z5b3MuaW8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDEfMAwYLKKVhGlUXr9gkVC7uBi+0O9yyEgd5QzPByePXYw0FrSLWmLRfQuByFDPIVANcEa3FgIXIAeKmxItw7IhFRsG5soSOXXgBxdAH/qzEbWhwzgafnxZKJkmrQr8YA3IFtkFPr2+5s26WdjtwEM0tzIFkq6hmWSX1axUgvYlF2uCxjututMZ6I5JCa0uR3gBRuNONuGPH3Ko9zUEATffv53j9DbYVEM0lfVNewefPoVJmWz+oT0wP/kNx6tREf+aUAF4m+eBsqnggITftW2fyeFnoBPCcPp3HUgSwZhesunqz+YeW6Pk+WWb5vl+2QbMKKtz5qK6dI3q0z9yp4FAgMBAAGjdTBzMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBSr4OYIWkb8UGuQnEFnjSbmvR+4vDAfBgNVHSMEGDAWgBSr4OYIWkb8UGuQnEFnjSbmvR+4vDANBgkqhkiG9w0BAQsFAAOCAQEAUmRWRPGXsvfuRT+53id3EufH1IJAdowrt6yBZsHobvqCXO2+YhG7oG6/UqUYiv5bHN5xEMQyWd7nyrLOUeFo2bpcMIOlpl6AoUIY65Gm2BqQ7FuPxLLO25RdpZ5WkMGX5kJsKY0/PcpamRKNz1khgFcRyxf9WGhCAIjDCWIWs8lkvPN3m75SFCW7MTuzzQOrzvI6nqqcHO4k8hRBznp26WLUW1rQKpNN09nZGOkeNYK5QbzKN/RUmtEHQZhlgLAIr09jUaA4RDLI1SdD6LR5nvpa9RJBTyS/kISF8BXKMgvUbDHN2nP+VUUrut2ZwoU+pxV4RVT2pS760HuYj4+sYQ==" + private { + key "MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDEfMAwYLKKVhGlUXr9gkVC7uBi+0O9yyEgd5QzPByePXYw0FrSLWmLRfQuByFDPIVANcEa3FgIXIAeKmxItw7IhFRsG5soSOXXgBxdAH/qzEbWhwzgafnxZKJkmrQr8YA3IFtkFPr2+5s26WdjtwEM0tzIFkq6hmWSX1axUgvYlF2uCxjututMZ6I5JCa0uR3gBRuNONuGPH3Ko9zUEATffv53j9DbYVEM0lfVNewefPoVJmWz+oT0wP/kNx6tREf+aUAF4m+eBsqnggITftW2fyeFnoBPCcPp3HUgSwZhesunqz+YeW6Pk+WWb5vl+2QbMKKtz5qK6dI3q0z9yp4FAgMBAAECgf9plqCMg2pKEWRFl183bqWAm7lnLnsUOfABFNPYa3U+uKQUKZpboTBfzDfZqNak3XNQV0mTAR8pFfoMhQjQU9hUxH7ivjw1RUCHjixCF0vLBkTB34gL7FUbiEIFhR1NW3pCJY73OXOkIZG1Obh6Syb8KubDeu4bTmb90/TnDDAs6OYXJ5yo7ZDZLvLu5a3Dli+H4K5Qb5VJ74o/vtodBo3wmKBgy2Ey6JqF+y7/3HeE66rVhYNft5pURgemWnNYqh3oDTJASqpA/8n90o8ceYPVJugQ7029UiyTp0xgBRXFszgiYPkBlsNWB+9+ospopOmYU0owBykH+RtD/bQ0mwECgYEA4gxPdYbHg/GLihHrkv0t5V0pSEhBeBeTBdWG+P/6K90vXofpp7qISdOeYMGkh8mY+PfZNHksdu67d2ks5on5/dNf5YXWCm+LMMRiSsfOo11NISNdNHS6afqs18Wq1aKawv/rwotfxrakM4Gar692+jgz/l/X+FdOQwmE6uEus8ECgYEA3oW4eGtzGq28TiqyhTHduTkas0ckPWX8ulasyPnLxBKDNNohbXXpFIJIcrnl24QFJw3MJbo+R+OxZHgzPK8r64gIGVa7vLCR2fiU/RFoUa1Jo1pPOqXXWqf/Mvdokm2p0atrjRUX9VhjoFsDLcqTgAmfSCsVSqGucX6ER7Gy60UCgYAvmhoNjNFtFquk6rsqHAjTOTgdUaH/0S8T1nBy9SzQmeaEyKhKuvxCV78NbxnfwnNlUoQ6CZ50eTefINXkwn+TlTSnl/SIBA9SuLheOQ9p1ZcNeG4DQuWStcg6NBUSoghnMg+Ky2Di7slLU2qovpGWhcllMve/A1umwFVuRPdZwQKBgHS7mYYyd/Oq6HnpFDWjbzlXp5Yc3/oFooruJT5ZLHfzbjkvpRGTJW7I2dC1jMuXekx+hHXWOg3keI7IL7jJ/DRW7Ei+o0XdKuY57Y7ErwEJ8vNq0N1nWo4IS2wlNgp61PdVAdrFEgh3EexxUj2XY8FrSs/FKio4nxaS1Dn4EnAxAoGBAKqLvuPpmCMVwlIu57WGxL5d9i7EjIGY65l6HTKQYoHCzE51rkowH1La2fuUYz0IpExq2lcrLbOUtSyhXH7Zlktiz//Gu/P90SUfR/ZGcLeZi+EDyK2OctpnWBjs2Dmfg4D6vxk39yV8AB97pYG073GcJ/P54qRUuEitbpJwH+fB" + } + } +} +service { + ntp { + allow-client { + address "0.0.0.0/0" + address "::/0" + } + server 172.16.100.10 { + } + server 172.16.100.20 { + } + server 172.16.110.30 { + } + } + ssh { + disable-host-validation + port "22" + } +} +system { + config-management { + commit-revisions "200" + } + 
console { + device ttyS0 { + speed 115200 + } + } + conntrack { + modules { + ftp + h323 + nfs + pptp + sip + sqlnet + tftp + } + } + host-name vyos + login { + user vyos { + authentication { + encrypted-password $6$O5gJRlDYQpj$MtrCV9lxMnZPMbcxlU7.FI793MImNHznxGoMFgm3Q6QP3vfKJyOSRCt3Ka/GzFQyW1yZS4NS616NLHaIPPFHc0 + plaintext-password "" + } + } + } + name-server "172.16.254.30" + option { + kernel { + disable-mitigations + } + } + syslog { + global { + facility all { + level "info" + } + facility local7 { + level "debug" + } + } + } + time-zone "Europe/Berlin" +} + + +// Warning: Do not remove the following line. +// vyos-config-version: "bgp@5:broadcast-relay@1:cluster@2:config-management@1:conntrack@5:conntrack-sync@2:container@2:dhcp-relay@2:dhcp-server@8:dhcpv6-server@1:dns-dynamic@4:dns-forwarding@4:firewall@15:flow-accounting@1:https@6:ids@1:interfaces@32:ipoe-server@3:ipsec@13:isis@3:l2tp@9:lldp@2:mdns@1:monitoring@1:nat@8:nat66@3:ntp@3:openconnect@3:ospf@2:pim@1:policy@8:pppoe-server@10:pptp@5:qos@2:quagga@11:reverse-proxy@1:rip@1:rpki@2:salt@1:snmp@3:ssh@2:sstp@6:system@27:vrf@3:vrrp@4:vyos-accel-ppp@2:wanloadbalance@3:webproxy@2" +// Release version: 1.4.1 diff --git a/smoketest/scripts/cli/test_container.py b/smoketest/scripts/cli/test_container.py index 36622cad1..daad3a909 100755 --- a/smoketest/scripts/cli/test_container.py +++ b/smoketest/scripts/cli/test_container.py @@ -33,11 +33,13 @@ PROCESS_PIDFILE = '/run/vyos-container-{0}.service.pid' busybox_image = 'busybox:stable' busybox_image_path = '/usr/share/vyos/busybox-stable.tar' + def cmd_to_json(command): c = cmd(command + ' --format=json') data = json.loads(c)[0] return data + class TestContainer(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): @@ -73,13 +75,26 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): cont_name = 'c1' self.cli_set(['interfaces', 'ethernet', 'eth0', 'address', '10.0.2.15/24']) - self.cli_set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop', '10.0.2.2']) + self.cli_set( + ['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop', '10.0.2.2'] + ) self.cli_set(['system', 'name-server', '1.1.1.1']) self.cli_set(['system', 'name-server', '8.8.8.8']) self.cli_set(base_path + ['name', cont_name, 'image', busybox_image]) self.cli_set(base_path + ['name', cont_name, 'allow-host-networks']) - self.cli_set(base_path + ['name', cont_name, 'sysctl', 'parameter', 'kernel.msgmax', 'value', '4096']) + self.cli_set( + base_path + + [ + 'name', + cont_name, + 'sysctl', + 'parameter', + 'kernel.msgmax', + 'value', + '4096', + ] + ) # commit changes self.cli_commit() @@ -95,6 +110,14 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): tmp = cmd(f'sudo podman exec -it {cont_name} sysctl kernel.msgmax') self.assertEqual(tmp, 'kernel.msgmax = 4096') + def test_log_driver(self): + self.cli_set(base_path + ['log-driver', 'journald']) + + self.cli_commit() + + tmp = cmd('podman info --format "{{ .Host.LogDriver }}"') + self.assertEqual(tmp, 'journald') + def test_name_server(self): cont_name = 'dns-test' net_name = 'net-test' @@ -105,7 +128,17 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): self.cli_set(base_path + ['name', cont_name, 'image', busybox_image]) self.cli_set(base_path + ['name', cont_name, 'name-server', name_server]) - self.cli_set(base_path + ['name', cont_name, 'network', net_name, 'address', str(ip_interface(prefix).ip + 2)]) + self.cli_set( + base_path + + [ + 'name', + cont_name, + 'network', + net_name, + 'address', + str(ip_interface(prefix).ip + 2), + ] + ) # verify() - name 
server has no effect when container network has dns enabled with self.assertRaises(ConfigSessionError): @@ -146,7 +179,17 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): for ii in range(1, 6): name = f'{base_name}-{ii}' self.cli_set(base_path + ['name', name, 'image', busybox_image]) - self.cli_set(base_path + ['name', name, 'network', net_name, 'address', str(ip_interface(prefix).ip + ii)]) + self.cli_set( + base_path + + [ + 'name', + name, + 'network', + net_name, + 'address', + str(ip_interface(prefix).ip + ii), + ] + ) # verify() - first IP address of a prefix can not be used by a container with self.assertRaises(ConfigSessionError): @@ -163,8 +206,14 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): for ii in range(2, 6): name = f'{base_name}-{ii}' c = cmd_to_json(f'sudo podman container inspect {name}') - self.assertEqual(c['NetworkSettings']['Networks'][net_name]['Gateway'] , str(ip_interface(prefix).ip + 1)) - self.assertEqual(c['NetworkSettings']['Networks'][net_name]['IPAddress'], str(ip_interface(prefix).ip + ii)) + self.assertEqual( + c['NetworkSettings']['Networks'][net_name]['Gateway'], + str(ip_interface(prefix).ip + 1), + ) + self.assertEqual( + c['NetworkSettings']['Networks'][net_name]['IPAddress'], + str(ip_interface(prefix).ip + ii), + ) def test_ipv6_network(self): prefix = '2001:db8::/64' @@ -176,7 +225,17 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): for ii in range(1, 6): name = f'{base_name}-{ii}' self.cli_set(base_path + ['name', name, 'image', busybox_image]) - self.cli_set(base_path + ['name', name, 'network', net_name, 'address', str(ip_interface(prefix).ip + ii)]) + self.cli_set( + base_path + + [ + 'name', + name, + 'network', + net_name, + 'address', + str(ip_interface(prefix).ip + ii), + ] + ) # verify() - first IP address of a prefix can not be used by a container with self.assertRaises(ConfigSessionError): @@ -193,8 +252,14 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): for ii in range(2, 6): name = f'{base_name}-{ii}' c = cmd_to_json(f'sudo podman container inspect {name}') - self.assertEqual(c['NetworkSettings']['Networks'][net_name]['IPv6Gateway'] , str(ip_interface(prefix).ip + 1)) - self.assertEqual(c['NetworkSettings']['Networks'][net_name]['GlobalIPv6Address'], str(ip_interface(prefix).ip + ii)) + self.assertEqual( + c['NetworkSettings']['Networks'][net_name]['IPv6Gateway'], + str(ip_interface(prefix).ip + 1), + ) + self.assertEqual( + c['NetworkSettings']['Networks'][net_name]['GlobalIPv6Address'], + str(ip_interface(prefix).ip + ii), + ) def test_dual_stack_network(self): prefix4 = '192.0.2.0/24' @@ -208,8 +273,28 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): for ii in range(1, 6): name = f'{base_name}-{ii}' self.cli_set(base_path + ['name', name, 'image', busybox_image]) - self.cli_set(base_path + ['name', name, 'network', net_name, 'address', str(ip_interface(prefix4).ip + ii)]) - self.cli_set(base_path + ['name', name, 'network', net_name, 'address', str(ip_interface(prefix6).ip + ii)]) + self.cli_set( + base_path + + [ + 'name', + name, + 'network', + net_name, + 'address', + str(ip_interface(prefix4).ip + ii), + ] + ) + self.cli_set( + base_path + + [ + 'name', + name, + 'network', + net_name, + 'address', + str(ip_interface(prefix6).ip + ii), + ] + ) # verify() - first IP address of a prefix can not be used by a container with self.assertRaises(ConfigSessionError): @@ -227,10 +312,22 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): for ii in range(2, 6): name = f'{base_name}-{ii}' c = cmd_to_json(f'sudo podman container 
inspect {name}') - self.assertEqual(c['NetworkSettings']['Networks'][net_name]['IPv6Gateway'] , str(ip_interface(prefix6).ip + 1)) - self.assertEqual(c['NetworkSettings']['Networks'][net_name]['GlobalIPv6Address'], str(ip_interface(prefix6).ip + ii)) - self.assertEqual(c['NetworkSettings']['Networks'][net_name]['Gateway'] , str(ip_interface(prefix4).ip + 1)) - self.assertEqual(c['NetworkSettings']['Networks'][net_name]['IPAddress'] , str(ip_interface(prefix4).ip + ii)) + self.assertEqual( + c['NetworkSettings']['Networks'][net_name]['IPv6Gateway'], + str(ip_interface(prefix6).ip + 1), + ) + self.assertEqual( + c['NetworkSettings']['Networks'][net_name]['GlobalIPv6Address'], + str(ip_interface(prefix6).ip + ii), + ) + self.assertEqual( + c['NetworkSettings']['Networks'][net_name]['Gateway'], + str(ip_interface(prefix4).ip + 1), + ) + self.assertEqual( + c['NetworkSettings']['Networks'][net_name]['IPAddress'], + str(ip_interface(prefix4).ip + ii), + ) def test_no_name_server(self): prefix = '192.0.2.0/24' @@ -242,7 +339,17 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): name = f'{base_name}-2' self.cli_set(base_path + ['name', name, 'image', busybox_image]) - self.cli_set(base_path + ['name', name, 'network', net_name, 'address', str(ip_interface(prefix).ip + 2)]) + self.cli_set( + base_path + + [ + 'name', + name, + 'network', + net_name, + 'address', + str(ip_interface(prefix).ip + 2), + ] + ) self.cli_commit() n = cmd_to_json(f'sudo podman network inspect {net_name}') @@ -258,7 +365,17 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): name = f'{base_name}-2' self.cli_set(base_path + ['name', name, 'image', busybox_image]) - self.cli_set(base_path + ['name', name, 'network', net_name, 'address', str(ip_interface(prefix).ip + 2)]) + self.cli_set( + base_path + + [ + 'name', + name, + 'network', + net_name, + 'address', + str(ip_interface(prefix).ip + 2), + ] + ) self.cli_commit() n = cmd_to_json(f'sudo podman network inspect {net_name}') @@ -298,11 +415,14 @@ class TestContainer(VyOSUnitTestSHIM.TestCase): self.cli_commit() # Query API about running containers - tmp = cmd("sudo curl --unix-socket /run/podman/podman.sock -H 'content-type: application/json' -sf http://localhost/containers/json") + tmp = cmd( + "sudo curl --unix-socket /run/podman/podman.sock -H 'content-type: application/json' -sf http://localhost/containers/json" + ) tmp = json.loads(tmp) # We expect the same amount of containers from the API that we started above self.assertEqual(len(container_list), len(tmp)) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_firewall.py b/smoketest/scripts/cli/test_firewall.py index 2829edbfb..851a15f16 100755 --- a/smoketest/scripts/cli/test_firewall.py +++ b/smoketest/scripts/cli/test_firewall.py @@ -642,6 +642,10 @@ class TestFirewall(VyOSUnitTestSHIM.TestCase): self.verify_nftables(nftables_search, 'ip6 vyos_filter') def test_ipv4_global_state(self): + self.cli_set(['firewall', 'flowtable', 'smoketest', 'interface', 'eth0']) + self.cli_set(['firewall', 'flowtable', 'smoketest', 'offload', 'software']) + + self.cli_set(['firewall', 'global-options', 'state-policy', 'offload', 'offload-target', 'smoketest']) self.cli_set(['firewall', 'global-options', 'state-policy', 'established', 'action', 'accept']) self.cli_set(['firewall', 'global-options', 'state-policy', 'related', 'action', 'accept']) self.cli_set(['firewall', 'global-options', 'state-policy', 'invalid', 'action', 'drop']) @@ -651,6 +655,9 @@ class 
TestFirewall(VyOSUnitTestSHIM.TestCase): nftables_search = [ ['jump VYOS_STATE_POLICY'], ['chain VYOS_STATE_POLICY'], + ['jump VYOS_STATE_POLICY_FORWARD'], + ['chain VYOS_STATE_POLICY_FORWARD'], + ['flow add @VYOS_FLOWTABLE_smoketest'], ['ct state established', 'accept'], ['ct state invalid', 'drop'], ['ct state related', 'accept'] @@ -1288,7 +1295,7 @@ class TestFirewall(VyOSUnitTestSHIM.TestCase): ['R_group01'], ['type ipv4_addr'], ['flags interval'], - ['meta l4proto', 'daddr @R_group01', "ipv4-INP-filter-10"] + ['meta l4proto', 'daddr @R_group01', 'ipv4-INP-filter-10'] ] self.verify_nftables(nftables_search, 'ip vyos_filter') @@ -1307,5 +1314,79 @@ class TestFirewall(VyOSUnitTestSHIM.TestCase): self.cli_discard() + def test_ipv6_remote_group(self): + # Setup base config for test + self.cli_set(['firewall', 'group', 'remote-group', 'group01', 'url', 'http://127.0.0.1:80/list.txt']) + self.cli_set(['firewall', 'group', 'remote-group', 'group01', 'description', 'Example Group 01']) + self.cli_set(['firewall', 'ipv6', 'input', 'filter', 'rule', '10', 'action', 'drop']) + self.cli_set(['firewall', 'ipv6', 'input', 'filter', 'rule', '10', 'protocol', 'tcp']) + self.cli_set(['firewall', 'ipv6', 'input', 'filter', 'rule', '10', 'destination', 'group', 'remote-group', 'group01']) + + self.cli_commit() + + # Test remote-group had been loaded correctly in nft + nftables_search = [ + ['R6_group01'], + ['type ipv6_addr'], + ['flags interval'], + ['meta l4proto', 'daddr @R6_group01', 'ipv6-INP-filter-10'] + ] + self.verify_nftables(nftables_search, 'ip6 vyos_filter') + + # Test remote-group cannot be configured without a URL + self.cli_delete(['firewall', 'group', 'remote-group', 'group01', 'url']) + + with self.assertRaises(ConfigSessionError): + self.cli_commit() + self.cli_discard() + + # Test remote-group cannot be set alongside address in rules + self.cli_set(['firewall', 'ipv6', 'input', 'filter', 'rule', '10', 'destination', 'address', '2001:db8::1']) + + with self.assertRaises(ConfigSessionError): + self.cli_commit() + self.cli_discard() + + + def test_remote_group(self): + # Setup base config for test adding remote group to both ipv4 and ipv6 rules + self.cli_set(['firewall', 'group', 'remote-group', 'group01', 'url', 'http://127.0.0.1:80/list.txt']) + self.cli_set(['firewall', 'group', 'remote-group', 'group01', 'description', 'Example Group 01']) + self.cli_set(['firewall', 'ipv4', 'output', 'filter', 'rule', '10', 'action', 'drop']) + self.cli_set(['firewall', 'ipv4', 'output', 'filter', 'rule', '10', 'protocol', 'tcp']) + self.cli_set(['firewall', 'ipv4', 'output', 'filter', 'rule', '10', 'destination', 'group', 'remote-group', 'group01']) + self.cli_set(['firewall', 'ipv4', 'input', 'filter', 'rule', '10', 'action', 'drop']) + self.cli_set(['firewall', 'ipv4', 'input', 'filter', 'rule', '10', 'protocol', 'tcp']) + self.cli_set(['firewall', 'ipv4', 'input', 'filter', 'rule', '10', 'source', 'group', 'remote-group', 'group01']) + self.cli_set(['firewall', 'ipv6', 'output', 'filter', 'rule', '10', 'action', 'drop']) + self.cli_set(['firewall', 'ipv6', 'output', 'filter', 'rule', '10', 'protocol', 'tcp']) + self.cli_set(['firewall', 'ipv6', 'output', 'filter', 'rule', '10', 'destination', 'group', 'remote-group', 'group01']) + self.cli_set(['firewall', 'ipv6', 'input', 'filter', 'rule', '10', 'action', 'drop']) + self.cli_set(['firewall', 'ipv6', 'input', 'filter', 'rule', '10', 'protocol', 'tcp']) + self.cli_set(['firewall', 'ipv6', 'input', 'filter', 'rule', '10', 'source', 'group', 
'remote-group', 'group01']) + + self.cli_commit() + + # Test remote-group had been loaded correctly in nft ip table + nftables_v4_search = [ + ['R_group01'], + ['type ipv4_addr'], + ['flags interval'], + ['meta l4proto', 'daddr @R_group01', 'ipv4-OUT-filter-10'], + ['meta l4proto', 'saddr @R_group01', 'ipv4-INP-filter-10'], + ] + self.verify_nftables(nftables_v4_search, 'ip vyos_filter') + + # Test remote-group had been loaded correctly in nft ip6 table + nftables_v6_search = [ + ['R6_group01'], + ['type ipv6_addr'], + ['flags interval'], + ['meta l4proto', 'daddr @R6_group01', 'ipv6-OUT-filter-10'], + ['meta l4proto', 'saddr @R6_group01', 'ipv6-INP-filter-10'], + ] + self.verify_nftables(nftables_v6_search, 'ip6 vyos_filter') + + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_interfaces_vxlan.py b/smoketest/scripts/cli/test_interfaces_vxlan.py index 694c24e4d..132496124 100755 --- a/smoketest/scripts/cli/test_interfaces_vxlan.py +++ b/smoketest/scripts/cli/test_interfaces_vxlan.py @@ -125,19 +125,17 @@ class VXLANInterfaceTest(BasicInterfaceTest.TestCase): 'source-interface eth0', 'vni 60' ] - params = [] for option in options: opts = option.split() - params.append(opts[0]) - self.cli_set(self._base_path + [ intf ] + opts) + self.cli_set(self._base_path + [intf] + opts) - with self.assertRaises(ConfigSessionError) as cm: + # verify() - Both group and remote cannot be specified + with self.assertRaises(ConfigSessionError): self.cli_commit() - exception = cm.exception - self.assertIn('Both group and remote cannot be specified', str(exception)) - for param in params: - self.cli_delete(self._base_path + [intf, param]) + # Remove blocking CLI option + self.cli_delete(self._base_path + [intf, 'group']) + self.cli_commit() def test_vxlan_external(self): diff --git a/smoketest/scripts/cli/test_load-balancing_haproxy.py b/smoketest/scripts/cli/test_load-balancing_haproxy.py index 077f1974f..833e0a92b 100755 --- a/smoketest/scripts/cli/test_load-balancing_haproxy.py +++ b/smoketest/scripts/cli/test_load-balancing_haproxy.py @@ -14,11 +14,14 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
+import re +import textwrap import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError +from vyos.template import get_default_port from vyos.utils.process import process_named_running from vyos.utils.file import read_file @@ -131,7 +134,25 @@ ZXLrtgVJR9W020qTurO2f91qfU8646n11hR9ObBB1IYbagOU0Pw1Nrq/FRp/u2tx 7i7xFz2WEiQeSCPaKYOiqM3t """ +haproxy_service_name = 'https_front' +haproxy_backend_name = 'bk-01' +def parse_haproxy_config() -> dict: + config_str = read_file(HAPROXY_CONF) + section_pattern = re.compile(r'^(global|defaults|frontend\s+\S+|backend\s+\S+)', re.MULTILINE) + sections = {} + + matches = list(section_pattern.finditer(config_str)) + + for i, match in enumerate(matches): + section_name = match.group(1).strip() + start = match.end() + end = matches[i + 1].start() if i + 1 < len(matches) else len(config_str) + section_body = config_str[start:end] + dedented_body = textwrap.dedent(section_body).strip() + sections[section_name] = dedented_body + + return sections class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): def tearDown(self): # Check for running process @@ -146,14 +167,14 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertFalse(process_named_running(PROCESS_NAME)) def base_config(self): - self.cli_set(base_path + ['service', 'https_front', 'mode', 'http']) - self.cli_set(base_path + ['service', 'https_front', 'port', '4433']) - self.cli_set(base_path + ['service', 'https_front', 'backend', 'bk-01']) + self.cli_set(base_path + ['service', haproxy_service_name, 'mode', 'http']) + self.cli_set(base_path + ['service', haproxy_service_name, 'port', '4433']) + self.cli_set(base_path + ['service', haproxy_service_name, 'backend', haproxy_backend_name]) - self.cli_set(base_path + ['backend', 'bk-01', 'mode', 'http']) - self.cli_set(base_path + ['backend', 'bk-01', 'server', 'bk-01', 'address', '192.0.2.11']) - self.cli_set(base_path + ['backend', 'bk-01', 'server', 'bk-01', 'port', '9090']) - self.cli_set(base_path + ['backend', 'bk-01', 'server', 'bk-01', 'send-proxy']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'mode', 'http']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'server', haproxy_backend_name, 'address', '192.0.2.11']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'server', haproxy_backend_name, 'port', '9090']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'server', haproxy_backend_name, 'send-proxy']) self.cli_set(base_path + ['global-parameters', 'max-connections', '1000']) @@ -167,15 +188,15 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.cli_set(['pki', 'certificate', 'smoketest', 'certificate', valid_cert.replace('\n','')]) self.cli_set(['pki', 'certificate', 'smoketest', 'private', 'key', valid_cert_private_key.replace('\n','')]) - def test_01_lb_reverse_proxy_domain(self): + def test_reverse_proxy_domain(self): domains_bk_first = ['n1.example.com', 'n2.example.com', 'n3.example.com'] domain_bk_second = 'n5.example.com' - frontend = 'https_front' + frontend = 'vyos_smoketest' front_port = '4433' bk_server_first = '192.0.2.11' bk_server_second = '192.0.2.12' - bk_first_name = 'bk-01' - bk_second_name = 'bk-02' + bk_first_name = 'vyosbk-01' + bk_second_name = 'vyosbk-02' bk_server_port = '9090' mode = 'http' rule_ten = '10' @@ -241,9 +262,9 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertIn(f'server {bk_second_name} {bk_server_second}:{bk_server_port}', 
config) self.assertIn(f'server {bk_second_name} {bk_server_second}:{bk_server_port} backup', config) - def test_02_lb_reverse_proxy_cert_not_exists(self): + def test_reverse_proxy_cert_not_exists(self): self.base_config() - self.cli_set(base_path + ['service', 'https_front', 'ssl', 'certificate', 'cert']) + self.cli_set(base_path + ['service', haproxy_service_name, 'ssl', 'certificate', 'cert']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() @@ -253,19 +274,19 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.configure_pki() self.base_config() - self.cli_set(base_path + ['service', 'https_front', 'ssl', 'certificate', 'cert']) + self.cli_set(base_path + ['service', haproxy_service_name, 'ssl', 'certificate', 'cert']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() # self.assertIn('\nCertificate "cert" does not exist\n', str(e.exception)) - self.cli_delete(base_path + ['service', 'https_front', 'ssl', 'certificate', 'cert']) - self.cli_set(base_path + ['service', 'https_front', 'ssl', 'certificate', 'smoketest']) + self.cli_delete(base_path + ['service', haproxy_service_name, 'ssl', 'certificate', 'cert']) + self.cli_set(base_path + ['service', haproxy_service_name, 'ssl', 'certificate', 'smoketest']) self.cli_commit() - def test_03_lb_reverse_proxy_ca_not_exists(self): + def test_reverse_proxy_ca_not_exists(self): self.base_config() - self.cli_set(base_path + ['backend', 'bk-01', 'ssl', 'ca-certificate', 'ca-test']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'ssl', 'ca-certificate', 'ca-test']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() @@ -275,40 +296,40 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.configure_pki() self.base_config() - self.cli_set(base_path + ['backend', 'bk-01', 'ssl', 'ca-certificate', 'ca-test']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'ssl', 'ca-certificate', 'ca-test']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() # self.assertIn('\nCA certificate "ca-test" does not exist\n', str(e.exception)) - self.cli_delete(base_path + ['backend', 'bk-01', 'ssl', 'ca-certificate', 'ca-test']) - self.cli_set(base_path + ['backend', 'bk-01', 'ssl', 'ca-certificate', 'smoketest']) + self.cli_delete(base_path + ['backend', haproxy_backend_name, 'ssl', 'ca-certificate', 'ca-test']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'ssl', 'ca-certificate', 'smoketest']) self.cli_commit() - def test_04_lb_reverse_proxy_backend_ssl_no_verify(self): + def test_reverse_proxy_backend_ssl_no_verify(self): # Setup base self.configure_pki() self.base_config() # Set no-verify option - self.cli_set(base_path + ['backend', 'bk-01', 'ssl', 'no-verify']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'ssl', 'no-verify']) self.cli_commit() # Test no-verify option config = read_file(HAPROXY_CONF) - self.assertIn('server bk-01 192.0.2.11:9090 send-proxy ssl verify none', config) + self.assertIn(f'server {haproxy_backend_name} 192.0.2.11:9090 send-proxy ssl verify none', config) # Test setting ca-certificate alongside no-verify option fails, to test config validation - self.cli_set(base_path + ['backend', 'bk-01', 'ssl', 'ca-certificate', 'smoketest']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'ssl', 'ca-certificate', 'smoketest']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() - def test_05_lb_reverse_proxy_backend_http_check(self): + def 
test_reverse_proxy_backend_http_check(self): # Setup base self.base_config() # Set http-check - self.cli_set(base_path + ['backend', 'bk-01', 'http-check', 'method', 'get']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'http-check', 'method', 'get']) self.cli_commit() # Test http-check @@ -317,8 +338,8 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertIn('http-check send meth GET', config) # Set http-check with uri and status - self.cli_set(base_path + ['backend', 'bk-01', 'http-check', 'uri', '/health']) - self.cli_set(base_path + ['backend', 'bk-01', 'http-check', 'expect', 'status', '200']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'http-check', 'uri', '/health']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'http-check', 'expect', 'status', '200']) self.cli_commit() # Test http-check with uri and status @@ -328,8 +349,8 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertIn('http-check expect status 200', config) # Set http-check with string - self.cli_delete(base_path + ['backend', 'bk-01', 'http-check', 'expect', 'status', '200']) - self.cli_set(base_path + ['backend', 'bk-01', 'http-check', 'expect', 'string', 'success']) + self.cli_delete(base_path + ['backend', haproxy_backend_name, 'http-check', 'expect', 'status', '200']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'http-check', 'expect', 'string', 'success']) self.cli_commit() # Test http-check with string @@ -339,11 +360,11 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertIn('http-check expect string success', config) # Test configuring both http-check & health-check fails validation script - self.cli_set(base_path + ['backend', 'bk-01', 'health-check', 'ldap']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'health-check', 'ldap']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() - def test_06_lb_reverse_proxy_tcp_mode(self): + def test_reverse_proxy_tcp_mode(self): frontend = 'tcp_8443' mode = 'tcp' front_port = '8433' @@ -390,27 +411,27 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertIn(f'mode {mode}', config) self.assertIn(f'server {bk_name} {bk_server}:{bk_server_port}', config) - def test_07_lb_reverse_proxy_http_response_headers(self): + def test_reverse_proxy_http_response_headers(self): # Setup base self.configure_pki() self.base_config() # Set example headers in both frontend and backend - self.cli_set(base_path + ['service', 'https_front', 'http-response-headers', 'Cache-Control', 'value', 'max-age=604800']) - self.cli_set(base_path + ['backend', 'bk-01', 'http-response-headers', 'Proxy-Backend-ID', 'value', 'bk-01']) + self.cli_set(base_path + ['service', haproxy_service_name, 'http-response-headers', 'Cache-Control', 'value', 'max-age=604800']) + self.cli_set(base_path + ['backend', haproxy_backend_name, 'http-response-headers', 'Proxy-Backend-ID', 'value', haproxy_backend_name]) self.cli_commit() # Test headers are present in generated configuration file config = read_file(HAPROXY_CONF) self.assertIn('http-response set-header Cache-Control \'max-age=604800\'', config) - self.assertIn('http-response set-header Proxy-Backend-ID \'bk-01\'', config) + self.assertIn(f'http-response set-header Proxy-Backend-ID \'{haproxy_backend_name}\'', config) # Test setting alongside modes other than http is blocked by validation conditions - self.cli_set(base_path + ['service', 'https_front', 'mode', 'tcp']) + 
self.cli_set(base_path + ['service', haproxy_service_name, 'mode', 'tcp']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() - def test_08_lb_reverse_proxy_tcp_health_checks(self): + def test_reverse_proxy_tcp_health_checks(self): # Setup PKI self.configure_pki() @@ -458,7 +479,7 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): config = read_file(HAPROXY_CONF) self.assertIn(f'option smtpchk', config) - def test_09_lb_reverse_proxy_logging(self): + def test_reverse_proxy_logging(self): # Setup base self.base_config() self.cli_commit() @@ -477,7 +498,7 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertIn('log /dev/log local2 warning', config) # Test backend logging options - backend_path = base_path + ['backend', 'bk-01'] + backend_path = base_path + ['backend', haproxy_backend_name] self.cli_set(backend_path + ['logging', 'facility', 'local3', 'level', 'debug']) self.cli_set(backend_path + ['logging', 'facility', 'local4', 'level', 'info']) self.cli_commit() @@ -488,7 +509,7 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertIn('log /dev/log local4 info', config) # Test service logging options - service_path = base_path + ['service', 'https_front'] + service_path = base_path + ['service', haproxy_service_name] self.cli_set(service_path + ['logging', 'facility', 'local5', 'level', 'notice']) self.cli_set(service_path + ['logging', 'facility', 'local6', 'level', 'crit']) self.cli_commit() @@ -498,16 +519,17 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertIn('log /dev/log local5 notice', config) self.assertIn('log /dev/log local6 crit', config) - def test_10_lb_reverse_proxy_http_compression(self): + def test_reverse_proxy_http_compression(self): # Setup base self.configure_pki() self.base_config() # Configure compression in frontend - self.cli_set(base_path + ['service', 'https_front', 'http-compression', 'algorithm', 'gzip']) - self.cli_set(base_path + ['service', 'https_front', 'http-compression', 'mime-type', 'text/html']) - self.cli_set(base_path + ['service', 'https_front', 'http-compression', 'mime-type', 'text/javascript']) - self.cli_set(base_path + ['service', 'https_front', 'http-compression', 'mime-type', 'text/plain']) + http_comp_path = base_path + ['service', haproxy_service_name, 'http-compression'] + self.cli_set(http_comp_path + ['algorithm', 'gzip']) + self.cli_set(http_comp_path + ['mime-type', 'text/html']) + self.cli_set(http_comp_path + ['mime-type', 'text/javascript']) + self.cli_set(http_comp_path + ['mime-type', 'text/plain']) self.cli_commit() # Test compression is present in generated configuration file @@ -517,11 +539,11 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.assertIn('compression type text/html text/javascript text/plain', config) # Test setting compression without specifying any mime-types fails verification - self.cli_delete(base_path + ['service', 'https_front', 'http-compression', 'mime-type']) + self.cli_delete(base_path + ['service', haproxy_service_name, 'http-compression', 'mime-type']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() - def test_11_lb_haproxy_timeout(self): + def test_reverse_proxy_timeout(self): t_default_check = '5' t_default_client = '50' t_default_connect = '10' @@ -551,7 +573,7 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): self.cli_set(base_path + ['timeout', 'client', t_client]) self.cli_set(base_path + ['timeout', 'connect', t_connect]) 
self.cli_set(base_path + ['timeout', 'server', t_server]) - self.cli_set(base_path + ['service', 'https_front', 'timeout', 'client', t_front_client]) + self.cli_set(base_path + ['service', haproxy_service_name, 'timeout', 'client', t_front_client]) self.cli_commit() @@ -569,5 +591,25 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase): for config_entry in config_entries: self.assertIn(config_entry, config) + def test_reverse_proxy_http_redirect(self): + self.base_config() + self.cli_set(base_path + ['service', haproxy_service_name, 'redirect-http-to-https']) + + self.cli_commit() + + config = parse_haproxy_config() + frontend_name = f'frontend {haproxy_service_name}-http' + self.assertIn(frontend_name, config.keys()) + self.assertIn('mode http', config[frontend_name]) + self.assertIn('bind [::]:80 v4v6', config[frontend_name]) + self.assertIn('acl acme_acl path_beg /.well-known/acme-challenge/', config[frontend_name]) + self.assertIn('use_backend buildin_acme_certbot if acme_acl', config[frontend_name]) + self.assertIn('redirect scheme https code 301 if !acme_acl', config[frontend_name]) + + backend_name = 'backend buildin_acme_certbot' + self.assertIn(backend_name, config.keys()) + port = get_default_port('certbot_haproxy') + self.assertIn(f'server localhost 127.0.0.1:{port}', config[backend_name]) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_nat66.py b/smoketest/scripts/cli/test_nat66.py index 52ad8e3ef..d4b5d6aa4 100755 --- a/smoketest/scripts/cli/test_nat66.py +++ b/smoketest/scripts/cli/test_nat66.py @@ -227,6 +227,35 @@ class TestNAT66(VyOSUnitTestSHIM.TestCase): self.verify_nftables(nftables_search, 'ip6 vyos_nat') + def test_source_nat66_network_group(self): + address_group = 'smoketest_addr' + address_group_member = 'fc00::1' + network_group = 'smoketest_net' + network_group_member = 'fc00::/64' + translation_prefix = 'fc01::/64' + + self.cli_set(['firewall', 'group', 'ipv6-address-group', address_group, 'address', address_group_member]) + self.cli_set(['firewall', 'group', 'ipv6-network-group', network_group, 'network', network_group_member]) + + self.cli_set(src_path + ['rule', '1', 'destination', 'group', 'address-group', address_group]) + self.cli_set(src_path + ['rule', '1', 'translation', 'address', translation_prefix]) + + self.cli_set(src_path + ['rule', '2', 'destination', 'group', 'network-group', network_group]) + self.cli_set(src_path + ['rule', '2', 'translation', 'address', translation_prefix]) + + self.cli_commit() + + nftables_search = [ + [f'set A6_{address_group}'], + [f'elements = {{ {address_group_member} }}'], + [f'set N6_{network_group}'], + [f'elements = {{ {network_group_member} }}'], + ['ip6 daddr', f'@A6_{address_group}', 'snat prefix to fc01::/64'], + ['ip6 daddr', f'@N6_{network_group}', 'snat prefix to fc01::/64'] + ] + + self.verify_nftables(nftables_search, 'ip6 vyos_nat') + def test_nat66_no_rules(self): # T3206: deleting all rules but keep the direction 'destination' or # 'source' resulteds in KeyError: 'rule'. 
diff --git a/smoketest/scripts/cli/test_service_https.py b/smoketest/scripts/cli/test_service_https.py index 04c4a2e51..b4fe35d81 100755 --- a/smoketest/scripts/cli/test_service_https.py +++ b/smoketest/scripts/cli/test_service_https.py @@ -16,6 +16,7 @@ import unittest import json +import psutil from requests import request from urllib3.exceptions import InsecureRequestWarning @@ -113,6 +114,29 @@ class TestHTTPSService(VyOSUnitTestSHIM.TestCase): # Check for stopped process self.assertFalse(process_named_running(PROCESS_NAME)) + def test_listen_address(self): + test_prefix = ['192.0.2.1/26', '2001:db8:1::ffff/64'] + test_addr = [ i.split('/')[0] for i in test_prefix ] + for i, addr in enumerate(test_prefix): + self.cli_set(['interfaces', 'dummy', f'dum{i}', 'address', addr]) + + key = 'MySuperSecretVyOS' + self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key]) + # commit base config first, for testing update of listen-address + self.cli_commit() + + for addr in test_addr: + self.cli_set(base_path + ['listen-address', addr]) + self.cli_commit() + + res = set() + t = psutil.net_connections(kind="tcp") + for c in t: + if c.laddr.port == 443: + res.add(c.laddr.ip) + + self.assertEqual(res, set(test_addr)) + def test_certificate(self): cert_name = 'test_https' dh_name = 'dh-test' diff --git a/smoketest/scripts/cli/test_service_router-advert.py b/smoketest/scripts/cli/test_service_router-advert.py index 83342bc72..33ce93ef4 100755 --- a/smoketest/scripts/cli/test_service_router-advert.py +++ b/smoketest/scripts/cli/test_service_router-advert.py @@ -256,12 +256,17 @@ class TestServiceRADVD(VyOSUnitTestSHIM.TestCase): isp_prefix = '2001:db8::/64' ula_prefixes = ['fd00::/64', 'fd01::/64'] + # configure wildcard prefix + self.cli_set(base_path + ['prefix', '::/64']) + + # test auto-ignore CLI behaviors with no prefix overrides + # set auto-ignore for all three prefixes self.cli_set(base_path + ['auto-ignore', isp_prefix]) for ula_prefix in ula_prefixes: self.cli_set(base_path + ['auto-ignore', ula_prefix]) - # commit changes + # commit and reload config self.cli_commit() config = read_file(RADVD_CONF) @@ -277,6 +282,7 @@ class TestServiceRADVD(VyOSUnitTestSHIM.TestCase): # remove a prefix and verify it's gone self.cli_delete(base_path + ['auto-ignore', ula_prefixes[1]]) + # commit and reload config self.cli_commit() config = read_file(RADVD_CONF) @@ -290,9 +296,72 @@ class TestServiceRADVD(VyOSUnitTestSHIM.TestCase): self.cli_delete(base_path + ['auto-ignore', ula_prefixes[0]]) self.cli_delete(base_path + ['auto-ignore', isp_prefix]) + # commit and reload config + self.cli_commit() + config = read_file(RADVD_CONF) + + tmp = f'autoignoreprefixes' + ' {' + self.assertNotIn(tmp, config) + + # test wildcard prefix overrides, with and without auto-ignore CLI configuration + newline = '\n' + left_curly = '{' + right_curly = '}' + + # override ULA prefixes + for ula_prefix in ula_prefixes: + self.cli_set(base_path + ['prefix', ula_prefix]) + + # commit and reload config + self.cli_commit() + config = read_file(RADVD_CONF) + + # ensure autoignoreprefixes block is generated in config file with both prefixes + tmp = f'autoignoreprefixes' + f' {left_curly}{newline} {ula_prefixes[0]};{newline} {ula_prefixes[1]};{newline} {right_curly};' + self.assertIn(tmp, config) + + # remove a ULA prefix and ensure there is only one prefix in the config block + self.cli_delete(base_path + ['prefix', ula_prefixes[0]]) + + # commit and reload config + self.cli_commit() + config = read_file(RADVD_CONF) + + # 
ensure autoignoreprefixes block is generated in config file with only one prefix + tmp = f'autoignoreprefixes' + f' {left_curly}{newline} {ula_prefixes[1]};{newline} {right_curly};' + self.assertIn(tmp, config) + + # exclude a prefix with auto-ignore CLI syntax + self.cli_set(base_path + ['auto-ignore', ula_prefixes[0]]) + + # commit and reload config + self.cli_commit() + config = read_file(RADVD_CONF) + + # verify that both prefixes appear in config block once again + tmp = f'autoignoreprefixes' + f' {left_curly}{newline} {ula_prefixes[0]};{newline} {ula_prefixes[1]};{newline} {right_curly};' + self.assertIn(tmp, config) + + # override first ULA prefix again + # first ULA is auto-ignored in CLI, it must appear only once in config + self.cli_set(base_path + ['prefix', ula_prefixes[0]]) + + # commit and reload config + self.cli_commit() + config = read_file(RADVD_CONF) + + # verify that both prefixes appear uniquely + tmp = f'autoignoreprefixes' + f' {left_curly}{newline} {ula_prefixes[0]};{newline} {ula_prefixes[1]};{newline} {right_curly};' + self.assertIn(tmp, config) + + # remove wildcard prefix and verify config block is gone + self.cli_delete(base_path + ['prefix', '::/64']) + + # commit and reload config self.cli_commit() config = read_file(RADVD_CONF) + # verify config block is gone tmp = f'autoignoreprefixes' + ' {' self.assertNotIn(tmp, config) diff --git a/smoketest/scripts/cli/test_system_option.py b/smoketest/scripts/cli/test_system_option.py index f3112cf0b..c7f8c1f3e 100755 --- a/smoketest/scripts/cli/test_system_option.py +++ b/smoketest/scripts/cli/test_system_option.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2024 VyOS maintainers and contributors +# Copyright (C) 2024-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -16,14 +16,18 @@ import os import unittest + from base_vyostest_shim import VyOSUnitTestSHIM + +from vyos.configsession import ConfigSessionError +from vyos.utils.cpu import get_cpus from vyos.utils.file import read_file from vyos.utils.process import is_systemd_service_active from vyos.utils.system import sysctl_read +from vyos.system import image base_path = ['system', 'option'] - class TestSystemOption(VyOSUnitTestSHIM.TestCase): def tearDown(self): self.cli_delete(base_path) @@ -96,6 +100,60 @@ class TestSystemOption(VyOSUnitTestSHIM.TestCase): self.cli_commit() self.assertFalse(os.path.exists(ssh_client_opt_file)) + def test_kernel_options(self): + amd_pstate_mode = 'active' + isolate_cpus = '1,2,3' + nohz_full = '2' + rcu_no_cbs = '1,2,4-5' + default_hp_size = '2M' + hp_size_1g = '1G' + hp_size_2m = '2M' + hp_count_1g = '2' + hp_count_2m = '512' + + self.cli_set(['system', 'option', 'kernel', 'cpu', 'disable-nmi-watchdog']) + self.cli_set(['system', 'option', 'kernel', 'cpu', 'isolate-cpus', isolate_cpus]) + self.cli_set(['system', 'option', 'kernel', 'cpu', 'nohz-full', nohz_full]) + self.cli_set(['system', 'option', 'kernel', 'cpu', 'rcu-no-cbs', rcu_no_cbs]) + self.cli_set(['system', 'option', 'kernel', 'disable-hpet']) + self.cli_set(['system', 'option', 'kernel', 'disable-mce']) + self.cli_set(['system', 'option', 'kernel', 'disable-mitigations']) + self.cli_set(['system', 'option', 'kernel', 'disable-power-saving']) + self.cli_set(['system', 'option', 'kernel', 'disable-softlockup']) + self.cli_set(['system', 'option', 'kernel', 'memory', 'disable-numa-balancing']) + self.cli_set(['system', 
'option', 'kernel', 'memory', 'default-hugepage-size', default_hp_size]) + self.cli_set(['system', 'option', 'kernel', 'memory', 'hugepage-size', hp_size_1g, 'hugepage-count', hp_count_1g]) + self.cli_set(['system', 'option', 'kernel', 'memory', 'hugepage-size', hp_size_2m, 'hugepage-count', hp_count_2m]) + self.cli_set(['system', 'option', 'kernel', 'quiet']) + + self.cli_set(['system', 'option', 'kernel', 'amd-pstate-driver', amd_pstate_mode]) + cpu_vendor = get_cpus()[0]['vendor_id'] + if cpu_vendor != 'AuthenticAMD': + with self.assertRaises(ConfigSessionError): + self.cli_commit() + self.cli_delete(['system', 'option', 'kernel', 'amd-pstate-driver']) + + self.cli_commit() + + # Read GRUB config file for current running image + tmp = read_file(f'{image.grub.GRUB_DIR_VYOS_VERS}/{image.get_running_image()}.cfg') + self.assertIn(' mitigations=off', tmp) + self.assertIn(' intel_idle.max_cstate=0 processor.max_cstate=1', tmp) + self.assertIn(' quiet', tmp) + self.assertIn(' nmi_watchdog=0', tmp) + self.assertIn(' hpet=disable', tmp) + self.assertIn(' mce=off', tmp) + self.assertIn(' nosoftlockup', tmp) + self.assertIn(f' isolcpus={isolate_cpus}', tmp) + self.assertIn(f' nohz_full={nohz_full}', tmp) + self.assertIn(f' rcu_nocbs={rcu_no_cbs}', tmp) + self.assertIn(f' default_hugepagesz={default_hp_size}', tmp) + self.assertIn(f' hugepagesz={hp_size_1g} hugepages={hp_count_1g}', tmp) + self.assertIn(f' hugepagesz={hp_size_2m} hugepages={hp_count_2m}', tmp) + self.assertIn(' numa_balancing=disable', tmp) + + if cpu_vendor == 'AuthenticAMD': + self.assertIn(f' initcall_blacklist=acpi_cpufreq_init amd_pstate={amd_pstate_mode}', tmp) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/system/test_kernel_options.py b/smoketest/scripts/system/test_kernel_options.py index b51b0be1d..84e9c145d 100755 --- a/smoketest/scripts/system/test_kernel_options.py +++ b/smoketest/scripts/system/test_kernel_options.py @@ -134,5 +134,14 @@ class TestKernelModules(unittest.TestCase): tmp = re.findall(f'{option}=y', self._config_data) self.assertTrue(tmp) + def test_amd_pstate(self): + # AMD pstate driver required as we have "set system option kernel amd-pstate-driver" + for option in ['CONFIG_X86_AMD_PSTATE']: + tmp = re.findall(f'{option}=y', self._config_data) + self.assertTrue(tmp) + for option in ['CONFIG_X86_AMD_PSTATE_DEFAULT_MODE']: + tmp = re.findall(f'{option}=3', self._config_data) + self.assertTrue(tmp) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py index 18d660a4e..94882fc14 100755 --- a/src/conf_mode/container.py +++ b/src/conf_mode/container.py @@ -324,6 +324,11 @@ def generate_run_arguments(name, container_config): cap = cap.upper().replace('-', '_') capabilities += f' --cap-add={cap}' + # Grant root capabilities to the container + privileged = '' + if 'privileged' in container_config: + privileged = '--privileged' + # Add a host device to the container /dev/x:/dev/x device = '' if 'device' in container_config: @@ -402,7 +407,7 @@ def generate_run_arguments(name, container_config): for ns in container_config['name_server']: name_server += f'--dns {ns}' - container_base_cmd = f'--detach --interactive --tty --replace {capabilities} --cpus {cpu_quota} {sysctl_opt} ' \ + container_base_cmd = f'--detach --interactive --tty --replace {capabilities} {privileged} --cpus {cpu_quota} {sysctl_opt} ' \ f'--memory {memory}m --shm-size {shared_memory}m --memory-swap 0 --restart {restart} ' \ f'--name 
{name} {hostname} {device} {port} {name_server} {volume} {tmpfs} {env_opt} {label} {uid} {host_pid}' diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py index 72f2d39f4..274ca2ce6 100755 --- a/src/conf_mode/firewall.py +++ b/src/conf_mode/firewall.py @@ -205,7 +205,7 @@ def verify_rule(firewall, family, hook, priority, rule_id, rule_conf): if 'jump' not in rule_conf['action']: raise ConfigError('jump-target defined, but action jump needed and it is not defined') target = rule_conf['jump_target'] - if hook != 'name': # This is a bit clumsy, but consolidates a chunk of code. + if hook != 'name': # This is a bit clumsy, but consolidates a chunk of code. verify_jump_target(firewall, hook, target, family, recursive=True) else: verify_jump_target(firewall, hook, target, family, recursive=False) @@ -268,12 +268,12 @@ def verify_rule(firewall, family, hook, priority, rule_id, rule_conf): if dict_search_args(rule_conf, 'gre', 'flags', 'checksum') is None: # There is no builtin match in nftables for the GRE key, so we need to do a raw lookup. - # The offset of the key within the packet shifts depending on the C-flag. - # 99% of the time, nobody will have checksums enabled - it's usually a manual config option. - # We can either assume it is unset unless otherwise directed + # The offset of the key within the packet shifts depending on the C-flag. + # 99% of the time, nobody will have checksums enabled - it's usually a manual config option. + # We can either assume it is unset unless otherwise directed # (confusing, requires doco to explain why it doesn't work sometimes) - # or, demand an explicit selection to be made for this specific match rule. - # This check enforces the latter. The user is free to create rules for both cases. + # or, demand an explicit selection to be made for this specific match rule. + # This check enforces the latter. The user is free to create rules for both cases. raise ConfigError('Matching GRE tunnel key requires an explicit checksum flag match. For most cases, use "gre flags checksum unset"') if dict_search_args(rule_conf, 'gre', 'flags', 'key', 'unset') is not None: @@ -286,7 +286,7 @@ def verify_rule(firewall, family, hook, priority, rule_id, rule_conf): if gre_inner_value < 0 or gre_inner_value > 65535: raise ConfigError('inner-proto outside valid ethertype range 0-65535') except ValueError: - pass # Symbolic constant, pre-validated before reaching here. + pass # Symbolic constant, pre-validated before reaching here. tcp_flags = dict_search_args(rule_conf, 'tcp', 'flags') if tcp_flags: @@ -437,6 +437,16 @@ def verify(firewall): for ifname in interfaces: verify_hardware_offload(ifname) + if 'offload' in firewall.get('global_options', {}).get('state_policy', {}): + offload_path = firewall['global_options']['state_policy']['offload'] + if 'offload_target' not in offload_path: + raise ConfigError('offload-target must be specified') + + offload_target = offload_path['offload_target'] + + if not dict_search_args(firewall, 'flowtable', offload_target): + raise ConfigError(f'Invalid offload-target. 
Flowtable "{offload_target}" does not exist on the system') + if 'group' in firewall: for group_type in nested_group_types: if group_type in firewall['group']: diff --git a/src/conf_mode/interfaces_wireguard.py b/src/conf_mode/interfaces_wireguard.py index 192937dba..3ca6ecdca 100755 --- a/src/conf_mode/interfaces_wireguard.py +++ b/src/conf_mode/interfaces_wireguard.py @@ -97,7 +97,7 @@ def verify(wireguard): if 'port' in wireguard and 'port_changed' in wireguard: listen_port = int(wireguard['port']) - if check_port_availability('0.0.0.0', listen_port, 'udp') is not True: + if check_port_availability(None, listen_port, protocol='udp') is not True: raise ConfigError(f'UDP port {listen_port} is busy or unavailable and ' 'cannot be used for the interface!') diff --git a/src/conf_mode/load-balancing_haproxy.py b/src/conf_mode/load-balancing_haproxy.py index 5fd1beec9..504a90596 100644 --- a/src/conf_mode/load-balancing_haproxy.py +++ b/src/conf_mode/load-balancing_haproxy.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2023-2024 VyOS maintainers and contributors +# Copyright (C) 2023-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -19,6 +19,7 @@ import os from sys import exit from shutil import rmtree +from vyos.defaults import systemd_services from vyos.config import Config from vyos.configverify import verify_pki_certificate from vyos.configverify import verify_pki_ca_certificate @@ -39,7 +40,6 @@ airbag.enable() load_balancing_dir = '/run/haproxy' load_balancing_conf_file = f'{load_balancing_dir}/haproxy.cfg' -systemd_service = 'haproxy.service' systemd_override = '/run/systemd/system/haproxy.service.d/10-override.conf' def get_config(config=None): @@ -65,18 +65,18 @@ def verify(lb): return None if 'backend' not in lb or 'service' not in lb: - raise ConfigError(f'"service" and "backend" must be configured!') + raise ConfigError('Both "service" and "backend" must be configured!') for front, front_config in lb['service'].items(): if 'port' not in front_config: raise ConfigError(f'"{front} service port" must be configured!') # Check if bind address:port are used by another service - tmp_address = front_config.get('address', '0.0.0.0') + tmp_address = front_config.get('address', None) tmp_port = front_config['port'] if check_port_availability(tmp_address, int(tmp_port), 'tcp') is not True and \ not is_listen_port_bind_service(int(tmp_port), 'haproxy'): - raise ConfigError(f'"TCP" port "{tmp_port}" is used by another service') + raise ConfigError(f'TCP port "{tmp_port}" is used by another service') if 'http_compression' in front_config: if front_config['mode'] != 'http': @@ -85,16 +85,19 @@ def verify(lb): raise ConfigError(f'service {front} must have at least one mime-type configured to use' f'http_compression!') + for cert in dict_search('ssl.certificate', front_config) or []: + verify_pki_certificate(lb, cert) + for back, back_config in lb['backend'].items(): if 'http_check' in back_config: http_check = back_config['http_check'] if 'expect' in http_check and 'status' in http_check['expect'] and 'string' in http_check['expect']: - raise ConfigError(f'"expect status" and "expect string" can not be configured together!') + raise ConfigError('"expect status" and "expect string" can not be configured together!') if 'health_check' in back_config: if back_config['mode'] != 'tcp': raise ConfigError(f'backend "{back}" can only be configured with 
{back_config["health_check"]} ' + - f'health-check whilst in TCP mode!') + 'health-check whilst in TCP mode!') if 'http_check' in back_config: raise ConfigError(f'backend "{back}" cannot be configured with both http-check and health-check!') @@ -112,20 +115,15 @@ def verify(lb): if {'no_verify', 'ca_certificate'} <= set(back_config['ssl']): raise ConfigError(f'backend {back} cannot have both ssl options no-verify and ca-certificate set!') + tmp = dict_search('ssl.ca_certificate', back_config) + if tmp: verify_pki_ca_certificate(lb, tmp) + # Check if http-response-headers are configured in any frontend/backend where mode != http for group in ['service', 'backend']: for config_name, config in lb[group].items(): if 'http_response_headers' in config and config['mode'] != 'http': raise ConfigError(f'{group} {config_name} must be set to http mode to use http_response_headers!') - for front, front_config in lb['service'].items(): - for cert in dict_search('ssl.certificate', front_config) or []: - verify_pki_certificate(lb, cert) - - for back, back_config in lb['backend'].items(): - tmp = dict_search('ssl.ca_certificate', back_config) - if tmp: verify_pki_ca_certificate(lb, tmp) - def generate(lb): if not lb: @@ -193,12 +191,11 @@ def generate(lb): return None def apply(lb): + action = 'stop' + if lb: + action = 'reload-or-restart' call('systemctl daemon-reload') - if not lb: - call(f'systemctl stop {systemd_service}') - else: - call(f'systemctl reload-or-restart {systemd_service}') - + call(f'systemctl {action} {systemd_services["haproxy"]}') return None diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py index 95dfae3a5..c65950c9e 100755 --- a/src/conf_mode/nat66.py +++ b/src/conf_mode/nat66.py @@ -92,6 +92,10 @@ def verify(nat): if prefix != None: if not is_ipv6(prefix): raise ConfigError(f'{err_msg} source-prefix not specified') + + if 'destination' in config and 'group' in config['destination']: + if len({'address_group', 'network_group', 'domain_group'} & set(config['destination']['group'])) > 1: + raise ConfigError('Only one address-group, network-group or domain-group can be specified') if dict_search('destination.rule', nat): for rule, config in dict_search('destination.rule', nat).items(): diff --git a/src/conf_mode/pki.py b/src/conf_mode/pki.py index 724f97555..869518dd9 100755 --- a/src/conf_mode/pki.py +++ b/src/conf_mode/pki.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2024 VyOS maintainers and contributors +# Copyright (C) 2021-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -19,6 +19,7 @@ import os from sys import argv from sys import exit +from vyos.base import Message from vyos.config import Config from vyos.config import config_dict_merge from vyos.configdep import set_dependents @@ -27,6 +28,8 @@ from vyos.configdict import node_changed from vyos.configdiff import Diff from vyos.configdiff import get_config_diff from vyos.defaults import directories +from vyos.defaults import internal_ports +from vyos.defaults import systemd_services from vyos.pki import encode_certificate from vyos.pki import is_ca_certificate from vyos.pki import load_certificate @@ -42,9 +45,11 @@ from vyos.utils.dict import dict_search from vyos.utils.dict import dict_search_args from vyos.utils.dict import dict_search_recursive from vyos.utils.file import read_file +from vyos.utils.network import check_port_availability from 
vyos.utils.process import call from vyos.utils.process import cmd from vyos.utils.process import is_systemd_service_active +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError from vyos import airbag airbag.enable() @@ -128,8 +133,20 @@ def certbot_request(name: str, config: dict, dry_run: bool=True): f'--standalone --agree-tos --no-eff-email --expand --server {config["url"]} '\ f'--email {config["email"]} --key-type rsa --rsa-key-size {config["rsa_key_size"]} '\ f'{domains}' + + listen_address = None if 'listen_address' in config: - tmp += f' --http-01-address {config["listen_address"]}' + listen_address = config['listen_address'] + + # When ACME is used behind a reverse proxy, we always bind to localhost + # whatever the CLI listen-address is configured for. + if ('haproxy' in dict_search('used_by', config) and + is_systemd_service_running(systemd_services['haproxy']) and + not check_port_availability(listen_address, 80)): + tmp += f' --http-01-address 127.0.0.1 --http-01-port {internal_ports["certbot_haproxy"]}' + elif listen_address: + tmp += f' --http-01-address {listen_address}' + # verify() does not need to actually request a cert but only test for plausibility if dry_run: tmp += ' --dry-run' @@ -150,14 +167,18 @@ def get_config(config=None): if len(argv) > 1 and argv[1] == 'certbot_renew': pki['certbot_renew'] = {} - changed_keys = ['ca', 'certificate', 'dh', 'key-pair', 'openssh', 'openvpn'] + # Walk through the list of sync_translate mapping and build a list + # which is later used to check if the node was changed in the CLI config + changed_keys = [] + for value in sync_translate.values(): + if value not in changed_keys: + changed_keys.append(value) + # Check for changes to said given keys in the CLI config for key in changed_keys: tmp = node_changed(conf, base + [key], recursive=True, expand_nodes=Diff.DELETE | Diff.ADD) - if 'changed' not in pki: pki.update({'changed':{}}) - pki['changed'].update({key.replace('-', '_') : tmp}) # We only merge on the defaults if there is a configuration at all @@ -219,8 +240,8 @@ def get_config(config=None): continue path = search['path'] - path_str = ' '.join(path + found_path) - #print(f'PKI: Updating config: {path_str} {item_name}') + path_str = ' '.join(path + found_path).replace('_','-') + Message(f'Updating configuration: "{path_str} {item_name}"') if path[0] == 'interfaces': ifname = found_path[0] @@ -230,6 +251,24 @@ def get_config(config=None): if not D.node_changed_presence(path): set_dependents(path[1], conf) + # Check PKI certificates if they are auto-generated by ACME. If they are, + # traverse the current configuration and determine the service that the + # certificate is used by. + # Required to check if we might need to run certbot behind a reverse proxy.
+ if 'certificate' in pki: + for name, cert_config in pki['certificate'].items(): + if 'acme' not in cert_config: + continue + if not dict_search('system.load_balancing.haproxy', pki): + continue + used_by = [] + for cert_list, _ in dict_search_recursive( + pki['system']['load_balancing']['haproxy'], 'certificate'): + if name in cert_list: + used_by.append('haproxy') + if used_by: + pki['certificate'][name]['acme'].update({'used_by': used_by}) + return pki def is_valid_certificate(raw_data): @@ -321,6 +360,15 @@ def verify(pki): raise ConfigError(f'An email address is required to request '\ f'certificate for "{name}" via ACME!') + listen_address = None + if 'listen_address' in cert_conf['acme']: + listen_address = cert_conf['acme']['listen_address'] + + if 'used_by' not in cert_conf['acme']: + if not check_port_availability(listen_address, 80): + raise ConfigError('Port 80 is already in use and not available '\ + f'to provide ACME challenge for "{name}"!') + if 'certbot_renew' not in pki: # Only run the ACME command if something on this entity changed, # as this is time intensive @@ -374,27 +422,35 @@ def verify(pki): for search in sync_search: for key in search['keys']: changed_key = sync_translate[key] - if changed_key not in pki['changed']: continue - for item_name in pki['changed'][changed_key]: node_present = False if changed_key == 'openvpn': node_present = dict_search_args(pki, 'openvpn', 'shared_secret', item_name) else: node_present = dict_search_args(pki, changed_key, item_name) + # If the node is still present, we can skip the check + # as we are not deleting it + if node_present: + continue - if not node_present: - search_dict = dict_search_args(pki['system'], *search['path']) - - if not search_dict: - continue + search_dict = dict_search_args(pki['system'], *search['path']) + if not search_dict: + continue - for found_name, found_path in dict_search_recursive(search_dict, key): - if found_name == item_name: - path_str = " ".join(search['path'] + found_path) - raise ConfigError(f'PKI object "{item_name}" still in use by "{path_str}"') + for found_name, found_path in dict_search_recursive(search_dict, key): + # Check if the name matches either by string compare, or being + # part of a list + if ((isinstance(found_name, str) and found_name == item_name) or + (isinstance(found_name, list) and item_name in found_name)): + # We do not support _ in CLI paths - this is only a convenience + # as we mangle all - to _, now it's time to reverse this! 
+ path_str = ' '.join(search['path'] + found_path).replace('_','-') + object = changed_key.replace('_','-') + tmp = f'Embedded PKI {object} with name "{item_name}" is still '\ + f'in use by CLI path "{path_str}"' + raise ConfigError(tmp) return None @@ -490,7 +546,7 @@ def generate(pki): if not ca_cert_present: tmp = dict_search_args(pki, 'ca', f'{autochain_prefix}{cert}', 'certificate') if not bool(tmp) or tmp != cert_chain_base64: - print(f'Adding/replacing automatically imported CA certificate for "{cert}" ...') + Message(f'Add/replace automatically imported CA certificate for "{cert}"...') add_cli_node(['pki', 'ca', f'{autochain_prefix}{cert}', 'certificate'], value=cert_chain_base64) return None diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py index 53e83c3b4..99d8eb9d1 100755 --- a/src/conf_mode/protocols_bgp.py +++ b/src/conf_mode/protocols_bgp.py @@ -413,15 +413,19 @@ def verify(config_dict): verify_route_map(afi_config['route_map'][tmp], bgp) if 'route_reflector_client' in afi_config: - peer_group_as = peer_config.get('remote_as') + peer_as = peer_config.get('remote_as') - if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): + if peer_as is not None and (peer_as != 'internal' and peer_as != bgp['system_as']): raise ConfigError('route-reflector-client only supported for iBGP peers') else: + # Check into the peer group for the remote as, if we are in a peer group, check in peer itself if 'peer_group' in peer_config: peer_group_as = dict_search(f'peer_group.{peer_group}.remote_as', bgp) - if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): - raise ConfigError('route-reflector-client only supported for iBGP peers') + elif neighbor == 'peer_group': + peer_group_as = peer_config.get('remote_as') + + if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): + raise ConfigError('route-reflector-client only supported for iBGP peers') # T5833 not all AFIs are supported for VRF if 'vrf' in bgp and 'address_family' in peer_config: @@ -527,7 +531,7 @@ def verify(config_dict): or dict_search('import.vrf', afi_config) is not None): # FRR error: please unconfigure vpn to vrf commands before # using import vrf commands - if ('vpn' in afi_config['import'] + if (dict_search('import.vpn', afi_config) is not None or dict_search('export.vpn', afi_config) is not None): raise ConfigError('Please unconfigure VPN to VRF commands before '\ 'using "import vrf" commands!') diff --git a/src/conf_mode/service_https.py b/src/conf_mode/service_https.py index 9e58b4c72..2123823f4 100755 --- a/src/conf_mode/service_https.py +++ b/src/conf_mode/service_https.py @@ -28,6 +28,7 @@ from vyos.configverify import verify_vrf from vyos.configverify import verify_pki_certificate from vyos.configverify import verify_pki_ca_certificate from vyos.configverify import verify_pki_dh_parameters +from vyos.configdiff import get_config_diff from vyos.defaults import api_config_state from vyos.pki import wrap_certificate from vyos.pki import wrap_private_key @@ -79,6 +80,14 @@ def get_config(config=None): # merge CLI and default dictionary https = config_dict_merge(default_values, https) + + # some settings affecting nginx will require a restart: + # for example, a reload will not suffice when binding the listen address + # after nginx has started and dropped privileges; add flag here + diff = get_config_diff(conf) + children_changed = diff.node_changed_children(base) + 
https['nginx_restart_required'] = bool(set(children_changed) != set(['api'])) + return https def verify(https): @@ -208,7 +217,10 @@ def apply(https): elif is_systemd_service_active(http_api_service_name): call(f'systemctl stop {http_api_service_name}') - call(f'systemctl reload-or-restart {https_service_name}') + if https['nginx_restart_required']: + call(f'systemctl restart {https_service_name}') + else: + call(f'systemctl reload-or-restart {https_service_name}') if __name__ == '__main__': try: diff --git a/src/conf_mode/system_option.py b/src/conf_mode/system_option.py index b45a9d8a6..5acad6599 100755 --- a/src/conf_mode/system_option.py +++ b/src/conf_mode/system_option.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019-2024 VyOS maintainers and contributors +# Copyright (C) 2019-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -127,6 +127,9 @@ def generate(options): # occurance is used for having the appropriate options passed to GRUB # when re-configuring options on the CLI. cmdline_options = [] + kernel_opts = options.get('kernel', {}) + k_cpu_opts = kernel_opts.get('cpu', {}) + k_memory_opts = kernel_opts.get('memory', {}) if 'kernel' in options: if 'disable_mitigations' in options['kernel']: cmdline_options.append('mitigations=off') @@ -136,6 +139,50 @@ def generate(options): mode = options['kernel']['amd_pstate_driver'] cmdline_options.append( f'initcall_blacklist=acpi_cpufreq_init amd_pstate={mode}') + if 'quiet' in options['kernel']: + cmdline_options.append('quiet') + + if 'disable_hpet' in kernel_opts: + cmdline_options.append('hpet=disable') + + if 'disable_mce' in kernel_opts: + cmdline_options.append('mce=off') + + if 'disable_softlockup' in kernel_opts: + cmdline_options.append('nosoftlockup') + + # CPU options + isol_cpus = k_cpu_opts.get('isolate_cpus') + if isol_cpus: + cmdline_options.append(f'isolcpus={isol_cpus}') + + nohz_full = k_cpu_opts.get('nohz_full') + if nohz_full: + cmdline_options.append(f'nohz_full={nohz_full}') + + rcu_nocbs = k_cpu_opts.get('rcu_no_cbs') + if rcu_nocbs: + cmdline_options.append(f'rcu_nocbs={rcu_nocbs}') + + if 'disable_nmi_watchdog' in k_cpu_opts: + cmdline_options.append('nmi_watchdog=0') + + # Memory options + if 'disable_numa_balancing' in k_memory_opts: + cmdline_options.append('numa_balancing=disable') + + default_hp_size = k_memory_opts.get('default_hugepage_size') + if default_hp_size: + cmdline_options.append(f'default_hugepagesz={default_hp_size}') + + hp_sizes = k_memory_opts.get('hugepage_size') + if hp_sizes: + for size, settings in hp_sizes.items(): + cmdline_options.append(f'hugepagesz={size}') + count = settings.get('hugepage_count') + if count: + cmdline_options.append(f'hugepages={count}') + grub_util.update_kernel_cmdline_options(' '.join(cmdline_options)) return None diff --git a/src/etc/systemd/system/frr.service.d/override.conf b/src/etc/systemd/system/frr.service.d/override.conf index 614b4f7ed..a4a73ecd9 100644 --- a/src/etc/systemd/system/frr.service.d/override.conf +++ b/src/etc/systemd/system/frr.service.d/override.conf @@ -3,9 +3,11 @@ After=vyos-router.service [Service] LimitNOFILE=4096 -ExecStartPre=/bin/bash -c 'mkdir -p /run/frr/config; \ +ExecStartPre=/bin/bash -c 'if [ ! 
-f /run/frr/config/frr.conf ]; then \ + mkdir -p /run/frr/config; \ echo "log syslog" > /run/frr/config/frr.conf; \ echo "log facility local7" >> /run/frr/config/frr.conf; \ chown frr:frr /run/frr/config/frr.conf; \ chmod 664 /run/frr/config/frr.conf; \ - mount --bind /run/frr/config/frr.conf /etc/frr/frr.conf' + mount --bind /run/frr/config/frr.conf /etc/frr/frr.conf; \ +fi;' diff --git a/src/init/vyos-router b/src/init/vyos-router index 8584234b3..6f1d386d6 100755 --- a/src/init/vyos-router +++ b/src/init/vyos-router @@ -417,7 +417,6 @@ gen_duid () start () { - echo -e "Initializing VyOS router\033[0m" # reset and clean config files security_reset || log_failure_msg "security reset failed" @@ -526,6 +525,7 @@ start () cleanup_post_commit_hooks + log_daemon_msg "Starting VyOS router" disabled migrate || migrate_bootfile restore_if_missing_preconfig_script diff --git a/src/migration-scripts/reverse-proxy/2-to-3 b/src/migration-scripts/reverse-proxy/2-to-3 new file mode 100755 index 000000000..ac539618e --- /dev/null +++ b/src/migration-scripts/reverse-proxy/2-to-3 @@ -0,0 +1,66 @@ +# Copyright 2025 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +# T7429: logging facility "all" unavailable in code + +from vyos.configtree import ConfigTree + +base = ['load-balancing', 'haproxy'] +unsupported_facilities = ['all', 'authpriv', 'mark'] + +def config_migrator(config, config_path: list) -> None: + if not config.exists(config_path): + return + # Remove unsupported backend HAProxy syslog facilities from CLI + # Works for both backend and service CLI nodes + for service_backend in config.list_nodes(config_path): + log_path = config_path + [service_backend, 'logging', 'facility'] + if not config.exists(log_path): + continue + # Remove unsupported syslog facilities from CLI + for facility in config.list_nodes(log_path): + if facility in unsupported_facilities: + config.delete(log_path + [facility]) + continue + # Remove unsupported facility log level from CLI. VyOS will fallback + # to default log level if not set + if config.exists(log_path + [facility, 'level']): + tmp = config.return_value(log_path + [facility, 'level']) + if tmp == 'all': + config.delete(log_path + [facility, 'level']) + +def migrate(config: ConfigTree) -> None: + if not config.exists(base): + # Nothing to do + return + + # Remove unsupported syslog facilities from CLI + global_path = base + ['global-parameters', 'logging', 'facility'] + if config.exists(global_path): + for facility in config.list_nodes(global_path): + if facility in unsupported_facilities: + config.delete(global_path + [facility]) + continue + # Remove unsupported facility log level from CLI. 
VyOS will fallback + # to default log level if not set + if config.exists(global_path + [facility, 'level']): + tmp = config.return_value(global_path + [facility, 'level']) + if tmp == 'all': + config.delete(global_path + [facility, 'level']) + + # Remove unsupported backend HAProxy syslog facilities from CLI + config_migrator(config, base + ['backend']) + # Remove unsupported service HAProxy syslog facilities from CLI + config_migrator(config, base + ['service']) diff --git a/src/migration-scripts/vrf/1-to-2 b/src/migration-scripts/vrf/1-to-2 index 557a9ec58..89b0f708a 100644 --- a/src/migration-scripts/vrf/1-to-2 +++ b/src/migration-scripts/vrf/1-to-2 @@ -37,7 +37,10 @@ def migrate(config: ConfigTree) -> None: new_static_base = vrf_base + [vrf, 'protocols'] config.set(new_static_base) config.copy(static_base, new_static_base + ['static']) - config.set_tag(new_static_base + ['static', 'route']) + if config.exists(new_static_base + ['static', 'route']): + config.set_tag(new_static_base + ['static', 'route']) + if config.exists(new_static_base + ['static', 'route6']): + config.set_tag(new_static_base + ['static', 'route6']) # Now delete the old configuration config.delete(base) diff --git a/src/migration-scripts/vrf/2-to-3 b/src/migration-scripts/vrf/2-to-3 index acacffb41..5f396e7ed 100644 --- a/src/migration-scripts/vrf/2-to-3 +++ b/src/migration-scripts/vrf/2-to-3 @@ -76,7 +76,8 @@ def migrate(config: ConfigTree) -> None: # Get a list of all currently used VRFs and tables vrfs_current = {} for vrf in config.list_nodes(base): - vrfs_current[vrf] = int(config.return_value(base + [vrf, 'table'])) + if config.exists(base + [vrf, 'table']): + vrfs_current[vrf] = int(config.return_value(base + [vrf, 'table'])) # Check VRF names and table numbers name_regex = re.compile(r'^\d.*$') diff --git a/src/op_mode/firewall.py b/src/op_mode/firewall.py index ac47e3273..f3309ee34 100755 --- a/src/op_mode/firewall.py +++ b/src/op_mode/firewall.py @@ -18,6 +18,7 @@ import argparse import ipaddress import json import re +from signal import signal, SIGPIPE, SIG_DFL import tabulate import textwrap @@ -25,6 +26,9 @@ from vyos.config import Config from vyos.utils.process import cmd from vyos.utils.dict import dict_search_args +signal(SIGPIPE, SIG_DFL) + + def get_config_node(conf, node=None, family=None, hook=None, priority=None): if node == 'nat': if family == 'ipv6': @@ -648,12 +652,14 @@ def show_firewall_group(name=None): references = find_references(group_type, remote_name) row = [remote_name, textwrap.fill(remote_conf.get('description') or '', 50), group_type, '\n'.join(references) or 'N/D'] members = get_nftables_remote_group_members("ipv4", 'vyos_filter', f'R_{remote_name}') + members6 = get_nftables_remote_group_members("ipv6", 'vyos_filter', f'R6_{remote_name}') if 'url' in remote_conf: # display only the url if no members are found for both views - if not members: + if not members and not members6: if args.detail: - header_tail = ['Remote URL'] + header_tail = ['IPv6 Members', 'Remote URL'] + row.append('N/D') row.append('N/D') row.append(remote_conf['url']) else: @@ -662,8 +668,15 @@ def show_firewall_group(name=None): else: # display all table elements in detail view if args.detail: - header_tail = ['Remote URL'] - row += [' '.join(members)] + header_tail = ['IPv6 Members', 'Remote URL'] + if members: + row.append(' '.join(members)) + else: + row.append('N/D') + if members6: + row.append(' '.join(members6)) + else: + row.append('N/D') row.append(remote_conf['url']) rows.append(row) else: diff --git 
a/src/op_mode/image_installer.py b/src/op_mode/image_installer.py index 2660309a5..ac5a84419 100755 --- a/src/op_mode/image_installer.py +++ b/src/op_mode/image_installer.py @@ -491,6 +491,8 @@ def get_cli_kernel_options(config_file: str) -> list: config = ConfigTree(read_file(config_file)) config_dict = loads(config.to_json()) kernel_options = dict_search('system.option.kernel', config_dict) + if kernel_options is None: + kernel_options = {} cmdline_options = [] # XXX: This code path and if statements must be kept in sync with the Kernel @@ -504,6 +506,8 @@ def get_cli_kernel_options(config_file: str) -> list: mode = kernel_options['amd-pstate-driver'] cmdline_options.append( f'initcall_blacklist=acpi_cpufreq_init amd_pstate={mode}') + if 'quiet' in kernel_options: + cmdline_options.append('quiet') return cmdline_options @@ -561,21 +565,18 @@ def validate_signature(file_path: str, sign_type: str) -> None: print('Signature is valid') def download_file(local_file: str, remote_path: str, vrf: str, - username: str, password: str, progressbar: bool = False, check_space: bool = False): - environ['REMOTE_USERNAME'] = username - environ['REMOTE_PASSWORD'] = password + # Server credentials are implicitly passed in environment variables + # that are set by add_image if vrf is None: download(local_file, remote_path, progressbar=progressbar, check_space=check_space, raise_error=True) else: - remote_auth = f'REMOTE_USERNAME={username} REMOTE_PASSWORD={password}' vrf_cmd = f'ip vrf exec {vrf} {external_download_script} \ --local-file {local_file} --remote-path {remote_path}' - cmd(vrf_cmd, auth=remote_auth) + cmd(vrf_cmd, env=environ) def image_fetch(image_path: str, vrf: str = None, - username: str = '', password: str = '', no_prompt: bool = False) -> Path: """Fetch an ISO image @@ -594,9 +595,8 @@ def image_fetch(image_path: str, vrf: str = None, if image_path == 'latest': command = external_latest_image_url_script if vrf: - command = f'REMOTE_USERNAME={username} REMOTE_PASSWORD={password} \ - ip vrf exec {vrf} ' + command - code, output = rc_cmd(command) + command = f'ip vrf exec {vrf} {command}' + code, output = rc_cmd(command, env=environ) if code: print(output) exit(MSG_INFO_INSTALL_EXIT) @@ -608,7 +608,6 @@ def image_fetch(image_path: str, vrf: str = None, # Download the image file ISO_DOWNLOAD_PATH = os.path.join(os.path.expanduser("~"), '{0}.iso'.format(uuid4())) download_file(ISO_DOWNLOAD_PATH, image_path, vrf, - username, password, progressbar=True, check_space=True) # Download the image signature @@ -619,8 +618,7 @@ def image_fetch(image_path: str, vrf: str = None, for sign_type in ['minisig']: try: download_file(f'{ISO_DOWNLOAD_PATH}.{sign_type}', - f'{image_path}.{sign_type}', vrf, - username, password) + f'{image_path}.{sign_type}', vrf) sign_file = (True, sign_type) break except Exception: @@ -924,8 +922,7 @@ def install_image() -> None: for disk_target in l: disk.partition_mount(disk_target.partition['efi'], f'{DIR_DST_ROOT}/boot/efi') grub.install(disk_target.name, f'{DIR_DST_ROOT}/boot/', - f'{DIR_DST_ROOT}/boot/efi', - id=f'VyOS (RAID disk {l.index(disk_target) + 1})') + f'{DIR_DST_ROOT}/boot/efi') disk.partition_umount(disk_target.partition['efi']) else: print('Installing GRUB to the drive') @@ -977,8 +974,11 @@ def add_image(image_path: str, vrf: str = None, username: str = '', if image.is_live_boot(): exit(MSG_ERR_LIVE) + environ['REMOTE_USERNAME'] = username + environ['REMOTE_PASSWORD'] = password + # fetch an image - iso_path: Path = image_fetch(image_path, vrf, username, 
password, no_prompt) + iso_path: Path = image_fetch(image_path, vrf, no_prompt) try: # mount an ISO Path(DIR_ISO_MOUNT).mkdir(mode=0o755, parents=True) diff --git a/src/op_mode/interfaces.py b/src/op_mode/interfaces.py index e7afc4caa..c97f3b129 100755 --- a/src/op_mode/interfaces.py +++ b/src/op_mode/interfaces.py @@ -29,6 +29,7 @@ from vyos.ifconfig import Section from vyos.ifconfig import Interface from vyos.ifconfig import VRRP from vyos.utils.process import cmd +from vyos.utils.network import interface_exists from vyos.utils.process import rc_cmd from vyos.utils.process import call @@ -84,6 +85,14 @@ def filtered_interfaces(ifnames: typing.Union[str, list], yield interface +def detailed_output(dataset, headers): + for data in dataset: + adjusted_rule = data + [""] * (len(headers) - len(data)) # account for different header length, like default-action + transformed_rule = [[header, adjusted_rule[i]] for i, header in enumerate(headers) if i < len(adjusted_rule)] # create key-pair list from headers and rules lists; wrap at 100 char + + print(tabulate(transformed_rule, tablefmt="presto")) + print() + def _split_text(text, used=0): """ take a string and attempt to split it to fit with the width of the screen @@ -296,6 +305,114 @@ def _get_counter_data(ifname: typing.Optional[str], return ret +def _get_kernel_data(raw, ifname = None, detail = False): + if ifname: + # Check if the interface exists + if not interface_exists(ifname): + raise vyos.opmode.IncorrectValue(f"{ifname} does not exist!") + int_name = f'dev {ifname}' + else: + int_name = '' + + kernel_interface = json.loads(cmd(f'ip -j -d -s address show {int_name}')) + + # Return early if raw + if raw: + return kernel_interface, None + + # Format the kernel data + kernel_interface_out = _format_kernel_data(kernel_interface, detail) + + return kernel_interface, kernel_interface_out + +def _format_kernel_data(data, detail): + output_list = [] + tmpInfo = {} + + # Sort interfaces by name + for interface in sorted(data, key=lambda x: x.get('ifname', '')): + if interface.get('linkinfo', {}).get('info_kind') == 'vrf': + continue + + # Get the device model; ex. 
Intel Corporation Ethernet Controller I225-V + dev_model = interface.get('parentdev', '') + if 'parentdev' in interface: + parentdev = interface['parentdev'] + if re.match(r'^[0-9a-fA-F]{4}:', parentdev): + dev_model = cmd(f'lspci -nn -s {parentdev}').split(']:')[1].strip() + + # Get the IP addresses on interface + ip_list = [] + has_global = False + + for ip in interface['addr_info']: + if ip.get('scope') in ('global', 'host'): + has_global = True + local = ip.get('local', '-') + prefixlen = ip.get('prefixlen', '') + ip_list.append(f"{local}/{prefixlen}") + + + # If no global IP address, add '-'; indicates no IP address on interface + if not has_global: + ip_list.append('-') + + sl_status = ('A' if not 'UP' in interface['flags'] else 'u') + '/' + ('D' if interface['operstate'] == 'DOWN' else 'u') + + # Generate temporary dict to hold data + tmpInfo['ifname'] = interface.get('ifname', '') + tmpInfo['ip'] = ip_list + tmpInfo['mac'] = interface.get('address', '') + tmpInfo['mtu'] = interface.get('mtu', '') + tmpInfo['vrf'] = interface.get('master', 'default') + tmpInfo['status'] = sl_status + tmpInfo['description'] = interface.get('ifalias', '') + tmpInfo['device'] = dev_model + tmpInfo['alternate_names'] = interface.get('altnames', '') + tmpInfo['minimum_mtu'] = interface.get('min_mtu', '') + tmpInfo['maximum_mtu'] = interface.get('max_mtu', '') + rx_stats = interface.get('stats64', {}).get('rx') + tx_stats = interface.get('stats64', {}).get('tx') + tmpInfo['rx_packets'] = rx_stats.get('packets', "") + tmpInfo['rx_bytes'] = rx_stats.get('bytes', "") + tmpInfo['rx_errors'] = rx_stats.get('errors', "") + tmpInfo['rx_dropped'] = rx_stats.get('dropped', "") + tmpInfo['rx_over_errors'] = rx_stats.get('over_errors', '') + tmpInfo['multicast'] = rx_stats.get('multicast', "") + tmpInfo['tx_packets'] = tx_stats.get('packets', "") + tmpInfo['tx_bytes'] = tx_stats.get('bytes', "") + tmpInfo['tx_errors'] = tx_stats.get('errors', "") + tmpInfo['tx_dropped'] = tx_stats.get('dropped', "") + tmpInfo['tx_carrier_errors'] = tx_stats.get('carrier_errors', "") + tmpInfo['tx_collisions'] = tx_stats.get('collisions', "") + + # Generate output list; detail adds more fields + output_list.append([tmpInfo['ifname'], + '\n'.join(tmpInfo['ip']), + tmpInfo['mac'], + tmpInfo['vrf'], + tmpInfo['mtu'], + tmpInfo['status'], + tmpInfo['description'], + *([tmpInfo['device']] if detail else []), + *(['\n'.join(tmpInfo['alternate_names'])] if detail else []), + *([tmpInfo['minimum_mtu']] if detail else []), + *([tmpInfo['maximum_mtu']] if detail else []), + *([tmpInfo['rx_packets']] if detail else []), + *([tmpInfo['rx_bytes']] if detail else []), + *([tmpInfo['rx_errors']] if detail else []), + *([tmpInfo['rx_dropped']] if detail else []), + *([tmpInfo['rx_over_errors']] if detail else []), + *([tmpInfo['multicast']] if detail else []), + *([tmpInfo['tx_packets']] if detail else []), + *([tmpInfo['tx_bytes']] if detail else []), + *([tmpInfo['tx_errors']] if detail else []), + *([tmpInfo['tx_dropped']] if detail else []), + *([tmpInfo['tx_carrier_errors']] if detail else []), + *([tmpInfo['tx_collisions']] if detail else [])]) + + return output_list + @catch_broken_pipe def _format_show_data(data: list): unhandled = [] @@ -445,6 +562,27 @@ def _format_show_counters(data: list): print (output) return output +def show_kernel(raw: bool, intf_name: typing.Optional[str], detail: bool): + raw_data, data = _get_kernel_data(raw, intf_name, detail) + + # Return early if raw + if raw: + return raw_data + + # Normal headers; show 
interfaces kernel + headers = ['Interface', 'IP Address', 'MAC', 'VRF', 'MTU', 'S/L', 'Description'] + + # Detail headers; show interfaces kernel detail + detail_header = ['Interface', 'IP Address', 'MAC', 'VRF', 'MTU', 'S/L', 'Description', + 'Device', 'Alternate Names','Minimum MTU', 'Maximum MTU', 'RX_Packets', + 'RX_Bytes', 'RX_Errors', 'RX_Dropped', 'Receive Overrun Errors', 'Received Multicast', + 'TX_Packets', 'TX_Bytes', 'TX_Errors', 'TX_Dropped', 'Transmit Carrier Errors', + 'Transmit Collisions'] + + if detail: + detailed_output(data, detail_header) + else: + print(tabulate(data, headers)) def _show_raw(data: list, intf_name: str): if intf_name is not None and len(data) <= 1: diff --git a/src/op_mode/tech_support.py b/src/op_mode/tech_support.py index 24ac0af1b..c4496dfa3 100644 --- a/src/op_mode/tech_support.py +++ b/src/op_mode/tech_support.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (C) 2024 VyOS maintainers and contributors +# Copyright (C) 2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -20,6 +20,7 @@ import json import vyos.opmode from vyos.utils.process import cmd +from vyos.base import Warning def _get_version_data(): from vyos.version import get_version_data @@ -51,7 +52,12 @@ def _get_storage(): def _get_devices(): devices = {} devices["pci"] = cmd("lspci") - devices["usb"] = cmd("lsusb") + + try: + devices["usb"] = cmd("lsusb") + except OSError: + Warning("Could not retrieve information about USB devices") + devices["usb"] = {} return devices diff --git a/src/services/vyos-domain-resolver b/src/services/vyos-domain-resolver index 4419fc4a7..fb18724af 100755 --- a/src/services/vyos-domain-resolver +++ b/src/services/vyos-domain-resolver @@ -28,7 +28,7 @@ from vyos.utils.commit import commit_in_progress from vyos.utils.dict import dict_search_args from vyos.utils.kernel import WIREGUARD_REKEY_AFTER_TIME from vyos.utils.file import makedir, chmod_775, write_file, read_file -from vyos.utils.network import is_valid_ipv4_address_or_range +from vyos.utils.network import is_valid_ipv4_address_or_range, is_valid_ipv6_address_or_range from vyos.utils.process import cmd from vyos.utils.process import run from vyos.xml_ref import get_defaults @@ -143,10 +143,11 @@ def update_remote_group(config): for set_name, remote_config in remote_groups.items(): if 'url' not in remote_config: continue - nft_set_name = f'R_{set_name}' + nft_ip_set_name = f'R_{set_name}' + nft_ip6_set_name = f'R6_{set_name}' # Create list file if necessary - list_file = os.path.join(firewall_config_dir, f"{nft_set_name}.txt") + list_file = os.path.join(firewall_config_dir, f"{nft_ip_set_name}.txt") if not os.path.exists(list_file): write_file(list_file, '', user="root", group="vyattacfg", mode=0o644) @@ -159,16 +160,32 @@ def update_remote_group(config): # Read list file ip_list = [] + ip6_list = [] + invalid_list = [] for line in read_file(list_file).splitlines(): line_first_word = line.strip().partition(' ')[0] if is_valid_ipv4_address_or_range(line_first_word): ip_list.append(line_first_word) + elif is_valid_ipv6_address_or_range(line_first_word): + ip6_list.append(line_first_word) + else: + if line_first_word[0].isalnum(): + invalid_list.append(line_first_word) - # Load tables + # Load ip tables for table in ipv4_tables: - if (table, nft_set_name) in valid_sets: - conf_lines += nft_output(table, nft_set_name, ip_list) + if (table, nft_ip_set_name) in 
valid_sets: + conf_lines += nft_output(table, nft_ip_set_name, ip_list) + + # Load ip6 tables + for table in ipv6_tables: + if (table, nft_ip6_set_name) in valid_sets: + conf_lines += nft_output(table, nft_ip6_set_name, ip6_list) + + invalid_str = ", ".join(invalid_list) + if invalid_str: + logger.info(f'Invalid address for set {set_name}: {invalid_str}') count += 1 diff --git a/src/tests/test_template.py b/src/tests/test_template.py index 6377f6da5..7cae867a0 100644 --- a/src/tests/test_template.py +++ b/src/tests/test_template.py @@ -190,3 +190,12 @@ class TestVyOSTemplate(TestCase): for group_name, group_config in data['ike_group'].items(): ciphers = vyos.template.get_esp_ike_cipher(group_config) self.assertIn(IKEv2_DEFAULT, ','.join(ciphers)) + + def test_get_default_port(self): + from vyos.defaults import internal_ports + + with self.assertRaises(RuntimeError): + vyos.template.get_default_port('UNKNOWN') + + self.assertEqual(vyos.template.get_default_port('certbot_haproxy'), + internal_ports['certbot_haproxy']) diff --git a/src/tests/test_utils_network.py b/src/tests/test_utils_network.py index d68dec16f..92fde447d 100644 --- a/src/tests/test_utils_network.py +++ b/src/tests/test_utils_network.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020-2024 VyOS maintainers and contributors +# Copyright (C) 2020-2025 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as @@ -43,3 +43,12 @@ class TestVyOSUtilsNetwork(TestCase): self.assertFalse(vyos.utils.network.is_loopback_addr('::2')) self.assertFalse(vyos.utils.network.is_loopback_addr('192.0.2.1')) + + def test_check_port_availability(self): + self.assertTrue(vyos.utils.network.check_port_availability('::1', 8080)) + self.assertTrue(vyos.utils.network.check_port_availability('127.0.0.1', 8080)) + self.assertTrue(vyos.utils.network.check_port_availability(None, 8080, protocol='udp')) + # We do not have 192.0.2.1 configured on this system + self.assertFalse(vyos.utils.network.check_port_availability('192.0.2.1', 443)) + # We do not have 2001:db8::1 configured on this system + self.assertFalse(vyos.utils.network.check_port_availability('2001:db8::1', 80, protocol='udp')) diff --git a/src/validators/cpu b/src/validators/cpu new file mode 100755 index 000000000..959a49248 --- /dev/null +++ b/src/validators/cpu @@ -0,0 +1,43 @@ +#!/usr/bin/python3 + +import re +import sys + +MAX_CPU = 511 + + +def validate_isolcpus(value): + pattern = re.compile(r'^(\d{1,3}(-\d{1,3})?)(,(\d{1,3}(-\d{1,3})?))*$') + if not pattern.fullmatch(value): + return False + + flat_list = [] + for part in value.split(','): + if '-' in part: + start, end = map(int, part.split('-')) + if start > end or start < 0 or end > MAX_CPU: + return False + flat_list.extend(range(start, end + 1)) + else: + num = int(part) + if num < 0 or num > MAX_CPU: + return False + flat_list.append(num) + + for i in range(1, len(flat_list)): + if flat_list[i] <= flat_list[i - 1]: + return False + + return True + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python3 cpu.py <cpu_list>") + sys.exit(1) + + input_value = sys.argv[1] + if validate_isolcpus(input_value): + sys.exit(0) + else: + sys.exit(1) |
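Editor's note (illustrative, not part of the patch): a quick sketch of how the new src/validators/cpu logic behaves. The calls below assume validate_isolcpus() from the validator above is in scope; when the file is invoked as a script it exits 0 for a valid CPU list and 1 otherwise.

# Assumes validate_isolcpus() from src/validators/cpu above is importable or in scope.
assert validate_isolcpus('2-5,8,10-11')   # ordered ranges and single CPUs within 0-511
assert not validate_isolcpus('5-2')       # reversed range is rejected
assert not validate_isolcpus('3,2')       # values must be strictly increasing
assert not validate_isolcpus('0-600')     # 600 exceeds MAX_CPU (511)
assert not validate_isolcpus('1,,2')      # malformed list fails the regex check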