-rw-r--r--  .github/labeler.yml | 12
-rw-r--r--  .github/workflows/add-pr-labels.yml | 19
-rw-r--r--  .github/workflows/auto-author-assign.yml | 11
-rw-r--r--  .github/workflows/build-package.yml | 17
-rw-r--r--  .github/workflows/chceck-pr-message.yml | 18
-rw-r--r--  .github/workflows/check-pr-conflicts.yml | 14
-rw-r--r--  .github/workflows/check-stale.yml | 13
-rw-r--r--  .github/workflows/check-unused-imports.yml | 15
-rw-r--r--  .github/workflows/codeql.yml | 13
-rw-r--r--  .github/workflows/label-backport.yml | 12
-rw-r--r--  .github/workflows/linit-j2.yml | 18
-rw-r--r--  .github/workflows/mergifyio_backport.yml | 22
-rw-r--r--  .github/workflows/pr-conflicts.yml | 18
-rw-r--r--  .github/workflows/pull-request-labels.yml | 20
-rw-r--r--  .github/workflows/pull-request-management.yml | 25
-rw-r--r--  .github/workflows/pull-request-message-check.yml | 23
-rw-r--r--  .github/workflows/repo-sync.yml | 17
-rw-r--r--  .github/workflows/stale.yml | 22
-rw-r--r--  .github/workflows/unused-imports.yml | 22
-rw-r--r--  CODEOWNERS | 1
-rw-r--r--  Makefile | 1
-rw-r--r--  data/config.boot.default | 53
-rw-r--r--  data/configd-include.json | 1
-rw-r--r--  data/op-mode-standardized.json | 2
-rw-r--r--  data/templates/accel-ppp/config_chap_secrets_radius.j2 | 15
-rw-r--r--  data/templates/accel-ppp/ipoe.config.j2 | 4
-rw-r--r--  data/templates/accel-ppp/l2tp.config.j2 | 4
-rw-r--r--  data/templates/accel-ppp/pppoe.config.j2 | 4
-rw-r--r--  data/templates/accel-ppp/pptp.config.j2 | 4
-rw-r--r--  data/templates/accel-ppp/sstp.config.j2 | 4
-rw-r--r--  data/templates/firewall/upnpd.conf.j2 | 227
-rw-r--r--  data/templates/frr/isisd.frr.j2 | 8
-rw-r--r--  data/templates/ids/suricata.j2 | 1280
-rw-r--r--  data/templates/ids/suricata_logrotate.j2 | 17
-rw-r--r--  data/templates/ipsec/ios_profile.j2 | 11
-rw-r--r--  data/templates/load-balancing/haproxy.cfg.j2 | 19
-rw-r--r--  data/templates/login/default_motd.j2 | 3
-rw-r--r--  debian/control | 5
-rwxr-xr-x  debian/rules | 5
-rw-r--r--  debian/vyos-1x.install | 1
-rw-r--r--  interface-definitions/container.xml.in | 26
-rw-r--r--  interface-definitions/include/accel-ppp/log.xml.i | 42
-rw-r--r--  interface-definitions/include/accel-ppp/radius-additions.xml.i | 7
-rw-r--r--  interface-definitions/include/haproxy/http-response-headers.xml.i | 29
-rw-r--r--  interface-definitions/include/haproxy/timeout.xml.i | 2
-rw-r--r--  interface-definitions/include/isis/protocol-common-config.xml.i | 35
-rw-r--r--  interface-definitions/include/nat-translation-options.xml.i | 8
-rw-r--r--  interface-definitions/include/qos/class-match-group.xml.i | 15
-rw-r--r--  interface-definitions/include/qos/class-match-ipv4.xml.i | 31
-rw-r--r--  interface-definitions/include/qos/class-match-ipv6.xml.i | 31
-rw-r--r--  interface-definitions/include/qos/class-match-mark.xml.i | 14
-rw-r--r--  interface-definitions/include/qos/class-match-vif.xml.i | 15
-rw-r--r--  interface-definitions/include/qos/class-match.xml.i | 89
-rw-r--r--  interface-definitions/include/radius-priority.xml.i | 14
-rw-r--r--  interface-definitions/include/version/nat-version.xml.i | 2
-rw-r--r--  interface-definitions/include/version/reverseproxy-version.xml.i | 3
-rw-r--r--  interface-definitions/load-balancing_reverse-proxy.xml.in | 46
-rw-r--r--  interface-definitions/nat.xml.in | 1
-rw-r--r--  interface-definitions/nat_cgnat.xml.in | 1
-rw-r--r--  interface-definitions/policy.xml.in | 4
-rw-r--r--  interface-definitions/qos.xml.in | 39
-rw-r--r--  interface-definitions/service_dns_forwarding.xml.in | 1
-rw-r--r--  interface-definitions/service_ipoe-server.xml.in | 1
-rw-r--r--  interface-definitions/service_pppoe-server.xml.in | 1
-rw-r--r--  interface-definitions/service_suricata.xml.in | 238
-rw-r--r--  interface-definitions/service_upnp.xml.in | 229
-rw-r--r--  interface-definitions/system_conntrack.xml.in | 8
-rw-r--r--  interface-definitions/system_domain-name.xml.in | 1
-rw-r--r--  interface-definitions/system_host-name.xml.in | 1
-rw-r--r--  interface-definitions/system_login.xml.in | 11
-rw-r--r--  interface-definitions/vpn_l2tp.xml.in | 1
-rw-r--r--  interface-definitions/vpn_pptp.xml.in | 1
-rw-r--r--  interface-definitions/vpn_sstp.xml.in | 1
-rw-r--r--  interface-definitions/xml-component-version.xml.in | 1
-rw-r--r--  op-mode-definitions/force-commit-archive.xml.in | 2
-rw-r--r--  op-mode-definitions/include/vni-tagnode-all.xml.i | 5
-rw-r--r--  op-mode-definitions/include/vni-tagnode.xml.i | 5
-rw-r--r--  op-mode-definitions/mtr.xml.in | 4
-rw-r--r--  op-mode-definitions/nat.xml.in | 33
-rw-r--r--  op-mode-definitions/pki.xml.in | 12
-rw-r--r--  op-mode-definitions/reverse-proxy.xml.in | 23
-rw-r--r--  op-mode-definitions/show-evpn.xml.in | 59
-rw-r--r--  op-mode-definitions/show-log.xml.in | 50
-rw-r--r--  op-mode-definitions/show-reverse-proxy.xml.in | 13
-rw-r--r--  op-mode-definitions/suricata.xml.in | 23
-rw-r--r--  python/vyos/base.py | 2
-rw-r--r--  python/vyos/compose_config.py | 84
-rw-r--r--  python/vyos/config_mgmt.py | 2
-rw-r--r--  python/vyos/configsession.py | 6
-rw-r--r--  python/vyos/configtree.py | 10
-rw-r--r--  python/vyos/defaults.py | 3
-rw-r--r--  python/vyos/ifconfig/interface.py | 12
-rw-r--r--  python/vyos/ifconfig/vxlan.py | 7
-rw-r--r--  python/vyos/nat.py | 6
-rw-r--r--  python/vyos/qos/base.py | 11
-rw-r--r--  python/vyos/system/image.py | 10
-rw-r--r--  python/vyos/utils/io.py | 2
-rw-r--r--  python/vyos/version.py | 12
-rwxr-xr-x  scripts/check-pr-title-and-commit-messages.py | 51
-rw-r--r--  smoketest/config-tests/container-simple | 1
-rw-r--r--  smoketest/config-tests/nat-basic | 85
-rw-r--r--  smoketest/configs/container-simple | 1
-rw-r--r--  smoketest/configs/nat-basic | 256
-rw-r--r--  smoketest/scripts/cli/base_accel_ppp_test.py | 41
-rwxr-xr-x  smoketest/scripts/cli/test_cgnat.py | 138
-rwxr-xr-x  smoketest/scripts/cli/test_container.py | 16
-rwxr-xr-x  smoketest/scripts/cli/test_load-balancing_reverse-proxy.py | 78
-rwxr-xr-x  smoketest/scripts/cli/test_protocols_isis.py | 17
-rwxr-xr-x  smoketest/scripts/cli/test_protocols_ospf.py | 3
-rwxr-xr-x  smoketest/scripts/cli/test_qos.py | 116
-rwxr-xr-x  smoketest/scripts/cli/test_service_dns_forwarding.py | 10
-rwxr-xr-x  smoketest/scripts/cli/test_service_https.py | 41
-rwxr-xr-x  smoketest/scripts/cli/test_service_upnp.py | 103
-rwxr-xr-x  smoketest/scripts/cli/test_vpn_l2tp.py | 23
-rwxr-xr-x  smoketest/scripts/system/test_kernel_options.py | 17
-rwxr-xr-x  src/activation-scripts/20-ethernet_offload.py | 103
-rwxr-xr-x  src/completion/list_esi.sh | 20
-rwxr-xr-x  src/completion/list_vni.sh | 20
-rw-r--r--  src/completion/qos/list_traffic_match_group.py | 35
-rwxr-xr-x  src/conf_mode/container.py | 21
-rwxr-xr-x  src/conf_mode/interfaces_openvpn.py | 8
-rwxr-xr-x  src/conf_mode/interfaces_tunnel.py | 19
-rwxr-xr-x  src/conf_mode/load-balancing_reverse-proxy.py | 86
-rwxr-xr-x  src/conf_mode/nat.py | 18
-rwxr-xr-x  src/conf_mode/nat64.py | 10
-rwxr-xr-x  src/conf_mode/nat66.py | 22
-rwxr-xr-x  src/conf_mode/nat_cgnat.py | 125
-rwxr-xr-x  src/conf_mode/protocols_bfd.py | 2
-rwxr-xr-x  src/conf_mode/qos.py | 77
-rwxr-xr-x  src/conf_mode/service_dhcpv6-server.py | 8
-rwxr-xr-x  src/conf_mode/service_dns_forwarding.py | 15
-rwxr-xr-x  src/conf_mode/service_suricata.py | 161
-rwxr-xr-x  src/conf_mode/service_upnp.py | 157
-rw-r--r--  src/etc/systemd/system/suricata.service.d/10-override.conf | 9
-rwxr-xr-x  src/helpers/run-config-activation.py | 83
-rwxr-xr-x  src/init/vyos-router | 23
-rwxr-xr-x  src/migration-scripts/nat/7-to-8 | 62
-rwxr-xr-x  src/migration-scripts/reverse-proxy/0-to-1 | 48
-rwxr-xr-x  src/op_mode/cgnat.py | 96
-rw-r--r--  src/op_mode/evpn.py | 46
-rwxr-xr-x  src/op_mode/ikev2_profile_generator.py | 19
-rwxr-xr-x  src/op_mode/image_installer.py | 34
-rwxr-xr-x  src/op_mode/nat.py | 35
-rwxr-xr-x  src/op_mode/pki.py | 13
-rwxr-xr-x  src/op_mode/snmp_v3.py | 3
-rwxr-xr-x  src/op_mode/version.py | 6
-rwxr-xr-x  src/services/vyos-http-api-server | 46
-rw-r--r--  src/systemd/miniupnpd.service | 13
-rwxr-xr-x  src/validators/port-range-exclude | 7
149 files changed, 4471 insertions(+), 1321 deletions(-)
diff --git a/.github/labeler.yml b/.github/labeler.yml
deleted file mode 100644
index e0b9ee430..000000000
--- a/.github/labeler.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-equuleus:
- - any:
- - base-branch: 'equuleus'
-current:
- - any:
- - base-branch: 'current'
-crux:
- - any:
- - base-branch: 'crux'
-sagitta:
- - any:
- - base-branch: 'sagitta'
diff --git a/.github/workflows/add-pr-labels.yml b/.github/workflows/add-pr-labels.yml
new file mode 100644
index 000000000..1723cceb0
--- /dev/null
+++ b/.github/workflows/add-pr-labels.yml
@@ -0,0 +1,19 @@
+---
+name: Add pull request labels
+
+on:
+ pull_request_target:
+ branches:
+ - current
+ - crux
+ - equuleus
+ - sagitta
+
+permissions:
+ pull-requests: write
+ contents: read
+
+jobs:
+ add-pr-label:
+ uses: vyos/.github/.github/workflows/add-pr-labels.yml@feature/T6349-reusable-workflows
+ secrets: inherit
diff --git a/.github/workflows/auto-author-assign.yml b/.github/workflows/auto-author-assign.yml
index 0bfe972c0..c3696ea47 100644
--- a/.github/workflows/auto-author-assign.yml
+++ b/.github/workflows/auto-author-assign.yml
@@ -3,15 +3,12 @@ on:
pull_request_target:
types: [opened, reopened, ready_for_review, locked]
+
permissions:
pull-requests: write
+ contents: read
jobs:
- # https://github.com/marketplace/actions/auto-author-assign
assign-author:
- runs-on: ubuntu-latest
- steps:
- - name: "Assign Author to PR"
- uses: toshimaru/auto-author-assign@v1.6.2
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
+ uses: vyos/.github/.github/workflows/assign-author.yml@feature/T6349-reusable-workflows
+ secrets: inherit
diff --git a/.github/workflows/build-package.yml b/.github/workflows/build-package.yml
new file mode 100644
index 000000000..0200aceb4
--- /dev/null
+++ b/.github/workflows/build-package.yml
@@ -0,0 +1,17 @@
+name: Debian Package Build
+on:
+ pull_request:
+ branches:
+ - current
+
+jobs:
+ package-build:
+ runs-on: ubuntu-latest
+ container:
+ image: vyos/vyos-build:current
+ options: --sysctl net.ipv6.conf.lo.disable_ipv6=0
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Build Debian package
+ run: dpkg-buildpackage -uc -us -tc -b
diff --git a/.github/workflows/chceck-pr-message.yml b/.github/workflows/chceck-pr-message.yml
new file mode 100644
index 000000000..e7e456961
--- /dev/null
+++ b/.github/workflows/chceck-pr-message.yml
@@ -0,0 +1,18 @@
+---
+name: Check pull request message format
+
+on:
+ pull_request:
+ branches:
+ - current
+ - crux
+ - equuleus
+
+permissions:
+ pull-requests: write
+ contents: read
+
+jobs:
+ check-pr-title:
+ uses: vyos/.github/.github/workflows/check-pr-message.yml@feature/T6349-reusable-workflows
+ secrets: inherit
diff --git a/.github/workflows/check-pr-conflicts.yml b/.github/workflows/check-pr-conflicts.yml
new file mode 100644
index 000000000..0c659e6ed
--- /dev/null
+++ b/.github/workflows/check-pr-conflicts.yml
@@ -0,0 +1,14 @@
+
+name: "PR Conflicts checker"
+on:
+ pull_request_target:
+ types: [synchronize]
+
+permissions:
+ pull-requests: write
+ contents: read
+
+jobs:
+ check-pr-conflict-call:
+ uses: vyos/.github/.github/workflows/check-pr-merge-conflict.yml@feature/T6349-reusable-workflows
+ secrets: inherit
diff --git a/.github/workflows/check-stale.yml b/.github/workflows/check-stale.yml
new file mode 100644
index 000000000..b5ec533f1
--- /dev/null
+++ b/.github/workflows/check-stale.yml
@@ -0,0 +1,13 @@
+name: "Issue and PR stale management"
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+permissions:
+ pull-requests: write
+ contents: read
+
+jobs:
+ stale:
+ uses: vyos/.github/.github/workflows/check-stale.yml@feature/T6349-reusable-workflows
+ secrets: inherit
diff --git a/.github/workflows/check-unused-imports.yml b/.github/workflows/check-unused-imports.yml
new file mode 100644
index 000000000..aada264f7
--- /dev/null
+++ b/.github/workflows/check-unused-imports.yml
@@ -0,0 +1,15 @@
+name: Check for unused imports using Pylint
+on:
+ pull_request:
+ branches:
+ - current
+ - sagitta
+ workflow_dispatch:
+
+permissions:
+ contents: read
+
+jobs:
+ check-unused-imports:
+ uses: vyos/.github/.github/workflows/check-unused-imports.yml@feature/T6349-reusable-workflows
+ secrets: inherit
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 9e2e4bf0f..f6472784d 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -1,14 +1,3 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
name: "Perform CodeQL Analysis"
on:
@@ -27,7 +16,7 @@ permissions:
jobs:
codeql-analysis-call:
- uses: vyos/vyos-github-actions/.github/workflows/codeql-analysis.yml@current
+ uses: vyos/.github/.github/workflows/codeql-analysis.yml@feature/T6349-reusable-workflows
secrets: inherit
with:
languages: "['python']"
diff --git a/.github/workflows/label-backport.yml b/.github/workflows/label-backport.yml
new file mode 100644
index 000000000..9192b8184
--- /dev/null
+++ b/.github/workflows/label-backport.yml
@@ -0,0 +1,12 @@
+name: Mergifyio backport
+
+on: [issue_comment]
+
+permissions:
+ pull-requests: write
+ contents: read
+
+jobs:
+ mergifyio-backport:
+ uses: vyos/.github/.github/workflows/label-backport.yml@feature/T6349-reusable-workflows
+ secrets: inherit
diff --git a/.github/workflows/linit-j2.yml b/.github/workflows/linit-j2.yml
new file mode 100644
index 000000000..364a65a14
--- /dev/null
+++ b/.github/workflows/linit-j2.yml
@@ -0,0 +1,18 @@
+---
+name: J2 Lint
+
+on:
+ pull_request:
+ branches:
+ - current
+ - crux
+ - equuleus
+
+permissions:
+ pull-requests: write
+ contents: read
+
+jobs:
+ j2lint:
+ uses: vyos/.github/.github/workflows/lint-j2.yml@feature/T6349-reusable-workflows
+ secrets: inherit
diff --git a/.github/workflows/mergifyio_backport.yml b/.github/workflows/mergifyio_backport.yml
deleted file mode 100644
index d9f863d9a..000000000
--- a/.github/workflows/mergifyio_backport.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: Mergifyio backport
-
-on: [issue_comment]
-
-jobs:
- mergifyio_backport:
- if: github.repository == 'vyos/vyos-1x'
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
-
- - uses: actions-ecosystem/action-regex-match@v2
- id: regex-match
- with:
- text: ${{ github.event.comment.body }}
- regex: '@[Mm][Ee][Rr][Gg][Ii][Ff][Yy][Ii][Oo] backport '
-
- - uses: actions-ecosystem/action-add-labels@v1
- if: ${{ steps.regex-match.outputs.match != '' }}
- with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
- labels: backport
diff --git a/.github/workflows/pr-conflicts.yml b/.github/workflows/pr-conflicts.yml
deleted file mode 100644
index 2fd0bb42d..000000000
--- a/.github/workflows/pr-conflicts.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: "PR Conflicts checker"
-on:
- pull_request_target:
- types: [synchronize]
-
-jobs:
- Conflict_Check:
- name: 'Check PR status: conflicts and resolution'
- runs-on: ubuntu-latest
- steps:
- - name: check if PRs are dirty
- uses: eps1lon/actions-label-merge-conflict@v3
- with:
- dirtyLabel: "state: conflict"
- removeOnDirtyLabel: "state: conflict resolved"
- repoToken: "${{ secrets.GITHUB_TOKEN }}"
- commentOnDirty: "This pull request has conflicts, please resolve those before we can evaluate the pull request."
- commentOnClean: "Conflicts have been resolved. A maintainer will review the pull request shortly."
diff --git a/.github/workflows/pull-request-labels.yml b/.github/workflows/pull-request-labels.yml
deleted file mode 100644
index 43856beaa..000000000
--- a/.github/workflows/pull-request-labels.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Add pull request labels
-
-on:
- pull_request_target:
- branches:
- - current
- - crux
- - equuleus
- - sagitta
-
-jobs:
- add-pr-label:
- name: Add PR Labels
- runs-on: ubuntu-latest
- permissions:
- contents: read
- pull-requests: write
- steps:
- - uses: actions/labeler@v5
diff --git a/.github/workflows/pull-request-management.yml b/.github/workflows/pull-request-management.yml
deleted file mode 100644
index 3a855c107..000000000
--- a/.github/workflows/pull-request-management.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-name: Build Pull Request Package
-
-on:
- pull_request:
- branches:
- - current
- - crux
- - equuleus
-
-jobs:
- j2lint:
- name: Validate j2 files
- runs-on: ubuntu-20.04
- steps:
- - uses: actions/checkout@v2
- timeout-minutes: 2
- - name: Setup J2Lint
- timeout-minutes: 2
- run: |
- sudo pip install git+https://github.com/aristanetworks/j2lint.git@341b5d5db86e095b622f09770cb6367a1583620e
- - name: Run J2lint
- timeout-minutes: 2
- run: |
- j2lint $GITHUB_WORKSPACE/data
diff --git a/.github/workflows/pull-request-message-check.yml b/.github/workflows/pull-request-message-check.yml
deleted file mode 100644
index 8c206a5ab..000000000
--- a/.github/workflows/pull-request-message-check.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-name: Check pull request message format
-
-on:
- pull_request:
- branches:
- - current
- - crux
- - equuleus
-
-jobs:
- check-pr-title:
- name: Check pull request title
- runs-on: ubuntu-20.04
- steps:
- - uses: actions/checkout@v2
- timeout-minutes: 2
- - name: Install the requests library
- run: pip3 install requests
- - name: Check the PR title
- timeout-minutes: 2
- run: |
- ./scripts/check-pr-title-and-commit-messages.py '${{ github.event.pull_request.url }}'
diff --git a/.github/workflows/repo-sync.yml b/.github/workflows/repo-sync.yml
new file mode 100644
index 000000000..36f323cdd
--- /dev/null
+++ b/.github/workflows/repo-sync.yml
@@ -0,0 +1,17 @@
+name: Repo-sync
+
+on:
+ pull_request_target:
+ types:
+ - closed
+ branches:
+ - current
+ workflow_dispatch:
+
+jobs:
+ trigger-sync:
+ uses: vyos/.github/.github/workflows/trigger-repo-sync.yml@feature/T6349-reusable-workflows
+ secrets:
+ REMOTE_REPO: ${{ secrets.REMOTE_REPO }}
+ REMOTE_OWNER: ${{ secrets.REMOTE_OWNER }}
+ PAT: ${{ secrets.PAT }}
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
deleted file mode 100644
index d21d151f7..000000000
--- a/.github/workflows/stale.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: "Issue and PR stale management"
-on:
- schedule:
- - cron: "0 0 * * *"
-
-jobs:
- stale:
- runs-on: ubuntu-latest
- if: github.repository == 'vyos/vyos-1x'
- steps:
- # Issue stale management
- - uses: actions/stale@v6
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- days-before-stale: 90
- days-before-close: -1
- stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. The issue will be reviewed by a maintainer and may be closed'
- stale-issue-label: 'state: stale'
- exempt-issue-labels: 'state: accepted, state: in-progress'
- stale-pr-message: 'This PR is stale because it has been open 30 days with no activity. The PR will be reviewed by a maintainer and may be closed'
- stale-pr-label: 'state: stale'
- exempt-pr-labels: 'state: accepted, state: in-progress'
diff --git a/.github/workflows/unused-imports.yml b/.github/workflows/unused-imports.yml
deleted file mode 100644
index da57bd270..000000000
--- a/.github/workflows/unused-imports.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: Check for unused imports using Pylint
-on:
- pull_request_target:
- branches:
- - current
- - sagitta
-
-jobs:
- Check-Unused-Imports:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - name: Set up Python
- uses: actions/setup-python@v3
- with:
- python-version: 3.11
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install pylint
- - name: Analysing the code with pylint
- run: make unused-imports
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 000000000..191394298
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1 @@
+* @vyos/reviewers
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 44a6e35ed..3b26273d6 100644
--- a/Makefile
+++ b/Makefile
@@ -66,6 +66,7 @@ op_mode_definitions: $(op_xml_obj)
ln -s ../node.tag $(OP_TMPL_DIR)/ping/node.tag/node.tag/
ln -s ../node.tag $(OP_TMPL_DIR)/traceroute/node.tag/node.tag/
ln -s ../node.tag $(OP_TMPL_DIR)/mtr/node.tag/node.tag/
+ ln -s ../node.tag $(OP_TMPL_DIR)/monitor/traceroute/node.tag/node.tag/
# XXX: test if there are empty node.def files - this is not allowed as these
# could mask help strings or mandatory priority statements
diff --git a/data/config.boot.default b/data/config.boot.default
new file mode 100644
index 000000000..93369d9b7
--- /dev/null
+++ b/data/config.boot.default
@@ -0,0 +1,53 @@
+interfaces {
+ loopback lo {
+ }
+}
+service {
+ ntp {
+ allow-client {
+ address "127.0.0.0/8"
+ address "169.254.0.0/16"
+ address "10.0.0.0/8"
+ address "172.16.0.0/12"
+ address "192.168.0.0/16"
+ address "::1/128"
+ address "fe80::/10"
+ address "fc00::/7"
+ }
+ server time1.vyos.net {
+ }
+ server time2.vyos.net {
+ }
+ server time3.vyos.net {
+ }
+ }
+}
+system {
+ config-management {
+ commit-revisions "100"
+ }
+ console {
+ device ttyS0 {
+ speed "115200"
+ }
+ }
+ host-name "vyos"
+ login {
+ user vyos {
+ authentication {
+ encrypted-password "$6$QxPS.uk6mfo$9QBSo8u1FkH16gMyAVhus6fU3LOzvLR9Z9.82m3tiHFAxTtIkhaZSWssSgzt4v4dGAL8rhVQxTg0oAG9/q11h/"
+ plaintext-password ""
+ }
+ }
+ }
+ syslog {
+ global {
+ facility all {
+ level "info"
+ }
+ facility local7 {
+ level "debug"
+ }
+ }
+ }
+}
diff --git a/data/configd-include.json b/data/configd-include.json
index 0c767f987..dcee50306 100644
--- a/data/configd-include.json
+++ b/data/configd-include.json
@@ -81,7 +81,6 @@
"service_sla.py",
"service_ssh.py",
"service_tftp-server.py",
-"service_upnp.py",
"service_webproxy.py",
"system_acceleration.py",
"system_config-management.py",
diff --git a/data/op-mode-standardized.json b/data/op-mode-standardized.json
index a4ed2bcf4..c14133127 100644
--- a/data/op-mode-standardized.json
+++ b/data/op-mode-standardized.json
@@ -3,12 +3,14 @@
"bgp.py",
"bonding.py",
"bridge.py",
+"cgnat.py",
"config_mgmt.py",
"conntrack.py",
"container.py",
"cpu.py",
"dhcp.py",
"dns.py",
+"evpn.py",
"interfaces.py",
"ipsec.py",
"lldp.py",
diff --git a/data/templates/accel-ppp/config_chap_secrets_radius.j2 b/data/templates/accel-ppp/config_chap_secrets_radius.j2
index 595e3a565..e343ce461 100644
--- a/data/templates/accel-ppp/config_chap_secrets_radius.j2
+++ b/data/templates/accel-ppp/config_chap_secrets_radius.j2
@@ -5,7 +5,20 @@ chap-secrets={{ chap_secrets_file }}
[radius]
verbose=1
{% for server, options in authentication.radius.server.items() if not options.disable is vyos_defined %}
-server={{ server }},{{ options.key }},auth-port={{ options.port }},acct-port={{ options.acct_port }},req-limit=0,fail-time={{ options.fail_time }}
+{% set _server_cfg = "server=" %}
+{% set _server_cfg = _server_cfg + server %}
+{% set _server_cfg = _server_cfg + "," + options.key %}
+{% set _server_cfg = _server_cfg + ",auth-port=" + options.port %}
+{% set _server_cfg = _server_cfg + ",acct-port=" + options.acct_port %}
+{% set _server_cfg = _server_cfg + ",req-limit=0" %}
+{% set _server_cfg = _server_cfg + ",fail-time=" + options.fail_time %}
+{% if options.priority is vyos_defined %}
+{% set _server_cfg = _server_cfg + ",weight=" + options.priority %}
+{% endif %}
+{% if options.backup is vyos_defined %}
+{% set _server_cfg = _server_cfg + ",backup" %}
+{% endif %}
+{{ _server_cfg }}
{% endfor %}
{% if authentication.radius.accounting_interim_interval is vyos_defined %}
acct-interim-interval={{ authentication.radius.accounting_interim_interval }}
diff --git a/data/templates/accel-ppp/ipoe.config.j2 b/data/templates/accel-ppp/ipoe.config.j2
index c89812985..d87b90473 100644
--- a/data/templates/accel-ppp/ipoe.config.j2
+++ b/data/templates/accel-ppp/ipoe.config.j2
@@ -29,7 +29,9 @@ max-starting={{ max_concurrent_sessions }}
[log]
syslog=accel-ipoe,daemon
copy=1
-level=5
+{% if log.level is vyos_defined %}
+level={{ log.level }}
+{% endif %}
[ipoe]
verbose=1
diff --git a/data/templates/accel-ppp/l2tp.config.j2 b/data/templates/accel-ppp/l2tp.config.j2
index 4ce9042c2..db4db66a7 100644
--- a/data/templates/accel-ppp/l2tp.config.j2
+++ b/data/templates/accel-ppp/l2tp.config.j2
@@ -28,7 +28,9 @@ max-starting={{ max_concurrent_sessions }}
[log]
syslog=accel-l2tp,daemon
copy=1
-level=5
+{% if log.level is vyos_defined %}
+level={{ log.level }}
+{% endif %}
[client-ip-range]
0.0.0.0/0
diff --git a/data/templates/accel-ppp/pppoe.config.j2 b/data/templates/accel-ppp/pppoe.config.j2
index 42bc8440c..6711f2ec9 100644
--- a/data/templates/accel-ppp/pppoe.config.j2
+++ b/data/templates/accel-ppp/pppoe.config.j2
@@ -27,7 +27,9 @@ thread-count={{ thread_count }}
[log]
syslog=accel-pppoe,daemon
copy=1
-level=5
+{% if log.level is vyos_defined %}
+level={{ log.level }}
+{% endif %}
{% if authentication.mode is vyos_defined("noauth") %}
[auth]
diff --git a/data/templates/accel-ppp/pptp.config.j2 b/data/templates/accel-ppp/pptp.config.j2
index a04bd40c0..44f35998b 100644
--- a/data/templates/accel-ppp/pptp.config.j2
+++ b/data/templates/accel-ppp/pptp.config.j2
@@ -28,7 +28,9 @@ max-starting={{ max_concurrent_sessions }}
[log]
syslog=accel-pptp,daemon
copy=1
-level=5
+{% if log.level is vyos_defined %}
+level={{ log.level }}
+{% endif %}
[client-ip-range]
0.0.0.0/0
diff --git a/data/templates/accel-ppp/sstp.config.j2 b/data/templates/accel-ppp/sstp.config.j2
index 22fb55506..38da829f3 100644
--- a/data/templates/accel-ppp/sstp.config.j2
+++ b/data/templates/accel-ppp/sstp.config.j2
@@ -29,7 +29,9 @@ max-starting={{ max_concurrent_sessions }}
[log]
syslog=accel-sstp,daemon
copy=1
-level=5
+{% if log.level is vyos_defined %}
+level={{ log.level }}
+{% endif %}
[client-ip-range]
0.0.0.0/0
diff --git a/data/templates/firewall/upnpd.conf.j2 b/data/templates/firewall/upnpd.conf.j2
deleted file mode 100644
index 616e8869f..000000000
--- a/data/templates/firewall/upnpd.conf.j2
+++ /dev/null
@@ -1,227 +0,0 @@
-# This is the UPNP configuration file
-
-# WAN network interface
-ext_ifname={{ wan_interface }}
-{% if wan_ip is vyos_defined %}
-
-# if the WAN network interface for IPv6 is different than for IPv4,
-# set ext_ifname6
-#ext_ifname6=eth2
-
-# If the WAN interface has several IP addresses, you
-# can specify the one to use below.
-# Setting ext_ip is also useful in double NAT setup, you can declare here
-# the public IP address.
-{% for addr in wan_ip %}
-ext_ip={{ addr }}
-{% endfor %}
-{% endif %}
-
-{% if stun is vyos_defined %}
-# WAN interface must have public IP address. Otherwise it is behind NAT
-# and port forwarding is impossible. In some cases WAN interface can be
-# behind unrestricted full-cone NAT 1:1 when all incoming traffic is NAT-ed and
-# routed to WAN interfaces without any filtering. In this cases miniupnpd
-# needs to know public IP address and it can be learnt by asking external
-# server via STUN protocol. Following option enable retrieving external
-# public IP address from STUN server and detection of NAT type. You need
-# to specify also external STUN server in stun_host option below.
-# This option is disabled by default.
-ext_perform_stun=yes
-# Specify STUN server, either hostname or IP address
-# Some public STUN servers:
-# stun.stunprotocol.org
-# stun.sipgate.net
-# stun.xten.com
-# stun.l.google.com (on non standard port 19302)
-ext_stun_host={{ stun.host }}
-# Specify STUN UDP port, by default it is standard port 3478.
-ext_stun_port={{ stun.port }}
-{% endif %}
-
-# LAN network interfaces IPs / networks
-{% if listen is vyos_defined %}
-# There can be multiple listening IPs for SSDP traffic, in that case
-# use multiple 'listening_ip=...' lines, one for each network interface.
-# It can be IP address or network interface name (ie. "eth0")
-# It is mandatory to use the network interface name in order to enable IPv6
-# HTTP is available on all interfaces.
-# When MULTIPLE_EXTERNAL_IP is enabled, the external IP
-# address associated with the subnet follows. For example:
-# listening_ip=192.168.0.1/24 88.22.44.13
-# When MULTIPLE_EXTERNAL_IP is disabled, you can list associated network
-# interfaces (for bridges)
-# listening_ip=bridge0 em0 wlan0
-{% for addr in listen %}
-{% if addr | is_ipv4 %}
-listening_ip={{ addr }}
-{% elif addr | is_ipv6 %}
-ipv6_listening_ip={{ addr }}
-{% else %}
-listening_ip={{ addr }}
-{% endif %}
-{% endfor %}
-{% endif %}
-
-# CAUTION: mixing up WAN and LAN interfaces may introduce security risks!
-# Be sure to assign the correct interfaces to LAN and WAN and consider
-# implementing UPnP permission rules at the bottom of this configuration file
-
-# Port for HTTP (descriptions and SOAP) traffic. Set to 0 for autoselect.
-#http_port=0
-# Port for HTTPS. Set to 0 for autoselect (default)
-#https_port=0
-
-# Path to the UNIX socket used to communicate with MiniSSDPd
-# If running, MiniSSDPd will manage M-SEARCH answering.
-# default is /var/run/minissdpd.sock
-#minissdpdsocket=/var/run/minissdpd.sock
-
-{% if nat_pmp is vyos_defined %}
-# Enable NAT-PMP support (default is no)
-enable_natpmp=yes
-{% endif %}
-
-# Enable UPNP support (default is yes)
-enable_upnp=yes
-
-{% if pcp_lifetime is vyos_defined %}
-# PCP
-# Configure the minimum and maximum lifetime of a port mapping in seconds
-# 120s and 86400s (24h) are suggested values from PCP-base
-{% if pcp_lifetime.max is vyos_defined %}
-max_lifetime={{ pcp_lifetime.max }}
-{% endif %}
-{% if pcp_lifetime.min is vyos_defined %}
-min_lifetime={{ pcp_lifetime.min }}
-{% endif %}
-{% endif %}
-
-# table names for netfilter nft. Default is "filter" for both
-#upnp_table_name=
-#upnp_nat_table_name=
-# chain names for netfilter and netfilter nft
-# netfilter : default are MINIUPNPD, MINIUPNPD, MINIUPNPD-POSTROUTING
-# netfilter nft : default are miniupnpd, prerouting_miniupnpd, postrouting_miniupnpd
-#upnp_forward_chain=forwardUPnP
-#upnp_nat_chain=UPnP
-#upnp_nat_postrouting_chain=UPnP-Postrouting
-
-# Lease file location
-lease_file=/config/upnp.leases
-
-# To enable the next few runtime options, see compile time
-# ENABLE_MANUFACTURER_INFO_CONFIGURATION (config.h)
-
-{% if friendly_name is vyos_defined %}
-# Name of this service, default is "`uname -s` router"
-friendly_name={{ friendly_name }}
-{% endif %}
-
-# Manufacturer name, default is "`uname -s`"
-manufacturer_name=VyOS
-
-# Manufacturer URL, default is URL of OS vendor
-manufacturer_url=https://vyos.io/
-
-# Model name, default is "`uname -s` router"
-model_name=VyOS Router Model
-
-# Model description, default is "`uname -s` router"
-model_description=Vyos open source enterprise router/firewall operating system
-
-# Model URL, default is URL of OS vendor
-model_url=https://vyos.io/
-
-# Bitrates reported by daemon in bits per second
-# by default miniupnpd tries to get WAN interface speed
-#bitrate_up=1000000
-#bitrate_down=10000000
-
-{% if secure_mode is vyos_defined %}
-# Secure Mode, UPnP clients can only add mappings to their own IP
-secure_mode=yes
-{% else %}
-# Secure Mode, UPnP clients can only add mappings to their own IP
-secure_mode=no
-{% endif %}
-
-{% if presentation_url is vyos_defined %}
-# Default presentation URL is HTTP address on port 80
-# If set to an empty string, no presentationURL element will appear
-# in the XML description of the device, which prevents MS Windows
-# from displaying an icon in the "Network Connections" panel.
-#presentation_url= {{ presentation_url }}
-{% endif %}
-
-# Report system uptime instead of daemon uptime
-system_uptime=yes
-
-# Notify interval in seconds. default is 30 seconds.
-#notify_interval=240
-notify_interval=60
-
-# Unused rules cleaning.
-# never remove any rule before this threshold for the number
-# of redirections is exceeded. default to 20
-clean_ruleset_threshold=10
-# Clean process work interval in seconds. default to 0 (disabled).
-# a 600 seconds (10 minutes) interval makes sense
-clean_ruleset_interval=600
-
-############################################################################
-## The next 5 config parameters (packet_log, anchor, queue, tag, quickrules)
-## are specific to BSD's pf(4) packet filter and hence cannot be enabled in
-## VyOS.
-# Log packets in pf (default is no)
-#packet_log=no
-
-# Anchor name in pf (default is miniupnpd)
-#anchor=miniupnpd
-
-# ALTQ queue in pf
-# Filter rules must be used for this to be used.
-# compile with PF_ENABLE_FILTER_RULES (see config.h file)
-#queue=queue_name1
-
-# Tag name in pf
-#tag=tag_name1
-
-# Make filter rules in pf quick or not. default is yes
-# active when compiled with PF_ENABLE_FILTER_RULES (see config.h file)
-#quickrules=no
-##
-## End of pf(4)-specific configuration not to be set in VyOS.
-############################################################################
-
-# UUID, generate your own UUID with "make genuuid"
-uuid={{ uuid }}
-
-# Daemon's serial and model number when reporting to clients
-# (in XML description)
-#serial=12345678
-#model_number=1
-
-# If compiled with IGD_V2 defined, force reporting IGDv1 in rootDesc (default
-# is no)
-#force_igd_desc_v1=no
-
-{% if rule is vyos_defined %}
-# UPnP permission rules (also enforced for NAT-PMP and PCP)
-# (allow|deny) (external port range) IP/mask (internal port range) (optional regex filter)
-# A port range is <min port>-<max port> or <port> if there is only
-# one port in the range.
-# IP/mask format must be nnn.nnn.nnn.nnn/nn
-# It is advised to only allow redirection of port >= 1024
-# and end the rule set with "deny 0-65535 0.0.0.0/0 0-65535"
-# The following default ruleset allows specific LAN side IP addresses
-# to request only ephemeral ports. It is recommended that users
-# modify the IP ranges to match their own internal networks, and
-# also consider implementing network-specific restrictions
-# CAUTION: failure to enforce any rules may permit insecure requests to be made!
-{% for rule, config in rule.items() %}
-{% if config.disable is not vyos_defined %}
-{{ config.action }} {{ config.external_port_range }} {{ config.ip }}{{ '/32' if '/' not in config.ip else '' }} {{ config.internal_port_range }}
-{% endif %}
-{% endfor %}
-{% endif %}
diff --git a/data/templates/frr/isisd.frr.j2 b/data/templates/frr/isisd.frr.j2
index 1e1cc3c27..eb14aade6 100644
--- a/data/templates/frr/isisd.frr.j2
+++ b/data/templates/frr/isisd.frr.j2
@@ -76,6 +76,9 @@ advertise-passive-only
{% if set_overload_bit is vyos_defined %}
set-overload-bit
{% endif %}
+{% if metric_style is vyos_defined %}
+ metric-style {{ metric_style }}
+{% endif %}
{% if domain_password.md5 is vyos_defined %}
domain-password md5 {{ domain_password.plaintext_password }}
{% elif domain_password.plaintext_password is vyos_defined %}
@@ -178,7 +181,7 @@ advertise-passive-only
{% for priority, priority_limit_options in fast_reroute.lfa.local.priority_limit.items() %}
{% for level in priority_limit_options %}
fast-reroute priority-limit {{ priority }} {{ level | replace('_', '-') }}
-{% endfor %}
+{% endfor %}
{% endfor %}
{% endif %}
{% if fast_reroute.lfa.local.tiebreaker is vyos_defined %}
@@ -233,6 +236,9 @@ fast-reroute remote-lfa prefix-list {{ prefix_list }}
{% endfor %}
{% endfor %}
{% endif %}
+{% if topology is vyos_defined %}
+topology {{ topology }}
+{% endif %}
{% if level is vyos_defined('level-2') %}
is-type level-2-only
{% elif level is vyos_defined %}
diff --git a/data/templates/ids/suricata.j2 b/data/templates/ids/suricata.j2
new file mode 100644
index 000000000..585db93eb
--- /dev/null
+++ b/data/templates/ids/suricata.j2
@@ -0,0 +1,1280 @@
+%YAML 1.1
+---
+
+# Suricata configuration file. In addition to the comments describing all
+# options in this file, full documentation can be found at:
+# https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html
+#
+# This configuration file generated by:
+# Suricata 6.0.10
+
+##
+## Step 1: Inform Suricata about your network
+##
+
+vars:
+ # more specific is better for alert accuracy and performance
+ address-groups:
+{% for (name, value) in suricata['address_group'] %}
+ {{ name }}: "[{{ value | join(',') }}]"
+{% endfor %}
+
+ port-groups:
+{% for (name, value) in suricata['port_group'] %}
+ {{ name }}: "[{{ value | join(',') }}]"
+{% endfor %}
+
+##
+## Step 2: Select outputs to enable
+##
+
+# The default logging directory. Any log or output file will be
+# placed here if it's not specified with a full path name. This can be
+# overridden with the -l command line parameter.
+default-log-dir: /var/log/suricata/
+
+# Configure the type of alert (and other) logging you would like.
+{% if suricata.log is vyos_defined %}
+outputs:
+{% if suricata.log.eve is vyos_defined %}
+ # Extensible Event Format (nicknamed EVE) event log in JSON format
+ - eve-log:
+ enabled: yes
+ filetype: {{ suricata.log.eve.filetype }} #regular|syslog|unix_dgram|unix_stream|redis
+ filename: {{ suricata.log.eve.filename }}
+
+ types:
+{% if suricata.log.eve.type is not vyos_defined or "alert" in suricata.log.eve.type %}
+ - alert:
+ tagged-packets: yes
+{% endif %}
+{% if "http" in suricata.log.eve.type %}
+ - http:
+ enabled: yes
+ extended: yes
+{% endif %}
+{% if "tls" in suricata.log.eve.type %}
+ - tls:
+ enabled: yes
+ extended: yes # enable this for extended logging information
+{% endif %}
+{% for protocol in suricata.log.eve.type %}
+{% if protocol not in ["alert","http","tls"] %}
+ - {{ protocol }}:
+ enabled: yes
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endif %}
+
+##
+## Step 3: Configure common capture settings
+##
+## See "Advanced Capture Options" below for more options, including Netmap
+## and PF_RING.
+##
+
+# Linux high speed capture support
+af-packet:
+{% for interface in suricata.interface %}
+ - interface: {{ interface }}
+ # Default clusterid. AF_PACKET will load balance packets based on flow.
+ cluster-id: 99
+ # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
+ # This is only supported for Linux kernel > 3.1
+ # possible value are:
+ # * cluster_flow: all packets of a given flow are sent to the same socket
+ # * cluster_cpu: all packets treated in kernel by a CPU are sent to the same socket
+ # * cluster_qm: all packets linked by network card to a RSS queue are sent to the same
+ # socket. Requires at least Linux 3.14.
+ # * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for
+ # more info.
+ # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system
+ # with capture card using RSS (requires cpu affinity tuning and system IRQ tuning)
+ cluster-type: cluster_flow
+ # In some fragmentation cases, the hash can not be computed. If "defrag" is set
+ # to yes, the kernel will do the needed defragmentation before sending the packets.
+ defrag: yes
+{% endfor %}
+
+# Cross platform libpcap capture support
+pcap:
+{% for interface in suricata.interface %}
+ - interface: {{ interface }}
+{% endfor %}
+
+# Settings for reading pcap files
+pcap-file:
+ # Possible values are:
+ # - yes: checksum validation is forced
+ # - no: checksum validation is disabled
+ # - auto: Suricata uses a statistical approach to detect when
+ # checksum off-loading is used. (default)
+ # Warning: 'checksum-validation' must be set to yes to have checksum tested
+ checksum-checks: auto
+
+# See "Advanced Capture Options" below for more options, including Netmap
+# and PF_RING.
+
+
+##
+## Step 4: App Layer Protocol configuration
+##
+
+# Configure the app-layer parsers.
+#
+# The error-policy setting applies to all app-layer parsers. Values can be
+# "drop-flow", "pass-flow", "bypass", "drop-packet", "pass-packet", "reject" or
+# "ignore" (the default).
+#
+# The protocol's section details each protocol.
+#
+# The option "enabled" takes 3 values - "yes", "no", "detection-only".
+# "yes" enables both detection and the parser, "no" disables both, and
+# "detection-only" enables protocol detection only (parser disabled).
+app-layer:
+ # error-policy: ignore
+ protocols:
+ rfb:
+ enabled: yes
+ detection-ports:
+ dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909
+ # MQTT, disabled by default.
+ mqtt:
+ enabled: yes
+ # max-msg-length: 1mb
+ # subscribe-topic-match-limit: 100
+ # unsubscribe-topic-match-limit: 100
+ # Maximum number of live MQTT transactions per flow
+ # max-tx: 4096
+ krb5:
+ enabled: yes
+ snmp:
+ enabled: yes
+ ikev2:
+ enabled: yes
+ tls:
+ enabled: yes
+ detection-ports:
+ dp: 443
+
+ # Generate JA3 fingerprint from client hello. If not specified it
+ # will be disabled by default, but enabled if rules require it.
+ #ja3-fingerprints: auto
+
+ # What to do when the encrypted communications start:
+ # - default: keep tracking TLS session, check for protocol anomalies,
+ # inspect tls_* keywords. Disables inspection of unmodified
+ # 'content' signatures.
+ # - bypass: stop processing this flow as much as possible. No further
+ # TLS parsing and inspection. Offload flow bypass to kernel
+ # or hardware if possible.
+ # - full: keep tracking and inspection as normal. Unmodified content
+ # keyword signatures are inspected as well.
+ #
+ # For best performance, select 'bypass'.
+ #
+ #encryption-handling: default
+
+ dcerpc:
+ enabled: yes
+ ftp:
+ enabled: yes
+ # memcap: 64mb
+ rdp:
+ enabled: yes
+ ssh:
+ enabled: yes
+ #hassh: yes
+ # HTTP2: Experimental HTTP 2 support. Disabled by default.
+ http2:
+ enabled: no
+ # use http keywords on HTTP2 traffic
+ http1-rules: no
+ smtp:
+ enabled: yes
+ raw-extraction: no
+ # Configure SMTP-MIME Decoder
+ mime:
+ # Decode MIME messages from SMTP transactions
+ # (may be resource intensive)
+ # This field supersedes all others because it turns the entire
+ # process on or off
+ decode-mime: yes
+
+ # Decode MIME entity bodies (ie. Base64, quoted-printable, etc.)
+ decode-base64: yes
+ decode-quoted-printable: yes
+
+ # Maximum bytes per header data value stored in the data structure
+ # (default is 2000)
+ header-value-depth: 2000
+
+ # Extract URLs and save in state data structure
+ extract-urls: yes
+ # Set to yes to compute the md5 of the mail body. You will then
+ # be able to journalize it.
+ body-md5: no
+ # Configure inspected-tracker for file_data keyword
+ inspected-tracker:
+ content-limit: 100000
+ content-inspect-min-size: 32768
+ content-inspect-window: 4096
+ imap:
+ enabled: detection-only
+ smb:
+ enabled: yes
+ detection-ports:
+ dp: 139, 445
+
+ # Stream reassembly size for SMB streams. By default track it completely.
+ #stream-depth: 0
+
+ nfs:
+ enabled: yes
+ tftp:
+ enabled: yes
+ dns:
+ tcp:
+ enabled: yes
+ detection-ports:
+ dp: 53
+ udp:
+ enabled: yes
+ detection-ports:
+ dp: 53
+ http:
+ enabled: yes
+ # memcap: Maximum memory capacity for HTTP
+ # Default is unlimited, values can be 64mb, e.g.
+
+ # default-config: Used when no server-config matches
+ # personality: List of personalities used by default
+ # request-body-limit: Limit reassembly of request body for inspection
+ # by http_client_body & pcre /P option.
+ # response-body-limit: Limit reassembly of response body for inspection
+ # by file_data, http_server_body & pcre /Q option.
+ #
+ # For advanced options, see the user guide
+
+
+ # server-config: List of server configurations to use if address matches
+ # address: List of IP addresses or networks for this block
+ # personality: List of personalities used by this block
+ #
+ # Then, all the fields from default-config can be overloaded
+ #
+ # Currently Available Personalities:
+ # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0,
+ # IIS_7_0, IIS_7_5, Apache_2
+ libhtp:
+ default-config:
+ personality: IDS
+
+ # Can be specified in kb, mb, gb. Just a number indicates
+ # it's in bytes.
+ request-body-limit: 100kb
+ response-body-limit: 100kb
+
+ # inspection limits
+ request-body-minimal-inspect-size: 32kb
+ request-body-inspect-window: 4kb
+ response-body-minimal-inspect-size: 40kb
+ response-body-inspect-window: 16kb
+
+ # response body decompression (0 disables)
+ response-body-decompress-layer-limit: 2
+
+ # auto will use http-body-inline mode in IPS mode, yes or no set it statically
+ http-body-inline: auto
+
+ # Decompress SWF files.
+ # Two types: 'deflate', 'lzma', 'both' will decompress deflate and lzma
+ # compress-depth:
+ # Specifies the maximum amount of data to decompress,
+ # set 0 for unlimited.
+ # decompress-depth:
+ # Specifies the maximum amount of decompressed data to obtain,
+ # set 0 for unlimited.
+ swf-decompression:
+ enabled: yes
+ type: both
+ compress-depth: 100kb
+ decompress-depth: 100kb
+
+ # Use a random value for inspection sizes around the specified value.
+ # This lowers the risk of some evasion techniques but could lead
+ # to detection change between runs. It is set to 'yes' by default.
+ #randomize-inspection-sizes: yes
+ # If "randomize-inspection-sizes" is active, the value of various
+ # inspection size will be chosen from the [1 - range%, 1 + range%]
+ # range
+ # Default value of "randomize-inspection-range" is 10.
+ #randomize-inspection-range: 10
+
+ # decoding
+ double-decode-path: no
+ double-decode-query: no
+
+ # Can enable LZMA decompression
+ #lzma-enabled: false
+ # Memory limit usage for LZMA decompression dictionary
+ # Data is decompressed until dictionary reaches this size
+ #lzma-memlimit: 1mb
+ # Maximum decompressed size with a compression ratio
+ # above 2048 (only LZMA can reach this ratio, deflate cannot)
+ #compression-bomb-limit: 1mb
+ # Maximum time spent decompressing a single transaction in usec
+ #decompression-time-limit: 100000
+
+ server-config:
+
+ #- apache:
+ # address: [192.168.1.0/24, 127.0.0.0/8, "::1"]
+ # personality: Apache_2
+ # # Can be specified in kb, mb, gb. Just a number indicates
+ # # it's in bytes.
+ # request-body-limit: 4096
+ # response-body-limit: 4096
+ # double-decode-path: no
+ # double-decode-query: no
+
+ #- iis7:
+ # address:
+ # - 192.168.0.0/24
+ # - 192.168.10.0/24
+ # personality: IIS_7_0
+ # # Can be specified in kb, mb, gb. Just a number indicates
+ # # it's in bytes.
+ # request-body-limit: 4096
+ # response-body-limit: 4096
+ # double-decode-path: no
+ # double-decode-query: no
+
+ # Note: Modbus probe parser is minimalist due to the limited usage in the field.
+ # Only Modbus message length (greater than Modbus header length)
+ # and protocol ID (equal to 0) are checked in probing parser
+ # It is important to enable detection port and define Modbus port
+ # to avoid false positives
+ modbus:
+ # How many unanswered Modbus requests are considered a flood.
+ # If the limit is reached, the app-layer-event:modbus.flooded; will match.
+ #request-flood: 500
+
+ enabled: no
+ detection-ports:
+ dp: 502
+ # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it
+ # is recommended to keep the TCP connection opened with a remote device
+ # and not to open and close it for each MODBUS/TCP transaction. In that
+ # case, it is important to set the depth of the stream reassembling as
+ # unlimited (stream.reassembly.depth: 0)
+
+ # Stream reassembly size for modbus. By default track it completely.
+ stream-depth: 0
+
+ # DNP3
+ dnp3:
+ enabled: no
+ detection-ports:
+ dp: 20000
+
+ # SCADA EtherNet/IP and CIP protocol support
+ enip:
+ enabled: no
+ detection-ports:
+ dp: 44818
+ sp: 44818
+
+ ntp:
+ enabled: yes
+
+ dhcp:
+ enabled: yes
+
+ sip:
+ enabled: yes
+
+# Limit for the maximum number of asn1 frames to decode (default 256)
+asn1-max-frames: 256
+
+# Datasets default settings
+# datasets:
+# # Default fallback memcap and hashsize values for datasets in case these
+# # were not explicitly defined.
+# defaults:
+# memcap: 100mb
+# hashsize: 2048
+
+##############################################################################
+##
+## Advanced settings below
+##
+##############################################################################
+
+##
+## Run Options
+##
+
+# Run Suricata with a specific user-id and group-id:
+#run-as:
+# user: suri
+# group: suri
+
+# Some logging modules will use that name in event as identifier. The default
+# value is the hostname
+#sensor-name: suricata
+
+# Default location of the pid file. The pid file is only used in
+# daemon mode (start Suricata with -D). If not running in daemon mode
+# the --pidfile command line option must be used to create a pid file.
+#pid-file: /var/run/suricata.pid
+
+# Daemon working directory
+# Suricata will change directory to this one if provided
+# Default: "/"
+#daemon-directory: "/"
+
+# Umask.
+# Suricata will use this umask if it is provided. By default it will use the
+# umask passed on by the shell.
+#umask: 022
+
+# Suricata core dump configuration. Limits the size of the core dump file to
+# approximately max-dump. The actual core dump size will be a multiple of the
+# page size. Core dumps that would be larger than max-dump are truncated. On
+# Linux, the actual core dump size may be a few pages larger than max-dump.
+# Setting max-dump to 0 disables core dumping.
+# Setting max-dump to 'unlimited' will give the full core dump file.
+# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size
+# to be 'unlimited'.
+
+coredump:
+ max-dump: unlimited
+
+# If the Suricata box is a router for the sniffed networks, set it to 'router'. If
+# it is a pure sniffing setup, set it to 'sniffer-only'.
+# If set to auto, the variable is internally switched to 'router' in IPS mode
+# and 'sniffer-only' in IDS mode.
+# This feature is currently only used by the reject* keywords.
+host-mode: auto
+
+# Number of packets preallocated per thread. The default is 1024. A higher number
+# will make sure each CPU will be more easily kept busy, but may negatively
+# impact caching.
+#max-pending-packets: 1024
+
+# Runmode the engine should use. Please check --list-runmodes to get the available
+# runmodes for each packet acquisition method. Default depends on selected capture
+# method. 'workers' generally gives best performance.
+#runmode: autofp
+
+# Specifies the kind of flow load balancer used by the flow pinned autofp mode.
+#
+# Supported schedulers are:
+#
+# hash - Flow assigned to threads using the 5-7 tuple hash.
+# ippair - Flow assigned to threads using addresses only.
+#
+#autofp-scheduler: hash
+
+# Preallocated size for each packet. Default is 1514 which is the classical
+# size for pcap on Ethernet. You should adjust this value to the highest
+# packet size (MTU + hardware header) on your system.
+#default-packet-size: 1514
+
+# Unix command socket that can be used to pass commands to Suricata.
+# An external tool can then connect to get information from Suricata
+# or trigger some modifications of the engine. Set enabled to yes
+# to activate the feature. In auto mode, the feature will only be
+# activated in live capture mode. You can use the filename variable to set
+# the file name of the socket.
+unix-command:
+ enabled: yes
+ filename: /run/suricata/suricata.socket
+
+# Magic file. The extension .mgc is added to the value here.
+#magic-file: /usr/share/file/magic
+#magic-file:
+
+# GeoIP2 database file. Specify path and filename of GeoIP2 database
+# if using rules with "geoip" rule option.
+#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb
+
+legacy:
+ uricontent: enabled
+
+##
+## Detection settings
+##
+
+# Set the order of alerts based on actions
+# The default order is pass, drop, reject, alert
+# action-order:
+# - pass
+# - drop
+# - reject
+# - alert
+
+# Define maximum number of possible alerts that can be triggered for the same
+# packet. Default is 15
+#packet-alert-max: 15
+
+# IP Reputation
+#reputation-categories-file: /etc/suricata/iprep/categories.txt
+#default-reputation-path: /etc/suricata/iprep
+#reputation-files:
+# - reputation.list
+
+# When run with the option --engine-analysis, the engine will read each of
+# the parameters below, and print reports for each of the enabled sections
+# and exit. The reports are printed to a file in the default log dir
+# given by the parameter "default-log-dir", with engine reporting
+# subsection below printing reports in its own report file.
+engine-analysis:
+ # enables printing reports for fast-pattern for every rule.
+ rules-fast-pattern: yes
+ # enables printing reports for each rule
+ rules: yes
+
+#recursion and match limits for PCRE where supported
+pcre:
+ match-limit: 3500
+ match-limit-recursion: 1500
+
+##
+## Advanced Traffic Tracking and Reconstruction Settings
+##
+
+# Host specific policies for defragmentation and TCP stream
+# reassembly. The host OS lookup is done using a radix tree, just
+# like a routing table so the most specific entry matches.
+host-os-policy:
+ # Make the default policy windows.
+ windows: [0.0.0.0/0]
+ bsd: []
+ bsd-right: []
+ old-linux: []
+ linux: []
+ old-solaris: []
+ solaris: []
+ hpux10: []
+ hpux11: []
+ irix: []
+ macos: []
+ vista: []
+ windows2k3: []
+
+# Defrag settings:
+
+# The memcap-policy value can be "drop-flow", "pass-flow", "bypass",
+# "drop-packet", "pass-packet", "reject" or "ignore" (which is the default).
+defrag:
+ memcap: 32mb
+ # memcap-policy: ignore
+ hash-size: 65536
+ trackers: 65535 # number of defragmented flows to follow
+ max-frags: 65535 # number of fragments to keep (higher than trackers)
+ prealloc: yes
+ timeout: 60
+
+# Enable defrag per host settings
+# host-config:
+#
+# - dmz:
+# timeout: 30
+# address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"]
+#
+# - lan:
+# timeout: 45
+# address:
+# - 192.168.0.0/24
+# - 192.168.10.0/24
+# - 172.16.14.0/24
+
+# Flow settings:
+# By default, the reserved memory (memcap) for flows is 32MB. This is the limit
+# for flow allocation inside the engine. You can change this value to allow
+# more memory usage for flows.
+# The hash-size determines the size of the hash used to identify flows inside
+# the engine, and by default the value is 65536.
+# At startup, the engine can preallocate a number of flows, to get better
+# performance. The number of flows preallocated is 10000 by default.
+# emergency-recovery is the percentage of flows that the engine needs to
+# prune before clearing the emergency state. The emergency state is activated
+# when the memcap limit is reached, allowing new flows to be created, but
+# pruning them with the emergency timeouts (they are defined below).
+# If the memcap is reached, the engine will try to prune flows
+# with the default timeouts. If it doesn't find a flow to prune, it will set
+# the emergency bit and it will try again with more aggressive timeouts.
+# If that doesn't work, then it will try to kill the oldest flows using
+# last time seen flows.
+# The memcap can be specified in kb, mb, gb. Just a number indicates it's
+# in bytes.
+# The memcap-policy can be "drop-flow", "pass-flow", "bypass", "drop-packet",
+# "pass-packet", "reject" or "ignore" (which is the default).
+
+flow:
+ memcap: 128mb
+ #memcap-policy: ignore
+ hash-size: 65536
+ prealloc: 10000
+ emergency-recovery: 30
+ #managers: 1 # default to one flow manager
+ #recyclers: 1 # default to one flow recycler thread
+
+# This option controls the use of VLAN ids in the flow (and defrag)
+# hashing. Normally this should be enabled, but in some (broken)
+# setups where both sides of a flow are not tagged with the same VLAN
+# tag, we can ignore the VLAN id's in the flow hashing.
+vlan:
+ use-for-tracking: true
+
+# Specific timeouts for flows. Here you can specify the timeouts that the
+# active flows will wait to transit from the current state to another, on each
+# protocol. The value of "new" determines the seconds to wait after a handshake or
+# stream startup before the engine frees the data of that flow it doesn't
+# change the state to established (usually if we don't receive more packets
+# of that flow). The value of "established" is the amount of
+# seconds that the engine will wait to free the flow if that time elapses
+# without receiving new packets or closing the connection. "closed" is the
+# amount of time to wait after a flow is closed (usually zero). "bypassed"
+# timeout controls locally bypassed flows. For these flows we don't do any other
+# tracking. If no packets have been seen after this timeout, the flow is discarded.
+#
+# There's an emergency mode that will become active under attack circumstances,
+# making the engine to check flow status faster. This configuration variables
+# use the prefix "emergency-" and work similar as the normal ones.
+# Some timeouts doesn't apply to all the protocols, like "closed", for udp and
+# icmp.
+
+flow-timeouts:
+
+ default:
+ new: 30
+ established: 300
+ closed: 0
+ bypassed: 100
+ emergency-new: 10
+ emergency-established: 100
+ emergency-closed: 0
+ emergency-bypassed: 50
+ tcp:
+ new: 60
+ established: 600
+ closed: 60
+ bypassed: 100
+ emergency-new: 5
+ emergency-established: 100
+ emergency-closed: 10
+ emergency-bypassed: 50
+ udp:
+ new: 30
+ established: 300
+ bypassed: 100
+ emergency-new: 10
+ emergency-established: 100
+ emergency-bypassed: 50
+ icmp:
+ new: 30
+ established: 300
+ bypassed: 100
+ emergency-new: 10
+ emergency-established: 100
+ emergency-bypassed: 50
+
+# Stream engine settings. Here the TCP stream tracking and reassembly
+# engine is configured.
+#
+# stream:
+# memcap: 64mb # Can be specified in kb, mb, gb. Just a
+# # number indicates it's in bytes.
+# memcap-policy: ignore # Can be "drop-flow", "pass-flow", "bypass",
+# # "drop-packet", "pass-packet", "reject" or
+# # "ignore" default is "ignore"
+# checksum-validation: yes # To validate the checksum of received
+# # packet. If csum validation is specified as
+# # "yes", then packets with invalid csum values will not
+# # be processed by the engine stream/app layer.
+# # Warning: locally generated traffic can be
+# # generated without checksum due to hardware offload
+# # of checksum. You can control the handling of checksum
+# # on a per-interface basis via the 'checksum-checks'
+# # option
+# prealloc-sessions: 2k # 2k sessions prealloc'd per stream thread
+# midstream: false # don't allow midstream session pickups
+# midstream-policy: ignore # Can be "drop-flow", "pass-flow", "bypass",
+# # "drop-packet", "pass-packet", "reject" or
+# # "ignore" default is "ignore"
+# async-oneside: false # don't enable async stream handling
+# inline: no # stream inline mode
+# drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine
+# max-synack-queued: 5 # Max different SYN/ACKs to queue
+# bypass: no # Bypass packets when stream.reassembly.depth is reached.
+# # Warning: first side to reach this triggers
+# # the bypass.
+#
+# reassembly:
+# memcap: 256mb # Can be specified in kb, mb, gb. Just a number
+# # indicates it's in bytes.
+# memcap-policy: ignore # Can be "drop-flow", "pass-flow", "bypass",
+# # "drop-packet", "pass-packet", "reject" or
+# # "ignore" default is "ignore"
+# depth: 1mb # Can be specified in kb, mb, gb. Just a number
+# # indicates it's in bytes.
+# toserver-chunk-size: 2560 # inspect raw stream in chunks of at least
+# # this size. Can be specified in kb, mb,
+# # gb. Just a number indicates it's in bytes.
+# toclient-chunk-size: 2560 # inspect raw stream in chunks of at least
+# # this size. Can be specified in kb, mb,
+# # gb. Just a number indicates it's in bytes.
+# randomize-chunk-size: yes # Take a random value for chunk size around the specified value.
+# # This lowers the risk of some evasion techniques but could lead
+# # to detection change between runs. It is set to 'yes' by default.
+# randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is
+# # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size
+# # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same
+# # calculation for toclient-chunk-size.
+# # Default value of randomize-chunk-range is 10.
+#
+# raw: yes # 'Raw' reassembly enabled or disabled.
+# # raw is for content inspection by detection
+# # engine.
+#
+# segment-prealloc: 2048 # number of segments preallocated per thread
+#
+# check-overlap-different-data: true|false
+# # check if a segment contains different data
+# # than what we've already seen for that
+# # position in the stream.
+# # This is enabled automatically if inline mode
+# # is used or when stream-event:reassembly_overlap_different_data;
+# # is used in a rule.
+#
+stream:
+ memcap: 64mb
+ #memcap-policy: ignore
+ checksum-validation: yes # reject incorrect csums
+ #midstream: false
+ #midstream-policy: ignore
+ inline: auto # auto will use inline mode in IPS mode, yes or no set it statically
+ reassembly:
+ memcap: 256mb
+ #memcap-policy: ignore
+ depth: 1mb # reassemble 1mb into a stream
+ toserver-chunk-size: 2560
+ toclient-chunk-size: 2560
+ randomize-chunk-size: yes
+ #randomize-chunk-range: 10
+ #raw: yes
+ #segment-prealloc: 2048
+ #check-overlap-different-data: true
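As a quick illustration of the randomize-chunk-range formula described in the commented block above: with the chunk sizes configured here (2560) and the default range of 10, the effective chunk size is drawn from the interval computed below. This is only the arithmetic spelled out, not output taken from Suricata.

# Worked example of the randomize-chunk-size interval described above:
# the chunk size is drawn between (1 - range/100) * base and (1 + range/100) * base.
base_chunk = 2560      # toserver-chunk-size / toclient-chunk-size set above
rand_range = 10        # default randomize-chunk-range

low = (1 - rand_range / 100) * base_chunk
high = (1 + rand_range / 100) * base_chunk
print(low, high)       # 2304.0 2816.0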
+
+# Host table:
+#
+# Host table is used by the tagging and per host thresholding subsystems.
+#
+host:
+ hash-size: 4096
+ prealloc: 1000
+ memcap: 32mb
+
+# IP Pair table:
+#
+# Used by xbits 'ippair' tracking.
+#
+#ippair:
+# hash-size: 4096
+# prealloc: 1000
+# memcap: 32mb
+
+# Decoder settings
+
+decoder:
+ # Teredo decoder is known to not be completely accurate
+ # as it will sometimes detect non-teredo as teredo.
+ teredo:
+ enabled: true
+ # ports to look for Teredo. Max 4 ports. If no ports are given, or
+ # the value is set to 'any', Teredo detection runs on _all_ UDP packets.
+ ports: $TEREDO_PORTS # syntax: '[3544, 1234]' or '3533' or 'any'.
+
+ # VXLAN decoder is assigned to up to 4 UDP ports. By default only the
+ # IANA assigned port 4789 is enabled.
+ vxlan:
+ enabled: true
+ ports: $VXLAN_PORTS # syntax: '[8472, 4789]' or '4789'.
+
+ # VNTag decode support
+ vntag:
+ enabled: false
+
+ # Geneve decoder is assigned to up to 4 UDP ports. By default only the
+ # IANA assigned port 6081 is enabled.
+ geneve:
+ enabled: true
+ ports: $GENEVE_PORTS # syntax: '[6081, 1234]' or '6081'.
+
+ # maximum number of decoder layers for a packet
+ # max-layers: 16
+
+##
+## Performance tuning and profiling
+##
+
+# The detection engine builds internal groups of signatures. The engine
+# allows us to specify the profile to use for them, to manage memory in an
+# efficient way while keeping good performance. For the profile keyword you
+# can use the words "low", "medium", "high" or "custom". If you use custom,
+# make sure to define the values in the "custom-values" section.
+# Usually you would prefer medium/high/low.
+#
+# "sgh mpm-context", indicates how the staging should allot mpm contexts for
+# the signature groups. "single" indicates the use of a single context for
+# all the signature group heads. "full" indicates a mpm-context for each
+# group head. "auto" lets the engine decide the distribution of contexts
+# based on the information the engine gathers on the patterns from each
+# group head.
+#
+# The option inspection-recursion-limit is used to limit the recursive calls
+# in the content inspection code. For certain payload-sig combinations, we
+# might end up taking too much time in the content inspection code.
+# If the argument specified is 0, the engine uses an internally defined
+# default limit. When a value is not specified, there are no limits on the recursion.
+detect:
+ profile: medium
+ custom-values:
+ toclient-groups: 3
+ toserver-groups: 25
+ sgh-mpm-context: auto
+ inspection-recursion-limit: 3000
+ # If set to yes, the loading of signatures will be made after the capture
+ # is started. This will limit the downtime in IPS mode.
+ #delayed-detect: yes
+
+ prefilter:
+ # default prefiltering setting. "mpm" only creates MPM/fast_pattern
+ # engines. "auto" also sets up prefilter engines for other keywords.
+ # Use --list-keywords=all to see which keywords support prefiltering.
+ default: mpm
+
+ # the grouping values above control how many groups are created per
+ # direction. Port whitelisting forces that port to get its own group.
+ # Very common ports will benefit, as well as ports with many expensive
+ # rules.
+ grouping:
+ #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080
+ #udp-whitelist: 53, 135, 5060
+
+ profiling:
+ # Log the rules that made it past the prefilter stage, per packet.
+ # The default is off. The threshold setting determines how many rules
+ # must have made it past pre-filter for that rule to trigger the
+ # logging.
+ #inspect-logging-threshold: 200
+ grouping:
+ dump-to-disk: false
+ include-rules: false # very verbose
+ include-mpm-stats: false
+
+# Select the multi-pattern algorithm you want to use for scanning and
+# searching in the engine.
+#
+# The supported algorithms are:
+# "ac" - Aho-Corasick, default implementation
+# "ac-bs" - Aho-Corasick, reduced memory implementation
+# "ac-ks" - Aho-Corasick, "Ken Steele" variant
+# "hs" - Hyperscan, available when built with Hyperscan support
+#
+# The default mpm-algo value of "auto" will use "hs" if Hyperscan is
+# available, "ac" otherwise.
+#
+# The mpm you choose also decides the distribution of mpm contexts for
+# signature groups, specified by the conf - "detect.sgh-mpm-context".
+# Selecting "ac" as the mpm would require "detect.sgh-mpm-context"
+# to be set to "single", because of ac's memory requirements, unless the
+# ruleset is small enough to fit in memory, in which case one can
+# use "full" with "ac". The rest of the mpms can be run in "full" mode.
+
+mpm-algo: auto
+
+# Select the matching algorithm you want to use for single-pattern searches.
+#
+# Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only
+# available if Suricata has been built with Hyperscan support).
+#
+# The default of "auto" will use "hs" if available, otherwise "bm".
+
+spm-algo: auto
+
+# Suricata is multi-threaded. Here the threading can be influenced.
+threading:
+ set-cpu-affinity: no
+ # Tune cpu affinity of threads. Each family of threads can be bound
+ # to specific CPUs.
+ #
+ # These two apply to all runmodes:
+ # management-cpu-set is used for flow timeout handling, counters
+ # worker-cpu-set is used for 'worker' threads
+ #
+ # Additionally, for autofp these apply:
+ # receive-cpu-set is used for capture threads
+ # verdict-cpu-set is used for IPS verdict threads
+ #
+ cpu-affinity:
+ - management-cpu-set:
+ cpu: [ 0 ] # include only these CPUs in affinity settings
+ - receive-cpu-set:
+ cpu: [ 0 ] # include only these CPUs in affinity settings
+ - worker-cpu-set:
+ cpu: [ "all" ]
+ mode: "exclusive"
+ # Use explicitly 3 threads and don't compute number by using
+ # detect-thread-ratio variable:
+ # threads: 3
+ prio:
+ low: [ 0 ]
+ medium: [ "1-2" ]
+ high: [ 3 ]
+ default: "medium"
+ #- verdict-cpu-set:
+ # cpu: [ 0 ]
+ # prio:
+ # default: "high"
+ #
+ # By default Suricata creates one "detect" thread per available CPU/CPU core.
+ # This setting allows controlling this behaviour. A ratio setting of 2 will
+ # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this
+ # will result in 4 detect threads. If values below 1 are used, fewer threads
+ # are created. So on a dual core CPU a setting of 0.5 results in 1 detect
+ # thread being created. Regardless of the setting at a minimum 1 detect
+ # thread will always be created.
+ #
+ detect-thread-ratio: 1.0
+ #
+ # By default, the per-thread stack size is left to its default setting. If
+ # the default thread stack size is too small, use the following configuration
+ # setting to change the size. Note that if any thread's stack size cannot be
+ # set to this value, a fatal error occurs.
+ #
+ # Generally, the per-thread stack-size should not exceed 8MB.
+ #stack-size: 8mb
+
+# Luajit has a strange memory requirement, its 'states' need to be in the
+# first 2G of the process' memory.
+#
+# 'luajit.states' is used to control how many states are preallocated.
+# State use: per detect script: 1 per detect thread. Per output script: 1 per
+# script.
+luajit:
+ states: 128
+
+# Profiling settings. Only effective if Suricata has been built with
+# the --enable-profiling configure flag.
+#
+profiling:
+ # Run profiling for every X-th packet. The default is 1, which means we
+ # profile every packet. If set to 1000, one packet is profiled for every
+ # 1000 received.
+ #sample-rate: 1000
+
+ # rule profiling
+ rules:
+
+ # Profiling can be disabled here, but it will still have a
+ # performance impact if compiled in.
+ enabled: yes
+ filename: rule_perf.log
+ append: yes
+
+ # Sort options: ticks, avgticks, checks, matches, maxticks
+ # If commented out all the sort options will be used.
+ #sort: avgticks
+
+ # Limit the number of sids for which stats are shown at exit (per sort).
+ limit: 10
+
+ # output to json
+ json: yes
+
+ # per keyword profiling
+ keywords:
+ enabled: yes
+ filename: keyword_perf.log
+ append: yes
+
+ prefilter:
+ enabled: yes
+ filename: prefilter_perf.log
+ append: yes
+
+ # per rulegroup profiling
+ rulegroups:
+ enabled: yes
+ filename: rule_group_perf.log
+ append: yes
+
+ # packet profiling
+ packets:
+
+ # Profiling can be disabled here, but it will still have a
+ # performance impact if compiled in.
+ enabled: yes
+ filename: packet_stats.log
+ append: yes
+
+ # per packet csv output
+ csv:
+
+ # Output can be disabled here, but it will still have a
+ # performance impact if compiled in.
+ enabled: no
+ filename: packet_stats.csv
+
+ # profiling of locking. Only available when Suricata was built with
+ # --enable-profiling-locks.
+ locks:
+ enabled: no
+ filename: lock_stats.log
+ append: yes
+
+ pcap-log:
+ enabled: no
+ filename: pcaplog_stats.log
+ append: yes
+
+##
+## Netfilter integration
+##
+
+# When running in NFQ inline mode, it is possible to use a simulated
+# non-terminal NFQUEUE verdict.
+# This permits sending all needed packets to Suricata via this rule:
+# iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE
+# And below, you can have your standard filtering ruleset. To activate
+# this mode, you need to set mode to 'repeat'.
+# If you want a packet to be sent to another queue after an ACCEPT decision,
+# set the mode to 'route' and set the next-queue value.
+# On Linux >= 3.1, you can set batchcount to a value > 1 to improve performance
+# by processing several packets before sending a verdict (worker runmode only).
+# On Linux >= 3.6, you can set the fail-open option to yes to have the kernel
+# accept the packet if Suricata is not able to keep pace.
+# bypass mark and mask can be used to implement NFQ bypass. If bypass mark is
+# set then the NFQ bypass is activated. Suricata will set the bypass mark/mask
+# on packets of a flow that need to be bypassed. The Netfilter ruleset has to
+# directly accept all packets of a flow once a packet has been marked.
+nfq:
+# mode: accept
+# repeat-mark: 1
+# repeat-mask: 1
+# bypass-mark: 1
+# bypass-mask: 1
+# route-queue: 2
+# batchcount: 20
+# fail-open: yes
+
+#nflog support
+nflog:
+ # netlink multicast group
+ # (the same as the iptables --nflog-group param)
+ # Group 0 is used by the kernel, so you can't use it
+ - group: 2
+ # netlink buffer size
+ buffer-size: 18432
+ # put default value here
+ - group: default
+ # set number of packets to queue inside kernel
+ qthreshold: 1
+ # set the delay before flushing packet in the kernel's queue
+ qtimeout: 100
+ # netlink max buffer size
+ max-size: 20000
+
+##
+## Advanced Capture Options
+##
+
+# General settings affecting packet capture
+capture:
+ # disable NIC offloading. It's restored when Suricata exits.
+ # Enabled by default.
+ #disable-offloading: false
+ #
+ # disable checksum validation. Same as setting '-k none' on the
+ # commandline.
+ #checksum-validation: none
+
+# Netmap support
+#
+# Netmap operates with the NIC directly in the driver, so you need FreeBSD 11+,
+# which has built-in Netmap support, or to compile and install the Netmap module
+# and an appropriate NIC driver on your Linux system.
+# To reach maximum throughput, disable all receive-, segmentation- and
+# checksum-offloading on your NIC (using ethtool or similar).
+# Disabling TX checksum offloading is *required* for connecting OS endpoint
+# with NIC endpoint.
+# You can find more information at https://github.com/luigirizzo/netmap
+#
+netmap:
+ - interface: default
+
+# PF_RING configuration: for use with native PF_RING support
+# for more info see http://www.ntop.org/products/pf_ring/
+pfring:
+ - interface: default
+ #threads: 2
+
+# For FreeBSD ipfw(8) divert(4) support.
+# Please make sure you have ipfw_load="YES" and ipdivert_load="YES"
+# in /etc/loader.conf, or kldload the appropriate kernel modules.
+# Additionally, you need to have an ipfw rule for the engine to see
+# the packets from ipfw. For Example:
+#
+# ipfw add 100 divert 8000 ip from any to any
+#
+# N.B. This example uses "8000" -- this number must match the value
+# you passed on the command line, i.e., -d 8000
+#
+ipfw:
+
+ # Reinject packets at the specified ipfw rule number. This config
+ # option is the ipfw rule number AT WHICH rule processing continues
+ # in the ipfw processing system after the engine has finished
+ # inspecting the packet for acceptance. If no rule number is specified,
+ # accepted packets are reinjected at the divert rule which they entered
+ # and IPFW rule processing continues. No check is done to verify that
+ # this rule makes sense, so care must be taken to avoid loops in ipfw.
+ #
+ ## The following example tells the engine to reinject packets
+ # back into the ipfw firewall AT rule number 5500:
+ #
+ # ipfw-reinjection-rule-number: 5500
+
+
+napatech:
+ # When use_all_streams is set to "yes" the initialization code will query
+ # the Napatech service for all configured streams and listen on all of them.
+ # When set to "no" the streams config array will be used.
+ #
+ # This option necessitates running the appropriate NTPL commands to create
+ # the desired streams prior to running Suricata.
+ #use-all-streams: no
+
+ # The streams to listen on when auto-config is disabled or when threading
+ # cpu-affinity is disabled. This can be either:
+ # an individual stream (e.g. streams: [0])
+ # or
+ # a range of streams (e.g. streams: ["0-3"])
+ #
+ streams: ["0-3"]
+
+ # Stream stats can be enabled to provide fine grain packet and byte counters
+ # for each thread/stream that is configured.
+ #
+ enable-stream-stats: no
+
+ # When auto-config is enabled the streams will be created and assigned
+ # automatically to the NUMA node where the thread resides. If cpu-affinity
+ # is enabled in the threading section, then the streams will be created
+ # according to the number of worker threads specified in the worker-cpu-set.
+ # Otherwise, the streams array is used to define the streams.
+ #
+ # This option is intended primarily to support legacy configurations.
+ #
+ # This option cannot be used simultaneously with either "use-all-streams"
+ # or "hardware-bypass".
+ #
+ auto-config: yes
+
+ # Enable hardware level flow bypass.
+ #
+ hardware-bypass: yes
+
+ # Enable inline operation. When enabled traffic arriving on a given port is
+ # automatically forwarded out its peer port after analysis by Suricata.
+ #
+ inline: no
+
+ # Ports indicates which Napatech ports are to be used in auto-config mode.
+ # These are the port IDs of the ports that will be merged prior to the
+ # traffic being distributed to the streams.
+ #
+ # When hardware-bypass is enabled the ports must be configured as a segment.
+ # Specify the port(s) on which upstream and downstream traffic will arrive.
+ # This information is necessary for the hardware to properly process flows.
+ #
+ # When using a tap configuration one of the ports will receive inbound traffic
+ # for the network and the other will receive outbound traffic. The two ports on a
+ # given segment must reside on the same network adapter.
+ #
+ # When using a SPAN-port configuration the upstream and downstream traffic
+ # arrives on a single port. This is configured by setting the two sides of the
+ # segment to reference the same port. (e.g. 0-0 to configure a SPAN port on
+ # port 0).
+ #
+ # port segments are specified in the form:
+ # ports: [0-1,2-3,4-5,6-6,7-7]
+ #
+ # For legacy systems when hardware-bypass is disabled this can be specified in any
+ # of the following ways:
+ #
+ # a list of individual ports (e.g. ports: [0,1,2,3])
+ #
+ # a range of ports (e.g. ports: [0-3])
+ #
+ # "all" to indicate that all ports are to be merged together
+ # (e.g. ports: [all])
+ #
+ # This parameter has no effect if auto-config is disabled.
+ #
+ ports: [0-1,2-3]
+
+ # When auto-config is enabled the hashmode specifies the algorithm for
+ # determining to which stream a given packet is to be delivered.
+ # This can be any valid Napatech NTPL hashmode command.
+ #
+ # The most common hashmode commands are: hash2tuple, hash2tuplesorted,
+ # hash5tuple, hash5tuplesorted and roundrobin.
+ #
+ # See the Napatech NTPL documentation for other hashmodes and details on their use.
+ #
+ # This parameter has no effect if auto-config is disabled.
+ #
+ hashmode: hash5tuplesorted
+
+##
+## Configure Suricata to load Suricata-Update managed rules.
+##
+
+# As VyOS leverages suricata-update, the default rule path points to the
+# generated rules instead of the built-in rules.
+#
+# default-rule-path: /etc/suricata/rules
+default-rule-path: /var/lib/suricata/rules
+
+rule-files:
+ - suricata.rules
+
+##
+## Auxiliary configuration files.
+##
+
+# As VyOS leverages suricata-update, the classification file points to the
+# generated classification instead of the built-in one.
+#
+# classification-file: /etc/suricata/classification.config
+classification-file: /var/lib/suricata/rules/classification.config
+reference-config-file: /etc/suricata/reference.config
+# threshold-file: /etc/suricata/threshold.config
+
+##
+## Include other configs
+##
+
+# Includes: Files included here will be handled as if they were in-lined
+# in this configuration file. Files with relative pathnames will be
+# searched for in the same directory as this configuration file. You may
+# use absolute pathnames too.
+# You can specify more than 2 configuration files, if needed.
+#include: include1.yaml
+#include: include2.yaml
diff --git a/data/templates/ids/suricata_logrotate.j2 b/data/templates/ids/suricata_logrotate.j2
new file mode 100644
index 000000000..62773fc68
--- /dev/null
+++ b/data/templates/ids/suricata_logrotate.j2
@@ -0,0 +1,17 @@
+{% for filename in [(log.eve.filename | default("eve.json"))] %}
+{{ filename if filename.startswith("/") else ("/var/log/suricata/" + filename) }}
+{% endfor %}{
+ weekly
+ dateext
+ dateformat _%Y-%m-%d_%H-%M-%S
+ maxsize 10M
+ rotate 10
+ missingok
+ nocompress
+ nocreate
+ nomail
+ sharedscripts
+ postrotate
+ /bin/kill -HUP `cat /run/suricata/suricata.pid 2>/dev/null` 2>/dev/null || true
+ endscript
+}
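The single-element for loop at the top of this logrotate template is simply a Jinja2 idiom for binding a local variable; the expression then prefixes relative filenames with the default Suricata log directory. Below is a minimal sketch of that path resolution using the jinja2 library directly; only the first three template lines are reproduced and VyOS's own template machinery is not shown.

# Sketch of the EVE log path resolution used by the logrotate template above.
# Only the path expression is reproduced; the logrotate body is omitted.
from jinja2 import Template

path_expr = Template(
    '{% for filename in [(log.eve.filename | default("eve.json"))] %}'
    '{{ filename if filename.startswith("/") else ("/var/log/suricata/" + filename) }}'
    '{% endfor %}'
)

print(path_expr.render(log={'eve': {'filename': 'eve.json'}}))      # /var/log/suricata/eve.json
print(path_expr.render(log={'eve': {'filename': '/tmp/eve.json'}})) # /tmp/eve.json
print(path_expr.render(log={'eve': {}}))                            # defaults to /var/log/suricata/eve.json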
diff --git a/data/templates/ipsec/ios_profile.j2 b/data/templates/ipsec/ios_profile.j2
index eb74924b8..a9ae1c7a9 100644
--- a/data/templates/ipsec/ios_profile.j2
+++ b/data/templates/ipsec/ios_profile.j2
@@ -83,12 +83,15 @@
</dict>
</dict>
</dict>
+{% if certs is vyos_defined %}
<!-- This payload is optional but it provides an easy way to install the CA certificate together with the configuration -->
+{% for cert in certs %}
+ <!-- Payload for: {{ cert.ca_cn }} -->
<dict>
<key>PayloadIdentifier</key>
- <string>org.example.ca</string>
+ <string>org.{{ cert.ca_cn | lower | replace(' ', '.') | replace('_', '.') }}</string>
<key>PayloadUUID</key>
- <string>{{ '' | get_uuid }}</string>
+ <string>{{ cert.ca_cn | generate_uuid4 }}</string>
<key>PayloadType</key>
<string>com.apple.security.root</string>
<key>PayloadVersion</key>
@@ -96,9 +99,11 @@
<!-- This is the Base64 (PEM) encoded CA certificate -->
<key>PayloadContent</key>
<data>
- {{ ca_cert }}
+ {{ cert.ca_cert }}
</data>
</dict>
+{% endfor %}
+{% endif %}
</array>
</dict>
</plist>
diff --git a/data/templates/load-balancing/haproxy.cfg.j2 b/data/templates/load-balancing/haproxy.cfg.j2
index 7917c8257..c6027e09b 100644
--- a/data/templates/load-balancing/haproxy.cfg.j2
+++ b/data/templates/load-balancing/haproxy.cfg.j2
@@ -62,7 +62,7 @@ frontend {{ front }}
bind {{ address | bracketize_ipv6 }}:{{ front_config.port }} {{ ssl_directive }} {{ ssl_front | join(' ') }}
{% endfor %}
{% else %}
- bind :::{{ front_config.port }} v4v6 {{ ssl_directive }} {{ ssl_front | join(' ') }}
+ bind [::]:{{ front_config.port }} v4v6 {{ ssl_directive }} {{ ssl_front | join(' ') }}
{% endif %}
{% if front_config.redirect_http_to_https is vyos_defined %}
http-request redirect scheme https unless { ssl_fc }
@@ -81,6 +81,11 @@ frontend {{ front }}
{% endif %}
{% endfor %}
{% endif %}
+{% if front_config.http_response_headers is vyos_defined %}
+{% for header, header_config in front_config.http_response_headers.items() %}
+ http-response set-header {{ header }} '{{ header_config['value'] }}'
+{% endfor %}
+{% endif %}
{% endif %}
{% if front_config.rule is vyos_defined %}
{% for rule, rule_config in front_config.rule.items() %}
@@ -126,6 +131,13 @@ frontend {{ front }}
{% if backend is vyos_defined %}
{% for back, back_config in backend.items() %}
backend {{ back }}
+{% if back_config.health_check is vyos_defined %}
+{% if back_config.health_check == 'smtp' %}
+ option smtpchk
+{% else %}
+ option {{ back_config.health_check }}-check
+{% endif %}
+{% endif %}
{% if back_config.http_check is vyos_defined %}
option httpchk
{% endif %}
@@ -158,6 +170,11 @@ backend {{ back }}
{% endif %}
{% if back_config.mode is vyos_defined %}
mode {{ back_config.mode }}
+{% if back_config.http_response_headers is vyos_defined %}
+{% for header, header_config in back_config.http_response_headers.items() %}
+ http-response set-header {{ header }} '{{ header_config['value'] }}'
+{% endfor %}
+{% endif %}
{% endif %}
{% if back_config.rule is vyos_defined %}
{% for rule, rule_config in back_config.rule.items() %}
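The health-check branch added above maps the VyOS value onto HAProxy's per-protocol check options, with smtp as the one irregular case (the HAProxy keyword is "option smtpchk" rather than "option smtp-check"). A small sketch of the same mapping, for illustration only:

# Sketch of the health-check -> HAProxy directive mapping in the template above.
def health_check_directive(check: str) -> str:
    # 'smtp' is special-cased because the HAProxy keyword is 'option smtpchk'
    return 'option smtpchk' if check == 'smtp' else f'option {check}-check'

for check in ('ldap', 'mysql', 'pgsql', 'redis', 'smtp'):
    print(health_check_directive(check))
# option ldap-check / mysql-check / pgsql-check / redis-check / smtpchk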
diff --git a/data/templates/login/default_motd.j2 b/data/templates/login/default_motd.j2
index 543c6f8e0..0d52092f8 100644
--- a/data/templates/login/default_motd.j2
+++ b/data/templates/login/default_motd.j2
@@ -4,6 +4,9 @@ Welcome to VyOS!
. VyOS {{ version_data.version }}
└ ──┘ {{ version_data.release_train }}
+{% if version_data.lts_build %}
+ * Support portal: {{ version_data.support_url }}
+{% endif %}
* Documentation: {{ version_data.documentation_url }}
* Project news: {{ version_data.project_news_url }}
* Bug reports: {{ version_data.bugtracker_url }}
diff --git a/debian/control b/debian/control
index 594e9e8d8..2e99bdc28 100644
--- a/debian/control
+++ b/debian/control
@@ -164,6 +164,8 @@ Depends:
# End "service dns dynamic"
# # For "service ids"
fastnetmon [amd64],
+ suricata,
+ suricata-update,
# End "service ids"
# # For "service ndp-proxy"
ndppd,
@@ -196,9 +198,6 @@ Depends:
snmp,
snmpd,
# End "service snmp"
-# For "service upnp"
- miniupnpd-nftables,
-# End "service upnp"
# For "service webproxy"
squid,
squidclient,
diff --git a/debian/rules b/debian/rules
index d007089a4..9da40465f 100755
--- a/debian/rules
+++ b/debian/rules
@@ -11,6 +11,7 @@ VYOS_MIBS_DIR := usr/share/snmp/mibs
VYOS_LOCALUI_DIR := srv/localui
MIGRATION_SCRIPTS_DIR := opt/vyatta/etc/config-migrate/migrate
+ACTIVATION_SCRIPTS_DIR := usr/libexec/vyos/activate
SYSTEM_SCRIPTS_DIR := usr/libexec/vyos/system
SERVICES_DIR := usr/libexec/vyos/services
@@ -67,6 +68,10 @@ override_dh_auto_install:
mkdir -p $(DIR)/$(MIGRATION_SCRIPTS_DIR)
cp -r src/migration-scripts/* $(DIR)/$(MIGRATION_SCRIPTS_DIR)
+ # Install activation scripts
+ mkdir -p $(DIR)/$(ACTIVATION_SCRIPTS_DIR)
+ cp -r src/activation-scripts/* $(DIR)/$(ACTIVATION_SCRIPTS_DIR)
+
# Install system scripts
mkdir -p $(DIR)/$(SYSTEM_SCRIPTS_DIR)
cp -r src/system/* $(DIR)/$(SYSTEM_SCRIPTS_DIR)
diff --git a/debian/vyos-1x.install b/debian/vyos-1x.install
index 9e43669be..b3978d38a 100644
--- a/debian/vyos-1x.install
+++ b/debian/vyos-1x.install
@@ -28,6 +28,7 @@ usr/bin/vyos-config-to-commands
usr/bin/vyos-config-to-json
usr/bin/vyos-hostsd-client
usr/lib
+usr/libexec/vyos/activate
usr/libexec/vyos/completion
usr/libexec/vyos/conf_mode
usr/libexec/vyos/init
diff --git a/interface-definitions/container.xml.in b/interface-definitions/container.xml.in
index e7dacea36..1ad7215e5 100644
--- a/interface-definitions/container.xml.in
+++ b/interface-definitions/container.xml.in
@@ -15,9 +15,15 @@
<constraintErrorMessage>Container name must be alphanumeric and can contain hyphens</constraintErrorMessage>
</properties>
<children>
+ <leafNode name="allow-host-pid">
+ <properties>
+ <help>Allow sharing host process namespace with container</help>
+ <valueless/>
+ </properties>
+ </leafNode>
<leafNode name="allow-host-networks">
<properties>
- <help>Allow host networks in container</help>
+ <help>Allow sharing host networking with container</help>
<valueless/>
</properties>
</leafNode>
@@ -186,6 +192,24 @@
</leafNode>
</children>
</tagNode>
+ <leafNode name="cpu-quota">
+ <properties>
+ <help>This limits the number of CPU resources the container can use</help>
+ <valueHelp>
+ <format>u32:0</format>
+ <description>Unlimited</description>
+ </valueHelp>
+ <valueHelp>
+ <format>txt</format>
+ <description>Amount of CPU time the container can use in amount of cores (up to three decimals)</description>
+ </valueHelp>
+ <constraint>
+ <regex>(0|[1-9]\d*)(\.\d{1,3})?</regex>
+ </constraint>
+ <constraintErrorMessage>Container CPU limit must be a (decimal) number in range 0 to number of threads</constraintErrorMessage>
+ </properties>
+ <defaultValue>0</defaultValue>
+ </leafNode>
<leafNode name="memory">
<properties>
<help>Memory (RAM) available to this container</help>
diff --git a/interface-definitions/include/accel-ppp/log.xml.i b/interface-definitions/include/accel-ppp/log.xml.i
new file mode 100644
index 000000000..96ce93ff9
--- /dev/null
+++ b/interface-definitions/include/accel-ppp/log.xml.i
@@ -0,0 +1,42 @@
+<!-- include start from accel-ppp/log.xml.i -->
+<node name="log">
+ <properties>
+ <help>Server logging </help>
+ </properties>
+ <children>
+ <leafNode name="level">
+ <properties>
+ <help>Specifies log level</help>
+ <valueHelp>
+ <format>0</format>
+ <description>Turn off logging</description>
+ </valueHelp>
+ <valueHelp>
+ <format>1</format>
+ <description>Log only error messages</description>
+ </valueHelp>
+ <valueHelp>
+ <format>2</format>
+ <description>Log error and warning messages</description>
+ </valueHelp>
+ <valueHelp>
+ <format>3</format>
+ <description>Log error, warning and minimum information messages</description>
+ </valueHelp>
+ <valueHelp>
+ <format>4</format>
+ <description>Log error, warning and full information messages</description>
+ </valueHelp>
+ <valueHelp>
+ <format>5</format>
+ <description>Log all messages including debug messages</description>
+ </valueHelp>
+ <constraint>
+ <validator name="numeric" argument="--range 0-5"/>
+ </constraint>
+ </properties>
+ <defaultValue>3</defaultValue>
+ </leafNode>
+ </children>
+</node>
+<!-- include end -->
diff --git a/interface-definitions/include/accel-ppp/radius-additions.xml.i b/interface-definitions/include/accel-ppp/radius-additions.xml.i
index 3c2eb09eb..5222ba864 100644
--- a/interface-definitions/include/accel-ppp/radius-additions.xml.i
+++ b/interface-definitions/include/accel-ppp/radius-additions.xml.i
@@ -57,6 +57,13 @@
</properties>
<defaultValue>0</defaultValue>
</leafNode>
+ #include <include/radius-priority.xml.i>
+ <leafNode name="backup">
+ <properties>
+ <help>Use backup server if other servers are not available</help>
+ <valueless/>
+ </properties>
+ </leafNode>
</children>
</tagNode>
<leafNode name="timeout">
diff --git a/interface-definitions/include/haproxy/http-response-headers.xml.i b/interface-definitions/include/haproxy/http-response-headers.xml.i
new file mode 100644
index 000000000..9e7ddfd28
--- /dev/null
+++ b/interface-definitions/include/haproxy/http-response-headers.xml.i
@@ -0,0 +1,29 @@
+<!-- include start from haproxy/http-response-headers.xml.i -->
+<tagNode name="http-response-headers">
+ <properties>
+ <help>Headers to include in HTTP response</help>
+ <valueHelp>
+ <format>txt</format>
+ <description>HTTP header name</description>
+ </valueHelp>
+ <constraint>
+ <regex>[-a-zA-Z]+</regex>
+ </constraint>
+ <constraintErrorMessage>Header names must only include alphabetical characters and hyphens</constraintErrorMessage>
+ </properties>
+ <children>
+ <leafNode name="value">
+ <properties>
+ <help>HTTP header value</help>
+ <valueHelp>
+ <format>txt</format>
+ <description>HTTP header value</description>
+ </valueHelp>
+ <constraint>
+ <regex>[[:ascii:]]{1,256}</regex>
+ </constraint>
+ </properties>
+ </leafNode>
+ </children>
+</tagNode>
+<!-- include end -->
diff --git a/interface-definitions/include/haproxy/timeout.xml.i b/interface-definitions/include/haproxy/timeout.xml.i
index 250b35683..79e7303b1 100644
--- a/interface-definitions/include/haproxy/timeout.xml.i
+++ b/interface-definitions/include/haproxy/timeout.xml.i
@@ -1,7 +1,7 @@
<!-- include start from haproxy/timeout.xml.i -->
<node name="timeout">
<properties>
- <help>Tiemout options</help>
+ <help>Timeout options</help>
</properties>
<children>
<leafNode name="check">
diff --git a/interface-definitions/include/isis/protocol-common-config.xml.i b/interface-definitions/include/isis/protocol-common-config.xml.i
index 404f03cb5..0e79ca5f2 100644
--- a/interface-definitions/include/isis/protocol-common-config.xml.i
+++ b/interface-definitions/include/isis/protocol-common-config.xml.i
@@ -165,6 +165,41 @@
</properties>
</leafNode>
#include <include/isis/ldp-sync-protocol.xml.i>
+<leafNode name="topology">
+ <properties>
+ <help>Configure IS-IS topologies</help>
+ <completionHelp>
+ <list>ipv4-multicast ipv4-mgmt ipv6-unicast ipv6-multicast ipv6-mgmt ipv6-dstsrc</list>
+ </completionHelp>
+ <valueHelp>
+ <format>ipv4-multicast</format>
+ <description>Use IPv4 multicast topology</description>
+ </valueHelp>
+ <valueHelp>
+ <format>ipv4-mgmt</format>
+ <description>Use IPv4 management topology</description>
+ </valueHelp>
+ <valueHelp>
+ <format>ipv6-unicast</format>
+ <description>Use IPv6 unicast topology</description>
+ </valueHelp>
+ <valueHelp>
+ <format>ipv6-multicast</format>
+ <description>Use IPv6 multicast topology</description>
+ </valueHelp>
+ <valueHelp>
+ <format>ipv6-mgmt</format>
+ <description>Use IPv6 management topology</description>
+ </valueHelp>
+ <valueHelp>
+ <format>ipv6-dstsrc</format>
+ <description>Use IPv6 dst-src topology</description>
+ </valueHelp>
+ <constraint>
+ <regex>(ipv4-multicast|ipv4-mgmt|ipv6-unicast|ipv6-multicast|ipv6-mgmt|ipv6-dstsrc)</regex>
+ </constraint>
+ </properties>
+</leafNode>
<node name="fast-reroute">
<properties>
<help>IS-IS fast reroute configuration</help>
diff --git a/interface-definitions/include/nat-translation-options.xml.i b/interface-definitions/include/nat-translation-options.xml.i
index 6b95de045..c8900590f 100644
--- a/interface-definitions/include/nat-translation-options.xml.i
+++ b/interface-definitions/include/nat-translation-options.xml.i
@@ -28,22 +28,18 @@
<properties>
<help>Port mapping options</help>
<completionHelp>
- <list>random fully-random none</list>
+ <list>random none</list>
</completionHelp>
<valueHelp>
<format>random</format>
<description>Randomize source port mapping</description>
</valueHelp>
<valueHelp>
- <format>fully-random</format>
- <description>Full port randomization</description>
- </valueHelp>
- <valueHelp>
<format>none</format>
<description>Do not apply port randomization</description>
</valueHelp>
<constraint>
- <regex>(random|fully-random|none)</regex>
+ <regex>(random|none)</regex>
</constraint>
</properties>
<defaultValue>none</defaultValue>
diff --git a/interface-definitions/include/qos/class-match-group.xml.i b/interface-definitions/include/qos/class-match-group.xml.i
new file mode 100644
index 000000000..40e3b7259
--- /dev/null
+++ b/interface-definitions/include/qos/class-match-group.xml.i
@@ -0,0 +1,15 @@
+<!-- include start from qos/class-match-group.xml.i -->
+<leafNode name="match-group">
+ <properties>
+ <help>Filter group for QoS policy</help>
+ <valueHelp>
+ <format>txt</format>
+ <description>Match group name</description>
+ </valueHelp>
+ <completionHelp>
+ <script>${vyos_completion_dir}/qos/list_traffic_match_group.py</script>
+ </completionHelp>
+ <multi/>
+ </properties>
+</leafNode>
+<!-- include end -->
diff --git a/interface-definitions/include/qos/class-match-ipv4.xml.i b/interface-definitions/include/qos/class-match-ipv4.xml.i
new file mode 100644
index 000000000..dc44d32d5
--- /dev/null
+++ b/interface-definitions/include/qos/class-match-ipv4.xml.i
@@ -0,0 +1,31 @@
+<!-- include start from qos/class-match-ipv4.xml.i -->
+<node name="ip">
+ <properties>
+ <help>Match IP protocol header</help>
+ </properties>
+ <children>
+ <node name="destination">
+ <properties>
+ <help>Match on destination port or address</help>
+ </properties>
+ <children>
+ #include <include/qos/class-match-ipv4-address.xml.i>
+ #include <include/port-number.xml.i>
+ </children>
+ </node>
+ #include <include/qos/match-dscp.xml.i>
+ #include <include/qos/max-length.xml.i>
+ #include <include/ip-protocol.xml.i>
+ <node name="source">
+ <properties>
+ <help>Match on source port or address</help>
+ </properties>
+ <children>
+ #include <include/qos/class-match-ipv4-address.xml.i>
+ #include <include/port-number.xml.i>
+ </children>
+ </node>
+ #include <include/qos/tcp-flags.xml.i>
+ </children>
+</node>
+<!-- include end -->
diff --git a/interface-definitions/include/qos/class-match-ipv6.xml.i b/interface-definitions/include/qos/class-match-ipv6.xml.i
new file mode 100644
index 000000000..ed7aceff9
--- /dev/null
+++ b/interface-definitions/include/qos/class-match-ipv6.xml.i
@@ -0,0 +1,31 @@
+<!-- include start from qos/class-match-ipv6.xml.i -->
+<node name="ipv6">
+ <properties>
+ <help>Match IPv6 protocol header</help>
+ </properties>
+ <children>
+ <node name="destination">
+ <properties>
+ <help>Match on destination port or address</help>
+ </properties>
+ <children>
+ #include <include/qos/class-match-ipv6-address.xml.i>
+ #include <include/port-number.xml.i>
+ </children>
+ </node>
+ #include <include/qos/match-dscp.xml.i>
+ #include <include/qos/max-length.xml.i>
+ #include <include/ip-protocol.xml.i>
+ <node name="source">
+ <properties>
+ <help>Match on source port or address</help>
+ </properties>
+ <children>
+ #include <include/qos/class-match-ipv6-address.xml.i>
+ #include <include/port-number.xml.i>
+ </children>
+ </node>
+ #include <include/qos/tcp-flags.xml.i>
+ </children>
+</node>
+<!-- include end -->
diff --git a/interface-definitions/include/qos/class-match-mark.xml.i b/interface-definitions/include/qos/class-match-mark.xml.i
new file mode 100644
index 000000000..a7481c6aa
--- /dev/null
+++ b/interface-definitions/include/qos/class-match-mark.xml.i
@@ -0,0 +1,14 @@
+<!-- include start from qos/class-match-mark.xml.i -->
+<leafNode name="mark">
+ <properties>
+ <help>Match on mark applied by firewall</help>
+ <valueHelp>
+ <format>u32</format>
+ <description>FW mark to match</description>
+ </valueHelp>
+ <constraint>
+ <validator name="numeric" argument="--range 0-4294967295"/>
+ </constraint>
+ </properties>
+</leafNode>
+<!-- include end -->
diff --git a/interface-definitions/include/qos/class-match-vif.xml.i b/interface-definitions/include/qos/class-match-vif.xml.i
new file mode 100644
index 000000000..ec58db606
--- /dev/null
+++ b/interface-definitions/include/qos/class-match-vif.xml.i
@@ -0,0 +1,15 @@
+<!-- include start from qos/class-match-vif.xml.i -->
+<leafNode name="vif">
+ <properties>
+ <help>Virtual Local Area Network (VLAN) ID for this match</help>
+ <valueHelp>
+ <format>u32:0-4095</format>
+ <description>Virtual Local Area Network (VLAN) tag </description>
+ </valueHelp>
+ <constraint>
+ <validator name="numeric" argument="--range 0-4095"/>
+ </constraint>
+ <constraintErrorMessage>VLAN ID must be between 0 and 4095</constraintErrorMessage>
+ </properties>
+</leafNode>
+<!-- include end -->
diff --git a/interface-definitions/include/qos/class-match.xml.i b/interface-definitions/include/qos/class-match.xml.i
index 4ba12f8f7..77d1933a3 100644
--- a/interface-definitions/include/qos/class-match.xml.i
+++ b/interface-definitions/include/qos/class-match.xml.i
@@ -5,7 +5,7 @@
<constraint>
<regex>[^-].*</regex>
</constraint>
- <constraintErrorMessage>Match queue name cannot start with hyphen (-)</constraintErrorMessage>
+ <constraintErrorMessage>Match queue name cannot start with hyphen</constraintErrorMessage>
</properties>
<children>
#include <include/generic-description.xml.i>
@@ -89,89 +89,10 @@
</children>
</node>
#include <include/generic-interface.xml.i>
- <node name="ip">
- <properties>
- <help>Match IP protocol header</help>
- </properties>
- <children>
- <node name="destination">
- <properties>
- <help>Match on destination port or address</help>
- </properties>
- <children>
- #include <include/qos/class-match-ipv4-address.xml.i>
- #include <include/port-number.xml.i>
- </children>
- </node>
- #include <include/qos/match-dscp.xml.i>
- #include <include/qos/max-length.xml.i>
- #include <include/ip-protocol.xml.i>
- <node name="source">
- <properties>
- <help>Match on source port or address</help>
- </properties>
- <children>
- #include <include/qos/class-match-ipv4-address.xml.i>
- #include <include/port-number.xml.i>
- </children>
- </node>
- #include <include/qos/tcp-flags.xml.i>
- </children>
- </node>
- <node name="ipv6">
- <properties>
- <help>Match IPv6 protocol header</help>
- </properties>
- <children>
- <node name="destination">
- <properties>
- <help>Match on destination port or address</help>
- </properties>
- <children>
- #include <include/qos/class-match-ipv6-address.xml.i>
- #include <include/port-number.xml.i>
- </children>
- </node>
- #include <include/qos/match-dscp.xml.i>
- #include <include/qos/max-length.xml.i>
- #include <include/ip-protocol.xml.i>
- <node name="source">
- <properties>
- <help>Match on source port or address</help>
- </properties>
- <children>
- #include <include/qos/class-match-ipv6-address.xml.i>
- #include <include/port-number.xml.i>
- </children>
- </node>
- #include <include/qos/tcp-flags.xml.i>
- </children>
- </node>
- <leafNode name="mark">
- <properties>
- <help>Match on mark applied by firewall</help>
- <valueHelp>
- <format>u32</format>
- <description>FW mark to match</description>
- </valueHelp>
- <constraint>
- <validator name="numeric" argument="--range 0-4294967295"/>
- </constraint>
- </properties>
- </leafNode>
- <leafNode name="vif">
- <properties>
- <help>Virtual Local Area Network (VLAN) ID for this match</help>
- <valueHelp>
- <format>u32:0-4095</format>
- <description>Virtual Local Area Network (VLAN) tag </description>
- </valueHelp>
- <constraint>
- <validator name="numeric" argument="--range 0-4095"/>
- </constraint>
- <constraintErrorMessage>VLAN ID must be between 0 and 4095</constraintErrorMessage>
- </properties>
- </leafNode>
+ #include <include/qos/class-match-ipv4.xml.i>
+ #include <include/qos/class-match-ipv6.xml.i>
+ #include <include/qos/class-match-mark.xml.i>
+ #include <include/qos/class-match-vif.xml.i>
</children>
</tagNode>
<!-- include end -->
diff --git a/interface-definitions/include/radius-priority.xml.i b/interface-definitions/include/radius-priority.xml.i
new file mode 100644
index 000000000..f77f5016e
--- /dev/null
+++ b/interface-definitions/include/radius-priority.xml.i
@@ -0,0 +1,14 @@
+<!-- include start from radius-priority.xml.i -->
+<leafNode name="priority">
+ <properties>
+ <help>Server priority</help>
+ <valueHelp>
+ <format>u32:1-255</format>
+ <description>Server priority</description>
+ </valueHelp>
+ <constraint>
+ <validator name="numeric" argument="--range 1-255"/>
+ </constraint>
+ </properties>
+</leafNode>
+<!-- include end -->
diff --git a/interface-definitions/include/version/nat-version.xml.i b/interface-definitions/include/version/nat-version.xml.i
index 656da6e14..173e91ed3 100644
--- a/interface-definitions/include/version/nat-version.xml.i
+++ b/interface-definitions/include/version/nat-version.xml.i
@@ -1,3 +1,3 @@
<!-- include start from include/version/nat-version.xml.i -->
-<syntaxVersion component='nat' version='7'></syntaxVersion>
+<syntaxVersion component='nat' version='8'></syntaxVersion>
<!-- include end -->
diff --git a/interface-definitions/include/version/reverseproxy-version.xml.i b/interface-definitions/include/version/reverseproxy-version.xml.i
new file mode 100644
index 000000000..907ea1e5e
--- /dev/null
+++ b/interface-definitions/include/version/reverseproxy-version.xml.i
@@ -0,0 +1,3 @@
+<!-- include start from include/version/reverseproxy-version.xml.i -->
+<syntaxVersion component='reverse-proxy' version='1'></syntaxVersion>
+<!-- include end -->
diff --git a/interface-definitions/load-balancing_reverse-proxy.xml.in b/interface-definitions/load-balancing_reverse-proxy.xml.in
index 6a3b3cef1..ce757a5d6 100644
--- a/interface-definitions/load-balancing_reverse-proxy.xml.in
+++ b/interface-definitions/load-balancing_reverse-proxy.xml.in
@@ -39,6 +39,7 @@
#include <include/port-number.xml.i>
#include <include/haproxy/rule-frontend.xml.i>
#include <include/haproxy/tcp-request.xml.i>
+ #include <include/haproxy/http-response-headers.xml.i>
<leafNode name="redirect-http-to-https">
<properties>
<help>Redirect HTTP to HTTPS</help>
@@ -90,19 +91,7 @@
</leafNode>
#include <include/generic-description.xml.i>
#include <include/haproxy/mode.xml.i>
- <node name="parameters">
- <properties>
- <help>Backend parameters</help>
- </properties>
- <children>
- <leafNode name="http-check">
- <properties>
- <help>HTTP health check</help>
- <valueless/>
- </properties>
- </leafNode>
- </children>
- </node>
+ #include <include/haproxy/http-response-headers.xml.i>
<node name="http-check">
<properties>
<help>HTTP check configuration</help>
@@ -162,6 +151,37 @@
</node>
</children>
</node>
+ <leafNode name="health-check">
+ <properties>
+ <help>Non HTTP health check options</help>
+ <completionHelp>
+ <list>ldap mysql pgsql redis smtp</list>
+ </completionHelp>
+ <valueHelp>
+ <format>ldap</format>
+ <description>LDAP protocol check</description>
+ </valueHelp>
+ <valueHelp>
+ <format>mysql</format>
+ <description>MySQL protocol check</description>
+ </valueHelp>
+ <valueHelp>
+ <format>pgsql</format>
+ <description>PostgreSQL protocol check</description>
+ </valueHelp>
+ <valueHelp>
+ <format>redis</format>
+ <description>Redis protocol check</description>
+ </valueHelp>
+ <valueHelp>
+ <format>smtp</format>
+ <description>SMTP protocol check</description>
+ </valueHelp>
+ <constraint>
+ <regex>(ldap|mysql|redis|pgsql|smtp)</regex>
+ </constraint>
+ </properties>
+ </leafNode>
#include <include/haproxy/rule-backend.xml.i>
<tagNode name="server">
<properties>
diff --git a/interface-definitions/nat.xml.in b/interface-definitions/nat.xml.in
index 0a639bd80..73a748137 100644
--- a/interface-definitions/nat.xml.in
+++ b/interface-definitions/nat.xml.in
@@ -141,6 +141,7 @@
</children>
</node>
#include <include/inbound-interface.xml.i>
+ #include <include/firewall/log.xml.i>
<node name="translation">
<properties>
<help>Translation address or prefix</help>
diff --git a/interface-definitions/nat_cgnat.xml.in b/interface-definitions/nat_cgnat.xml.in
index caa26b4d9..fce5e655d 100644
--- a/interface-definitions/nat_cgnat.xml.in
+++ b/interface-definitions/nat_cgnat.xml.in
@@ -123,6 +123,7 @@
<validator name="ipv4-host"/>
<validator name="ipv4-range"/>
</constraint>
+ <multi/>
</properties>
</leafNode>
</children>
diff --git a/interface-definitions/policy.xml.in b/interface-definitions/policy.xml.in
index 791fa1d87..eb907cb9e 100644
--- a/interface-definitions/policy.xml.in
+++ b/interface-definitions/policy.xml.in
@@ -1546,11 +1546,11 @@
<properties>
<help>Set prefixes to table</help>
<valueHelp>
- <format>u32:1-200</format>
+ <format>u32:1-4294967295</format>
<description>Table value</description>
</valueHelp>
<constraint>
- <validator name="numeric" argument="--range 1-200"/>
+ <validator name="numeric" argument="--range 1-4294967295"/>
</constraint>
</properties>
</leafNode>
diff --git a/interface-definitions/qos.xml.in b/interface-definitions/qos.xml.in
index 8f9ae3fa6..927594c11 100644
--- a/interface-definitions/qos.xml.in
+++ b/interface-definitions/qos.xml.in
@@ -281,6 +281,7 @@
#include <include/qos/mtu.xml.i>
#include <include/qos/class-police-exceed.xml.i>
#include <include/qos/class-match.xml.i>
+ #include <include/qos/class-match-group.xml.i>
#include <include/qos/class-priority.xml.i>
<leafNode name="priority">
<defaultValue>20</defaultValue>
@@ -415,6 +416,7 @@
#include <include/qos/flows.xml.i>
#include <include/qos/interval.xml.i>
#include <include/qos/class-match.xml.i>
+ #include <include/qos/class-match-group.xml.i>
#include <include/qos/queue-limit-1-4294967295.xml.i>
#include <include/qos/queue-type.xml.i>
<leafNode name="queue-type">
@@ -542,6 +544,8 @@
#include <include/qos/flows.xml.i>
#include <include/qos/interval.xml.i>
#include <include/qos/class-match.xml.i>
+ #include <include/qos/class-match-group.xml.i>
+
<leafNode name="quantum">
<properties>
<help>Packet scheduling quantum</help>
@@ -645,6 +649,7 @@
#include <include/qos/flows.xml.i>
#include <include/qos/interval.xml.i>
#include <include/qos/class-match.xml.i>
+ #include <include/qos/class-match-group.xml.i>
#include <include/qos/class-priority.xml.i>
#include <include/qos/queue-average-packet.xml.i>
#include <include/qos/queue-maximum-threshold.xml.i>
@@ -767,6 +772,7 @@
</children>
</node>
#include <include/qos/class-match.xml.i>
+ #include <include/qos/class-match-group.xml.i>
<node name="realtime">
<properties>
<help>Realtime class settings</help>
@@ -830,6 +836,39 @@
</tagNode>
</children>
</node>
+ <tagNode name="traffic-match-group">
+ <properties>
+ <help>Filter group for QoS policy</help>
+ <valueHelp>
+ <format>txt</format>
+ <description>Match group name</description>
+ </valueHelp>
+ <constraint>
+ <regex>[^-].*</regex>
+ </constraint>
+ <constraintErrorMessage>Match group name cannot start with hyphen</constraintErrorMessage>
+ </properties>
+ <children>
+ #include <include/generic-description.xml.i>
+ <tagNode name="match">
+ <properties>
+ <help>Class matching rule name</help>
+ <constraint>
+ <regex>[^-].*</regex>
+ </constraint>
+ <constraintErrorMessage>Match queue name cannot start with hyphen</constraintErrorMessage>
+ </properties>
+ <children>
+ #include <include/generic-description.xml.i>
+ #include <include/qos/class-match-ipv4.xml.i>
+ #include <include/qos/class-match-ipv6.xml.i>
+ #include <include/qos/class-match-mark.xml.i>
+ #include <include/qos/class-match-vif.xml.i>
+ </children>
+ </tagNode>
+ #include <include/qos/class-match-group.xml.i>
+ </children>
+ </tagNode>
</children>
</node>
</interfaceDefinition>
diff --git a/interface-definitions/service_dns_forwarding.xml.in b/interface-definitions/service_dns_forwarding.xml.in
index b52b4bda3..5667028b7 100644
--- a/interface-definitions/service_dns_forwarding.xml.in
+++ b/interface-definitions/service_dns_forwarding.xml.in
@@ -311,6 +311,7 @@
<constraint>
<regex>[-_a-zA-Z0-9.]{1,63}(?&lt;!\.)</regex>
</constraint>
+ <multi/>
</properties>
</leafNode>
#include <include/dns/time-to-live.xml.i>
diff --git a/interface-definitions/service_ipoe-server.xml.in b/interface-definitions/service_ipoe-server.xml.in
index 414c9a731..c7542f0d0 100644
--- a/interface-definitions/service_ipoe-server.xml.in
+++ b/interface-definitions/service_ipoe-server.xml.in
@@ -189,6 +189,7 @@
#include <include/accel-ppp/snmp.xml.i>
#include <include/generic-description.xml.i>
#include <include/name-server-ipv4-ipv6.xml.i>
+ #include <include/accel-ppp/log.xml.i>
</children>
</node>
</children>
diff --git a/interface-definitions/service_pppoe-server.xml.in b/interface-definitions/service_pppoe-server.xml.in
index 5d357c2f9..81228938f 100644
--- a/interface-definitions/service_pppoe-server.xml.in
+++ b/interface-definitions/service_pppoe-server.xml.in
@@ -153,6 +153,7 @@
#include <include/accel-ppp/wins-server.xml.i>
#include <include/generic-description.xml.i>
#include <include/name-server-ipv4-ipv6.xml.i>
+ #include <include/accel-ppp/log.xml.i>
</children>
</node>
</children>
diff --git a/interface-definitions/service_suricata.xml.in b/interface-definitions/service_suricata.xml.in
new file mode 100644
index 000000000..e0159e2ba
--- /dev/null
+++ b/interface-definitions/service_suricata.xml.in
@@ -0,0 +1,238 @@
+<?xml version="1.0"?>
+<interfaceDefinition>
+ <node name="service">
+ <children>
+ <node name="suricata" owner="${vyos_conf_scripts_dir}/service_suricata.py">
+ <properties>
+ <help>Network IDS, IPS and Security Monitoring</help>
+ <priority>740</priority>
+ </properties>
+ <children>
+ #include <include/generic-interface-multi.xml.i>
+ <tagNode name="address-group">
+ <properties>
+ <help>Address group name</help>
+ <constraint>
+ <regex>[a-z0-9-]+</regex>
+ </constraint>
+ </properties>
+ <children>
+ <leafNode name="address">
+ <properties>
+ <help>IP address or subnet</help>
+ <valueHelp>
+ <format>ipv4</format>
+ <description>IPv4 address to match</description>
+ </valueHelp>
+ <valueHelp>
+ <format>ipv6</format>
+ <description>IPv6 address to match</description>
+ </valueHelp>
+ <valueHelp>
+ <format>ipv4net</format>
+ <description>IPv4 prefix to match</description>
+ </valueHelp>
+ <valueHelp>
+ <format>ipv6net</format>
+ <description>IPv6 prefix to match</description>
+ </valueHelp>
+ <valueHelp>
+ <format>!ipv4</format>
+ <description>Exclude the specified IPv4 address from matches</description>
+ </valueHelp>
+ <valueHelp>
+ <format>!ipv6</format>
+ <description>Exclude the specified IPv6 address from matches</description>
+ </valueHelp>
+ <valueHelp>
+ <format>!ipv4net</format>
+ <description>Exclude the specified IPv6 prefix from matches</description>
+ </valueHelp>
+ <valueHelp>
+ <format>!ipv6net</format>
+ <description>Exclude the specified IPv6 prefix from matches</description>
+ </valueHelp>
+ <constraint>
+ <validator name="ipv4-address"/>
+ <validator name="ipv6-address"/>
+ <validator name="ipv4-prefix"/>
+ <validator name="ipv6-prefix"/>
+ <validator name="ipv4-address-exclude"/>
+ <validator name="ipv6-address-exclude"/>
+ <validator name="ipv4-prefix-exclude"/>
+ <validator name="ipv6-prefix-exclude"/>
+ </constraint>
+ <multi/>
+ </properties>
+ </leafNode>
+ <leafNode name="group">
+ <properties>
+ <help>Address group</help>
+ <completionHelp>
+ <path>service ids suricata address-group</path>
+ </completionHelp>
+ <valueHelp>
+ <format>txt</format>
+ <description>Address group to match</description>
+ </valueHelp>
+ <valueHelp>
+ <format>!txt</format>
+ <description>Exclude the specified address group from matches</description>
+ </valueHelp>
+ <constraint>
+ <regex>!?[a-z0-9-]+</regex>
+ </constraint>
+ <multi/>
+ </properties>
+ </leafNode>
+ </children>
+ </tagNode>
+ <tagNode name="port-group">
+ <properties>
+ <help>Port group name</help>
+ <constraint>
+ <regex>[a-z0-9-]+</regex>
+ </constraint>
+ </properties>
+ <children>
+ <leafNode name="port">
+ <properties>
+ <help>Port number</help>
+ <valueHelp>
+ <format>u32:1-65535</format>
+ <description>Numeric port to match</description>
+ </valueHelp>
+ <valueHelp>
+ <format>!u32:1-65535</format>
+ <description>Numeric port to exclude from matches</description>
+ </valueHelp>
+ <valueHelp>
+ <format>start-end</format>
+ <description>Numbered port range (e.g. 1001-1005) to match</description>
+ </valueHelp>
+ <valueHelp>
+ <format>!start-end</format>
+ <description>Numbered port range (e.g. !1001-1005) to exclude from matches</description>
+ </valueHelp>
+ <constraint>
+ <validator name="port-range"/>
+ <validator name="port-range-exclude"/>
+ </constraint>
+ <multi/>
+ </properties>
+ </leafNode>
+ <leafNode name="group">
+ <properties>
+ <help>Port group</help>
+ <completionHelp>
+ <path>service ids suricata port-group</path>
+ </completionHelp>
+ <valueHelp>
+ <format>txt</format>
+ <description>Port group to match</description>
+ </valueHelp>
+ <valueHelp>
+ <format>!txt</format>
+ <description>Exclude the specified port group from matches</description>
+ </valueHelp>
+ <constraint>
+ <regex>!?[a-z0-9-]+</regex>
+ </constraint>
+ <multi/>
+ </properties>
+ </leafNode>
+ </children>
+ </tagNode>
+ <node name="log">
+ <properties>
+ <help>Suricata log outputs</help>
+ </properties>
+ <children>
+ <node name="eve">
+ <properties>
+ <help>Extensible Event Format (EVE)</help>
+ </properties>
+ <children>
+ <leafNode name="filetype">
+ <properties>
+ <help>EVE logging destination</help>
+ <completionHelp>
+ <list>regular syslog</list>
+ </completionHelp>
+ <valueHelp>
+ <format>regular</format>
+ <description>Log to filename</description>
+ </valueHelp>
+ <valueHelp>
+ <format>syslog</format>
+ <description>Log to syslog</description>
+ </valueHelp>
+ <constraint>
+ <regex>(regular|syslog)</regex>
+ </constraint>
+ </properties>
+ <defaultValue>regular</defaultValue>
+ </leafNode>
+ <leafNode name="filename">
+ <properties>
+ <help>Log file</help>
+ <valueHelp>
+ <format>filename</format>
+ <description>File name in default Suricata log directory</description>
+ </valueHelp>
+ <valueHelp>
+ <format>/path</format>
+ <description>Absolute file path</description>
+ </valueHelp>
+ </properties>
+ <defaultValue>eve.json</defaultValue>
+ </leafNode>
+ <leafNode name="type">
+ <properties>
+ <help>Log types</help>
+ <completionHelp>
+ <list>alert anomaly drop files http dns tls smtp dnp3 ftp rdp nfs smb tftp ikev2 dcerpc krb5 snmp rfb sip dhcp ssh mqtt http2 flow netflow</list>
+ </completionHelp>
+ <valueHelp>
+ <format>alert</format>
+ <description>Record events for rule matches</description>
+ </valueHelp>
+ <valueHelp>
+ <format>anomaly</format>
+ <description>Record unexpected conditions such as truncated packets, packets with invalid IP/UDP/TCP length values, and other events that render the packet invalid for further processing or describe unexpected behavior on an established stream</description>
+ </valueHelp>
+ <valueHelp>
+ <format>drop</format>
+ <description>Record events for dropped packets</description>
+ </valueHelp>
+ <valueHelp>
+ <format>files</format>
+ <description>Record file details (e.g., MD5) for files extracted from application protocols (e.g., HTTP)</description>
+ </valueHelp>
+ <valueHelp>
+ <format>application (http, dns, tls, ...)</format>
+ <description>Record application-level transactions</description>
+ </valueHelp>
+ <valueHelp>
+ <format>flow</format>
+ <description>Record bi-directional flows</description>
+ </valueHelp>
+ <valueHelp>
+ <format>netflow</format>
+ <description>Record uni-directional flows</description>
+ </valueHelp>
+ <constraint>
+ <regex>(alert|anomaly|http|dns|tls|files|drop|smtp|dnp3|ftp|rdp|nfs|smb|tftp|ikev2|dcerpc|krb5|snmp|rfb|sip|dhcp|ssh|mqtt|http2|flow|netflow)</regex>
+ </constraint>
+ <multi/>
+ </properties>
+ </leafNode>
+ </children>
+ </node>
+ </children>
+ </node>
+ </children>
+ </node>
+ </children>
+ </node>
+</interfaceDefinition>
diff --git a/interface-definitions/service_upnp.xml.in b/interface-definitions/service_upnp.xml.in
deleted file mode 100644
index 064386ee5..000000000
--- a/interface-definitions/service_upnp.xml.in
+++ /dev/null
@@ -1,229 +0,0 @@
-<?xml version="1.0"?>
-<interfaceDefinition>
- <node name="service">
- <children>
- <node name="upnp" owner="${vyos_conf_scripts_dir}/service_upnp.py">
- <properties>
- <help>Universal Plug and Play (UPnP) service</help>
- <priority>900</priority>
- </properties>
- <children>
- <leafNode name="friendly-name">
- <properties>
- <help>Name of this service</help>
- <valueHelp>
- <format>txt</format>
- <description>Friendly name</description>
- </valueHelp>
- </properties>
- </leafNode>
- <leafNode name="wan-interface">
- <properties>
- <help>WAN network interface</help>
- <completionHelp>
- <script>${vyos_completion_dir}/list_interfaces</script>
- </completionHelp>
- <constraint>
- #include <include/constraint/interface-name.xml.i>
- </constraint>
- </properties>
- </leafNode>
- <leafNode name="wan-ip">
- <properties>
- <help>WAN network IP</help>
- <valueHelp>
- <format>ipv4</format>
- <description>IPv4 address</description>
- </valueHelp>
- <valueHelp>
- <format>ipv6</format>
- <description>IPv6 address</description>
- </valueHelp>
- <constraint>
- <validator name="ipv4-address" />
- <validator name="ipv6-address" />
- </constraint>
- <multi/>
- </properties>
- </leafNode>
- <leafNode name="nat-pmp">
- <properties>
- <help>Enable NAT-PMP support</help>
- <valueless />
- </properties>
- </leafNode>
- <leafNode name="secure-mode">
- <properties>
- <help>Enable Secure Mode</help>
- <valueless />
- </properties>
- </leafNode>
- <leafNode name="presentation-url">
- <properties>
- <help>Presentation Url</help>
- <valueHelp>
- <format>txt</format>
- <description>Presentation Url</description>
- </valueHelp>
- </properties>
- </leafNode>
- <node name="pcp-lifetime">
- <properties>
- <help>PCP-base lifetime Option</help>
- </properties>
- <children>
- <leafNode name="max">
- <properties>
- <help>Max lifetime time</help>
- <constraint>
- <validator name="numeric" />
- </constraint>
- </properties>
- </leafNode>
- <leafNode name="min">
- <properties>
- <help>Min lifetime time</help>
- <constraint>
- <validator name="numeric" />
- </constraint>
- </properties>
- </leafNode>
- </children>
- </node>
- <leafNode name="listen">
- <properties>
- <help>Local IP addresses for service to listen on</help>
- <completionHelp>
- <script>${vyos_completion_dir}/list_local_ips.sh --both</script>
- <script>${vyos_completion_dir}/list_interfaces</script>
- </completionHelp>
- <valueHelp>
- <format>&lt;interface&gt;</format>
- <description>Monitor interface address</description>
- </valueHelp>
- <valueHelp>
- <format>ipv4</format>
- <description>IPv4 address to listen for incoming connections</description>
- </valueHelp>
- <valueHelp>
- <format>ipv4net</format>
- <description>IPv4 prefix to listen for incoming connections</description>
- </valueHelp>
- <valueHelp>
- <format>ipv6</format>
- <description>IPv6 address to listen for incoming connections</description>
- </valueHelp>
- <valueHelp>
- <format>ipv6net</format>
- <description>IPv6 prefix to listen for incoming connections</description>
- </valueHelp>
- <multi/>
- <constraint>
- #include <include/constraint/interface-name.xml.i>
- <validator name="ip-address"/>
- <validator name="ipv4-prefix"/>
- <validator name="ipv6-prefix"/>
- </constraint>
- </properties>
- </leafNode>
- <node name="stun">
- <properties>
- <help>Enable STUN probe support (can be used with NAT 1:1 support for WAN interfaces)</help>
- </properties>
- <children>
- <leafNode name="host">
- <properties>
- <help>The STUN server address</help>
- <valueHelp>
- <format>txt</format>
- <description>The STUN server host address</description>
- </valueHelp>
- <constraint>
- <validator name="fqdn"/>
- </constraint>
- </properties>
- </leafNode>
- #include <include/port-number.xml.i>
- </children>
- </node>
- <tagNode name="rule">
- <properties>
- <help>UPnP Rule</help>
- <valueHelp>
- <format>u32:0-65535</format>
- <description>Rule number</description>
- </valueHelp>
- <constraint>
- <validator name="numeric" argument="--range 0-65535"/>
- </constraint>
- </properties>
- <children>
- #include <include/generic-disable-node.xml.i>
- <leafNode name="external-port-range">
- <properties>
- <help>Port range (REQUIRE)</help>
- <valueHelp>
- <format>&lt;port&gt;</format>
- <description>single port</description>
- </valueHelp>
- <valueHelp>
- <format>&lt;portN&gt;-&lt;portM&gt;</format>
- <description>Port range (use '-' as delimiter)</description>
- </valueHelp>
- <constraint>
- <validator name="port-range"/>
- </constraint>
- </properties>
- </leafNode>
- <leafNode name="internal-port-range">
- <properties>
- <help>Port range (REQUIRE)</help>
- <valueHelp>
- <format>&lt;port&gt;</format>
- <description>single port</description>
- </valueHelp>
- <valueHelp>
- <format>&lt;portN&gt;-&lt;portM&gt;</format>
- <description>Port range (use '-' as delimiter)</description>
- </valueHelp>
- <constraint>
- <validator name="port-range"/>
- </constraint>
- </properties>
- </leafNode>
- <leafNode name="ip">
- <properties>
- <help>The IP to which this rule applies (REQUIRE)</help>
- <valueHelp>
- <format>ipv4</format>
- <description>The IPv4 address to which this rule applies</description>
- </valueHelp>
- <valueHelp>
- <format>ipv4net</format>
- <description>The IPv4 to which this rule applies</description>
- </valueHelp>
- <constraint>
- <validator name="ipv4-address"/>
- <validator name="ipv4-host"/>
- <validator name="ipv4-prefix"/>
- </constraint>
- </properties>
- </leafNode>
- <leafNode name="action">
- <properties>
- <help>Actions against the rule (REQUIRE)</help>
- <completionHelp>
- <list>allow deny</list>
- </completionHelp>
- <constraint>
- <regex>(allow|deny)</regex>
- </constraint>
- </properties>
- </leafNode>
- </children>
- </tagNode>
- </children>
- </node>
- </children>
- </node>
-</interfaceDefinition>
diff --git a/interface-definitions/system_conntrack.xml.in b/interface-definitions/system_conntrack.xml.in
index 33aa832a8..0dfa2ea81 100644
--- a/interface-definitions/system_conntrack.xml.in
+++ b/interface-definitions/system_conntrack.xml.in
@@ -406,7 +406,7 @@
<constraint>
<validator name="numeric" argument="--range 1-999999"/>
</constraint>
- <constraintErrorMessage>Ignore rule number must be between 1 and 999999</constraintErrorMessage>
+ <constraintErrorMessage>Timeout rule number must be between 1 and 999999</constraintErrorMessage>
</properties>
<children>
#include <include/generic-description.xml.i>
@@ -421,7 +421,7 @@
</node>
<leafNode name="inbound-interface">
<properties>
- <help>Interface to ignore connections tracking on</help>
+ <help>Interface to apply custom connection timers on</help>
<completionHelp>
<list>any</list>
<script>${vyos_completion_dir}/list_interfaces</script>
@@ -464,7 +464,7 @@
<constraint>
<validator name="numeric" argument="--range 1-999999"/>
</constraint>
- <constraintErrorMessage>Ignore rule number must be between 1 and 999999</constraintErrorMessage>
+ <constraintErrorMessage>Timeout rule number must be between 1 and 999999</constraintErrorMessage>
</properties>
<children>
#include <include/generic-description.xml.i>
@@ -479,7 +479,7 @@
</node>
<leafNode name="inbound-interface">
<properties>
- <help>Interface to ignore connections tracking on</help>
+ <help>Interface to apply custom connection timers on</help>
<completionHelp>
<list>any</list>
<script>${vyos_completion_dir}/list_interfaces</script>
diff --git a/interface-definitions/system_domain-name.xml.in b/interface-definitions/system_domain-name.xml.in
index bfca9b8ce..695af29d9 100644
--- a/interface-definitions/system_domain-name.xml.in
+++ b/interface-definitions/system_domain-name.xml.in
@@ -5,6 +5,7 @@
<leafNode name="domain-name" owner="${vyos_conf_scripts_dir}/system_host-name.py">
<properties>
<help>System domain name</help>
+ <priority>6</priority>
<constraint>
<validator name="fqdn"/>
</constraint>
diff --git a/interface-definitions/system_host-name.xml.in b/interface-definitions/system_host-name.xml.in
index 423531a68..f74baab48 100644
--- a/interface-definitions/system_host-name.xml.in
+++ b/interface-definitions/system_host-name.xml.in
@@ -6,6 +6,7 @@
<leafNode name="host-name" owner="${vyos_conf_scripts_dir}/system_host-name.py">
<properties>
<help>System host name (default: vyos)</help>
+ <priority>5</priority>
<constraint>
#include <include/constraint/host-name.xml.i>
</constraint>
diff --git a/interface-definitions/system_login.xml.in b/interface-definitions/system_login.xml.in
index e94bb7219..f6c8021d3 100644
--- a/interface-definitions/system_login.xml.in
+++ b/interface-definitions/system_login.xml.in
@@ -202,17 +202,8 @@
<tagNode name="server">
<children>
#include <include/radius-timeout.xml.i>
+ #include <include/radius-priority.xml.i>
<leafNode name="priority">
- <properties>
- <help>Server priority</help>
- <valueHelp>
- <format>u32:1-255</format>
- <description>Server priority</description>
- </valueHelp>
- <constraint>
- <validator name="numeric" argument="--range 1-255"/>
- </constraint>
- </properties>
<defaultValue>255</defaultValue>
</leafNode>
</children>
diff --git a/interface-definitions/vpn_l2tp.xml.in b/interface-definitions/vpn_l2tp.xml.in
index 85a375db4..c00e82534 100644
--- a/interface-definitions/vpn_l2tp.xml.in
+++ b/interface-definitions/vpn_l2tp.xml.in
@@ -140,6 +140,7 @@
#include <include/accel-ppp/wins-server.xml.i>
#include <include/generic-description.xml.i>
#include <include/name-server-ipv4-ipv6.xml.i>
+ #include <include/accel-ppp/log.xml.i>
</children>
</node>
</children>
diff --git a/interface-definitions/vpn_pptp.xml.in b/interface-definitions/vpn_pptp.xml.in
index a63633f57..8aec0cb1c 100644
--- a/interface-definitions/vpn_pptp.xml.in
+++ b/interface-definitions/vpn_pptp.xml.in
@@ -56,6 +56,7 @@
#include <include/accel-ppp/wins-server.xml.i>
#include <include/generic-description.xml.i>
#include <include/name-server-ipv4-ipv6.xml.i>
+ #include <include/accel-ppp/log.xml.i>
</children>
</node>
</children>
diff --git a/interface-definitions/vpn_sstp.xml.in b/interface-definitions/vpn_sstp.xml.in
index d9ed1c040..5fd5c95ca 100644
--- a/interface-definitions/vpn_sstp.xml.in
+++ b/interface-definitions/vpn_sstp.xml.in
@@ -62,6 +62,7 @@
<constraintErrorMessage>Host-name must be alphanumeric and can contain hyphens</constraintErrorMessage>
</properties>
</leafNode>
+ #include <include/accel-ppp/log.xml.i>
</children>
</node>
</children>
diff --git a/interface-definitions/xml-component-version.xml.in b/interface-definitions/xml-component-version.xml.in
index 10a1be242..67d86a1d0 100644
--- a/interface-definitions/xml-component-version.xml.in
+++ b/interface-definitions/xml-component-version.xml.in
@@ -48,4 +48,5 @@
#include <include/version/vyos-accel-ppp-version.xml.i>
#include <include/version/wanloadbalance-version.xml.i>
#include <include/version/webproxy-version.xml.i>
+ #include <include/version/reverseproxy-version.xml.i>
</interfaceDefinition>
diff --git a/op-mode-definitions/force-commit-archive.xml.in b/op-mode-definitions/force-commit-archive.xml.in
index 162323c20..46836f967 100644
--- a/op-mode-definitions/force-commit-archive.xml.in
+++ b/op-mode-definitions/force-commit-archive.xml.in
@@ -6,7 +6,7 @@
<properties>
<help>Manually archive configuration</help>
</properties>
- <command>/usr/bin/config-mgmt</command>
+ <command>/etc/commit/post-hooks.d/02vyos-commit-archive; printf "\n"</command>
</leafNode>
</children>
</node>
diff --git a/op-mode-definitions/include/vni-tagnode-all.xml.i b/op-mode-definitions/include/vni-tagnode-all.xml.i
index 0fedb9371..fabab19d7 100644
--- a/op-mode-definitions/include/vni-tagnode-all.xml.i
+++ b/op-mode-definitions/include/vni-tagnode-all.xml.i
@@ -3,9 +3,10 @@
<properties>
<help>VXLAN network identifier (VNI) number</help>
<completionHelp>
- <list>1-16777215 all</list>
+ <list>&lt;1-16777215&gt; all</list>
+ <script>${vyos_completion_dir}/list_vni.sh</script>
</completionHelp>
</properties>
- <command>${vyos_op_scripts_dir}/vtysh_wrapper.sh $@</command>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
</tagNode>
<!-- included end -->
diff --git a/op-mode-definitions/include/vni-tagnode.xml.i b/op-mode-definitions/include/vni-tagnode.xml.i
index 22f2d33bd..f5b99dcc8 100644
--- a/op-mode-definitions/include/vni-tagnode.xml.i
+++ b/op-mode-definitions/include/vni-tagnode.xml.i
@@ -3,9 +3,10 @@
<properties>
<help>VXLAN network identifier (VNI) number</help>
<completionHelp>
- <list>1-16777215</list>
+ <list>&lt;1-16777215&gt;</list>
+ <script>${vyos_completion_dir}/list_vni.sh</script>
</completionHelp>
</properties>
- <command>${vyos_op_scripts_dir}/vtysh_wrapper.sh $@</command>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
</tagNode>
<!-- included end -->
diff --git a/op-mode-definitions/mtr.xml.in b/op-mode-definitions/mtr.xml.in
index 8239aec4c..66729e2bc 100644
--- a/op-mode-definitions/mtr.xml.in
+++ b/op-mode-definitions/mtr.xml.in
@@ -13,7 +13,7 @@
<children>
<leafNode name="node.tag">
<properties>
- <help>mtr options</help>
+ <help>Traceroute options</help>
<completionHelp>
<script>${vyos_op_scripts_dir}/mtr.py --get-options-nested "${COMP_WORDS[@]}"</script>
</completionHelp>
@@ -35,7 +35,7 @@
<children>
<leafNode name="node.tag">
<properties>
- <help>Traceroute options</help>
+ <help>mtr options</help>
<completionHelp>
<script>${vyos_op_scripts_dir}/mtr.py --get-options "${COMP_WORDS[@]}"</script>
</completionHelp>
diff --git a/op-mode-definitions/nat.xml.in b/op-mode-definitions/nat.xml.in
index 307a91337..13e7fd81d 100644
--- a/op-mode-definitions/nat.xml.in
+++ b/op-mode-definitions/nat.xml.in
@@ -7,6 +7,39 @@
<help>Show IPv4 Network Address Translation (NAT) information</help>
</properties>
<children>
+ <node name="cgnat">
+ <properties>
+ <help>Show Carrier-Grade Network Address Translation (CGNAT)</help>
+ </properties>
+ <children>
+ <node name="allocation">
+ <properties>
+ <help>Show allocated CGNAT parameters</help>
+ </properties>
+ <children>
+ <tagNode name="external-address">
+ <properties>
+ <help>Show CGNAT allocations for an external IP address</help>
+ <completionHelp>
+ <list>&lt;x.x.x.x&gt;</list>
+ </completionHelp>
+ </properties>
+ <command>sudo ${vyos_op_scripts_dir}/cgnat.py show_allocation --external-address "$6"</command>
+ </tagNode>
+ <tagNode name="internal-address">
+ <properties>
+ <help>Show CGNAT allocations for an internal IP address</help>
+ <completionHelp>
+ <list>&lt;x.x.x.x&gt;</list>
+ </completionHelp>
+ </properties>
+ <command>sudo ${vyos_op_scripts_dir}/cgnat.py show_allocation --internal-address "$6"</command>
+ </tagNode>
+ </children>
+ <command>sudo ${vyos_op_scripts_dir}/cgnat.py show_allocation</command>
+ </node>
+ </children>
+ </node>
<node name="source">
<properties>
<help>Show source IPv4 to IPv4 Network Address Translation (NAT) information</help>
diff --git a/op-mode-definitions/pki.xml.in b/op-mode-definitions/pki.xml.in
index a5e01bade..f76b4f4e1 100644
--- a/op-mode-definitions/pki.xml.in
+++ b/op-mode-definitions/pki.xml.in
@@ -495,7 +495,7 @@
<properties>
<help>Show x509 CA certificates</help>
</properties>
- <command>${vyos_op_scripts_dir}/pki.py --action show --ca "all"</command>
+ <command>sudo ${vyos_op_scripts_dir}/pki.py --action show --ca "all"</command>
</leafNode>
<tagNode name="ca">
<properties>
@@ -504,13 +504,13 @@
<path>pki ca</path>
</completionHelp>
</properties>
- <command>${vyos_op_scripts_dir}/pki.py --action show --ca "$4"</command>
+ <command>sudo ${vyos_op_scripts_dir}/pki.py --action show --ca "$4"</command>
<children>
<leafNode name="pem">
<properties>
<help>Show x509 CA certificate in PEM format</help>
</properties>
- <command>${vyos_op_scripts_dir}/pki.py --action show --ca "$4" --pem</command>
+ <command>sudo ${vyos_op_scripts_dir}/pki.py --action show --ca "$4" --pem</command>
</leafNode>
</children>
</tagNode>
@@ -518,7 +518,7 @@
<properties>
<help>Show x509 certificates</help>
</properties>
- <command>${vyos_op_scripts_dir}/pki.py --action show --certificate "all"</command>
+ <command>sudo ${vyos_op_scripts_dir}/pki.py --action show --certificate "all"</command>
</leafNode>
<tagNode name="certificate">
<properties>
@@ -527,7 +527,7 @@
<path>pki certificate</path>
</completionHelp>
</properties>
- <command>${vyos_op_scripts_dir}/pki.py --action show --certificate "$4"</command>
+ <command>sudo ${vyos_op_scripts_dir}/pki.py --action show --certificate "$4"</command>
<children>
<leafNode name="pem">
<properties>
@@ -542,7 +542,7 @@
<list>sha256 sha384 sha512</list>
</completionHelp>
</properties>
- <command>${vyos_op_scripts_dir}/pki.py --action show --certificate "$4" --fingerprint "$6"</command>
+ <command>sudo ${vyos_op_scripts_dir}/pki.py --action show --certificate "$4" --fingerprint "$6"</command>
</tagNode>
</children>
</tagNode>
diff --git a/op-mode-definitions/reverse-proxy.xml.in b/op-mode-definitions/reverse-proxy.xml.in
new file mode 100644
index 000000000..4af24880b
--- /dev/null
+++ b/op-mode-definitions/reverse-proxy.xml.in
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<interfaceDefinition>
+ <node name="restart">
+ <children>
+ <node name="reverse-proxy">
+ <properties>
+ <help>Restart reverse-proxy service</help>
+ </properties>
+ <command>if cli-shell-api existsActive load-balancing reverse-proxy; then sudo systemctl restart haproxy.service; else echo "Reverse-Proxy not configured"; fi</command>
+ </node>
+ </children>
+ </node>
+ <node name="show">
+ <children>
+ <node name="reverse-proxy">
+ <properties>
+ <help>Show load-balancing reverse-proxy</help>
+ </properties>
+ <command>sudo ${vyos_op_scripts_dir}/reverseproxy.py show</command>
+ </node>
+ </children>
+ </node>
+</interfaceDefinition>
diff --git a/op-mode-definitions/show-evpn.xml.in b/op-mode-definitions/show-evpn.xml.in
index a005cbc30..3c1e5c7d6 100644
--- a/op-mode-definitions/show-evpn.xml.in
+++ b/op-mode-definitions/show-evpn.xml.in
@@ -14,7 +14,7 @@
<children>
#include <include/frr-detail.xml.i>
</children>
- <command>${vyos_op_scripts_dir}/vtysh_wrapper.sh $@</command>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
</node>
<tagNode name="access-vlan">
<properties>
@@ -31,7 +31,7 @@
<list>&lt;1-4094&gt;</list>
</completionHelp>
</properties>
- <command>${vyos_op_scripts_dir}/vtysh_wrapper.sh $@</command>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
</node>
</children>
</tagNode>
@@ -43,6 +43,45 @@
#include <include/vni-tagnode-all.xml.i>
</children>
</node>
+ <tagNode name="es">
+ <properties>
+ <help>Show ESI information for specified ESI</help>
+ <completionHelp>
+ <list>&lt;esi&gt;</list>
+ <script>${vyos_completion_dir}/list_esi.sh</script>
+ </completionHelp>
+ </properties>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
+ </tagNode>
+ <node name="es">
+ <properties>
+ <help>Show ESI information</help>
+ </properties>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
+ <children>
+ <leafNode name="detail">
+ <properties>
+ <help>Show ESI details</help>
+ </properties>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
+ </leafNode>
+ </children>
+ </node>
+ <node name="es-evi">
+ <properties>
+ <help>Show ESI information per EVI</help>
+ </properties>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
+ <children>
+ <leafNode name="detail">
+ <properties>
+ <help>Show ESI per EVI details</help>
+ </properties>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
+ </leafNode>
+ #include <include/vni-tagnode.xml.i>
+ </children>
+ </node>
<node name="mac">
<properties>
<help>MAC addresses</help>
@@ -67,7 +106,23 @@
#include <include/vni-tagnode-all.xml.i>
</children>
</node>
+ #include <include/vni-tagnode.xml.i>
+ <node name="vni">
+ <properties>
+ <help>Show VNI information</help>
+ </properties>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
+ <children>
+ <leafNode name="detail">
+ <properties>
+ <help>Show VNI details</help>
+ </properties>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
+ </leafNode>
+ </children>
+ </node>
</children>
+ <command>${vyos_op_scripts_dir}/evpn.py show_evpn --command "$*"</command>
</node>
</children>
</node>
diff --git a/op-mode-definitions/show-log.xml.in b/op-mode-definitions/show-log.xml.in
index e13270364..c3aa324ba 100644
--- a/op-mode-definitions/show-log.xml.in
+++ b/op-mode-definitions/show-log.xml.in
@@ -464,12 +464,56 @@
</properties>
<command>journalctl --no-hostname --boot --unit lldpd.service</command>
</leafNode>
- <leafNode name="nat">
+ <node name="nat">
<properties>
<help>Show log for Network Address Translation (NAT)</help>
</properties>
- <command>egrep -i "kernel:.*\[NAT-[A-Z]{3,}-[0-9]+(-MASQ)?\]" $(find /var/log -maxdepth 1 -type f -name messages\* | sort -t. -k2nr)</command>
- </leafNode>
+ <children>
+ <node name="destination">
+ <properties>
+ <help>Show NAT destination log</help>
+ </properties>
+ <command>journalctl --no-hostname --boot -k | egrep "\[DST-NAT-[0-9]+\]"</command>
+ <children>
+ <tagNode name="rule">
+ <properties>
+ <help>Show NAT destination log for specified rule</help>
+ </properties>
+ <command>journalctl --no-hostname --boot -k | egrep "\[DST-NAT-$6\]"</command>
+ </tagNode>
+ </children>
+ </node>
+ <node name="source">
+ <properties>
+ <help>Show NAT source log</help>
+ </properties>
+ <command>journalctl --no-hostname --boot -k | egrep "\[SRC-NAT-[0-9]+(-MASQ)?\]"&quot;"</command>
+ <children>
+ <tagNode name="rule">
+ <properties>
+ <help>Show NAT source log for specified rule</help>
+ </properties>
+ <command>journalctl --no-hostname --boot -k | egrep "\[SRC-NAT-$6(-MASQ)?\]"</command>
+ </tagNode>
+ </children>
+ </node>
+ <node name="static">
+ <properties>
+ <help>Show NAT static log</help>
+ </properties>
+ <command>journalctl --no-hostname --boot -k | egrep "\[STATIC-(SRC|DST)-NAT-[0-9]+\]"</command>
+ <children>
+ <tagNode name="rule">
+ <properties>
+ <help>Show NAT static log for specified rule</help>
+ </properties>
+ <command>journalctl --no-hostname --boot -k | egrep "\[STATIC-(SRC|DST)-NAT-$6\]"</command>
+ </tagNode>
+ </children>
+ </node>
+ </children>
+ <command>journalctl --no-hostname --boot -k | egrep "\[(STATIC-)?(DST|SRC)-NAT-[0-9]+(-MASQ)?\]"</command>
+ </node>
<leafNode name="ndp-proxy">
<properties>
<help>Show log for Neighbor Discovery Protocol (NDP) Proxy</help>
diff --git a/op-mode-definitions/show-reverse-proxy.xml.in b/op-mode-definitions/show-reverse-proxy.xml.in
deleted file mode 100644
index ed0fee843..000000000
--- a/op-mode-definitions/show-reverse-proxy.xml.in
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version="1.0"?>
-<interfaceDefinition>
- <node name="show">
- <children>
- <node name="reverse-proxy">
- <properties>
- <help>Show load-balancing reverse-proxy</help>
- </properties>
- <command>sudo ${vyos_op_scripts_dir}/reverseproxy.py show</command>
- </node>
- </children>
- </node>
-</interfaceDefinition>
diff --git a/op-mode-definitions/suricata.xml.in b/op-mode-definitions/suricata.xml.in
new file mode 100644
index 000000000..a5025afba
--- /dev/null
+++ b/op-mode-definitions/suricata.xml.in
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<interfaceDefinition>
+ <node name="update">
+ <children>
+ <node name="suricata">
+ <properties>
+ <help>Update Suricata</help>
+ </properties>
+ <command>if test -f /run/suricata/suricata.yaml; then sudo suricata-update --suricata-conf /run/suricata/suricata.yaml; sudo systemctl restart suricata; else echo "Service Suricata not configured"; fi </command>
+ </node>
+ </children>
+ </node>
+ <node name="restart">
+ <children>
+ <node name="suricata">
+ <properties>
+ <help>Restart Suricata service</help>
+ </properties>
+ <command>if systemctl is-active --quiet suricata; then sudo systemctl restart suricata.service; else echo "Service Suricata not configured"; fi</command>
+ </node>
+ </children>
+ </node>
+</interfaceDefinition>
diff --git a/python/vyos/base.py b/python/vyos/base.py
index 9b93cb2f2..054b1d837 100644
--- a/python/vyos/base.py
+++ b/python/vyos/base.py
@@ -41,7 +41,7 @@ class BaseWarning:
isfirstmessage = False
initial_indent = self.standardindent
print(f'{mes}')
- print('')
+ print('', flush=True)
class Warning():
diff --git a/python/vyos/compose_config.py b/python/vyos/compose_config.py
new file mode 100644
index 000000000..efa28babe
--- /dev/null
+++ b/python/vyos/compose_config.py
@@ -0,0 +1,84 @@
+# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module allows iterating over function calls to modify an existing
+config.
+"""
+
+from pathlib import Path
+from typing import TypeAlias, Union, Callable
+
+from vyos.configtree import ConfigTree
+from vyos.configtree import deep_copy as ct_deep_copy
+from vyos.utils.system import load_as_module
+
+ConfigObj: TypeAlias = Union[str, ConfigTree]
+
+class ComposeConfigError(Exception):
+ """Raised when an error occurs modifying a config object.
+ """
+
+class ComposeConfig:
+ """Apply function to config tree: for iteration over functions or files.
+ """
+ def __init__(self, config_obj: ConfigObj, checkpoint_file=None):
+ if isinstance(config_obj, ConfigTree):
+ self.config_tree = config_obj
+ else:
+ self.config_tree = ConfigTree(config_obj)
+
+ self.checkpoint = self.config_tree
+ self.checkpoint_file = checkpoint_file
+
+ def apply_func(self, func: Callable):
+ """Apply the function to the config tree.
+ """
+ if not callable(func):
+ raise ComposeConfigError(f'{func.__name__} is not callable')
+
+ if self.checkpoint_file is not None:
+ self.checkpoint = ct_deep_copy(self.config_tree)
+
+ try:
+ func(self.config_tree)
+ except Exception as e:
+ self.config_tree = self.checkpoint
+ raise ComposeConfigError(e) from e
+
+ def apply_file(self, func_file: str, func_name: str):
+ """Apply named function from file.
+ """
+ try:
+ mod_name = Path(func_file).stem.replace('-', '_')
+ mod = load_as_module(mod_name, func_file)
+ func = getattr(mod, func_name)
+ except Exception as e:
+ raise ComposeConfigError(f'Error with {func_file}: {e}') from e
+
+ try:
+ self.apply_func(func)
+ except ComposeConfigError as e:
+ raise ComposeConfigError(f'Error in {func_file}: {e}') from e
+
+ def to_string(self, with_version=False) -> str:
+ """Return the rendered config tree.
+ """
+ return self.config_tree.to_string(no_version=not with_version)
+
+ def write(self, config_file: str, with_version=False):
+ """Write the config tree to a file.
+ """
+ config_str = self.to_string(with_version=with_version)
+ Path(config_file).write_text(config_str)
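The new vyos.compose_config module above lets a caller apply a sequence of functions (or named functions loaded from files) to a ConfigTree, restoring a checkpoint copy if any step raises. A minimal usage sketch, assuming the ConfigTree API shown elsewhere in this patch; the add_banner step and file paths are invented for illustration:

# Hedged sketch: compose a single invented step over a config file.
from vyos.compose_config import ComposeConfig, ComposeConfigError

def add_banner(config_tree):
    # Each composed step receives the working ConfigTree and mutates it in place.
    config_tree.set(['system', 'login', 'banner', 'pre-login'],
                    value='Authorized access only')

with open('/config/config.boot') as f:           # path assumed for the example
    composer = ComposeConfig(f.read(), checkpoint_file='/tmp/compose.checkpoint')

try:
    composer.apply_func(add_banner)              # restores the checkpoint on failure
except ComposeConfigError as err:
    print(f'compose step failed: {err}')
else:
    composer.write('/tmp/config.composed', with_version=True)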
diff --git a/python/vyos/config_mgmt.py b/python/vyos/config_mgmt.py
index fc51d781c..70b6ea203 100644
--- a/python/vyos/config_mgmt.py
+++ b/python/vyos/config_mgmt.py
@@ -283,6 +283,8 @@ Proceed ?'''
rollback_ct = self._get_config_tree_revision(rev)
try:
load(rollback_ct, switch='explicit')
+ print('Rollback diff has been applied.')
+ print('Use "compare" to review the changes or "commit" to apply them.')
except LoadConfigError as e:
raise ConfigMgmtError(e) from e
diff --git a/python/vyos/configsession.py b/python/vyos/configsession.py
index ab7a631bb..beec6010b 100644
--- a/python/vyos/configsession.py
+++ b/python/vyos/configsession.py
@@ -34,6 +34,8 @@ INSTALL_IMAGE = ['/usr/libexec/vyos/op_mode/image_installer.py',
'--action', 'add', '--no-prompt', '--image-path']
REMOVE_IMAGE = ['/usr/libexec/vyos/op_mode/image_manager.py',
'--action', 'delete', '--no-prompt', '--image-name']
+SET_DEFAULT_IMAGE = ['/usr/libexec/vyos/op_mode/image_manager.py',
+ '--action', 'set', '--no-prompt', '--image-name']
GENERATE = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'generate']
SHOW = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'show']
RESET = ['/opt/vyatta/bin/vyatta-op-cmd-wrapper', 'reset']
@@ -235,6 +237,10 @@ class ConfigSession(object):
out = self.__run_command(REMOVE_IMAGE + [name])
return out
+ def set_default_image(self, name):
+ out = self.__run_command(SET_DEFAULT_IMAGE + [name])
+ return out
+
def generate(self, path):
out = self.__run_command(GENERATE + path)
return out
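For the configsession change above, the new set_default_image() wrapper simply shells out to image_manager.py with --action set. A hedged sketch of how a caller might invoke it (the image name is invented, and this only works on a running VyOS system):

# Hedged sketch: mark an installed image as the default boot entry.
import os
from vyos.configsession import ConfigSession

session = ConfigSession(os.getpid(), app='example-client')
output = session.set_default_image('1.5-rolling-202402240022')  # invented image name
print(output)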
diff --git a/python/vyos/configtree.py b/python/vyos/configtree.py
index e4b282d72..afd6e030b 100644
--- a/python/vyos/configtree.py
+++ b/python/vyos/configtree.py
@@ -175,9 +175,11 @@ class ConfigTree(object):
def get_version_string(self):
return self.__version
- def to_string(self, ordered_values=False):
+ def to_string(self, ordered_values=False, no_version=False):
config_string = self.__to_string(self.__config, ordered_values).decode()
config_string = unescape_backslash(config_string)
+ if no_version:
+ return config_string
config_string = "{0}\n{1}".format(config_string, self.__version)
return config_string
@@ -482,3 +484,9 @@ class DiffTree:
add = self.add.to_commands()
delete = self.delete.to_commands(op="delete")
return delete + "\n" + add
+
+def deep_copy(config_tree: ConfigTree) -> ConfigTree:
+ """An inelegant, but reasonably fast, copy; replace with backend copy
+ """
+ D = DiffTree(None, config_tree)
+ return D.add
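The configtree.py hunks above add a no_version switch to to_string() and a deep_copy() helper built on DiffTree, which ComposeConfig uses for its checkpoint. A small sketch of both, using an invented config snippet:

# Hedged sketch: copy a tree, change only the copy, render both without version tags.
from vyos.configtree import ConfigTree, deep_copy

config_text = """
interfaces {
    loopback lo {
    }
}
"""

original = ConfigTree(config_text)
clone = deep_copy(original)                       # independent copy via DiffTree

clone.set(['interfaces', 'dummy', 'dum0', 'address'], value='192.0.2.1/32')

# no_version=True skips appending the component-version trailer
print(original.to_string(no_version=True))
print(clone.to_string(no_version=True))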
diff --git a/python/vyos/defaults.py b/python/vyos/defaults.py
index 64145a42e..e7cd69a8b 100644
--- a/python/vyos/defaults.py
+++ b/python/vyos/defaults.py
@@ -25,6 +25,7 @@ directories = {
'services' : f'{base_dir}/services',
'config' : '/opt/vyatta/etc/config',
'migrate' : '/opt/vyatta/etc/config-migrate/migrate',
+ 'activate' : f'{base_dir}/activate',
'log' : '/var/log/vyatta',
'templates' : '/usr/share/vyos/templates/',
'certbot' : '/config/auth/letsencrypt',
@@ -46,3 +47,5 @@ cfg_vintage = 'vyos'
commit_lock = '/opt/vyatta/config/.lock'
component_version_json = os.path.join(directories['data'], 'component-versions.json')
+
+config_default = os.path.join(directories['data'], 'config.boot.default')
diff --git a/python/vyos/ifconfig/interface.py b/python/vyos/ifconfig/interface.py
index f0897bc21..117479ade 100644
--- a/python/vyos/ifconfig/interface.py
+++ b/python/vyos/ifconfig/interface.py
@@ -42,6 +42,7 @@ from vyos.utils.process import is_systemd_service_active
from vyos.utils.process import run
from vyos.template import is_ipv4
from vyos.template import is_ipv6
+from vyos.utils.file import read_file
from vyos.utils.network import is_intf_addr_assigned
from vyos.utils.network import is_ipv6_link_local
from vyos.utils.assertion import assert_boolean
@@ -1356,12 +1357,13 @@ class Interface(Control):
if enable and 'disable' not in self.config:
if dict_search('dhcp_options.host_name', self.config) == None:
# read configured system hostname.
- # maybe change to vyos hostd client ???
+ # maybe change to vyos-hostsd client ???
hostname = 'vyos'
- with open('/etc/hostname', 'r') as f:
- hostname = f.read().rstrip('\n')
- tmp = {'dhcp_options' : { 'host_name' : hostname}}
- self.config = dict_merge(tmp, self.config)
+ hostname_file = '/etc/hostname'
+ if os.path.isfile(hostname_file):
+ hostname = read_file(hostname_file)
+ tmp = {'dhcp_options' : { 'host_name' : hostname}}
+ self.config = dict_merge(tmp, self.config)
render(systemd_override_file, 'dhcp-client/override.conf.j2', self.config)
render(dhclient_config_file, 'dhcp-client/ipv4.j2', self.config)
diff --git a/python/vyos/ifconfig/vxlan.py b/python/vyos/ifconfig/vxlan.py
index bdb48e303..918aea202 100644
--- a/python/vyos/ifconfig/vxlan.py
+++ b/python/vyos/ifconfig/vxlan.py
@@ -138,10 +138,13 @@ class VXLANIf(Interface):
raise ValueError('Value out of range')
if 'vlan_to_vni_removed' in self.config:
- cur_vni_filter = get_vxlan_vni_filter(self.ifname)
+ cur_vni_filter = None
+ if dict_search('parameters.vni_filter', self.config) != None:
+ cur_vni_filter = get_vxlan_vni_filter(self.ifname)
+
for vlan, vlan_config in self.config['vlan_to_vni_removed'].items():
# If VNI filtering is enabled, remove matching VNI filter
- if dict_search('parameters.vni_filter', self.config) != None:
+ if cur_vni_filter != None:
vni = vlan_config['vni']
if vni in cur_vni_filter:
self._cmd(f'bridge vni delete dev {self.ifname} vni {vni}')
diff --git a/python/vyos/nat.py b/python/vyos/nat.py
index 2ada29add..e54548788 100644
--- a/python/vyos/nat.py
+++ b/python/vyos/nat.py
@@ -300,12 +300,12 @@ def parse_nat_static_rule(rule_conf, rule_id, nat_type):
output.append('counter')
- if translation_str:
- output.append(translation_str)
-
if 'log' in rule_conf:
output.append(f'log prefix "[{log_prefix}{log_suffix}]"')
+ if translation_str:
+ output.append(translation_str)
+
output.append(f'comment "{log_prefix}"')
return " ".join(output)
diff --git a/python/vyos/qos/base.py b/python/vyos/qos/base.py
index 87927ba9d..98e486e42 100644
--- a/python/vyos/qos/base.py
+++ b/python/vyos/qos/base.py
@@ -247,9 +247,15 @@ class QoSBase:
filter_cmd_base += ' protocol all'
if 'match' in cls_config:
- is_filtered = False
+ has_filter = False
for index, (match, match_config) in enumerate(cls_config['match'].items(), start=1):
filter_cmd = filter_cmd_base
+ if not has_filter:
+ for key in ['mark', 'vif', 'ip', 'ipv6']:
+ if key in match_config:
+ has_filter = True
+ break
+
if self.qostype == 'shaper' and 'prio ' not in filter_cmd:
filter_cmd += f' prio {index}'
if 'mark' in match_config:
@@ -332,13 +338,12 @@ class QoSBase:
cls = int(cls)
filter_cmd += f' flowid {self._parent:x}:{cls:x}'
self._cmd(filter_cmd)
- is_filtered = True
vlan_expression = "match.*.vif"
match_vlan = jmespath.search(vlan_expression, cls_config)
if any(tmp in ['exceed', 'bandwidth', 'burst'] for tmp in cls_config) \
- and is_filtered:
+ and has_filter:
# For "vif" "basic match" is used instead of "action police" T5961
if not match_vlan:
filter_cmd += f' action police'
diff --git a/python/vyos/system/image.py b/python/vyos/system/image.py
index ba9a6dfa7..aae52e770 100644
--- a/python/vyos/system/image.py
+++ b/python/vyos/system/image.py
@@ -18,8 +18,9 @@ from re import compile as re_compile
from functools import wraps
from tempfile import TemporaryDirectory
from typing import TypedDict
+from json import loads
-from vyos import version
+from vyos.defaults import directories
from vyos.system import disk, grub
# Define variables
@@ -201,9 +202,12 @@ def get_running_image() -> str:
if running_image_result:
running_image: str = running_image_result.groupdict().get(
'image_version', '')
- # we need to have a fallback for live systems
+ # we need to have a fallback for live systems:
+ # explicit read from version file
if not running_image:
- running_image: str = version.get_version()
+ json_data: str = Path(directories['data']).joinpath('version.json').read_text()
+ dict_data: dict = loads(json_data)
+ running_image: str = dict_data['version']
return running_image
diff --git a/python/vyos/utils/io.py b/python/vyos/utils/io.py
index a8c430f28..205210b66 100644
--- a/python/vyos/utils/io.py
+++ b/python/vyos/utils/io.py
@@ -72,6 +72,8 @@ def ask_yes_no(question, default=False) -> bool:
stdout.write("Please respond with yes/y or no/n\n")
except EOFError:
stdout.write("\nPlease respond with yes/y or no/n\n")
+ except KeyboardInterrupt:
+ return False
def is_interactive():
"""Try to determine if the routine was called from an interactive shell."""
diff --git a/python/vyos/version.py b/python/vyos/version.py
index b5ed2705b..86e96d0ec 100644
--- a/python/vyos/version.py
+++ b/python/vyos/version.py
@@ -33,11 +33,11 @@ import os
import requests
import vyos.defaults
+from vyos.system.image import is_live_boot
from vyos.utils.file import read_file
from vyos.utils.file import read_json
from vyos.utils.process import popen
-from vyos.utils.process import run
from vyos.utils.process import DEVNULL
version_file = os.path.join(vyos.defaults.directories['data'], 'version.json')
@@ -81,16 +81,14 @@ def get_full_version_data(fname=version_file):
else:
version_data['system_type'] = f"{hypervisor} guest"
- # Get boot type, it can be livecd, installed image, or, possible, a system installed
- # via legacy "install system" mechanism
+ # Get boot type, it can be livecd or installed image
# In installed images, the squashfs image file is named after its image version,
# while on livecd it's just "filesystem.squashfs", that's how we tell a livecd boot
# from an installed image
- boot_via = "installed image"
- if run(""" grep -e '^overlay.*/filesystem.squashfs' /proc/mounts >/dev/null""") == 0:
+ if is_live_boot():
boot_via = "livecd"
- elif run(""" grep '^overlay /' /proc/mounts >/dev/null """) != 0:
- boot_via = "legacy non-image installation"
+ else:
+ boot_via = "installed image"
version_data['boot_via'] = boot_via
# Get hardware details from DMI
diff --git a/scripts/check-pr-title-and-commit-messages.py b/scripts/check-pr-title-and-commit-messages.py
deleted file mode 100755
index 001f6cf82..000000000
--- a/scripts/check-pr-title-and-commit-messages.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python3
-
-import re
-import sys
-import time
-
-import requests
-
-# Use the same regex for PR title and commit messages for now
-title_regex = r'^(([a-zA-Z\-_.]+:\s)?)T\d+:\s+[^\s]+.*'
-commit_regex = title_regex
-
-def check_pr_title(title):
- if not re.match(title_regex, title):
- print("PR title '{}' does not match the required format!".format(title))
- print("Valid title example: T99999: make IPsec secure")
- sys.exit(1)
-
-def check_commit_message(title):
- if not re.match(commit_regex, title):
- print("Commit title '{}' does not match the required format!".format(title))
- print("Valid title example: T99999: make IPsec secure")
- sys.exit(1)
-
-if __name__ == '__main__':
- if len(sys.argv) < 2:
- print("Please specify pull request URL!")
- sys.exit(1)
-
- # There seems to be a race condition that causes this scripts to receive
- # an incomplete PR object that is missing certain fields,
- # which causes temporary CI failures that require re-running the script
- #
- # It's probably better to add a small delay to prevent that
- time.sleep(5)
-
- # Get the pull request object
- pr = requests.get(sys.argv[1]).json()
- if "title" not in pr:
- print("The PR object does not have a title field!")
- print("Did not receive a valid pull request object, please check the URL!")
- sys.exit(1)
-
- check_pr_title(pr["title"])
-
- # Get the list of commits
- commits = requests.get(pr["commits_url"]).json()
- for c in commits:
- # Retrieve every individual commit and check its title
- co = requests.get(c["url"]).json()
- check_commit_message(co["commit"]["message"])
diff --git a/smoketest/config-tests/container-simple b/smoketest/config-tests/container-simple
index 299af64cb..cc80ef4cf 100644
--- a/smoketest/config-tests/container-simple
+++ b/smoketest/config-tests/container-simple
@@ -8,5 +8,6 @@ set container name c01 capability 'net-bind-service'
set container name c01 capability 'net-raw'
set container name c01 image 'busybox:stable'
set container name c02 allow-host-networks
+set container name c02 allow-host-pid
set container name c02 capability 'sys-time'
set container name c02 image 'busybox:stable'
diff --git a/smoketest/config-tests/nat-basic b/smoketest/config-tests/nat-basic
new file mode 100644
index 000000000..9fea08b02
--- /dev/null
+++ b/smoketest/config-tests/nat-basic
@@ -0,0 +1,85 @@
+set interfaces ethernet eth0 offload rps
+set interfaces ethernet eth0 disable
+set interfaces ethernet eth1 offload gro
+set interfaces ethernet eth1 offload gso
+set interfaces ethernet eth1 offload rps
+set interfaces ethernet eth1 offload sg
+set interfaces ethernet eth1 offload tso
+set interfaces ethernet eth2 offload gro
+set interfaces ethernet eth2 offload gso
+set interfaces ethernet eth2 offload rps
+set interfaces ethernet eth2 offload sg
+set interfaces ethernet eth2 offload tso
+set interfaces ethernet eth3 offload gro
+set interfaces ethernet eth3 offload gso
+set interfaces ethernet eth3 offload rps
+set interfaces ethernet eth3 offload sg
+set interfaces ethernet eth3 offload tso
+set interfaces bonding bond10 hash-policy 'layer3+4'
+set interfaces bonding bond10 member interface 'eth2'
+set interfaces bonding bond10 member interface 'eth3'
+set interfaces bonding bond10 mode '802.3ad'
+set interfaces bonding bond10 vif 50 address '192.168.189.1/24'
+set interfaces loopback lo
+set interfaces pppoe pppoe7 authentication password 'vyos'
+set interfaces pppoe pppoe7 authentication username 'vyos'
+set interfaces pppoe pppoe7 dhcpv6-options pd 0 interface bond10.50 address '1'
+set interfaces pppoe pppoe7 dhcpv6-options pd 0 length '56'
+set interfaces pppoe pppoe7 ip adjust-mss '1452'
+set interfaces pppoe pppoe7 ipv6 address autoconf
+set interfaces pppoe pppoe7 ipv6 adjust-mss '1432'
+set interfaces pppoe pppoe7 mtu '1492'
+set interfaces pppoe pppoe7 no-peer-dns
+set interfaces pppoe pppoe7 source-interface 'eth1'
+set service lldp interface eth1 disable
+set service ntp allow-client address '192.168.189.0/24'
+set service ntp server time1.vyos.net
+set service ntp server time2.vyos.net
+set service ntp listen-address '192.168.189.1'
+set service ssh dynamic-protection
+set service dhcp-server shared-network-name LAN subnet 192.168.189.0/24 lease '604800'
+set service dhcp-server shared-network-name LAN subnet 192.168.189.0/24 option default-router '192.168.189.1'
+set service dhcp-server shared-network-name LAN subnet 192.168.189.0/24 option domain-name 'vyos.net'
+set service dhcp-server shared-network-name LAN subnet 192.168.189.0/24 option name-server '1.1.1.1'
+set service dhcp-server shared-network-name LAN subnet 192.168.189.0/24 option name-server '9.9.9.9'
+set service dhcp-server shared-network-name LAN subnet 192.168.189.0/24 range 0 start '192.168.189.20'
+set service dhcp-server shared-network-name LAN subnet 192.168.189.0/24 range 0 stop '192.168.189.254'
+set service dhcp-server shared-network-name LAN subnet 192.168.189.0/24 subnet-id '1'
+set service router-advert interface bond10.50 prefix ::/64 preferred-lifetime '2700'
+set service router-advert interface bond10.50 prefix ::/64 valid-lifetime '5400'
+set system config-management commit-revisions '100'
+set system domain-name 'vyos.net'
+set system host-name 'R1'
+set system login user vyos authentication encrypted-password '$6$2Ta6TWHd/U$NmrX0x9kexCimeOcYK1MfhMpITF9ELxHcaBU/znBq.X2ukQOj61fVI2UYP/xBzP4QtiTcdkgs7WOQMHWsRymO/'
+set system login user vyos authentication plaintext-password ''
+set system name-server '1.1.1.1'
+set system name-server '9.9.9.9'
+set system console device ttyS0 speed '115200'
+set nat destination rule 1000 destination port '3389'
+set nat destination rule 1000 inbound-interface name 'pppoe7'
+set nat destination rule 1000 protocol 'tcp'
+set nat destination rule 1000 translation address '192.168.189.5'
+set nat destination rule 1000 translation port '3389'
+set nat destination rule 10022 destination port '10022'
+set nat destination rule 10022 inbound-interface name 'pppoe7'
+set nat destination rule 10022 protocol 'tcp'
+set nat destination rule 10022 translation address '192.168.189.2'
+set nat destination rule 10022 translation port '22'
+set nat destination rule 10300 destination port '10300'
+set nat destination rule 10300 inbound-interface name 'pppoe7'
+set nat destination rule 10300 protocol 'udp'
+set nat destination rule 10300 translation address '192.168.189.2'
+set nat destination rule 10300 translation port '10300'
+set nat source rule 10 outbound-interface name 'eth1'
+set nat source rule 10 source address '192.168.189.0/24'
+set nat source rule 10 translation address 'masquerade'
+set nat source rule 10 translation options port-mapping 'random'
+set nat source rule 50 outbound-interface name 'pppoe7'
+set nat source rule 50 protocol 'udp'
+set nat source rule 50 source address '192.168.189.2'
+set nat source rule 50 source port '10300'
+set nat source rule 50 translation address 'masquerade'
+set nat source rule 50 translation port '10300'
+set nat source rule 100 outbound-interface name 'pppoe7'
+set nat source rule 100 source address '192.168.189.0/24'
+set nat source rule 100 translation address 'masquerade'
diff --git a/smoketest/configs/container-simple b/smoketest/configs/container-simple
index 05efe05e9..82983afb7 100644
--- a/smoketest/configs/container-simple
+++ b/smoketest/configs/container-simple
@@ -7,6 +7,7 @@ container {
}
name c02 {
allow-host-networks
+ allow-host-pid
cap-add sys-time
image busybox:stable
}
diff --git a/smoketest/configs/nat-basic b/smoketest/configs/nat-basic
new file mode 100644
index 000000000..52f369f34
--- /dev/null
+++ b/smoketest/configs/nat-basic
@@ -0,0 +1,256 @@
+interfaces {
+ bonding bond10 {
+ hash-policy "layer3+4"
+ member {
+ interface "eth2"
+ interface "eth3"
+ }
+ mode "802.3ad"
+ vif 50 {
+ address "192.168.189.1/24"
+ }
+ }
+ ethernet eth0 {
+ disable
+ offload {
+ gro
+ gso
+ rps
+ sg
+ tso
+ }
+ }
+ ethernet eth1 {
+ offload {
+ gro
+ gso
+ rps
+ sg
+ tso
+ }
+ }
+ ethernet eth2 {
+ offload {
+ gro
+ gso
+ rps
+ sg
+ tso
+ }
+ }
+ ethernet eth3 {
+ offload {
+ gro
+ gso
+ rps
+ sg
+ tso
+ }
+ }
+ loopback lo {
+ }
+ pppoe pppoe7 {
+ authentication {
+ password "vyos"
+ username "vyos"
+ }
+ dhcpv6-options {
+ pd 0 {
+ interface bond10.50 {
+ address "1"
+ }
+ length "56"
+ }
+ }
+ ip {
+ adjust-mss "1452"
+ }
+ ipv6 {
+ address {
+ autoconf
+ }
+ adjust-mss "1432"
+ }
+ mtu "1492"
+ no-peer-dns
+ source-interface "eth1"
+ }
+}
+nat {
+ destination {
+ rule 1000 {
+ destination {
+ port "3389"
+ }
+ inbound-interface {
+ name "pppoe7"
+ }
+ protocol "tcp"
+ translation {
+ address "192.168.189.5"
+ port "3389"
+ }
+ }
+ rule 10022 {
+ destination {
+ port "10022"
+ }
+ inbound-interface {
+ name "pppoe7"
+ }
+ protocol "tcp"
+ translation {
+ address "192.168.189.2"
+ port "22"
+ }
+ }
+ rule 10300 {
+ destination {
+ port "10300"
+ }
+ inbound-interface {
+ name "pppoe7"
+ }
+ protocol "udp"
+ translation {
+ address "192.168.189.2"
+ port "10300"
+ }
+ }
+ }
+ source {
+ rule 10 {
+ outbound-interface {
+ name "eth1"
+ }
+ source {
+ address "192.168.189.0/24"
+ }
+ translation {
+ address "masquerade"
+ options {
+ port-mapping fully-random
+ }
+ }
+ }
+ rule 50 {
+ outbound-interface {
+ name "pppoe7"
+ }
+ protocol "udp"
+ source {
+ address "192.168.189.2"
+ port "10300"
+ }
+ translation {
+ address "masquerade"
+ port "10300"
+ }
+ }
+ rule 100 {
+ outbound-interface {
+ name "pppoe7"
+ }
+ source {
+ address "192.168.189.0/24"
+ }
+ translation {
+ address "masquerade"
+ }
+ }
+ }
+}
+service {
+ dhcp-server {
+ shared-network-name LAN {
+ subnet 192.168.189.0/24 {
+ default-router "192.168.189.1"
+ domain-name "vyos.net"
+ lease "604800"
+ name-server "1.1.1.1"
+ name-server "9.9.9.9"
+ range 0 {
+ start "192.168.189.20"
+ stop "192.168.189.254"
+ }
+ }
+ }
+ }
+ lldp {
+ interface all {
+ }
+ interface eth1 {
+ disable
+ }
+ }
+ ntp {
+ allow-client {
+ address "192.168.189.0/24"
+ }
+ listen-address "192.168.189.1"
+ server time1.vyos.net {
+ }
+ server time2.vyos.net {
+ }
+ }
+ router-advert {
+ interface bond10.50 {
+ prefix ::/64 {
+ preferred-lifetime "2700"
+ valid-lifetime "5400"
+ }
+ }
+ }
+ ssh {
+ disable-host-validation
+ dynamic-protection {
+ }
+ }
+}
+system {
+ config-management {
+ commit-revisions "100"
+ }
+ conntrack {
+ modules {
+ ftp
+ h323
+ nfs
+ pptp
+ sip
+ sqlnet
+ tftp
+ }
+ }
+ console {
+ device ttyS0 {
+ speed "115200"
+ }
+ }
+ domain-name "vyos.net"
+ host-name "R1"
+ login {
+ user vyos {
+ authentication {
+ encrypted-password $6$2Ta6TWHd/U$NmrX0x9kexCimeOcYK1MfhMpITF9ELxHcaBU/znBq.X2ukQOj61fVI2UYP/xBzP4QtiTcdkgs7WOQMHWsRymO/
+ plaintext-password ""
+ }
+ }
+ }
+ name-server "1.1.1.1"
+ name-server "9.9.9.9"
+ syslog {
+ global {
+ facility all {
+ level "info"
+ }
+ facility local7 {
+ level "debug"
+ }
+ }
+ }
+}
+
+// Warning: Do not remove the following line.
+// vyos-config-version: "bgp@5:broadcast-relay@1:cluster@2:config-management@1:conntrack@5:conntrack-sync@2:container@2:dhcp-relay@2:dhcp-server@8:dhcpv6-server@1:dns-dynamic@4:dns-forwarding@4:firewall@15:flow-accounting@1:https@6:ids@1:interfaces@32:ipoe-server@3:ipsec@13:isis@3:l2tp@9:lldp@2:mdns@1:monitoring@1:nat@7:nat66@3:ntp@3:openconnect@3:ospf@2:pim@1:policy@8:pppoe-server@10:pptp@5:qos@2:quagga@11:rip@1:rpki@2:salt@1:snmp@3:ssh@2:sstp@6:system@27:vrf@3:vrrp@4:vyos-accel-ppp@2:wanloadbalance@3:webproxy@2"
+// Release version: 1.4.0-epa3
diff --git a/smoketest/scripts/cli/base_accel_ppp_test.py b/smoketest/scripts/cli/base_accel_ppp_test.py
index 383adc445..212dc58ab 100644
--- a/smoketest/scripts/cli/base_accel_ppp_test.py
+++ b/smoketest/scripts/cli/base_accel_ppp_test.py
@@ -367,6 +367,27 @@ class BasicAccelPPPTest:
]
)
+ self.set(
+ [
+ "authentication",
+ "radius",
+ "server",
+ radius_server,
+ "backup",
+ ]
+ )
+
+ self.set(
+ [
+ "authentication",
+ "radius",
+ "server",
+ radius_server,
+ "priority",
+ "10",
+ ]
+ )
+
# commit changes
self.cli_commit()
@@ -379,6 +400,8 @@ class BasicAccelPPPTest:
self.assertEqual(f"acct-port=0", server[3])
self.assertEqual(f"req-limit=0", server[4])
self.assertEqual(f"fail-time=0", server[5])
+ self.assertIn('weight=10', server)
+ self.assertIn('backup', server)
def test_accel_ipv4_pool(self):
self.basic_config(is_gateway=False, is_client_pool=False)
@@ -605,3 +628,21 @@ delegate={delegate_2_prefix},{delegate_mask},name={pool_name}"""
self.assertEqual(conf['connlimit']['limit'], limits)
self.assertEqual(conf['connlimit']['burst'], burst)
self.assertEqual(conf['connlimit']['timeout'], timeout)
+
+ def test_accel_log_level(self):
+ self.basic_config()
+ self.cli_commit()
+
+ # check default value
+ conf = ConfigParser(allow_no_value=True)
+ conf.read(self._config_file)
+ self.assertEqual(conf['log']['level'], '3')
+
+ for log_level in range(0, 5):
+ self.set(['log', 'level', str(log_level)])
+ self.cli_commit()
+ # Validate configuration values
+ conf = ConfigParser(allow_no_value=True)
+ conf.read(self._config_file)
+
+ self.assertEqual(conf['log']['level'], str(log_level))
diff --git a/smoketest/scripts/cli/test_cgnat.py b/smoketest/scripts/cli/test_cgnat.py
new file mode 100755
index 000000000..02dad3de5
--- /dev/null
+++ b/smoketest/scripts/cli/test_cgnat.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import unittest
+
+from base_vyostest_shim import VyOSUnitTestSHIM
+from vyos.configsession import ConfigSessionError
+
+
+base_path = ['nat', 'cgnat']
+nftables_cgnat_config = '/run/nftables-cgnat.nft'
+
+
+class TestCGNAT(VyOSUnitTestSHIM.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ super(TestCGNAT, cls).setUpClass()
+
+ # ensure we can also run this test on a live system - so lets clean
+ # out the current configuration :)
+ cls.cli_delete(cls, base_path)
+
+ def tearDown(self):
+ self.cli_delete(base_path)
+ self.cli_commit()
+ self.assertFalse(os.path.exists(nftables_cgnat_config))
+
+ def test_cgnat(self):
+ internal_name = 'vyos-int-01'
+ external_name = 'vyos-ext-01'
+ internal_net = '100.64.0.0/29'
+ external_net = '192.0.2.1-192.0.2.2'
+ external_ports = '40000-60000'
+ ports_per_subscriber = '5000'
+ rule = '100'
+
+ nftables_search = [
+ ['map tcp_nat_map'],
+ ['map udp_nat_map'],
+ ['map icmp_nat_map'],
+ ['map other_nat_map'],
+ ['100.64.0.0 : 192.0.2.1 . 40000-44999'],
+ ['100.64.0.1 : 192.0.2.1 . 45000-49999'],
+ ['100.64.0.2 : 192.0.2.1 . 50000-54999'],
+ ['100.64.0.3 : 192.0.2.1 . 55000-59999'],
+ ['100.64.0.4 : 192.0.2.2 . 40000-44999'],
+ ['100.64.0.5 : 192.0.2.2 . 45000-49999'],
+ ['100.64.0.6 : 192.0.2.2 . 50000-54999'],
+ ['100.64.0.7 : 192.0.2.2 . 55000-59999'],
+ ['chain POSTROUTING'],
+ ['type nat hook postrouting priority srcnat'],
+ ['ip protocol tcp counter snat ip to ip saddr map @tcp_nat_map'],
+ ['ip protocol udp counter snat ip to ip saddr map @udp_nat_map'],
+ ['ip protocol icmp counter snat ip to ip saddr map @icmp_nat_map'],
+ ['counter snat ip to ip saddr map @other_nat_map'],
+ ]
+
+ self.cli_set(base_path + ['pool', 'external', external_name, 'external-port-range', external_ports])
+ self.cli_set(base_path + ['pool', 'external', external_name, 'range', external_net])
+
+ # allocation out of the available ports
+ with self.assertRaises(ConfigSessionError):
+ self.cli_set(base_path + ['pool', 'external', external_name, 'per-user-limit', 'port', '8000'])
+ self.cli_commit()
+ self.cli_set(base_path + ['pool', 'external', external_name, 'per-user-limit', 'port', ports_per_subscriber])
+
+ # internal pool not set
+ with self.assertRaises(ConfigSessionError):
+ self.cli_commit()
+ self.cli_set(base_path + ['pool', 'internal', internal_name, 'range', internal_net])
+
+ self.cli_set(base_path + ['rule', rule, 'source', 'pool', internal_name])
+ # non-exist translation pool
+ with self.assertRaises(ConfigSessionError):
+ self.cli_set(base_path + ['rule', rule, 'translation', 'pool', 'fake-pool'])
+ self.cli_commit()
+
+ self.cli_set(base_path + ['rule', rule, 'translation', 'pool', external_name])
+ self.cli_commit()
+
+ self.verify_nftables(nftables_search, 'ip cgnat', inverse=False, args='-s')
+
+
+ def test_cgnat_sequence(self):
+ internal_name = 'earth'
+ external_name = 'milky_way'
+ internal_net = '100.64.0.0/28'
+
+ ext_addr_alpha_proxima = '192.0.2.121/32'
+ ext_addr_beta_cygni = '198.51.100.23/32'
+ ext_addr_gamma_leonis = '203.0.113.102/32'
+
+ ext_seq_beta_cygni = '3'
+ ext_seq_gamma_leonis = '10'
+
+ external_ports = '1024-65535'
+ ports_per_subscriber = '10000'
+ rule = '100'
+
+ nftables_search = [
+ ['100.64.0.0 : 198.51.100.23 . 1024-11023, 100.64.0.1 : 198.51.100.23 . 11024-21023'],
+ ['100.64.0.4 : 198.51.100.23 . 41024-51023, 100.64.0.5 : 198.51.100.23 . 51024-61023'],
+ ['100.64.0.6 : 203.0.113.102 . 1024-11023, 100.64.0.7 : 203.0.113.102 . 11024-21023'],
+ ['100.64.0.8 : 203.0.113.102 . 21024-31023, 100.64.0.9 : 203.0.113.102 . 31024-41023'],
+ ['100.64.0.10 : 203.0.113.102 . 41024-51023, 100.64.0.11 : 203.0.113.102 . 51024-61023'],
+ ['100.64.0.12 : 192.0.2.121 . 1024-11023, 100.64.0.13 : 192.0.2.121 . 11024-21023'],
+ ['100.64.0.14 : 192.0.2.121 . 21024-31023, 100.64.0.15 : 192.0.2.121 . 31024-41023'],
+ ]
+
+ self.cli_set(base_path + ['pool', 'external', external_name, 'external-port-range', external_ports])
+ self.cli_set(base_path + ['pool', 'external', external_name, 'per-user-limit', 'port', ports_per_subscriber])
+ self.cli_set(base_path + ['pool', 'external', external_name, 'range', ext_addr_alpha_proxima])
+ self.cli_set(base_path + ['pool', 'external', external_name, 'range', ext_addr_beta_cygni, 'seq', ext_seq_beta_cygni])
+ self.cli_set(base_path + ['pool', 'external', external_name, 'range', ext_addr_gamma_leonis, 'seq', ext_seq_gamma_leonis])
+ self.cli_set(base_path + ['pool', 'internal', internal_name, 'range', internal_net])
+ self.cli_set(base_path + ['rule', rule, 'source', 'pool', internal_name])
+ self.cli_set(base_path + ['rule', rule, 'translation', 'pool', external_name])
+ self.cli_commit()
+
+ self.verify_nftables(nftables_search, 'ip cgnat', inverse=False, args='-s')
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
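The nftables map entries asserted in test_cgnat above follow directly from the pool arithmetic: 20001 external ports (40000-60000) split into 5000-port blocks gives four subscribers per external address, so the eight hosts of 100.64.0.0/29 fill 192.0.2.1 first and then 192.0.2.2. A minimal sketch of that allocation (illustrative only, not the generate_port_rules() implementation in nat_cgnat.py):

from ipaddress import ip_network

internal_hosts = [str(ip) for ip in ip_network('100.64.0.0/29')]      # 100.64.0.0 ... 100.64.0.7
external_hosts = ['192.0.2.1', '192.0.2.2']
port_start, port_end, ports_per_user = 40000, 60000, 5000

blocks_per_external = (port_end - port_start + 1) // ports_per_user   # 4 blocks per address
for idx, host in enumerate(internal_hosts):
    external = external_hosts[idx // blocks_per_external]
    first_port = port_start + (idx % blocks_per_external) * ports_per_user
    # prints e.g. '100.64.0.0 : 192.0.2.1 . 40000-44999'
    print(f'{host} : {external} . {first_port}-{first_port + ports_per_user - 1}')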
diff --git a/smoketest/scripts/cli/test_container.py b/smoketest/scripts/cli/test_container.py
index 3201883b8..90f821c60 100755
--- a/smoketest/scripts/cli/test_container.py
+++ b/smoketest/scripts/cli/test_container.py
@@ -91,6 +91,22 @@ class TestContainer(VyOSUnitTestSHIM.TestCase):
# Check for running process
self.assertEqual(process_named_running(PROCESS_NAME), pid)
+ def test_cpu_limit(self):
+ cont_name = 'c2'
+
+ self.cli_set(base_path + ['name', cont_name, 'allow-host-networks'])
+ self.cli_set(base_path + ['name', cont_name, 'image', cont_image])
+ self.cli_set(base_path + ['name', cont_name, 'cpu-quota', '1.25'])
+
+ self.cli_commit()
+
+ pid = 0
+ with open(PROCESS_PIDFILE.format(cont_name), 'r') as f:
+ pid = int(f.read())
+
+ # Check for running process
+ self.assertEqual(process_named_running(PROCESS_NAME), pid)
+
def test_ipv4_network(self):
prefix = '192.0.2.0/24'
base_name = 'ipv4'
diff --git a/smoketest/scripts/cli/test_load-balancing_reverse-proxy.py b/smoketest/scripts/cli/test_load-balancing_reverse-proxy.py
index c8b17316f..aa796f59f 100755
--- a/smoketest/scripts/cli/test_load-balancing_reverse-proxy.py
+++ b/smoketest/scripts/cli/test_load-balancing_reverse-proxy.py
@@ -218,7 +218,7 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase):
# Frontend
self.assertIn(f'frontend {frontend}', config)
- self.assertIn(f'bind :::{front_port} v4v6', config)
+ self.assertIn(f'bind [::]:{front_port} v4v6', config)
self.assertIn(f'mode {mode}', config)
for domain in domains_bk_first:
self.assertIn(f'acl {rule_ten} hdr(host) -i {domain}', config)
@@ -338,6 +338,11 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase):
self.assertIn('http-check send meth GET uri /health', config)
self.assertIn('http-check expect string success', config)
+ # Test that configuring both http-check & health-check fails the validation script
+ self.cli_set(base_path + ['backend', 'bk-01', 'health-check', 'ldap'])
+ with self.assertRaises(ConfigSessionError) as e:
+ self.cli_commit()
+
def test_06_lb_reverse_proxy_tcp_mode(self):
frontend = 'tcp_8443'
mode = 'tcp'
@@ -371,7 +376,7 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase):
# Frontend
self.assertIn(f'frontend {frontend}', config)
- self.assertIn(f'bind :::{front_port} v4v6', config)
+ self.assertIn(f'bind [::]:{front_port} v4v6', config)
self.assertIn(f'mode {mode}', config)
self.assertIn(f'tcp-request inspect-delay {tcp_request_delay}', config)
@@ -385,5 +390,74 @@ class TestLoadBalancingReverseProxy(VyOSUnitTestSHIM.TestCase):
self.assertIn(f'mode {mode}', config)
self.assertIn(f'server {bk_name} {bk_server}:{bk_server_port}', config)
+ def test_07_lb_reverse_proxy_http_response_headers(self):
+ # Setup base
+ self.configure_pki()
+ self.base_config()
+
+ # Set example headers in both frontend and backend
+ self.cli_set(base_path + ['service', 'https_front', 'http-response-headers', 'Cache-Control', 'value', 'max-age=604800'])
+ self.cli_set(base_path + ['backend', 'bk-01', 'http-response-headers', 'Proxy-Backend-ID', 'value', 'bk-01'])
+ self.cli_commit()
+
+ # Test headers are present in generated configuration file
+ config = read_file(HAPROXY_CONF)
+ self.assertIn('http-response set-header Cache-Control \'max-age=604800\'', config)
+ self.assertIn('http-response set-header Proxy-Backend-ID \'bk-01\'', config)
+
+ # Test setting alongside modes other than http is blocked by validation conditions
+ self.cli_set(base_path + ['service', 'https_front', 'mode', 'tcp'])
+ with self.assertRaises(ConfigSessionError) as e:
+ self.cli_commit()
+
+ def test_08_lb_reverse_proxy_tcp_health_checks(self):
+ # Setup PKI
+ self.configure_pki()
+
+ # Define variables
+ frontend = 'fe_ldaps'
+ mode = 'tcp'
+ health_check = 'ldap'
+ front_port = '636'
+ bk_name = 'bk_ldap'
+ bk_servers = ['192.0.2.11', '192.0.2.12']
+ bk_server_port = '389'
+
+ # Configure frontend
+ self.cli_set(base_path + ['service', frontend, 'mode', mode])
+ self.cli_set(base_path + ['service', frontend, 'port', front_port])
+ self.cli_set(base_path + ['service', frontend, 'ssl', 'certificate', 'smoketest'])
+
+ # Configure backend
+ self.cli_set(base_path + ['backend', bk_name, 'mode', mode])
+ self.cli_set(base_path + ['backend', bk_name, 'health-check', health_check])
+ for index, bk_server in enumerate(bk_servers):
+ self.cli_set(base_path + ['backend', bk_name, 'server', f'srv-{index}', 'address', bk_server])
+ self.cli_set(base_path + ['backend', bk_name, 'server', f'srv-{index}', 'port', bk_server_port])
+
+ # Commit & read config
+ self.cli_commit()
+ config = read_file(HAPROXY_CONF)
+
+ # Validate Frontend
+ self.assertIn(f'frontend {frontend}', config)
+ self.assertIn(f'bind [::]:{front_port} v4v6 ssl crt /run/haproxy/smoketest.pem', config)
+ self.assertIn(f'mode {mode}', config)
+ self.assertIn(f'backend {bk_name}', config)
+
+ # Validate Backend
+ self.assertIn(f'backend {bk_name}', config)
+ self.assertIn(f'option {health_check}-check', config)
+ self.assertIn(f'mode {mode}', config)
+ for index, bk_server in enumerate(bk_servers):
+ self.assertIn(f'server srv-{index} {bk_server}:{bk_server_port}', config)
+
+ # Validate SMTP option renders correctly
+ self.cli_set(base_path + ['backend', bk_name, 'health-check', 'smtp'])
+ self.cli_commit()
+ config = read_file(HAPROXY_CONF)
+ self.assertIn(f'option smtpchk', config)
+
+
if __name__ == '__main__':
unittest.main(verbosity=2)
diff --git a/smoketest/scripts/cli/test_protocols_isis.py b/smoketest/scripts/cli/test_protocols_isis.py
index 0fd18a6da..769f3dd33 100755
--- a/smoketest/scripts/cli/test_protocols_isis.py
+++ b/smoketest/scripts/cli/test_protocols_isis.py
@@ -60,6 +60,7 @@ class TestProtocolsISIS(VyOSUnitTestSHIM.TestCase):
prefix_list = 'EXPORT-ISIS'
route_map = 'EXPORT-ISIS'
rule = '10'
+ metric_style = 'transition'
self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', rule, 'action', 'permit'])
self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', rule, 'prefix', '203.0.113.0/24'])
@@ -80,6 +81,7 @@ class TestProtocolsISIS(VyOSUnitTestSHIM.TestCase):
self.cli_commit()
self.cli_set(base_path + ['redistribute', 'ipv4', 'connected', 'level-2', 'route-map', route_map])
+ self.cli_set(base_path + ['metric-style', metric_style])
self.cli_set(base_path + ['log-adjacency-changes'])
# Commit all changes
@@ -88,6 +90,7 @@ class TestProtocolsISIS(VyOSUnitTestSHIM.TestCase):
# Verify all changes
tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd')
self.assertIn(f' net {net}', tmp)
+ self.assertIn(f' metric-style {metric_style}', tmp)
self.assertIn(f' log-adjacency-changes', tmp)
self.assertIn(f' redistribute ipv4 connected level-2 route-map {route_map}', tmp)
@@ -395,5 +398,19 @@ class TestProtocolsISIS(VyOSUnitTestSHIM.TestCase):
self.cli_delete(['policy', 'prefix-list', prefix_list])
self.cli_commit()
+ def test_isis_10_topology(self):
+ topologies = ['ipv4-multicast', 'ipv4-mgmt', 'ipv6-unicast', 'ipv6-multicast', 'ipv6-mgmt']
+ interface = 'lo'
+
+ # Set a basic IS-IS config
+ self.cli_set(base_path + ['net', net])
+ self.cli_set(base_path + ['interface', interface])
+ for topology in topologies:
+ self.cli_set(base_path + ['topology', topology])
+ self.cli_commit()
+ tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd')
+ self.assertIn(f' net {net}', tmp)
+ self.assertIn(f' topology {topology}', tmp)
+
if __name__ == '__main__':
unittest.main(verbosity=2)
diff --git a/smoketest/scripts/cli/test_protocols_ospf.py b/smoketest/scripts/cli/test_protocols_ospf.py
index 1b9cc50fe..585c1dc89 100755
--- a/smoketest/scripts/cli/test_protocols_ospf.py
+++ b/smoketest/scripts/cli/test_protocols_ospf.py
@@ -16,6 +16,7 @@
import unittest
+from time import sleep
from base_vyostest_shim import VyOSUnitTestSHIM
from vyos.configsession import ConfigSessionError
@@ -480,6 +481,8 @@ class TestProtocolsOSPF(VyOSUnitTestSHIM.TestCase):
# Commit main OSPF changes
self.cli_commit()
+ sleep(10)
+
# Verify main OSPF changes
frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME)
self.assertIn(f'router ospf', frrconfig)
diff --git a/smoketest/scripts/cli/test_qos.py b/smoketest/scripts/cli/test_qos.py
index bcf5139c7..b98c0e9b7 100755
--- a/smoketest/scripts/cli/test_qos.py
+++ b/smoketest/scripts/cli/test_qos.py
@@ -738,6 +738,122 @@ class TestQoS(VyOSUnitTestSHIM.TestCase):
self.cli_commit()
self.assertEqual('', cmd(f'tc filter show dev {interface}'))
+ def test_14_policy_limiter_marked_traffic(self):
+ policy_name = 'smoke_test'
+ base_policy_path = ['qos', 'policy', 'limiter', policy_name]
+
+ self.cli_set(['qos', 'interface', self._interfaces[0], 'ingress', policy_name])
+ self.cli_set(base_policy_path + ['class', '100', 'bandwidth', '20gbit'])
+ self.cli_set(base_policy_path + ['class', '100', 'burst', '3760k'])
+ self.cli_set(base_policy_path + ['class', '100', 'match', 'INTERNAL', 'mark', '100'])
+ self.cli_set(base_policy_path + ['class', '100', 'priority', '20'])
+ self.cli_set(base_policy_path + ['default', 'bandwidth', '1gbit'])
+ self.cli_set(base_policy_path + ['default', 'burst', '125000000b'])
+ self.cli_commit()
+
+ tc_filters = cmd(f'tc filter show dev {self._interfaces[0]} ingress')
+ # class 100
+ self.assertIn('filter parent ffff: protocol all pref 20 fw chain 0', tc_filters)
+ self.assertIn('action order 1: police 0x1 rate 20Gbit burst 3847500b mtu 2Kb action drop overhead 0b', tc_filters)
+ # default
+ self.assertIn('filter parent ffff: protocol all pref 255 basic chain 0', tc_filters)
+ self.assertIn('action order 1: police 0x2 rate 1Gbit burst 125000000b mtu 2Kb action drop overhead 0b', tc_filters)
+
+ def test_15_traffic_match_group(self):
+ interface = self._interfaces[0]
+ self.cli_set(['qos', 'interface', interface, 'egress', 'VyOS-HTB'])
+ base_policy_path = ['qos', 'policy', 'shaper', 'VyOS-HTB']
+
+ # old syntax
+ self.cli_set(base_policy_path + ['bandwidth', '100mbit'])
+ self.cli_set(base_policy_path + ['class', '10', 'bandwidth', '40%'])
+ self.cli_set(base_policy_path + ['class', '10', 'match', 'AF11', 'ip', 'dscp', 'AF11'])
+ self.cli_set(base_policy_path + ['class', '10', 'match', 'AF41', 'ip', 'dscp', 'AF41'])
+ self.cli_set(base_policy_path + ['class', '10', 'match', 'AF43', 'ip', 'dscp', 'AF43'])
+ self.cli_set(base_policy_path + ['class', '10', 'match', 'CS4', 'ip', 'dscp', 'CS4'])
+ self.cli_set(base_policy_path + ['class', '10', 'priority', '1'])
+ self.cli_set(base_policy_path + ['class', '10', 'queue-type', 'fair-queue'])
+ self.cli_set(base_policy_path + ['class', '20', 'bandwidth', '30%'])
+ self.cli_set(base_policy_path + ['class', '20', 'match', 'EF', 'ip', 'dscp', 'EF'])
+ self.cli_set(base_policy_path + ['class', '20', 'match', 'CS5', 'ip', 'dscp', 'CS5'])
+ self.cli_set(base_policy_path + ['class', '20', 'priority', '2'])
+ self.cli_set(base_policy_path + ['class', '20', 'queue-type', 'fair-queue'])
+ self.cli_set(base_policy_path + ['default', 'bandwidth', '20%'])
+ self.cli_set(base_policy_path + ['default', 'queue-type', 'fair-queue'])
+ self.cli_commit()
+
+ tc_filters_old = cmd(f'tc -details filter show dev {interface}')
+ self.assertIn('match 00280000/00ff0000', tc_filters_old)
+ self.assertIn('match 00880000/00ff0000', tc_filters_old)
+ self.assertIn('match 00980000/00ff0000', tc_filters_old)
+ self.assertIn('match 00800000/00ff0000', tc_filters_old)
+ self.assertIn('match 00a00000/00ff0000', tc_filters_old)
+ self.assertIn('match 00b80000/00ff0000', tc_filters_old)
+ # delete the config created with the old syntax
+ self.cli_delete(base_policy_path)
+ self.cli_delete(['qos', 'interface', interface, 'egress', 'VyOS-HTB'])
+ self.cli_commit()
+ self.assertEqual('', cmd(f'tc -s filter show dev {interface}'))
+
+ self.cli_set(['qos', 'interface', interface, 'egress', 'VyOS-HTB'])
+ # prepare traffic match group
+ self.cli_set(['qos', 'traffic-match-group', 'VOICE', 'description', 'voice shaper'])
+ self.cli_set(['qos', 'traffic-match-group', 'VOICE', 'match', 'EF', 'ip', 'dscp', 'EF'])
+ self.cli_set(['qos', 'traffic-match-group', 'VOICE', 'match', 'CS5', 'ip', 'dscp', 'CS5'])
+
+ self.cli_set(['qos', 'traffic-match-group', 'REAL_TIME_COMMON', 'description', 'real time common filters'])
+ self.cli_set(['qos', 'traffic-match-group', 'REAL_TIME_COMMON', 'match', 'AF43', 'ip', 'dscp', 'AF43'])
+ self.cli_set(['qos', 'traffic-match-group', 'REAL_TIME_COMMON', 'match', 'CS4', 'ip', 'dscp', 'CS4'])
+
+ self.cli_set(['qos', 'traffic-match-group', 'REAL_TIME', 'description', 'real time shaper'])
+ self.cli_set(['qos', 'traffic-match-group', 'REAL_TIME', 'match', 'AF41', 'ip', 'dscp', 'AF41'])
+ self.cli_set(['qos', 'traffic-match-group', 'REAL_TIME', 'match-group', 'REAL_TIME_COMMON'])
+
+ # new syntax
+ self.cli_set(base_policy_path + ['bandwidth', '100mbit'])
+ self.cli_set(base_policy_path + ['class', '10', 'bandwidth', '40%'])
+ self.cli_set(base_policy_path + ['class', '10', 'match', 'AF11', 'ip', 'dscp', 'AF11'])
+ self.cli_set(base_policy_path + ['class', '10', 'match-group', 'REAL_TIME'])
+ self.cli_set(base_policy_path + ['class', '10', 'priority', '1'])
+ self.cli_set(base_policy_path + ['class', '10', 'queue-type', 'fair-queue'])
+ self.cli_set(base_policy_path + ['class', '20', 'bandwidth', '30%'])
+ self.cli_set(base_policy_path + ['class', '20', 'match-group', 'VOICE'])
+ self.cli_set(base_policy_path + ['class', '20', 'priority', '2'])
+ self.cli_set(base_policy_path + ['class', '20', 'queue-type', 'fair-queue'])
+ self.cli_set(base_policy_path + ['default', 'bandwidth', '20%'])
+ self.cli_set(base_policy_path + ['default', 'queue-type', 'fair-queue'])
+ self.cli_commit()
+
+ self.assertEqual(tc_filters_old, cmd(f'tc -details filter show dev {interface}'))
+
+ def test_16_wrong_traffic_match_group(self):
+ interface = self._interfaces[0]
+ self.cli_set(['qos', 'interface', interface])
+
+ # Can not use both IPv6 and IPv4 in one match
+ self.cli_set(['qos', 'traffic-match-group', '1', 'match', 'one', 'ip', 'dscp', 'EF'])
+ self.cli_set(['qos', 'traffic-match-group', '1', 'match', 'one', 'ipv6', 'dscp', 'EF'])
+ with self.assertRaises(ConfigSessionError) as e:
+ self.cli_commit()
+
+ # check a group referencing itself; commit should succeed
+ self.cli_delete(['qos', 'traffic-match-group', '1', 'match', 'one', 'ipv6'])
+ self.cli_set(['qos', 'traffic-match-group', '1', 'match-group', '1'])
+ self.cli_commit()
+
+ # check cyclic group dependency; commit should succeed
+ self.cli_set(['qos', 'traffic-match-group', '1', 'match-group', '3'])
+ self.cli_set(['qos', 'traffic-match-group', '2', 'match', 'one', 'ip', 'dscp', 'CS4'])
+ self.cli_set(['qos', 'traffic-match-group', '2', 'match-group', '1'])
+
+ self.cli_set(['qos', 'traffic-match-group', '3', 'match', 'one', 'ipv6', 'dscp', 'CS4'])
+ self.cli_set(['qos', 'traffic-match-group', '3', 'match-group', '2'])
+ self.cli_commit()
+
+ # inherit from a non-existent group; commit should succeed with a warning
+ self.cli_set(['qos', 'traffic-match-group', '3', 'match-group', 'unexpected'])
+ self.cli_commit()
+
if __name__ == '__main__':
unittest.main(verbosity=2)
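For reference, the u32 match values asserted in test_15 are the DSCP code points shifted into the IPv4 ToS byte: the ToS value is dscp << 2 and sits in the second byte of the first 32-bit header word, hence the 0x00ff0000 mask. A short sketch reproducing the expected hex values (standard DSCP numbering assumed; this is not taken from the VyOS code):

# DSCP code points used in the smoketest above
dscp = {'AF11': 10, 'CS4': 32, 'AF41': 34, 'AF43': 38, 'CS5': 40, 'EF': 46}

for name, value in dscp.items():
    tos = value << 2                      # DSCP occupies the upper six bits of the ToS byte
    word = tos << 16                      # ToS is the second byte of the first IPv4 header word
    print(f'{name}: match {word:08x}/00ff0000')
# AF11 -> 00280000, CS4 -> 00800000, AF41 -> 00880000,
# AF43 -> 00980000, CS5 -> 00a00000, EF -> 00b80000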
diff --git a/smoketest/scripts/cli/test_service_dns_forwarding.py b/smoketest/scripts/cli/test_service_dns_forwarding.py
index 079c584ba..4db1d7495 100755
--- a/smoketest/scripts/cli/test_service_dns_forwarding.py
+++ b/smoketest/scripts/cli/test_service_dns_forwarding.py
@@ -291,5 +291,15 @@ class TestServicePowerDNS(VyOSUnitTestSHIM.TestCase):
tmp = get_config_value('edns-subnet-allow-list')
self.assertEqual(tmp, ','.join(options))
+ def test_multiple_ns_records(self):
+ test_zone = 'example.com'
+ self.cli_set(base_path + ['authoritative-domain', test_zone, 'records', 'ns', 'test', 'target', f'ns1.{test_zone}'])
+ self.cli_set(base_path + ['authoritative-domain', test_zone, 'records', 'ns', 'test', 'target', f'ns2.{test_zone}'])
+ self.cli_commit()
+ zone_config = read_file(f'{PDNS_REC_RUN_DIR}/zone.{test_zone}.conf')
+ self.assertRegex(zone_config, fr'test\s+\d+\s+NS\s+ns1\.{test_zone}\.')
+ self.assertRegex(zone_config, fr'test\s+\d+\s+NS\s+ns2\.{test_zone}\.')
+
+
if __name__ == '__main__':
unittest.main(verbosity=2)
diff --git a/smoketest/scripts/cli/test_service_https.py b/smoketest/scripts/cli/test_service_https.py
index f2a64627f..8a6386e4f 100755
--- a/smoketest/scripts/cli/test_service_https.py
+++ b/smoketest/scripts/cli/test_service_https.py
@@ -412,6 +412,47 @@ class TestHTTPSService(VyOSUnitTestSHIM.TestCase):
self.assertEqual(r.status_code, 200)
@ignore_warning(InsecureRequestWarning)
+ def test_api_image(self):
+ address = '127.0.0.1'
+ key = 'VyOS-key'
+ url = f'https://{address}/image'
+ headers = {}
+
+ self.cli_set(base_path + ['api', 'keys', 'id', 'key-01', 'key', key])
+ self.cli_commit()
+
+ payload = {
+ 'data': '{"op": "add"}',
+ 'key': f'{key}',
+ }
+ r = request('POST', url, verify=False, headers=headers, data=payload)
+ self.assertEqual(r.status_code, 400)
+ self.assertIn('Missing required field "url"', r.json().get('error'))
+
+ payload = {
+ 'data': '{"op": "delete"}',
+ 'key': f'{key}',
+ }
+ r = request('POST', url, verify=False, headers=headers, data=payload)
+ self.assertEqual(r.status_code, 400)
+ self.assertIn('Missing required field "name"', r.json().get('error'))
+
+ payload = {
+ 'data': '{"op": "set_default"}',
+ 'key': f'{key}',
+ }
+ r = request('POST', url, verify=False, headers=headers, data=payload)
+ self.assertEqual(r.status_code, 400)
+ self.assertIn('Missing required field "name"', r.json().get('error'))
+
+ payload = {
+ 'data': '{"op": "show"}',
+ 'key': f'{key}',
+ }
+ r = request('POST', url, verify=False, headers=headers, data=payload)
+ self.assertEqual(r.status_code, 200)
+
+ @ignore_warning(InsecureRequestWarning)
def test_api_config_file_load_http(self):
# Test load config from HTTP URL
address = '127.0.0.1'
diff --git a/smoketest/scripts/cli/test_service_upnp.py b/smoketest/scripts/cli/test_service_upnp.py
deleted file mode 100755
index fd67b0ced..000000000
--- a/smoketest/scripts/cli/test_service_upnp.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2021-2024 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import unittest
-
-from base_vyostest_shim import VyOSUnitTestSHIM
-
-from vyos.configsession import ConfigSessionError
-from vyos.template import ip_from_cidr
-from vyos.utils.file import read_file
-from vyos.utils.process import process_named_running
-
-UPNP_CONF = '/run/upnp/miniupnp.conf'
-DAEMON = 'miniupnpd'
-interface = 'eth0'
-base_path = ['service', 'upnp']
-address_base = ['interfaces', 'ethernet', interface, 'address']
-
-ipv4_addr = '100.64.0.1/24'
-ipv6_addr = '2001:db8::1/64'
-
-class TestServiceUPnP(VyOSUnitTestSHIM.TestCase):
- @classmethod
- def setUpClass(cls):
- super(TestServiceUPnP, cls).setUpClass()
-
- # ensure we can also run this test on a live system - so lets clean
- # out the current configuration :)
- cls.cli_delete(cls, base_path)
-
- cls.cli_set(cls, address_base + [ipv4_addr])
- cls.cli_set(cls, address_base + [ipv6_addr])
-
- @classmethod
- def tearDownClass(cls):
- cls.cli_delete(cls, address_base)
- cls._session.commit()
-
- super(TestServiceUPnP, cls).tearDownClass()
-
- def tearDown(self):
- # Check for running process
- self.assertTrue(process_named_running(DAEMON))
-
- self.cli_delete(base_path)
- self.cli_commit()
-
- # Check for running process
- self.assertFalse(process_named_running(DAEMON))
-
- def test_ipv4_base(self):
- self.cli_set(base_path + ['nat-pmp'])
- self.cli_set(base_path + ['listen', interface])
-
- # check validate() - WAN interface is mandatory
- with self.assertRaises(ConfigSessionError):
- self.cli_commit()
- self.cli_set(base_path + ['wan-interface', interface])
-
- self.cli_commit()
-
- config = read_file(UPNP_CONF)
- self.assertIn(f'ext_ifname={interface}', config)
- self.assertIn(f'listening_ip={interface}', config)
- self.assertIn(f'enable_natpmp=yes', config)
- self.assertIn(f'enable_upnp=yes', config)
-
- def test_ipv6_base(self):
- v6_addr = ip_from_cidr(ipv6_addr)
-
- self.cli_set(base_path + ['nat-pmp'])
- self.cli_set(base_path + ['listen', interface])
- self.cli_set(base_path + ['listen', v6_addr])
-
- # check validate() - WAN interface is mandatory
- with self.assertRaises(ConfigSessionError):
- self.cli_commit()
- self.cli_set(base_path + ['wan-interface', interface])
-
- self.cli_commit()
-
- config = read_file(UPNP_CONF)
- self.assertIn(f'ext_ifname={interface}', config)
- self.assertIn(f'listening_ip={interface}', config)
- self.assertIn(f'ipv6_listening_ip={v6_addr}', config)
- self.assertIn(f'enable_natpmp=yes', config)
- self.assertIn(f'enable_upnp=yes', config)
-
-if __name__ == '__main__':
- unittest.main(verbosity=2)
diff --git a/smoketest/scripts/cli/test_vpn_l2tp.py b/smoketest/scripts/cli/test_vpn_l2tp.py
index 8c4e53895..07a7e2906 100755
--- a/smoketest/scripts/cli/test_vpn_l2tp.py
+++ b/smoketest/scripts/cli/test_vpn_l2tp.py
@@ -95,6 +95,29 @@ class TestVPNL2TPServer(BasicAccelPPPTest.TestCase):
self.cli_set(base_path + ['authentication', 'protocols', 'chap'])
self.cli_commit()
+ def test_l2tp_radius_server(self):
+ base_path = ['vpn', 'l2tp', 'remote-access']
+ radius_server = "192.0.2.22"
+ radius_key = "secretVyOS"
+
+ self.cli_set(base_path + ['authentication', 'mode', 'radius'])
+ self.cli_set(base_path + ['gateway-address', '192.0.2.1'])
+ self.cli_set(base_path + ['client-ip-pool', 'SIMPLE-POOL', 'range', '192.0.2.0/24'])
+ self.cli_set(base_path + ['default-pool', 'SIMPLE-POOL'])
+ self.cli_set(base_path + ['authentication', 'radius', 'server', radius_server, 'key', radius_key])
+ self.cli_set(base_path + ['authentication', 'radius', 'server', radius_server, 'priority', '10'])
+ self.cli_set(base_path + ['authentication', 'radius', 'server', radius_server, 'backup'])
+
+ # commit changes
+ self.cli_commit()
+
+ # Validate configuration values
+ conf = ConfigParser(allow_no_value=True)
+ conf.read(self._config_file)
+ server = conf["radius"]["server"].split(",")
+ self.assertIn('weight=10', server)
+ self.assertIn('backup', server)
+
if __name__ == '__main__':
unittest.main(verbosity=2)
diff --git a/smoketest/scripts/system/test_kernel_options.py b/smoketest/scripts/system/test_kernel_options.py
index 18922d93d..4666e98e7 100755
--- a/smoketest/scripts/system/test_kernel_options.py
+++ b/smoketest/scripts/system/test_kernel_options.py
@@ -111,5 +111,22 @@ class TestKernelModules(unittest.TestCase):
tmp = re.findall(f'{option}=(y|m)', self._config_data)
self.assertTrue(tmp)
+ def test_vfio(self):
+ options_to_check = [
+ 'CONFIG_VFIO', 'CONFIG_VFIO_GROUP', 'CONFIG_VFIO_CONTAINER',
+ 'CONFIG_VFIO_IOMMU_TYPE1', 'CONFIG_VFIO_NOIOMMU', 'CONFIG_VFIO_VIRQFD'
+ ]
+ for option in options_to_check:
+ tmp = re.findall(f'{option}=(y|m)', self._config_data)
+ self.assertTrue(tmp)
+
+ def test_container_cpu(self):
+ options_to_check = [
+ 'CONFIG_CGROUP_SCHED', 'CONFIG_CPUSETS', 'CONFIG_CGROUP_CPUACCT', 'CONFIG_CFS_BANDWIDTH'
+ ]
+ for option in options_to_check:
+ tmp = re.findall(f'{option}=(y|m)', self._config_data)
+ self.assertTrue(tmp)
+
if __name__ == '__main__':
unittest.main(verbosity=2)
diff --git a/src/activation-scripts/20-ethernet_offload.py b/src/activation-scripts/20-ethernet_offload.py
new file mode 100755
index 000000000..33b0ea469
--- /dev/null
+++ b/src/activation-scripts/20-ethernet_offload.py
@@ -0,0 +1,103 @@
+# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+# T3619: mirror Linux Kernel defaults for ethernet offloading options into VyOS
+# CLI. See https://vyos.dev/T3619#102254 for all the details.
+# T3787: Remove deprecated UDP fragmentation offloading option
+# T6006: add to activation-scripts: migration-scripts/interfaces/20-to-21
+
+from vyos.ethtool import Ethtool
+from vyos.configtree import ConfigTree
+
+def activate(config: ConfigTree):
+ base = ['interfaces', 'ethernet']
+
+ if not config.exists(base):
+ return
+
+ for ifname in config.list_nodes(base):
+ eth = Ethtool(ifname)
+
+ # If GRO is enabled by the Kernel - we reflect this on the CLI. If GRO is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'gro'])
+ enabled, fixed = eth.get_generic_receive_offload()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'gro'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'gro'])
+
+ # If GSO is enabled by the Kernel - we reflect this on the CLI. If GSO is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'gso'])
+ enabled, fixed = eth.get_generic_segmentation_offload()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'gso'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'gso'])
+
+ # If LRO is enabled by the Kernel - we reflect this on the CLI. If LRO is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'lro'])
+ enabled, fixed = eth.get_large_receive_offload()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'lro'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'lro'])
+
+ # If SG is enabled by the Kernel - we reflect this on the CLI. If SG is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'sg'])
+ enabled, fixed = eth.get_scatter_gather()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'sg'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'sg'])
+
+ # If TSO is enabled by the Kernel - we reflect this on the CLI. If TSO is
+ # enabled via CLI but not supported by the NIC - we remove it from the CLI
+ configured = config.exists(base + [ifname, 'offload', 'tso'])
+ enabled, fixed = eth.get_tcp_segmentation_offload()
+ if configured and fixed:
+ config.delete(base + [ifname, 'offload', 'tso'])
+ elif enabled and not fixed:
+ config.set(base + [ifname, 'offload', 'tso'])
+
+ # Remove deprecated UDP fragmentation offloading option
+ if config.exists(base + [ifname, 'offload', 'ufo']):
+ config.delete(base + [ifname, 'offload', 'ufo'])
+
+ # Also while processing the interface configuration, not all adapters support
+ # changing the speed and duplex settings. If the desired speed and duplex
+ # values do not work for the NIC driver, we change them back to the default
+ # value of "auto" - which will be applied if the CLI node is deleted.
+ speed_path = base + [ifname, 'speed']
+ duplex_path = base + [ifname, 'duplex']
+ # speed and duplex must always be set at the same time if not set to "auto"
+ if config.exists(speed_path) and config.exists(duplex_path):
+ speed = config.return_value(speed_path)
+ duplex = config.return_value(duplex_path)
+ if speed != 'auto' and duplex != 'auto':
+ if not eth.check_speed_duplex(speed, duplex):
+ config.delete(speed_path)
+ config.delete(duplex_path)
+
+ # Also while processing the interface configuration, not all adapters support
+ # disabling flow-control. If disabling flow-control is not
+ # supported by the NIC, we remove the setting from the CLI
+ flow_control_path = base + [ifname, 'disable-flow-control']
+ if config.exists(flow_control_path):
+ if not eth.check_flow_control():
+ config.delete(flow_control_path)
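The five offload blocks in this new activation script all follow one pattern: if the option is configured but fixed by the driver, drop it from the CLI; if the kernel enables it and it is not fixed, reflect it on the CLI. A table-driven equivalent could look like the sketch below (an illustrative refactoring only, assuming the Ethtool getter names used above):

OFFLOADS = {
    'gro': 'get_generic_receive_offload',
    'gso': 'get_generic_segmentation_offload',
    'lro': 'get_large_receive_offload',
    'sg' : 'get_scatter_gather',
    'tso': 'get_tcp_segmentation_offload',
}

def sync_offloads(config, eth, base, ifname):
    # Mirror kernel state on the CLI and remove options the NIC cannot change
    for option, getter in OFFLOADS.items():
        path = base + [ifname, 'offload', option]
        configured = config.exists(path)
        enabled, fixed = getattr(eth, getter)()
        if configured and fixed:
            config.delete(path)
        elif enabled and not fixed:
            config.set(path)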
diff --git a/src/completion/list_esi.sh b/src/completion/list_esi.sh
new file mode 100755
index 000000000..b8373fa57
--- /dev/null
+++ b/src/completion/list_esi.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This script is a completion helper to list all valid ESIs that are visible to FRR
+
+esiJson=$(vtysh -c 'show evpn es json')
+echo "$(echo "$esiJson" | jq -r '.[] | .esi')"
diff --git a/src/completion/list_vni.sh b/src/completion/list_vni.sh
new file mode 100755
index 000000000..f8bd4a993
--- /dev/null
+++ b/src/completion/list_vni.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This script is a completion helper to list all configured VNIs that are visible to FRR
+
+vniJson=$(vtysh -c 'show evpn vni json')
+echo "$(echo "$vniJson" | jq -r 'keys | .[]')"
diff --git a/src/completion/qos/list_traffic_match_group.py b/src/completion/qos/list_traffic_match_group.py
new file mode 100644
index 000000000..015d7ada9
--- /dev/null
+++ b/src/completion/qos/list_traffic_match_group.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from vyos.config import Config
+
+
+def get_qos_traffic_match_group():
+ config = Config()
+ base = ['qos', 'traffic-match-group']
+ conf = config.get_config_dict(base, key_mangling=('-', '_'))
+ groups = []
+
+ for group in conf.get('traffic_match_group', []):
+ groups.append(group)
+
+ return groups
+
+
+if __name__ == "__main__":
+ groups = get_qos_traffic_match_group()
+ print(" ".join(groups))
+
diff --git a/src/conf_mode/container.py b/src/conf_mode/container.py
index a73a18ffa..3efeb9b40 100755
--- a/src/conf_mode/container.py
+++ b/src/conf_mode/container.py
@@ -16,6 +16,7 @@
import os
+from decimal import Decimal
from hashlib import sha256
from ipaddress import ip_address
from ipaddress import ip_network
@@ -28,6 +29,7 @@ from vyos.configdict import node_changed
from vyos.configdict import is_node_changed
from vyos.configverify import verify_vrf
from vyos.ifconfig import Interface
+from vyos.cpu import get_core_count
from vyos.utils.file import write_file
from vyos.utils.process import call
from vyos.utils.process import cmd
@@ -127,6 +129,11 @@ def verify(container):
f'locally. Please use "add container image {image}" to add it '\
f'to the system! Container "{name}" will not be started!')
+ if 'cpu_quota' in container_config:
+ cores = get_core_count()
+ if Decimal(container_config['cpu_quota']) > cores:
+ raise ConfigError(f'Cannot set limit to more cores than available "{name}"!')
+
if 'network' in container_config:
if len(container_config['network']) > 1:
raise ConfigError(f'Only one network can be specified for container "{name}"!')
@@ -257,6 +264,7 @@ def verify(container):
def generate_run_arguments(name, container_config):
image = container_config['image']
+ cpu_quota = container_config['cpu_quota']
memory = container_config['memory']
shared_memory = container_config['shared_memory']
restart = container_config['restart']
@@ -329,9 +337,13 @@ def generate_run_arguments(name, container_config):
prop = vol_config['propagation']
volume += f' --volume {svol}:{dvol}:{mode},{prop}'
- container_base_cmd = f'--detach --interactive --tty --replace {capabilities} ' \
+ host_pid = ''
+ if 'allow_host_pid' in container_config:
+ host_pid = '--pid host'
+
+ container_base_cmd = f'--detach --interactive --tty --replace {capabilities} --cpus {cpu_quota} ' \
f'--memory {memory}m --shm-size {shared_memory}m --memory-swap 0 --restart {restart} ' \
- f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label} {uid}'
+ f'--name {name} {hostname} {device} {port} {volume} {env_opt} {label} {uid} {host_pid}'
entrypoint = ''
if 'entrypoint' in container_config:
@@ -339,11 +351,6 @@ def generate_run_arguments(name, container_config):
entrypoint = json_write(container_config['entrypoint'].split()).replace('"', "&quot;")
entrypoint = f'--entrypoint &apos;{entrypoint}&apos;'
- hostname = ''
- if 'host_name' in container_config:
- hostname = container_config['host_name']
- hostname = f'--hostname {hostname}'
-
command = ''
if 'command' in container_config:
command = container_config['command'].strip()
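The new cpu-quota handling has two halves visible above: verify() rejects a quota larger than the host core count, and generate_run_arguments() passes the value straight through as a podman --cpus argument. A minimal standalone illustration of the verify logic (sketch only; ValueError stands in for ConfigError and the core count is passed in explicitly):

from decimal import Decimal

def check_cpu_quota(name: str, cpu_quota: str, cores: int) -> None:
    # e.g. '1.25' on a 2-core host passes, '8' on a 4-core host fails
    if Decimal(cpu_quota) > cores:
        raise ValueError(f'Container "{name}": CPU quota exceeds the {cores} available cores')

check_cpu_quota('c2', '1.25', 2)        # matches the value used in the smoketest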
diff --git a/src/conf_mode/interfaces_openvpn.py b/src/conf_mode/interfaces_openvpn.py
index 0ecffd3be..627cc90ba 100755
--- a/src/conf_mode/interfaces_openvpn.py
+++ b/src/conf_mode/interfaces_openvpn.py
@@ -168,6 +168,14 @@ def verify_pki(openvpn):
'verification, consult the documentation for details.')
if tls:
+ if mode == 'site-to-site':
+ # XXX: site-to-site with PSKs is the only mode that can work without TLS,
+ # so 'tls role' is not mandatory for it,
+ # but we need to check that if it uses peer certificate fingerprints rather than PSKs,
+ # then the TLS role is set
+ if ('shared_secret_key' not in tls) and ('role' not in tls):
+ raise ConfigError('"tls role" is required for site-to-site OpenVPN with TLS')
+
if (mode in ['server', 'client']) and ('ca_certificate' not in tls):
raise ConfigError(f'Must specify "tls ca-certificate" on openvpn interface {interface},\
it is required in server and client modes')
diff --git a/src/conf_mode/interfaces_tunnel.py b/src/conf_mode/interfaces_tunnel.py
index 43ba72857..98ef98d12 100755
--- a/src/conf_mode/interfaces_tunnel.py
+++ b/src/conf_mode/interfaces_tunnel.py
@@ -145,11 +145,20 @@ def verify(tunnel):
# If no IP GRE key is defined we can not have more then one GRE tunnel
# bound to any one interface/IP address and the same remote. This will
# result in a OS PermissionError: add tunnel "gre0" failed: File exists
- if (their_address == our_address or our_source_if == their_source_if) and \
- our_remote == their_remote:
- raise ConfigError(f'Missing required "ip key" parameter when '\
- 'running more then one GRE based tunnel on the '\
- 'same source-interface/source-address')
+ if our_remote == their_remote:
+ if our_address is not None and their_address == our_address:
+ # If set to the same values, this is always a fail
+ raise ConfigError(f'Missing required "ip key" parameter when '\
+ 'running more than one GRE-based tunnel on the '\
+ 'same source-address')
+
+ if their_source_if == our_source_if and their_address == our_address:
+ # Note that the lack of None checks here is deliberate.
+ # source-if and source-ip matching while unset (all None) is a fail
+ # source-ifs set and matching with unset source-ips is a fail
+ raise ConfigError(f'Missing required "ip key" parameter when '\
+ 'running more than one GRE-based tunnel on the '\
+ 'same source-interface')
# Keys are not allowed with ipip and sit tunnels
if tunnel['encapsulation'] in ['ipip', 'sit']:
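The reworked GRE check above separates two failure cases for tunnels sharing a remote: identical (and set) source addresses always clash, while identical source interfaces only clash when the source addresses also match, including both being unset. A condensed sketch of that decision, assuming unset values are represented as None as in the surrounding code:

def gre_key_required(our: dict, their: dict) -> bool:
    # our/their carry 'remote', 'address' and 'source_if' (None when unset)
    if our['remote'] != their['remote']:
        return False
    if our['address'] is not None and our['address'] == their['address']:
        return True                      # same source-address, always a clash
    # same source-interface clashes when the addresses also match (even both None)
    return our['source_if'] == their['source_if'] and our['address'] == their['address']

assert gre_key_required({'remote': '192.0.2.1', 'address': None, 'source_if': 'eth0'},
                        {'remote': '192.0.2.1', 'address': None, 'source_if': 'eth0'})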
diff --git a/src/conf_mode/load-balancing_reverse-proxy.py b/src/conf_mode/load-balancing_reverse-proxy.py
index 1569d8d71..09c68dadd 100755
--- a/src/conf_mode/load-balancing_reverse-proxy.py
+++ b/src/conf_mode/load-balancing_reverse-proxy.py
@@ -26,9 +26,13 @@ from vyos.utils.dict import dict_search
from vyos.utils.process import call
from vyos.utils.network import check_port_availability
from vyos.utils.network import is_listen_port_bind_service
-from vyos.pki import wrap_certificate
-from vyos.pki import wrap_private_key
+from vyos.pki import find_chain
+from vyos.pki import load_certificate
+from vyos.pki import load_private_key
+from vyos.pki import encode_certificate
+from vyos.pki import encode_private_key
from vyos.template import render
+from vyos.utils.file import write_file
from vyos import ConfigError
from vyos import airbag
airbag.enable()
@@ -75,12 +79,21 @@ def verify(lb):
raise ConfigError(f'"TCP" port "{tmp_port}" is used by another service')
for back, back_config in lb['backend'].items():
- if 'http-check' in back_config:
- http_check = back_config['http-check']
+ if 'http_check' in back_config:
+ http_check = back_config['http_check']
if 'expect' in http_check and 'status' in http_check['expect'] and 'string' in http_check['expect']:
raise ConfigError(f'"expect status" and "expect string" can not be configured together!')
+
+ if 'health_check' in back_config:
+ if 'mode' not in back_config or back_config['mode'] != 'tcp':
+ raise ConfigError(f'backend "{back}" can only be configured with {back_config["health_check"]} ' +
+ f'health-check whilst in TCP mode!')
+ if 'http_check' in back_config:
+ raise ConfigError(f'backend "{back}" cannot be configured with both http-check and health-check!')
+
if 'server' not in back_config:
raise ConfigError(f'"{back} server" must be configured!')
+
for bk_server, bk_server_conf in back_config['server'].items():
if 'address' not in bk_server_conf or 'port' not in bk_server_conf:
raise ConfigError(f'"backend {back} server {bk_server} address and port" must be configured!')
@@ -92,12 +105,18 @@ def verify(lb):
if {'no_verify', 'ca_certificate'} <= set(back_config['ssl']):
raise ConfigError(f'backend {back} cannot have both ssl options no-verify and ca-certificate set!')
+ # Check if http-response-headers are configured in any frontend/backend where mode != http
+ for group in ['service', 'backend']:
+ for config_name, config in lb[group].items():
+ if 'http_response_headers' in config and ('mode' not in config or config['mode'] != 'http'):
+ raise ConfigError(f'{group} {config_name} must be set to http mode to use http_response_headers!')
+
for front, front_config in lb['service'].items():
for cert in dict_search('ssl.certificate', front_config) or []:
verify_pki_certificate(lb, cert)
for back, back_config in lb['backend'].items():
- tmp = dict_search('ssl.ca_certificate', front_config)
+ tmp = dict_search('ssl.ca_certificate', back_config)
if tmp: verify_pki_ca_certificate(lb, tmp)
@@ -118,51 +137,54 @@ def generate(lb):
if not os.path.isdir(load_balancing_dir):
os.mkdir(load_balancing_dir)
+ loaded_ca_certs = {load_certificate(c['certificate'])
+ for c in lb['pki']['ca'].values()} if 'ca' in lb['pki'] else {}
+
# SSL Certificates for frontend
for front, front_config in lb['service'].items():
- if 'ssl' in front_config:
-
- if 'certificate' in front_config['ssl']:
- cert_names = front_config['ssl']['certificate']
+ if 'ssl' not in front_config:
+ continue
- for cert_name in cert_names:
- pki_cert = lb['pki']['certificate'][cert_name]
- cert_file_path = os.path.join(load_balancing_dir, f'{cert_name}.pem')
- cert_key_path = os.path.join(load_balancing_dir, f'{cert_name}.pem.key')
+ if 'certificate' in front_config['ssl']:
+ cert_names = front_config['ssl']['certificate']
- with open(cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_cert['certificate']))
+ for cert_name in cert_names:
+ pki_cert = lb['pki']['certificate'][cert_name]
+ cert_file_path = os.path.join(load_balancing_dir, f'{cert_name}.pem')
+ cert_key_path = os.path.join(load_balancing_dir, f'{cert_name}.pem.key')
- if 'private' in pki_cert and 'key' in pki_cert['private']:
- with open(cert_key_path, 'w') as f:
- f.write(wrap_private_key(pki_cert['private']['key']))
+ loaded_pki_cert = load_certificate(pki_cert['certificate'])
+ cert_full_chain = find_chain(loaded_pki_cert, loaded_ca_certs)
- if 'ca_certificate' in front_config['ssl']:
- ca_name = front_config['ssl']['ca_certificate']
- pki_ca_cert = lb['pki']['ca'][ca_name]
- ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
+ write_file(cert_file_path,
+ '\n'.join(encode_certificate(c) for c in cert_full_chain))
- with open(ca_cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_ca_cert['certificate']))
+ if 'private' in pki_cert and 'key' in pki_cert['private']:
+ loaded_key = load_private_key(pki_cert['private']['key'], passphrase=None, wrap_tags=True)
+ key_pem = encode_private_key(loaded_key, passphrase=None)
+ write_file(cert_key_path, key_pem)
# SSL Certificates for backend
for back, back_config in lb['backend'].items():
- if 'ssl' in back_config:
+ if 'ssl' not in back_config:
+ continue
- if 'ca_certificate' in back_config['ssl']:
- ca_name = back_config['ssl']['ca_certificate']
- pki_ca_cert = lb['pki']['ca'][ca_name]
- ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
+ if 'ca_certificate' in back_config['ssl']:
+ ca_name = back_config['ssl']['ca_certificate']
+ ca_cert_file_path = os.path.join(load_balancing_dir, f'{ca_name}.pem')
+ ca_chains = []
- with open(ca_cert_file_path, 'w') as f:
- f.write(wrap_certificate(pki_ca_cert['certificate']))
+ pki_ca_cert = lb['pki']['ca'][ca_name]
+ loaded_ca_cert = load_certificate(pki_ca_cert['certificate'])
+ ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs)
+ ca_chains.append('\n'.join(encode_certificate(c) for c in ca_full_chain))
+ write_file(ca_cert_file_path, '\n'.join(ca_chains))
render(load_balancing_conf_file, 'load-balancing/haproxy.cfg.j2', lb)
render(systemd_override, 'load-balancing/override_haproxy.conf.j2', lb)
return None
-
def apply(lb):
call('systemctl daemon-reload')
if not lb:
diff --git a/src/conf_mode/nat.py b/src/conf_mode/nat.py
index 4cd9b570d..f74bb217e 100755
--- a/src/conf_mode/nat.py
+++ b/src/conf_mode/nat.py
@@ -17,7 +17,6 @@
import os
from sys import exit
-from netifaces import interfaces
from vyos.base import Warning
from vyos.config import Config
@@ -30,6 +29,7 @@ from vyos.utils.dict import dict_search_args
from vyos.utils.process import cmd
from vyos.utils.process import run
from vyos.utils.network import is_addr_assigned
+from vyos.utils.network import interface_exists
from vyos import ConfigError
from vyos import airbag
@@ -149,8 +149,12 @@ def verify(nat):
if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']:
raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"')
elif 'name' in config['outbound_interface']:
- if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces():
- Warning(f'NAT interface "{config["outbound_interface"]["name"]}" for source NAT rule "{rule}" does not exist!')
+ interface_name = config['outbound_interface']['name']
+ if interface_name not in 'any':
+ if interface_name.startswith('!'):
+ interface_name = interface_name[1:]
+ if not interface_exists(interface_name):
+ Warning(f'Interface "{interface_name}" for source NAT rule "{rule}" does not exist!')
else:
group_name = config['outbound_interface']['group']
if group_name[0] == '!':
@@ -182,8 +186,12 @@ def verify(nat):
if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']:
raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"')
elif 'name' in config['inbound_interface']:
- if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces():
- Warning(f'NAT interface "{config["inbound_interface"]["name"]}" for destination NAT rule "{rule}" does not exist!')
+ interface_name = config['inbound_interface']['name']
+ if interface_name not in 'any':
+ if interface_name.startswith('!'):
+ interface_name = interface_name[1:]
+ if not interface_exists(interface_name):
+ Warning(f'Interface "{interface_name}" for destination NAT rule "{rule}" does not exist!')
else:
group_name = config['inbound_interface']['group']
if group_name[0] == '!':
diff --git a/src/conf_mode/nat64.py b/src/conf_mode/nat64.py
index c1e7ebf85..32a1c47d1 100755
--- a/src/conf_mode/nat64.py
+++ b/src/conf_mode/nat64.py
@@ -20,7 +20,7 @@ import csv
import os
import re
-from ipaddress import IPv6Network
+from ipaddress import IPv6Network, IPv6Address
from json import dumps as json_write
from vyos import ConfigError
@@ -103,8 +103,14 @@ def verify(nat64) -> None:
# Verify that source.prefix is set and is a /96
if not dict_search("source.prefix", instance):
raise ConfigError(f"Source NAT64 rule {rule} missing source prefix")
- if IPv6Network(instance["source"]["prefix"]).prefixlen != 96:
+ src_prefix = IPv6Network(instance["source"]["prefix"])
+ if src_prefix.prefixlen != 96:
raise ConfigError(f"Source NAT64 rule {rule} source prefix must be /96")
+ if (int(src_prefix[0]) & int(IPv6Address('0:0:0:0:ff00::'))) != 0:
+ raise ConfigError(
+ f'Source NAT64 rule {rule} source prefix is not RFC6052-compliant: '
+ 'bits 64 to 71 (9th octet) must be zeroed'
+ )
pools = dict_search("translation.pool", instance)
if pools:
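The new NAT64 check enforces the RFC 6052 address format: in a /96 translation prefix, bits 64 to 71 (the reserved 9th octet) must be zero. A quick standalone illustration of the same bit test, using the well-known NAT64 prefix as the passing case:

from ipaddress import IPv6Address, IPv6Network

RESERVED_OCTET_MASK = int(IPv6Address('0:0:0:0:ff00::'))    # bits 64-71

def rfc6052_compliant(prefix: str) -> bool:
    net = IPv6Network(prefix)
    return net.prefixlen == 96 and (int(net[0]) & RESERVED_OCTET_MASK) == 0

assert rfc6052_compliant('64:ff9b::/96')                    # well-known NAT64 prefix
assert not rfc6052_compliant('2001:db8:0:0:ff00::/96')      # 9th octet is 0xff -> rejected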
diff --git a/src/conf_mode/nat66.py b/src/conf_mode/nat66.py
index fe017527d..075738dad 100755
--- a/src/conf_mode/nat66.py
+++ b/src/conf_mode/nat66.py
@@ -17,15 +17,15 @@
import os
from sys import exit
-from netifaces import interfaces
from vyos.base import Warning
from vyos.config import Config
from vyos.configdep import set_dependents, call_dependents
from vyos.template import render
-from vyos.utils.process import cmd
-from vyos.utils.kernel import check_kmod
from vyos.utils.dict import dict_search
+from vyos.utils.kernel import check_kmod
+from vyos.utils.network import interface_exists
+from vyos.utils.process import cmd
from vyos.template import is_ipv6
from vyos import ConfigError
from vyos import airbag
@@ -64,8 +64,12 @@ def verify(nat):
if 'name' in config['outbound_interface'] and 'group' in config['outbound_interface']:
raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for nat source rule "{rule}"')
elif 'name' in config['outbound_interface']:
- if config['outbound_interface']['name'] not in 'any' and config['outbound_interface']['name'] not in interfaces():
- Warning(f'NAT66 interface "{config["outbound_interface"]["name"]}" for source NAT66 rule "{rule}" does not exist!')
+ interface_name = config['outbound_interface']['name']
+ if interface_name not in 'any':
+ if interface_name.startswith('!'):
+ interface_name = interface_name[1:]
+ if not interface_exists(interface_name):
+ Warning(f'Interface "{interface_name}" for source NAT66 rule "{rule}" does not exist!')
addr = dict_search('translation.address', config)
if addr != None:
@@ -88,8 +92,12 @@ def verify(nat):
if 'name' in config['inbound_interface'] and 'group' in config['inbound_interface']:
raise ConfigError(f'{err_msg} cannot specify both interface group and interface name for destination nat rule "{rule}"')
elif 'name' in config['inbound_interface']:
- if config['inbound_interface']['name'] not in 'any' and config['inbound_interface']['name'] not in interfaces():
- Warning(f'NAT66 interface "{config["inbound_interface"]["name"]}" for destination NAT66 rule "{rule}" does not exist!')
+ interface_name = config['inbound_interface']['name']
+ if interface_name not in 'any':
+ if interface_name.startswith('!'):
+ interface_name = interface_name[1:]
+ if not interface_exists(interface_name):
+ Warning(f'Interface "{interface_name}" for destination NAT66 rule "{rule}" does not exist!')
return None
diff --git a/src/conf_mode/nat_cgnat.py b/src/conf_mode/nat_cgnat.py
index f41d66c66..957b12c28 100755
--- a/src/conf_mode/nat_cgnat.py
+++ b/src/conf_mode/nat_cgnat.py
@@ -189,11 +189,6 @@ def verify(config):
if 'rule' not in config:
raise ConfigError(f'Rule must be defined!')
- # As PoC allow only one rule for CGNAT translations
- # one internal pool and one external pool
- if len(config['rule']) > 1:
- raise ConfigError(f'Only one rule is allowed for translations!')
-
for pool in ('external', 'internal'):
if pool not in config['pool']:
raise ConfigError(f'{pool} pool must be defined!')
@@ -203,6 +198,13 @@ def verify(config):
f'Range for "{pool} pool {pool_name}" must be defined!'
)
+ external_pools_query = "keys(pool.external)"
+ external_pools: list = jmespath.search(external_pools_query, config)
+ internal_pools_query = "keys(pool.internal)"
+ internal_pools: list = jmespath.search(internal_pools_query, config)
+
+ used_external_pools = {}
+ used_internal_pools = {}
for rule, rule_config in config['rule'].items():
if 'source' not in rule_config:
raise ConfigError(f'Rule "{rule}" source pool must be defined!')
@@ -212,49 +214,86 @@ def verify(config):
if 'translation' not in rule_config:
raise ConfigError(f'Rule "{rule}" translation pool must be defined!')
+ # Check if pool exists
+ internal_pool = rule_config['source']['pool']
+ if internal_pool not in internal_pools:
+ raise ConfigError(f'Internal pool "{internal_pool}" does not exist!')
+ external_pool = rule_config['translation']['pool']
+ if external_pool not in external_pools:
+ raise ConfigError(f'External pool "{external_pool}" does not exist!')
+
+ # Check pool duplication in different rules
+ if external_pool in used_external_pools:
+ raise ConfigError(
+ f'External pool "{external_pool}" is already used in rule '
+ f'{used_external_pools[external_pool]} and cannot be used in '
+ f'rule {rule}!'
+ )
+
+ if internal_pool in used_internal_pools:
+ raise ConfigError(
+ f'Internal pool "{internal_pool}" is already used in rule '
+ f'{used_internal_pools[internal_pool]} and cannot be used in '
+ f'rule {rule}!'
+ )
+
+ used_external_pools[external_pool] = rule
+ used_internal_pools[internal_pool] = rule
+
def generate(config):
if not config:
return None
- # first external pool as we allow only one as PoC
- ext_pool_name = jmespath.search("rule.*.translation | [0]", config).get('pool')
- int_pool_name = jmespath.search("rule.*.source | [0]", config).get('pool')
- ext_query = f"pool.external.{ext_pool_name}.range | keys(@)"
- int_query = f"pool.internal.{int_pool_name}.range"
- external_ranges = jmespath.search(ext_query, config)
- internal_ranges = [jmespath.search(int_query, config)]
-
- external_list_count = []
- external_list_hosts = []
- internal_list_count = []
- internal_list_hosts = []
- for ext_range in external_ranges:
- # External hosts count
- e_count = IPOperations(ext_range).get_ips_count()
- external_list_count.append(e_count)
- # External hosts list
- e_hosts = IPOperations(ext_range).convert_prefix_to_list_ips()
- external_list_hosts.extend(e_hosts)
- for int_range in internal_ranges:
- # Internal hosts count
- i_count = IPOperations(int_range).get_ips_count()
- internal_list_count.append(i_count)
- # Internal hosts list
- i_hosts = IPOperations(int_range).convert_prefix_to_list_ips()
- internal_list_hosts.extend(i_hosts)
-
- external_host_count = sum(external_list_count)
- internal_host_count = sum(internal_list_count)
- ports_per_user = int(
- jmespath.search(f'pool.external.{ext_pool_name}.per_user_limit.port', config)
- )
- external_port_range: str = jmespath.search(
- f'pool.external.{ext_pool_name}.external_port_range', config
- )
- proto_maps, other_maps = generate_port_rules(
- external_list_hosts, internal_list_hosts, ports_per_user, external_port_range
- )
+ proto_maps = []
+ other_maps = []
+
+ for rule, rule_config in config['rule'].items():
+ ext_pool_name: str = rule_config['translation']['pool']
+ int_pool_name: str = rule_config['source']['pool']
+
+ # Sort the external ranges by sequence
+ external_ranges: list = sorted(
+ config['pool']['external'][ext_pool_name]['range'],
+ key=lambda r: int(config['pool']['external'][ext_pool_name]['range'][r].get('seq', 999999))
+ )
+ internal_ranges: list = [range for range in config['pool']['internal'][int_pool_name]['range']]
+ external_list_hosts_count = []
+ external_list_hosts = []
+ internal_list_hosts_count = []
+ internal_list_hosts = []
+
+ for ext_range in external_ranges:
+ # External hosts count
+ e_count = IPOperations(ext_range).get_ips_count()
+ external_list_hosts_count.append(e_count)
+ # External hosts list
+ e_hosts = IPOperations(ext_range).convert_prefix_to_list_ips()
+ external_list_hosts.extend(e_hosts)
+
+ for int_range in internal_ranges:
+ # Internal hosts count
+ i_count = IPOperations(int_range).get_ips_count()
+ internal_list_hosts_count.append(i_count)
+ # Internal hosts list
+ i_hosts = IPOperations(int_range).convert_prefix_to_list_ips()
+ internal_list_hosts.extend(i_hosts)
+
+ external_host_count = sum(external_list_hosts_count)
+ internal_host_count = sum(internal_list_hosts_count)
+ ports_per_user = int(
+ jmespath.search(f'pool.external."{ext_pool_name}".per_user_limit.port', config)
+ )
+ external_port_range: str = jmespath.search(
+ f'pool.external."{ext_pool_name}".external_port_range', config
+ )
+
+ rule_proto_maps, rule_other_maps = generate_port_rules(
+ external_list_hosts, internal_list_hosts, ports_per_user, external_port_range
+ )
+
+ proto_maps.extend(rule_proto_maps)
+ other_maps.extend(rule_other_maps)
config['proto_map_elements'] = ', '.join(proto_maps)
config['other_map_elements'] = ', '.join(other_maps)
diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py
index 1c01a9013..1361bb1a9 100755
--- a/src/conf_mode/protocols_bfd.py
+++ b/src/conf_mode/protocols_bfd.py
@@ -49,7 +49,7 @@ def verify(bfd):
for peer, peer_config in bfd['peer'].items():
# IPv6 link local peers require an explicit local address/interface
if is_ipv6_link_local(peer):
- if 'source' not in peer_config or len(peer_config['source'] < 2):
+ if 'source' not in peer_config or len(peer_config['source']) < 2:
raise ConfigError('BFD IPv6 link-local peers require explicit local address and interface setting')
# IPv6 peers require an explicit local address
diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py
index 8a590cbc6..45248fb4a 100755
--- a/src/conf_mode/qos.py
+++ b/src/conf_mode/qos.py
@@ -17,6 +17,7 @@
from sys import exit
from netifaces import interfaces
+from vyos.base import Warning
from vyos.config import Config
from vyos.configdep import set_dependents
from vyos.configdep import call_dependents
@@ -89,6 +90,36 @@ def _clean_conf_dict(conf):
return conf
+def _get_group_filters(config: dict, group_name: str, visited=None) -> dict:
+ filters = dict()
+ if not visited:
+ visited = [group_name, ]
+ else:
+ if group_name in visited:
+ return filters
+ visited.append(group_name)
+
+ for filter, filter_config in config.get(group_name, {}).items():
+ if filter == 'match':
+ for match, match_config in filter_config.items():
+ filters[f'{group_name}-{match}'] = match_config
+ elif filter == 'match_group':
+ for group in filter_config:
+ filters.update(_get_group_filters(config, group, visited))
+
+ return filters
+
+
+def _get_group_match(config:dict, group_name:str) -> dict:
+ match = dict()
+ for key, val in _get_group_filters(config, group_name).items():
+ # delete duplicate matches
+ if val not in match.values():
+ match[key] = val
+
+ return match
+
+
def get_config(config=None):
if config:
conf = config
@@ -135,11 +166,27 @@ def get_config(config=None):
qos = conf.merge_defaults(qos, recursive=True)
+ if 'traffic_match_group' in qos:
+ for group, group_config in qos['traffic_match_group'].items():
+ if 'match_group' in group_config:
+ qos['traffic_match_group'][group]['match'] = _get_group_match(qos['traffic_match_group'], group)
+
for policy in qos.get('policy', []):
for p_name, p_config in qos['policy'][policy].items():
# cleanup empty match config
if 'class' in p_config:
for cls, cls_config in p_config['class'].items():
+ if 'match_group' in cls_config:
+ # merge group match to match
+ for group in cls_config['match_group']:
+ for match, match_conf in qos.get('traffic_match_group', {}).get(group, {}).get('match', {}).items():
+ if 'match' not in cls_config:
+ cls_config['match'] = dict()
+ if match in cls_config['match']:
+ cls_config['match'][f'{group}-{match}'] = match_conf
+ else:
+ cls_config['match'][match] = match_conf
+
if 'match' in cls_config:
cls_config['match'] = _clean_conf_dict(cls_config['match'])
if cls_config['match'] == {}:
@@ -147,6 +194,22 @@ def get_config(config=None):
return qos
+
+def _verify_match(cls_config: dict) -> None:
+ if 'match' in cls_config:
+ for match, match_config in cls_config['match'].items():
+ if {'ip', 'ipv6'} <= set(match_config):
+ raise ConfigError(
+ f'Can not use both IPv6 and IPv4 in one match ({match})!')
+
+
+def _verify_match_group_exist(cls_config, qos):
+ if 'match_group' in cls_config:
+ for group in cls_config['match_group']:
+ if 'traffic_match_group' not in qos or group not in qos['traffic_match_group']:
+ Warning(f'Match group "{group}" does not exist!')
+
+
def verify(qos):
if not qos or 'interface' not in qos:
return None
@@ -174,11 +237,8 @@ def verify(qos):
# bandwidth is not mandatory for priority-queue - that is why this is on the exception list
if 'bandwidth' not in cls_config and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']:
raise ConfigError(f'Bandwidth must be defined for policy "{policy}" class "{cls}"!')
- if 'match' in cls_config:
- for match, match_config in cls_config['match'].items():
- if {'ip', 'ipv6'} <= set(match_config):
- raise ConfigError(f'Can not use both IPv6 and IPv4 in one match ({match})!')
-
+ _verify_match(cls_config)
+ _verify_match_group_exist(cls_config, qos)
if policy_type in ['random_detect']:
if 'precedence' in policy_config:
for precedence, precedence_config in policy_config['precedence'].items():
@@ -216,8 +276,14 @@ def verify(qos):
if direction not in tmp:
raise ConfigError(f'Selected QoS policy on interface "{interface}" only supports "{tmp}"!')
+ if 'traffic_match_group' in qos:
+ for group, group_config in qos['traffic_match_group'].items():
+ _verify_match(group_config)
+ _verify_match_group_exist(group_config, qos)
+
return None
+
def generate(qos):
if not qos or 'interface' not in qos:
return None
@@ -254,6 +320,7 @@ def apply(qos):
return None
+
if __name__ == '__main__':
try:
c = get_config()
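
For reference, a self-contained sketch of the group flattening performed by _get_group_filters() and _get_group_match() above; the group names and match bodies are invented, and the visited list plays the same role as in the original, stopping circular match-group references.

    # Invented traffic-match-group data: 'web' pulls in 'tcp-any' and also
    # (incorrectly) references itself, which the visited list must absorb.
    groups = {
        'web': {
            'match': {'http': {'ip': {'protocol': 'tcp', 'destination': {'port': '80'}}}},
            'match_group': ['tcp-any', 'web'],
        },
        'tcp-any': {
            'match': {'tcp': {'ip': {'protocol': 'tcp'}}},
        },
    }

    def flatten(config, name, visited=None):
        visited = visited or []
        if name in visited or name not in config:
            return {}
        visited.append(name)
        result = {}
        for key, value in config[name].items():
            if key == 'match':
                for match, match_config in value.items():
                    result[f'{name}-{match}'] = match_config
            elif key == 'match_group':
                for child in value:
                    result.update(flatten(config, child, visited))
        return result

    print(sorted(flatten(groups, 'web')))
    # ['tcp-any-tcp', 'web-http']
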
diff --git a/src/conf_mode/service_dhcpv6-server.py b/src/conf_mode/service_dhcpv6-server.py
index c7333dd3a..7af88007c 100755
--- a/src/conf_mode/service_dhcpv6-server.py
+++ b/src/conf_mode/service_dhcpv6-server.py
@@ -106,14 +106,14 @@ def verify(dhcpv6):
# Stop address must be greater or equal to start address
if not ip_address(stop) >= ip_address(start):
- raise ConfigError(f'Range stop address "{stop}" must be greater then or equal ' \
+ raise ConfigError(f'Range stop address "{stop}" must be greater than or equal ' \
f'to the range start address "{start}"!')
# DHCPv6 range start address must be unique - two ranges can't
# start with the same address - makes no sense
if start in range6_start:
raise ConfigError(f'Conflicting DHCPv6 lease range: '\
- f'Pool start address "{start}" defined multipe times!')
+ f'Pool start address "{start}" defined multiple times!')
range6_start.append(start)
@@ -121,7 +121,7 @@ def verify(dhcpv6):
# end with the same address - makes no sense
if stop in range6_stop:
raise ConfigError(f'Conflicting DHCPv6 lease range: '\
- f'Pool stop address "{stop}" defined multipe times!')
+ f'Pool stop address "{stop}" defined multiple times!')
range6_stop.append(stop)
@@ -180,7 +180,7 @@ def verify(dhcpv6):
if 'option' in subnet_config:
if 'vendor_option' in subnet_config['option']:
if len(dict_search('option.vendor_option.cisco.tftp_server', subnet_config)) > 2:
- raise ConfigError(f'No more then two Cisco tftp-servers should be defined for subnet "{subnet}"!')
+ raise ConfigError(f'No more than two Cisco tftp-servers should be defined for subnet "{subnet}"!')
# Subnets must be unique
if subnet in subnets:
diff --git a/src/conf_mode/service_dns_forwarding.py b/src/conf_mode/service_dns_forwarding.py
index 7e863073a..70686534f 100755
--- a/src/conf_mode/service_dns_forwarding.py
+++ b/src/conf_mode/service_dns_forwarding.py
@@ -102,7 +102,7 @@ def get_config(config=None):
'ttl': rdata['ttl'],
'value': address
})
- elif rtype in ['cname', 'ptr', 'ns']:
+ elif rtype in ['cname', 'ptr']:
if not 'target' in rdata:
dns['authoritative_zone_errors'].append(f'{subnode}.{node}: target is required')
continue
@@ -113,6 +113,19 @@ def get_config(config=None):
'ttl': rdata['ttl'],
'value': '{}.'.format(rdata['target'])
})
+ elif rtype == 'ns':
+ if not 'target' in rdata:
+ dns['authoritative_zone_errors'].append(f'{subnode}.{node}: at least one target is required')
+ continue
+
+ for target in rdata['target']:
+ zone['records'].append({
+ 'name': subnode,
+ 'type': rtype.upper(),
+ 'ttl': rdata['ttl'],
+ 'value': f'{target}.'
+ })
+
elif rtype == 'mx':
if not 'server' in rdata:
dns['authoritative_zone_errors'].append(f'{subnode}.{node}: at least one server is required')
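
A tiny illustration of the new NS handling above, with a fabricated rdata entry: each target becomes its own record, and the trailing dot marks the value as a fully qualified name.

    # Fabricated rdata for an NS record set with two name servers
    rtype = 'ns'
    rdata = {'ttl': '300', 'target': ['ns1.example.com', 'ns2.example.com']}

    records = [
        {'name': 'sub', 'type': rtype.upper(), 'ttl': rdata['ttl'], 'value': f'{target}.'}
        for target in rdata['target']
    ]
    for record in records:
        print(record)
    # {'name': 'sub', 'type': 'NS', 'ttl': '300', 'value': 'ns1.example.com.'}
    # {'name': 'sub', 'type': 'NS', 'ttl': '300', 'value': 'ns2.example.com.'}
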
diff --git a/src/conf_mode/service_suricata.py b/src/conf_mode/service_suricata.py
new file mode 100755
index 000000000..69b369e0b
--- /dev/null
+++ b/src/conf_mode/service_suricata.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from sys import exit
+
+from vyos.base import Warning
+from vyos.config import Config
+from vyos.template import render
+from vyos.utils.process import call
+from vyos import ConfigError
+from vyos import airbag
+airbag.enable()
+
+config_file = '/run/suricata/suricata.yaml'
+rotate_file = '/etc/logrotate.d/suricata'
+
+def get_config(config=None):
+ if config:
+ conf = config
+ else:
+ conf = Config()
+ base = ['service', 'suricata']
+
+ if not conf.exists(base):
+ return None
+
+ suricata = conf.get_config_dict(base, key_mangling=('-', '_'),
+ get_first_key=True, with_recursive_defaults=True)
+
+ return suricata
+
+# https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search
+def topological_sort(source):
+ sorted_nodes = []
+ permanent_marks = set()
+ temporary_marks = set()
+
+ def visit(n, v):
+ if n in permanent_marks:
+ return
+ if n in temporary_marks:
+ raise ConfigError('At least one cycle exists in the referenced groups')
+
+ temporary_marks.add(n)
+
+ for m in v.get('group', []):
+ m = m.lstrip('!')
+ if m not in source:
+ raise ConfigError(f'Undefined referenced group "{m}"')
+ visit(m, source[m])
+
+ temporary_marks.remove(n)
+ permanent_marks.add(n)
+ sorted_nodes.append((n, v))
+
+ while len(permanent_marks) < len(source):
+ n = next(n for n in source.keys() if n not in permanent_marks)
+ visit(n, source[n])
+
+ return sorted_nodes
+
+def verify(suricata):
+ if not suricata:
+ return None
+
+ if 'interface' not in suricata:
+ raise ConfigError('No interfaces configured!')
+
+ if 'address_group' not in suricata:
+ raise ConfigError('No address-group configured!')
+
+ if 'port_group' not in suricata:
+ raise ConfigError('No port-group configured!')
+
+ try:
+ topological_sort(suricata['address_group'])
+ except (ConfigError,StopIteration) as e:
+ raise ConfigError(f'Invalid address-group: {e}')
+
+ try:
+ topological_sort(suricata['port_group'])
+ except (ConfigError,StopIteration) as e:
+ raise ConfigError(f'Invalid port-group: {e}')
+
+def generate(suricata):
+ if not suricata:
+ for file in [config_file, rotate_file]:
+ if os.path.isfile(file):
+ os.unlink(file)
+
+ return None
+
+ # Config-related formatters
+ def to_var(s:str):
+ return s.replace('-','_').upper()
+
+ def to_val(s:str):
+ return s.replace('-',':')
+
+ def to_ref(s:str):
+ if s[0] == '!':
+ return '!$' + to_var(s[1:])
+ return '$' + to_var(s)
+
+ def to_config(kind:str):
+ def format_group(group):
+ (name, value) = group
+ members = [to_val(member) for member in value.get(kind, [])]
+ refs = [to_ref(ref) for ref in value.get('group', [])]
+ return (to_var(name), members + refs)
+ return format_group
+
+ # Format the address group
+ suricata['address_group'] = map(to_config('address'),
+ topological_sort(suricata['address_group']))
+
+ # Format the port group
+ suricata['port_group'] = map(to_config('port'),
+ topological_sort(suricata['port_group']))
+
+ render(config_file, 'ids/suricata.j2', {'suricata': suricata})
+ render(rotate_file, 'ids/suricata_logrotate.j2', suricata)
+ return None
+
+def apply(suricata):
+ systemd_service = 'suricata.service'
+ if not suricata or 'interface' not in suricata:
+ # Stop suricata service if removed
+ call(f'systemctl stop {systemd_service}')
+ else:
+ Warning('To fetch the latest rules, use "update suricata"; '
+ 'to fetch them periodically, '
+ 'use the task scheduler!')
+ call(f'systemctl restart {systemd_service}')
+
+ return None
+
+if __name__ == '__main__':
+ try:
+ c = get_config()
+ verify(c)
+ generate(c)
+ apply(c)
+ except ConfigError as e:
+ print(e)
+ exit(1)
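
A hedged, standalone usage sketch of the depth-first topological sort defined above; the group names are invented and ConfigError is swapped for a plain ValueError so the snippet runs without the vyos package.

    # Same DFS topological sort as above, made standalone for illustration.
    def topological_sort(source):
        sorted_nodes, permanent, temporary = [], set(), set()

        def visit(name, value):
            if name in permanent:
                return
            if name in temporary:
                raise ValueError('at least one cycle exists in the referenced groups')
            temporary.add(name)
            for ref in value.get('group', []):
                ref = ref.lstrip('!')          # '!LAN' still depends on 'LAN'
                if ref not in source:
                    raise ValueError(f'undefined referenced group "{ref}"')
                visit(ref, source[ref])
            temporary.remove(name)
            permanent.add(name)
            sorted_nodes.append((name, value))

        while len(permanent) < len(source):
            name = next(n for n in source if n not in permanent)
            visit(name, source[name])
        return sorted_nodes

    address_groups = {
        'EXTERNAL': {'group': ['!LAN']},       # defined in terms of LAN, negated
        'LAN': {'address': ['192.0.2.0/24']},
    }
    print([name for name, _ in topological_sort(address_groups)])
    # ['LAN', 'EXTERNAL'] -- referenced groups always come first
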
diff --git a/src/conf_mode/service_upnp.py b/src/conf_mode/service_upnp.py
deleted file mode 100755
index 0df8dc09e..000000000
--- a/src/conf_mode/service_upnp.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2021-2022 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from sys import exit
-import uuid
-import netifaces
-from ipaddress import IPv4Network
-from ipaddress import IPv6Network
-
-from vyos.config import Config
-from vyos.utils.process import call
-from vyos.template import render
-from vyos.template import is_ipv4
-from vyos.template import is_ipv6
-from vyos import ConfigError
-from vyos import airbag
-airbag.enable()
-
-config_file = r'/run/upnp/miniupnp.conf'
-
-def get_config(config=None):
- if config:
- conf = config
- else:
- conf = Config()
-
- base = ['service', 'upnp']
- upnpd = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
-
- if not upnpd:
- return None
-
- upnpd = conf.merge_defaults(upnpd, recursive=True)
-
- uuidgen = uuid.uuid1()
- upnpd.update({'uuid': uuidgen})
-
- return upnpd
-
-def get_all_interface_addr(prefix, filter_dev, filter_family):
- list_addr = []
- for interface in netifaces.interfaces():
- if filter_dev and interface in filter_dev:
- continue
- addrs = netifaces.ifaddresses(interface)
- if netifaces.AF_INET in addrs.keys():
- if netifaces.AF_INET in filter_family:
- for addr in addrs[netifaces.AF_INET]:
- if prefix:
- # we need to manually assemble a list of IPv4 address/prefix
- prefix = '/' + \
- str(IPv4Network('0.0.0.0/' + addr['netmask']).prefixlen)
- list_addr.append(addr['addr'] + prefix)
- else:
- list_addr.append(addr['addr'])
- if netifaces.AF_INET6 in addrs.keys():
- if netifaces.AF_INET6 in filter_family:
- for addr in addrs[netifaces.AF_INET6]:
- if prefix:
- # we need to manually assemble a list of IPv4 address/prefix
- bits = bin(int(addr['netmask'].replace(':', '').split('/')[0], 16)).count('1')
- prefix = '/' + str(bits)
- list_addr.append(addr['addr'] + prefix)
- else:
- list_addr.append(addr['addr'])
-
- return list_addr
-
-def verify(upnpd):
- if not upnpd:
- return None
-
- if 'wan_interface' not in upnpd:
- raise ConfigError('To enable UPNP, you must have the "wan-interface" option!')
-
- if 'rule' in upnpd:
- for rule, rule_config in upnpd['rule'].items():
- for option in ['external_port_range', 'internal_port_range', 'ip', 'action']:
- if option not in rule_config:
- tmp = option.replace('_', '-')
- raise ConfigError(f'Every UPNP rule requires "{tmp}" to be set!')
-
- if 'stun' in upnpd:
- for option in ['host', 'port']:
- if option not in upnpd['stun']:
- raise ConfigError(f'A UPNP stun support must have an "{option}" option!')
-
- # Check the validity of the IP address
- listen_dev = []
- system_addrs_cidr = get_all_interface_addr(True, [], [netifaces.AF_INET, netifaces.AF_INET6])
- system_addrs = get_all_interface_addr(False, [], [netifaces.AF_INET, netifaces.AF_INET6])
- if 'listen' not in upnpd:
- raise ConfigError(f'Listen address or interface is required!')
- for listen_if_or_addr in upnpd['listen']:
- if listen_if_or_addr not in netifaces.interfaces():
- listen_dev.append(listen_if_or_addr)
- if (listen_if_or_addr not in system_addrs) and (listen_if_or_addr not in system_addrs_cidr) and \
- (listen_if_or_addr not in netifaces.interfaces()):
- if is_ipv4(listen_if_or_addr) and IPv4Network(listen_if_or_addr).is_multicast:
- raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed'
- f'to listen on. It is not an interface address nor a multicast address!')
- if is_ipv6(listen_if_or_addr) and IPv6Network(listen_if_or_addr).is_multicast:
- raise ConfigError(f'The address "{listen_if_or_addr}" is an address that is not allowed'
- f'to listen on. It is not an interface address nor a multicast address!')
-
- system_listening_dev_addrs_cidr = get_all_interface_addr(True, listen_dev, [netifaces.AF_INET6])
- system_listening_dev_addrs = get_all_interface_addr(False, listen_dev, [netifaces.AF_INET6])
- for listen_if_or_addr in upnpd['listen']:
- if listen_if_or_addr not in netifaces.interfaces() and \
- (listen_if_or_addr not in system_listening_dev_addrs_cidr) and \
- (listen_if_or_addr not in system_listening_dev_addrs) and \
- is_ipv6(listen_if_or_addr) and \
- (not IPv6Network(listen_if_or_addr).is_multicast):
- raise ConfigError(f'{listen_if_or_addr} must listen on the interface of the network card')
-
-def generate(upnpd):
- if not upnpd:
- return None
-
- if os.path.isfile(config_file):
- os.unlink(config_file)
-
- render(config_file, 'firewall/upnpd.conf.j2', upnpd)
-
-def apply(upnpd):
- systemd_service_name = 'miniupnpd.service'
- if not upnpd:
- # Stop the UPNP service
- call(f'systemctl stop {systemd_service_name}')
- else:
- # Start the UPNP service
- call(f'systemctl restart {systemd_service_name}')
-
-if __name__ == '__main__':
- try:
- c = get_config()
- verify(c)
- generate(c)
- apply(c)
- except ConfigError as e:
- print(e)
- exit(1)
diff --git a/src/etc/systemd/system/suricata.service.d/10-override.conf b/src/etc/systemd/system/suricata.service.d/10-override.conf
new file mode 100644
index 000000000..781256cf5
--- /dev/null
+++ b/src/etc/systemd/system/suricata.service.d/10-override.conf
@@ -0,0 +1,9 @@
+[Service]
+ExecStart=
+ExecStart=/usr/bin/suricata -D --af-packet -c /run/suricata/suricata.yaml --pidfile /run/suricata/suricata.pid
+PIDFile=
+PIDFile=/run/suricata/suricata.pid
+ExecReload=
+ExecReload=/usr/bin/suricatasc -c reload-rules /run/suricata/suricata.socket ; /bin/kill -HUP $MAINPID
+ExecStop=
+ExecStop=/usr/bin/suricatasc -c shutdown /run/suricata/suricata.socket
diff --git a/src/helpers/run-config-activation.py b/src/helpers/run-config-activation.py
new file mode 100755
index 000000000..58293702a
--- /dev/null
+++ b/src/helpers/run-config-activation.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import logging
+from pathlib import Path
+from argparse import ArgumentParser
+
+from vyos.compose_config import ComposeConfig
+from vyos.compose_config import ComposeConfigError
+from vyos.defaults import directories
+
+parser = ArgumentParser()
+parser.add_argument('config_file', type=str,
+ help="configuration file to modify with system-specific settings")
+parser.add_argument('--test-script', type=str,
+ help="test effect of named script")
+
+args = parser.parse_args()
+
+checkpoint_file = '/run/vyos-activate-checkpoint'
+log_file = Path(directories['config']).joinpath('vyos-activate.log')
+
+logger = logging.getLogger(__name__)
+fh = logging.FileHandler(log_file)
+formatter = logging.Formatter('%(message)s')
+fh.setFormatter(formatter)
+logger.addHandler(fh)
+
+if 'vyos-activate-debug' in Path('/proc/cmdline').read_text():
+ print(f'\nactivate-debug enabled: file {checkpoint_file}_* on error')
+ debug = checkpoint_file
+ logger.setLevel(logging.DEBUG)
+else:
+ debug = None
+ logger.setLevel(logging.INFO)
+
+def sort_key(s: Path):
+ s = s.stem
+ pre, rem = re.match(r'(\d*)(?:-)?(.+)', s).groups()
+ return int(pre or 0), rem
+
+def file_ext(file_name: str) -> str:
+ """Return an identifier from file name for checkpoint file extension.
+ """
+ return Path(file_name).stem
+
+script_dir = Path(directories['activate'])
+
+if args.test_script:
+ script_list = [script_dir.joinpath(args.test_script)]
+else:
+ script_list = sorted(script_dir.glob('*.py'), key=sort_key)
+
+config_file = args.config_file
+config_str = Path(config_file).read_text()
+
+compose = ComposeConfig(config_str, checkpoint_file=debug)
+
+for file in script_list:
+ file = file.as_posix()
+ logger.info(f'calling {file}')
+ try:
+ compose.apply_file(file, func_name='activate')
+ except ComposeConfigError as e:
+ if debug:
+ compose.write(f'{compose.checkpoint_file}_{file_ext(file)}')
+ logger.error(f'config-activation error in {file}: {e}')
+
+compose.write(config_file, with_version=True)
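
A small illustration, with invented file names, of how sort_key() above orders activation scripts: a leading number wins numerically rather than lexically, and unnumbered scripts fall back to 0 and then sort by stem.

    # Sketch of the sort_key() ordering above, applied to hypothetical script names.
    import re
    from pathlib import Path

    def sort_key(s: Path):
        s = s.stem
        pre, rem = re.match(r'(\d*)(?:-)?(.+)', s).groups()
        return int(pre or 0), rem

    scripts = [Path(p) for p in ('20-resolv.py', '3-ssh.py', 'misc.py', '10-host-name.py')]
    print([p.name for p in sorted(scripts, key=sort_key)])
    # ['misc.py', '3-ssh.py', '10-host-name.py', '20-resolv.py']
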
diff --git a/src/init/vyos-router b/src/init/vyos-router
index 15e37df07..59004fdc1 100755
--- a/src/init/vyos-router
+++ b/src/init/vyos-router
@@ -22,6 +22,7 @@ declare progname=${0##*/}
declare action=$1; shift
declare -x BOOTFILE=$vyatta_sysconfdir/config/config.boot
+declare -x DEFAULT_BOOTFILE=$vyatta_sysconfdir/config.boot.default
# If vyos-config= boot option is present, use that file instead
for x in $(cat /proc/cmdline); do
@@ -129,9 +130,16 @@ unmount_encrypted_config() {
# if necessary, provide initial config
init_bootfile () {
+ # define and version default boot config if not present
+ if [ ! -r $DEFAULT_BOOTFILE ]; then
+ if [ -f $vyos_data_dir/config.boot.default ]; then
+ cp $vyos_data_dir/config.boot.default $DEFAULT_BOOTFILE
+ $vyos_libexec_dir/system-versions-foot.py >> $DEFAULT_BOOTFILE
+ fi
+ fi
if [ ! -r $BOOTFILE ] ; then
- if [ -f $vyatta_sysconfdir/config.boot.default ]; then
- cp $vyatta_sysconfdir/config.boot.default $BOOTFILE
+ if [ -f $DEFAULT_BOOTFILE ]; then
+ cp $DEFAULT_BOOTFILE $BOOTFILE
else
$vyos_libexec_dir/system-versions-foot.py > $BOOTFILE
fi
@@ -149,6 +157,15 @@ migrate_bootfile ()
fi
}
+# configure system-specific settings
+system_config ()
+{
+ if [ -x $vyos_libexec_dir/run-config-activation.py ]; then
+ log_progress_msg system
+ sg ${GROUP} -c "$vyos_libexec_dir/run-config-activation.py $BOOTFILE"
+ fi
+}
+
# load the initial config
load_bootfile ()
{
@@ -493,6 +510,8 @@ start ()
update_interface_config
+ disabled system_config || system_config
+
for s in ${subinit[@]} ; do
if ! disabled $s; then
log_progress_msg $s
diff --git a/src/migration-scripts/nat/7-to-8 b/src/migration-scripts/nat/7-to-8
new file mode 100755
index 000000000..ab2ffa6d3
--- /dev/null
+++ b/src/migration-scripts/nat/7-to-8
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# T6345: random - In kernel 5.0 and newer this is the same as fully-random.
+# In earlier kernels the port mapping will be randomized using a seeded
+# MD5 hash mix using source and destination address and destination port.
+# drop fully-random from CLI
+
+from sys import argv,exit
+from vyos.configtree import ConfigTree
+
+if len(argv) < 2:
+ print("Must specify file name!")
+ exit(1)
+
+file_name = argv[1]
+
+with open(file_name, 'r') as f:
+ config_file = f.read()
+
+config = ConfigTree(config_file)
+
+if not config.exists(['nat']):
+ # Nothing to do
+ exit(0)
+
+for direction in ['source', 'destination']:
+ # If a node doesn't exist, we obviously have nothing to do.
+ if not config.exists(['nat', direction]):
+ continue
+
+ # However, we also need to handle the case when a 'source' or 'destination' sub-node does exist,
+ # but there are no rules under it.
+ if not config.list_nodes(['nat', direction]):
+ continue
+
+ for rule in config.list_nodes(['nat', direction, 'rule']):
+ port_mapping = ['nat', direction, 'rule', rule, 'translation', 'options', 'port-mapping']
+ if config.exists(port_mapping):
+ tmp = config.return_value(port_mapping)
+ if tmp == 'fully-random':
+ config.set(port_mapping, value='random')
+
+try:
+ with open(file_name, 'w') as f:
+ f.write(config.to_string())
+except OSError as e:
+ print(f'Failed to save the modified config: {e}')
+ exit(1)
diff --git a/src/migration-scripts/reverse-proxy/0-to-1 b/src/migration-scripts/reverse-proxy/0-to-1
new file mode 100755
index 000000000..d61493815
--- /dev/null
+++ b/src/migration-scripts/reverse-proxy/0-to-1
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# T6409: Remove unused 'backend bk-example parameters' node
+
+from sys import argv, exit
+from vyos.configtree import ConfigTree
+
+if len(argv) < 2:
+ print("Must specify file name!")
+ exit(1)
+
+file_name = argv[1]
+
+with open(file_name, 'r') as f:
+ config_file = f.read()
+
+config = ConfigTree(config_file)
+base = ['load-balancing', 'reverse-proxy', 'backend']
+if not config.exists(base):
+ # Nothing to do
+ exit(0)
+
+# we need to run this for every configured network
+for backend in config.list_nodes(base):
+ param_node = base + [backend, 'parameters']
+ if config.exists(param_node):
+ config.delete(param_node)
+
+try:
+ with open(file_name, 'w') as f:
+ f.write(config.to_string())
+except OSError as e:
+ print("Failed to save the modified config: {}".format(e))
+ exit(1)
diff --git a/src/op_mode/cgnat.py b/src/op_mode/cgnat.py
new file mode 100755
index 000000000..9ad8f92f9
--- /dev/null
+++ b/src/op_mode/cgnat.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import sys
+import typing
+
+from tabulate import tabulate
+
+import vyos.opmode
+
+from vyos.configquery import ConfigTreeQuery
+from vyos.utils.process import cmd
+
+CGNAT_TABLE = 'cgnat'
+
+
+def _get_raw_data(external_address: str = '', internal_address: str = '') -> list[dict]:
+ """Get CGNAT dictionary and filter by external or internal address if provided."""
+ cmd_output = cmd(f'nft --json list table ip {CGNAT_TABLE}')
+ data = json.loads(cmd_output)
+
+ elements = data['nftables'][2]['map']['elem']
+ allocations = []
+ for elem in elements:
+ internal = elem[0] # internal
+ external = elem[1]['concat'][0] # external
+ start_port = elem[1]['concat'][1]['range'][0]
+ end_port = elem[1]['concat'][1]['range'][1]
+ port_range = f'{start_port}-{end_port}'
+
+ if (internal_address and internal != internal_address) or (
+ external_address and external != external_address
+ ):
+ continue
+
+ allocations.append(
+ {
+ 'internal_address': internal,
+ 'external_address': external,
+ 'port_range': port_range,
+ }
+ )
+
+ return allocations
+
+
+def _get_formatted_output(allocations: list[dict]) -> str:
+ # Convert the list of dictionaries to a list of tuples for tabulate
+ headers = ['Internal IP', 'External IP', 'Port range']
+ data = [
+ (alloc['internal_address'], alloc['external_address'], alloc['port_range'])
+ for alloc in allocations
+ ]
+ output = tabulate(data, headers, numalign="left")
+ return output
+
+
+def show_allocation(
+ raw: bool,
+ external_address: typing.Optional[str],
+ internal_address: typing.Optional[str],
+) -> str:
+ config = ConfigTreeQuery()
+ if not config.exists('nat cgnat'):
+ raise vyos.opmode.UnconfiguredSubsystem('CGNAT is not configured')
+
+ if raw:
+ return _get_raw_data(external_address, internal_address)
+
+ else:
+ raw_data = _get_raw_data(external_address, internal_address)
+ return _get_formatted_output(raw_data)
+
+
+if __name__ == '__main__':
+ try:
+ res = vyos.opmode.run(sys.modules[__name__])
+ if res:
+ print(res)
+ except (ValueError, vyos.opmode.Error) as e:
+ print(e)
+ sys.exit(1)
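
For context, a fabricated example of the nftables JSON element shape that _get_raw_data() above unpacks; only the nesting (internal address, then a 'concat' of external address and port 'range') is meaningful here, the addresses and ports are made up.

    # Unpack one fabricated nft map element the same way _get_raw_data() does.
    elem = ['100.64.0.10', {'concat': ['192.0.2.1', {'range': [1024, 2047]}]}]

    internal = elem[0]
    external = elem[1]['concat'][0]
    start_port, end_port = elem[1]['concat'][1]['range']
    print(internal, '->', external, f'{start_port}-{end_port}')
    # 100.64.0.10 -> 192.0.2.1 1024-2047
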
diff --git a/src/op_mode/evpn.py b/src/op_mode/evpn.py
new file mode 100644
index 000000000..cae4ab9f5
--- /dev/null
+++ b/src/op_mode/evpn.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2016-2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This script is a helper to run VTYSH commands for "show evpn", allowing for the --raw flag to output JSON
+
+import sys
+import typing
+import json
+
+import vyos.opmode
+from vyos.utils.process import cmd
+
+def show_evpn(raw: bool, command: typing.Optional[str]):
+ if raw:
+ command = f"{command} json"
+ evpnDict = {}
+ try:
+ evpnDict['evpn'] = json.loads(cmd(f"vtysh -c '{command}'"))
+ except Exception:
+ raise vyos.opmode.DataUnavailable(f"\"{command.replace(' json', '')}\" is invalid or has no JSON option")
+
+ return evpnDict
+ else:
+ return cmd(f"vtysh -c '{command}'")
+
+if __name__ == '__main__':
+ try:
+ res = vyos.opmode.run(sys.modules[__name__])
+ if res:
+ print(res)
+ except (ValueError, vyos.opmode.Error) as e:
+ print(e)
+ sys.exit(1)
diff --git a/src/op_mode/ikev2_profile_generator.py b/src/op_mode/ikev2_profile_generator.py
index 2b29f94bf..4ac4fb14a 100755
--- a/src/op_mode/ikev2_profile_generator.py
+++ b/src/op_mode/ikev2_profile_generator.py
@@ -144,15 +144,22 @@ tmp = reversed(tmp)
data['rfqdn'] = '.'.join(tmp)
pki = conf.get_config_dict(pki_base, get_first_key=True)
-ca_name = data['authentication']['x509']['ca_certificate']
cert_name = data['authentication']['x509']['certificate']
-ca_cert = load_certificate(pki['ca'][ca_name]['certificate'])
-cert = load_certificate(pki['certificate'][cert_name]['certificate'])
+data['certs'] = []
+
+for ca_name in data['authentication']['x509']['ca_certificate']:
+ tmp = {}
+ ca_cert = load_certificate(pki['ca'][ca_name]['certificate'])
+ cert = load_certificate(pki['certificate'][cert_name]['certificate'])
+
+
+ tmp['ca_cn'] = ca_cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
+ tmp['cert_cn'] = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
+ tmp['ca_cert'] = conf.value(pki_base + ['ca', ca_name, 'certificate'])
+
+ data['certs'].append(tmp)
-data['ca_cn'] = ca_cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
-data['cert_cn'] = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
-data['ca_cert'] = conf.value(pki_base + ['ca', ca_name, 'certificate'])
esp_proposals = conf.get_config_dict(ipsec_base + ['esp-group', data['esp_group'], 'proposal'],
key_mangling=('-', '_'), get_first_key=True)
diff --git a/src/op_mode/image_installer.py b/src/op_mode/image_installer.py
index 0d2d7076c..bdc16de15 100755
--- a/src/op_mode/image_installer.py
+++ b/src/op_mode/image_installer.py
@@ -40,13 +40,14 @@ from vyos.template import render
from vyos.utils.io import ask_input, ask_yes_no, select_entry
from vyos.utils.file import chmod_2775
from vyos.utils.process import cmd, run
-from vyos.version import get_remote_version
+from vyos.version import get_remote_version, get_version_data
# define text messages
MSG_ERR_NOT_LIVE: str = 'The system is already installed. Please use "add system image" instead.'
MSG_ERR_LIVE: str = 'The system is in live-boot mode. Please use "install image" instead.'
MSG_ERR_NO_DISK: str = 'No suitable disk was found. There must be at least one disk of 2GB or greater size.'
MSG_ERR_IMPROPER_IMAGE: str = 'Missing sha256sum.txt.\nEither this image is corrupted, or of era 1.2.x (md5sum) and would downgrade image tools;\ndisallowed in either case.'
+MSG_ERR_ARCHITECTURE_MISMATCH: str = 'Upgrading to a different image architecture will break your system.'
MSG_INFO_INSTALL_WELCOME: str = 'Welcome to VyOS installation!\nThis command will install VyOS to your permanent storage.'
MSG_INFO_INSTALL_EXIT: str = 'Exiting from VyOS installation'
MSG_INFO_INSTALL_SUCCESS: str = 'The image installed successfully; please reboot now.'
@@ -79,6 +80,9 @@ MSG_WARN_ROOT_SIZE_TOOSMALL: str = 'The size is too small. Try again'
MSG_WARN_IMAGE_NAME_WRONG: str = 'The suggested name is unsupported!\n'\
'It must be between 1 and 64 characters long and contains only the next characters: .+-_ a-z A-Z 0-9'
MSG_WARN_PASSWORD_CONFIRM: str = 'The entered values did not match. Try again'
+MSG_WARN_FLAVOR_MISMATCH: str = 'The running image flavor is "{0}". The new image flavor is "{1}".\n' \
+'Installing a different image flavor may cause functionality degradation or break your system.\n' \
+'Do you want to continue with installation?'
CONST_MIN_DISK_SIZE: int = 2147483648 # 2 GB
CONST_MIN_ROOT_SIZE: int = 1610612736 # 1.5 GB
# a reserved space: 2MB for header, 1 MB for BIOS partition, 256 MB for EFI
@@ -693,6 +697,31 @@ def is_raid_install(install_object: Union[disk.DiskDetails, raid.RaidDetails]) -
return False
+def validate_compatibility(iso_path: str) -> None:
+ """Check architecture and flavor compatibility with the running image
+
+ Args:
+ iso_path (str): a path to the mounted ISO image
+ """
+ old_data = get_version_data()
+ old_flavor = old_data.get('flavor', '')
+ old_architecture = old_data.get('architecture') or cmd('dpkg --print-architecture')
+
+ new_data = get_version_data(f'{iso_path}/version.json')
+ new_flavor = new_data.get('flavor', '')
+ new_architecture = new_data.get('architecture', '')
+
+ if not old_architecture == new_architecture:
+ print(MSG_ERR_ARCHITECTURE_MISMATCH)
+ cleanup()
+ exit(MSG_INFO_INSTALL_EXIT)
+
+ if not old_flavor == new_flavor:
+ if not ask_yes_no(MSG_WARN_FLAVOR_MISMATCH.format(old_flavor, new_flavor), default=False):
+ cleanup()
+ exit(MSG_INFO_INSTALL_EXIT)
+
+
def install_image() -> None:
"""Install an image to a disk
"""
@@ -876,6 +905,9 @@ def add_image(image_path: str, vrf: str = None, username: str = '',
Path(DIR_ISO_MOUNT).mkdir(mode=0o755, parents=True)
disk.partition_mount(iso_path, DIR_ISO_MOUNT, 'iso9660')
+ print('Validating image compatibility')
+ validate_compatibility(DIR_ISO_MOUNT)
+
# check sums
print('Validating image checksums')
if not Path(DIR_ISO_MOUNT).joinpath('sha256sum.txt').exists():
diff --git a/src/op_mode/nat.py b/src/op_mode/nat.py
index 2bc7e24fe..16a545cda 100755
--- a/src/op_mode/nat.py
+++ b/src/op_mode/nat.py
@@ -99,6 +99,23 @@ def _get_raw_translation(direction, family, address=None):
def _get_formatted_output_rules(data, direction, family):
+ def _get_ports_for_output(my_dict):
+ # Get and insert all configured ports or port ranges into output string
+ for index, port in enumerate(my_dict['set']):
+ if 'range' in str(my_dict['set'][index]):
+ port_str = my_dict['set'][index]['range']
+ port_str = '-'.join(map(str, port_str))
+ else:
+ port_str = str(port)
+ if index == 0:
+ output = port_str
+ else:
+ output = ','.join([output, port_str])
+ # Handle case where configured ports are a negated list
+ if my_dict['op'] == '!=':
+ output = '!' + output
+ return output
+
# Add default values before loop
sport, dport, proto = 'any', 'any', 'any'
saddr = '::/0' if family == 'inet6' else '0.0.0.0/0'
@@ -126,21 +143,9 @@ def _get_formatted_output_rules(data, direction, family):
elif my_dict['field'] == 'daddr':
daddr = f'{op}{my_dict["prefix"]["addr"]}/{my_dict["prefix"]["len"]}'
elif my_dict['field'] == 'sport':
- # Port range or single port
- if jmespath.search('set[*].range', my_dict):
- sport = my_dict['set'][0]['range']
- sport = '-'.join(map(str, sport))
- else:
- sport = my_dict.get('set')
- sport = ','.join(map(str, sport))
+ sport = _get_ports_for_output(my_dict)
elif my_dict['field'] == 'dport':
- # Port range or single port
- if jmespath.search('set[*].range', my_dict):
- dport = my_dict["set"][0]["range"]
- dport = '-'.join(map(str, dport))
- else:
- dport = my_dict.get('set')
- dport = ','.join(map(str, dport))
+ dport = _get_ports_for_output(my_dict)
else:
field = jmespath.search('left.payload.field', match)
if field == 'saddr':
@@ -263,7 +268,7 @@ def _get_formatted_translation(dict_data, nat_direction, family, verbose):
proto = meta['layer4']['protoname']
if direction == 'independent':
conn_id = meta['id']
- timeout = meta['timeout']
+ timeout = meta.get('timeout', 'n/a')
orig_src = f'{orig_src}:{orig_sport}' if orig_sport else orig_src
orig_dst = f'{orig_dst}:{orig_dport}' if orig_dport else orig_dst
reply_src = f'{reply_src}:{reply_sport}' if reply_sport else reply_src
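
A quick standalone check of the port-formatting logic used by _get_ports_for_output() above, applied to a fabricated negated set containing one port and one range; the dict shape follows the sport/dport expressions handled in the loop.

    # Same port-formatting logic as above, restated so it runs on its own.
    def ports_for_output(my_dict):
        for index, port in enumerate(my_dict['set']):
            if 'range' in str(my_dict['set'][index]):
                port_str = '-'.join(map(str, my_dict['set'][index]['range']))
            else:
                port_str = str(port)
            output = port_str if index == 0 else ','.join([output, port_str])
        if my_dict['op'] == '!=':
            output = '!' + output
        return output

    print(ports_for_output({'op': '!=', 'set': [53, {'range': [1024, 2047]}]}))
    # !53,1024-2047
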
diff --git a/src/op_mode/pki.py b/src/op_mode/pki.py
index b1ca6ee29..361b60e0e 100755
--- a/src/op_mode/pki.py
+++ b/src/op_mode/pki.py
@@ -876,7 +876,7 @@ def show_certificate_authority(name=None, pem=False):
print("Certificate Authorities:")
print(tabulate.tabulate(data, headers))
-def show_certificate(name=None, pem=False):
+def show_certificate(name=None, pem=False, fingerprint_hash=None):
headers = ['Name', 'Type', 'Subject CN', 'Issuer CN', 'Issued', 'Expiry', 'Revoked', 'Private Key', 'CA Present']
data = []
certs = get_config_certificate()
@@ -897,6 +897,9 @@ def show_certificate(name=None, pem=False):
if name and pem:
print(encode_certificate(cert))
return
+ elif name and fingerprint_hash:
+ print(get_certificate_fingerprint(cert, fingerprint_hash))
+ return
ca_name = get_certificate_ca(cert, ca_certs)
cert_subject_cn = cert.subject.rfc4514_string().split(",")[0]
@@ -923,12 +926,6 @@ def show_certificate(name=None, pem=False):
print("Certificates:")
print(tabulate.tabulate(data, headers))
-def show_certificate_fingerprint(name, hash):
- cert = get_config_certificate(name=name)
- cert = load_certificate(cert['certificate'])
-
- print(get_certificate_fingerprint(cert, hash))
-
def show_crl(name=None, pem=False):
headers = ['CA Name', 'Updated', 'Revokes']
data = []
@@ -1074,7 +1071,7 @@ if __name__ == '__main__':
if args.fingerprint is None:
show_certificate(None if args.certificate == 'all' else args.certificate, args.pem)
else:
- show_certificate_fingerprint(args.certificate, args.fingerprint)
+ show_certificate(args.certificate, fingerprint_hash=args.fingerprint)
elif args.crl:
show_crl(None if args.crl == 'all' else args.crl, args.pem)
else:
diff --git a/src/op_mode/snmp_v3.py b/src/op_mode/snmp_v3.py
index a1f76f0bc..abeb524dd 100755
--- a/src/op_mode/snmp_v3.py
+++ b/src/op_mode/snmp_v3.py
@@ -85,7 +85,7 @@ if __name__ == '__main__':
'user': [],
'view': []
}
-
+
if c.exists_effective('service snmp v3 group'):
for g in c.list_effective_nodes('service snmp v3 group'):
group = {
@@ -146,7 +146,6 @@ if __name__ == '__main__':
data['trap'].append(trap)
- print(data)
if args.all:
# Special case, print all templates !
tmpl = jinja2.Template(GROUP_OUTP_TMPL_SRC)
diff --git a/src/op_mode/version.py b/src/op_mode/version.py
index ad0293aca..09d69ad1d 100755
--- a/src/op_mode/version.py
+++ b/src/op_mode/version.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2016-2022 VyOS maintainers and contributors
+# Copyright (C) 2016-2024 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -30,11 +30,15 @@ from jinja2 import Template
version_output_tmpl = """
Version: VyOS {{version}}
Release train: {{release_train}}
+Release flavor: {{flavor}}
Built by: {{built_by}}
Built on: {{built_on}}
Build UUID: {{build_uuid}}
Build commit ID: {{build_git}}
+{%- if build_comment %}
+Build comment: {{build_comment}}
+{% endif %}
Architecture: {{system_arch}}
Boot via: {{boot_via}}
diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server
index ecbf6fcf9..7f5233c6b 100755
--- a/src/services/vyos-http-api-server
+++ b/src/services/vyos-http-api-server
@@ -23,16 +23,17 @@ import logging
import signal
import traceback
import threading
+from enum import Enum
from time import sleep
-from typing import List, Union, Callable, Dict
+from typing import List, Union, Callable, Dict, Self
from fastapi import FastAPI, Depends, Request, Response, HTTPException
from fastapi import BackgroundTasks
from fastapi.responses import HTMLResponse
from fastapi.exceptions import RequestValidationError
from fastapi.routing import APIRoute
-from pydantic import BaseModel, StrictStr, validator
+from pydantic import BaseModel, StrictStr, validator, model_validator
from starlette.middleware.cors import CORSMiddleware
from starlette.datastructures import FormData
from starlette.formparsers import FormParser, MultiPartParser
@@ -177,16 +178,35 @@ class ConfigFileModel(ApiModel):
}
}
+
+class ImageOp(str, Enum):
+ add = "add"
+ delete = "delete"
+ show = "show"
+ set_default = "set_default"
+
+
class ImageModel(ApiModel):
- op: StrictStr
+ op: ImageOp
url: StrictStr = None
name: StrictStr = None
+ @model_validator(mode='after')
+ def check_data(self) -> Self:
+ if self.op == 'add':
+ if not self.url:
+ raise ValueError("Missing required field \"url\"")
+ elif self.op in ['delete', 'set_default']:
+ if not self.name:
+ raise ValueError("Missing required field \"name\"")
+
+ return self
+
class Config:
schema_extra = {
"example": {
"key": "id_key",
- "op": "add | delete",
+ "op": "add | delete | show | set_default",
"url": "imagelocation",
"name": "imagename",
}
@@ -668,19 +688,13 @@ def image_op(data: ImageModel):
try:
if op == 'add':
- if data.url:
- url = data.url
- else:
- return error(400, "Missing required field \"url\"")
- res = session.install_image(url)
+ res = session.install_image(data.url)
elif op == 'delete':
- if data.name:
- name = data.name
- else:
- return error(400, "Missing required field \"name\"")
- res = session.remove_image(name)
- else:
- return error(400, f"'{op}' is not a valid operation")
+ res = session.remove_image(data.name)
+ elif op == 'show':
+ res = session.show(["system", "image"])
+ elif op == 'set_default':
+ res = session.set_default_image(data.name)
except ConfigSessionError as e:
return error(400, str(e))
except Exception as e:
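
A minimal, self-contained sketch of the Pydantic v2 pattern used by ImageModel above, with an invented model so it runs outside the API server: an Enum constrains 'op', and a model_validator(mode='after') hook enforces which optional field each operation requires.

    # Invented demo model; names and fields are illustrative only.
    from enum import Enum
    from typing import Optional
    from pydantic import BaseModel, ValidationError, model_validator

    class Op(str, Enum):
        add = 'add'
        delete = 'delete'

    class DemoModel(BaseModel):
        op: Op
        url: Optional[str] = None
        name: Optional[str] = None

        @model_validator(mode='after')
        def check_data(self):
            if self.op == Op.add and not self.url:
                raise ValueError('Missing required field "url"')
            if self.op == Op.delete and not self.name:
                raise ValueError('Missing required field "name"')
            return self

    DemoModel(op='add', url='https://example.com/image.iso')   # accepted
    try:
        DemoModel(op='delete')                                  # rejected: no name
    except ValidationError as e:
        print(e.errors()[0]['msg'])
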
diff --git a/src/systemd/miniupnpd.service b/src/systemd/miniupnpd.service
deleted file mode 100644
index 51cb2eed8..000000000
--- a/src/systemd/miniupnpd.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=UPnP service
-ConditionPathExists=/run/upnp/miniupnp.conf
-After=vyos-router.service
-StartLimitIntervalSec=0
-
-[Service]
-WorkingDirectory=/run/upnp
-Type=simple
-ExecStart=/usr/sbin/miniupnpd -d -f /run/upnp/miniupnp.conf
-PrivateTmp=yes
-PIDFile=/run/miniupnpd.pid
-Restart=on-failure
diff --git a/src/validators/port-range-exclude b/src/validators/port-range-exclude
new file mode 100755
index 000000000..4c049e98f
--- /dev/null
+++ b/src/validators/port-range-exclude
@@ -0,0 +1,7 @@
+#!/bin/sh
+arg="$1"
+if [ "${arg:0:1}" != "!" ]; then
+ exit 1
+fi
+path=$(dirname "$0")
+${path}/port-range "${arg:1}"