-rw-r--r-- .github/PULL_REQUEST_TEMPLATE.md | 21
-rw-r--r-- .github/workflows/check-scripts-executable.yml | 32
-rw-r--r-- .github/workflows/cleanup-mirror-pr-branch.yml | 16
-rw-r--r-- .github/workflows/mirror-pr-and-sync.yml | 21
-rw-r--r-- .github/workflows/repo-sync.yml | 17
-rw-r--r-- .github/workflows/trigger-docker-image-build.yml | 47
-rw-r--r-- .github/workflows/trigger-pr-mirror-repo-sync.yml | 18
-rw-r--r-- .github/workflows/trigger_rebuild_packages.yml | 68
-rw-r--r-- .gitignore | 3
-rw-r--r-- CODEOWNERS | 3
-rw-r--r-- CONTRIBUTING.md | 2
-rw-r--r-- Jenkinsfile | 220
-rw-r--r-- Jenkinsfile.docker | 84
-rw-r--r-- Makefile | 18
-rw-r--r-- README.md | 6
-rw-r--r-- data/architectures/amd64.toml | 18
-rw-r--r-- data/architectures/arm64.toml | 12
-rw-r--r-- data/architectures/armhf.toml | 2
-rw-r--r-- data/build-flavors/README.md | 76
-rw-r--r-- data/build-flavors/aws-iso.toml | 3
-rw-r--r-- data/build-flavors/azure-iso.toml | 5
-rw-r--r-- data/build-flavors/dell-vep1400.toml | 116
-rw-r--r-- data/build-flavors/dell-vep4600.toml | 267
-rw-r--r-- data/build-flavors/edgecore.toml | 46
-rw-r--r-- data/build-flavors/generic.toml | 11
-rw-r--r-- data/build-flavors/xcpng.toml | 6
-rw-r--r-- data/build-types/development.toml | 70
-rw-r--r-- data/build-types/release.toml | 441
-rw-r--r-- data/build-types/stream.toml | 8
-rw-r--r-- data/certificates/.gitignore | 1
-rw-r--r-- data/defaults.toml | 4
-rw-r--r-- data/live-build-config/archives/buster.list.chroot | 3
-rw-r--r-- data/live-build-config/archives/buster.pref.chroot | 11
-rw-r--r-- data/live-build-config/archives/zabbix-official-repo.key.chroot | bin 0 -> 1183 bytes
-rwxr-xr-x data/live-build-config/hooks/live/01-live-serial.binary | 8
-rw-r--r-- data/live-build-config/hooks/live/100-remove-dropbear-keys.chroot | 7
-rwxr-xr-x data/live-build-config/hooks/live/18-enable-disable_services.chroot | 8
-rwxr-xr-x data/live-build-config/hooks/live/19-kernel_symlinks.chroot | 5
-rwxr-xr-x data/live-build-config/hooks/live/40-init-cracklib-db.chroot | 13
-rwxr-xr-x data/live-build-config/hooks/live/82-import-vyos-gpg-signing-key.chroot | 12
-rwxr-xr-x data/live-build-config/hooks/live/92-strip-symbols.chroot | 1
-rwxr-xr-x data/live-build-config/hooks/live/93-sb-sign-kernel.chroot | 31
-rwxr-xr-x data/live-build-config/hooks/live/93-sign-kernel.chroot | 18
-rw-r--r-- data/live-build-config/includes.binary/isolinux/splash.png | bin 39611 -> 23666 bytes
-rw-r--r-- data/live-build-config/includes.chroot/etc/systemd/system.conf | 1
-rw-r--r-- data/live-build-config/includes.chroot/opt/vyatta/etc/grub/default-union-grub-entry | 20
-rw-r--r-- data/live-build-config/includes.chroot/usr/share/vyos/keys/vyos-release.pub.asc | 52
-rw-r--r-- data/live-build-config/includes.chroot/var/lib/shim-signed/mok/README.md | 22
-rw-r--r-- data/live-build-config/package-lists/vyos-base.list.chroot | 2
-rw-r--r-- data/live-build-config/rootfs/excludes | 3
-rw-r--r-- docker/Dockerfile | 43
-rw-r--r-- docker/patches/live-build/0001-save-package-info.patch | 30
-rw-r--r-- packages/.gitignore | 7
-rw-r--r-- packages/aws-gateway-load-balancer-tunnel-handler/.gitignore | 1
-rw-r--r-- packages/aws-gateway-load-balancer-tunnel-handler/Jenkinsfile | 33
-rwxr-xr-x packages/aws-gateway-load-balancer-tunnel-handler/build.py | 57
-rw-r--r-- packages/ddclient/.gitignore | 1
-rw-r--r-- packages/ddclient/Jenkinsfile | 30
-rw-r--r-- packages/dropbear/.gitignore | 1
-rw-r--r-- packages/dropbear/Jenkinsfile | 30
-rwxr-xr-x packages/dropbear/build.sh | 27
-rw-r--r-- packages/ethtool/.gitignore | 1
-rw-r--r-- packages/ethtool/Jenkinsfile | 31
-rw-r--r-- packages/frr/.gitignore | 3
-rw-r--r-- packages/frr/Jenkinsfile | 39
-rwxr-xr-x packages/frr/build-frr.sh | 40
-rw-r--r-- packages/hostap/.gitignore | 2
-rw-r--r-- packages/hostap/Jenkinsfile | 34
-rwxr-xr-x packages/hostap/build.sh | 38
-rw-r--r-- packages/hsflowd/.gitignore | 1
-rw-r--r-- packages/hsflowd/Jenkinsfile | 32
-rwxr-xr-x packages/hsflowd/build.sh | 27
-rw-r--r-- packages/isc-dhcp/.gitignore | 1
-rw-r--r-- packages/isc-dhcp/Jenkinsfile | 32
-rwxr-xr-x packages/isc-dhcp/build.sh | 21
-rw-r--r-- packages/kea/.gitignore | 1
-rw-r--r-- packages/kea/Jenkinsfile | 32
-rwxr-xr-x packages/kea/build.sh | 20
-rw-r--r-- packages/keepalived/.gitignore | 1
-rw-r--r-- packages/keepalived/Jenkinsfile | 33
-rwxr-xr-x packages/keepalived/build.py | 50
-rw-r--r-- packages/keepalived/patches/0001-vrrp-Set-sysctl-arp_ignore-to-1-on-IPv6-VMACs.patch | 129
-rw-r--r-- packages/linux-kernel/.gitignore | 27
-rw-r--r-- packages/linux-kernel/Jenkinsfile | 83
-rw-r--r-- packages/linux-kernel/README.md | 36
-rwxr-xr-x packages/linux-kernel/build-accel-ppp.sh | 42
-rwxr-xr-x packages/linux-kernel/build-intel-ixgbe.sh | 110
-rwxr-xr-x packages/linux-kernel/build-intel-ixgbevf.sh | 102
-rwxr-xr-x packages/linux-kernel/build-intel-qat.sh | 114
-rwxr-xr-x packages/linux-kernel/build-kernel.sh | 79
-rwxr-xr-x packages/linux-kernel/build-linux-firmware.sh | 98
-rwxr-xr-x packages/linux-kernel/build-nat-rtsp.sh | 42
-rwxr-xr-x packages/linux-kernel/build-openvpn-dco.sh | 33
-rw-r--r-- packages/ndppd/.gitignore | 1
-rw-r--r-- packages/ndppd/Jenkinsfile | 32
-rwxr-xr-x packages/ndppd/build.sh | 20
-rw-r--r-- packages/net-snmp/.gitignore | 1
-rw-r--r-- packages/net-snmp/Jenkinsfile | 30
-rwxr-xr-x packages/net-snmp/build.sh | 30
-rw-r--r-- packages/netfilter/.gitignore | 3
-rw-r--r-- packages/netfilter/Jenkinsfile | 39
-rwxr-xr-x packages/netfilter/build.py | 55
-rw-r--r-- packages/netfilter/patches/pkg-nftables/0001-meta-fix-hour-decoding.patch | 118
-rw-r--r-- packages/opennhrp/.gitignore | 1
-rw-r--r-- packages/opennhrp/Jenkinsfile | 30
-rwxr-xr-x packages/opennhrp/build.sh | 30
-rw-r--r-- packages/openvpn-otp/.gitignore | 1
-rw-r--r-- packages/openvpn-otp/Jenkinsfile | 31
-rwxr-xr-x packages/openvpn-otp/build-openvpn-otp.sh | 36
-rw-r--r-- packages/owamp/.gitignore | 1
-rw-r--r-- packages/owamp/Jenkinsfile | 32
-rwxr-xr-x packages/owamp/build.sh | 16
-rw-r--r-- packages/pam_tacplus/.gitignore | 2
-rw-r--r-- packages/pam_tacplus/Jenkinsfile | 34
-rwxr-xr-x packages/pam_tacplus/build.sh | 16
-rw-r--r-- packages/pmacct/.gitignore | 1
-rw-r--r-- packages/pmacct/Jenkinsfile | 33
-rwxr-xr-x packages/pmacct/build.py | 49
-rw-r--r-- packages/podman/.gitignore | 1
-rw-r--r-- packages/podman/Jenkinsfile | 31
-rwxr-xr-x packages/podman/build.sh | 32
-rw-r--r-- packages/pyhumps/.gitignore | 1
-rw-r--r-- packages/pyhumps/Jenkinsfile | 30
-rw-r--r-- packages/radvd/.gitignore | 2
-rw-r--r-- packages/radvd/Jenkinsfile | 30
-rwxr-xr-x packages/radvd/build.sh | 29
-rw-r--r-- packages/strongswan/.gitignore | 1
-rw-r--r-- packages/strongswan/Jenkinsfile | 30
-rwxr-xr-x packages/strongswan/build.sh | 56
-rw-r--r-- packages/telegraf/.gitignore | 1
-rw-r--r-- packages/telegraf/Jenkinsfile | 32
-rwxr-xr-x packages/telegraf/build.sh | 27
-rw-r--r-- packages/telegraf/plugins/inputs/all/all.go | 72
-rw-r--r-- packages/telegraf/plugins/outputs/all/all.go | 9
-rw-r--r-- packages/waagent/Jenkinsfile | 32
-rwxr-xr-x packages/waagent/build.py | 50
-rw-r--r-- packages/wide-dhcpv6/.gitignore | 1
-rw-r--r-- packages/wide-dhcpv6/Jenkinsfile | 30
-rwxr-xr-x packages/wide-dhcpv6/build.sh | 23
-rwxr-xr-x scripts/check-qemu-install | 85
-rwxr-xr-x scripts/image-build/build-vyos-image | 210
-rw-r--r-- scripts/image-build/defaults.py | 2
-rw-r--r-- scripts/image-build/raw_image.py | 2
-rw-r--r-- scripts/package-build/.gitignore (renamed from scripts/package-build/opennhrp/.gitignore) | 4
-rw-r--r-- scripts/package-build/amazon-cloudwatch-agent/.gitignore | 1
l--------- scripts/package-build/amazon-cloudwatch-agent/build.py (renamed from scripts/package-build/opennhrp/build.py) | 0
-rw-r--r-- scripts/package-build/amazon-cloudwatch-agent/package.toml | 14
-rw-r--r-- scripts/package-build/amazon-ssm-agent/.gitignore | 1
l--------- scripts/package-build/amazon-ssm-agent/build.py (renamed from scripts/package-build/pam_tacplus/build.py) | 0
-rw-r--r-- scripts/package-build/amazon-ssm-agent/package.toml | 16
-rw-r--r-- scripts/package-build/aws-gwlbtun/.gitignore | 9
-rw-r--r-- scripts/package-build/bash-completion/.gitignore | 1
l--------- scripts/package-build/bash-completion/build.py | 1
-rw-r--r-- scripts/package-build/bash-completion/package.toml | 12
-rw-r--r-- scripts/package-build/blackbox_exporter/.gitignore | 1
l--------- scripts/package-build/blackbox_exporter/build.py | 1
-rwxr-xr-x scripts/package-build/blackbox_exporter/build.sh | 66
-rw-r--r-- scripts/package-build/blackbox_exporter/package.toml | 5
-rwxr-xr-x scripts/package-build/build.py | 66
-rw-r--r-- scripts/package-build/ddclient/.gitignore | 8
-rw-r--r-- scripts/package-build/dropbear/.gitignore | 8
-rw-r--r-- scripts/package-build/dropbear/package.toml | 4
-rw-r--r-- scripts/package-build/dropbear/patches/0001-Enable-PAM-support.patch | 61
-rw-r--r-- scripts/package-build/dropbear/patches/dropbear/0001-Enable-PAM-support.patch (renamed from packages/dropbear/patches/0001-Enable-PAM-support.patch) | 0
-rw-r--r-- scripts/package-build/ethtool/.gitignore | 8
-rw-r--r-- scripts/package-build/ethtool/package.toml | 2
-rw-r--r-- scripts/package-build/frr/.gitignore | 11
-rw-r--r-- scripts/package-build/frr/package.toml | 13
-rw-r--r-- scripts/package-build/frr/patches/frr/0001-Enable-PCRE2-in-Debian-package-builds.patch | 24
-rw-r--r-- scripts/package-build/frr/patches/frr/0001-ldpd-Option-for-disabled-LDP-hello-message-during-TC.patch | 176
-rw-r--r-- scripts/package-build/frr/patches/frr/0003-Clear-Babel-Config-On-Stop.patch | 29
-rw-r--r-- scripts/package-build/frr_exporter/.gitignore | 1
l--------- scripts/package-build/frr_exporter/build.py | 1
-rw-r--r-- scripts/package-build/frr_exporter/package.toml | 22
-rw-r--r-- scripts/package-build/hostap/.gitignore | 9
-rw-r--r-- scripts/package-build/hsflowd/.gitignore | 7
-rw-r--r-- scripts/package-build/hsflowd/package.toml | 6
-rw-r--r-- scripts/package-build/isc-dhcp/.gitignore | 8
-rw-r--r-- scripts/package-build/isc-dhcp/package.toml | 2
-rw-r--r-- scripts/package-build/isc-dhcp/patches/0001-Add-support-for-raw-IP-interface-type.patch | 248
-rw-r--r-- scripts/package-build/isc-dhcp/patches/0002-Checkpoint-improved-patch.patch | 170
-rw-r--r-- scripts/package-build/isc-dhcp/patches/0003-fix-compilation-errors.patch | 48
-rw-r--r-- scripts/package-build/isc-dhcp/patches/0004-add-support-for-ARPHRD_NONE-interface-type.patch | 29
-rw-r--r-- scripts/package-build/isc-dhcp/patches/isc-dhcp/0001-Add-support-for-raw-IP-interface-type.patch (renamed from packages/isc-dhcp/patches/0001-Add-support-for-raw-IP-interface-type.patch) | 0
-rw-r--r-- scripts/package-build/isc-dhcp/patches/isc-dhcp/0002-Checkpoint-improved-patch.patch (renamed from packages/isc-dhcp/patches/0002-Checkpoint-improved-patch.patch) | 0
-rw-r--r-- scripts/package-build/isc-dhcp/patches/isc-dhcp/0003-fix-compilation-errors.patch (renamed from packages/isc-dhcp/patches/0003-fix-compilation-errors.patch) | 0
-rw-r--r-- scripts/package-build/isc-dhcp/patches/isc-dhcp/0004-add-support-for-ARPHRD_NONE-interface-type.patch (renamed from packages/isc-dhcp/patches/0004-add-support-for-ARPHRD_NONE-interface-type.patch) | 0
-rw-r--r-- scripts/package-build/kea/.gitignore | 8
-rw-r--r-- scripts/package-build/kea/package.toml | 2
-rw-r--r-- scripts/package-build/kea/patches/isc-kea/0001-Add-multithreading-test-mode.patch | 135
-rw-r--r-- scripts/package-build/kea/patches/isc-kea/0002-Add-ping_check-hook-library.patch | 13277
-rw-r--r-- scripts/package-build/keepalived/.gitignore | 8
-rw-r--r-- scripts/package-build/keepalived/package.toml | 2
-rw-r--r-- scripts/package-build/keepalived/patches/0001-vrrp-Set-sysctl-arp_ignore-to-1-on-IPv6-VMACs.patch | 129
-rw-r--r-- scripts/package-build/libnss-mapuser/.gitignore | 1
l--------- scripts/package-build/libnss-mapuser/build.py | 1
-rw-r--r-- scripts/package-build/libnss-mapuser/package.toml | 9
-rw-r--r-- scripts/package-build/libpam-radius-auth/.gitignore | 1
l--------- scripts/package-build/libpam-radius-auth/build.py | 1
-rw-r--r-- scripts/package-build/libpam-radius-auth/package.toml | 10
-rw-r--r-- scripts/package-build/linux-kernel/.gitignore | 6
-rw-r--r-- scripts/package-build/linux-kernel/README.md | 12
l--------- scripts/package-build/linux-kernel/arch | 1
-rw-r--r-- scripts/package-build/linux-kernel/arch/arm64/configs/vyos_defconfig (renamed from packages/linux-kernel/arch/arm64/configs/vyos_defconfig) | 152
-rw-r--r-- scripts/package-build/linux-kernel/arch/x86/configs/vyos_defconfig (renamed from packages/linux-kernel/arch/x86/configs/vyos_defconfig) | 92
-rwxr-xr-x scripts/package-build/linux-kernel/build-accel-ppp.sh | 8
-rwxr-xr-x scripts/package-build/linux-kernel/build-intel-ixgbe.sh | 107
-rwxr-xr-x scripts/package-build/linux-kernel/build-intel-nic.sh (renamed from scripts/package-build/linux-kernel/build-intel-ixgbevf.sh) | 76
-rwxr-xr-x scripts/package-build/linux-kernel/build-intel-qat.sh | 30
-rwxr-xr-x scripts/package-build/linux-kernel/build-ipt-netflow.sh | 69
-rwxr-xr-x scripts/package-build/linux-kernel/build-jool.py | 7
-rwxr-xr-x scripts/package-build/linux-kernel/build-kernel.sh | 54
-rwxr-xr-x scripts/package-build/linux-kernel/build-mellanox-ofed.sh (renamed from packages/linux-kernel/build-mellanox-ofed.sh) | 12
-rwxr-xr-x scripts/package-build/linux-kernel/build-nat-rtsp.sh | 12
-rwxr-xr-x scripts/package-build/linux-kernel/build-openvpn-dco.sh | 8
-rwxr-xr-x scripts/package-build/linux-kernel/build-realtek-r8152.py (renamed from packages/linux-kernel/build-jool.py) | 62
-rwxr-xr-x scripts/package-build/linux-kernel/build.py | 74
-rw-r--r-- scripts/package-build/linux-kernel/package.toml | 33
l--------- scripts/package-build/linux-kernel/patches | 1
-rw-r--r-- scripts/package-build/linux-kernel/patches/accel-ppp/0001-L2TP-Include-Calling-Number-to-Calling-Station-ID-RA.patch (renamed from packages/linux-kernel/patches/accel-ppp/0001-L2TP-Include-Calling-Number-to-Calling-Station-ID-RA.patch) | 0
-rw-r--r-- scripts/package-build/linux-kernel/patches/accel-ppp/0002-Radius-Dns-Server-IPv6-Address.patch | 195
-rw-r--r-- scripts/package-build/linux-kernel/patches/ixgbe/0001-ixgbe-always-enable-support-for-unsupported-SFP-modu.patch (renamed from packages/linux-kernel/patches/ixgbe/allow_unsupported_sfp.patch) | 20
-rw-r--r-- scripts/package-build/linux-kernel/patches/ixgbe/0002-BACKPORT-linux-v6.9-PATCH-ixgbe-Add-1000BASE-BX-supp.patch (renamed from packages/linux-kernel/patches/ixgbe/add_1000base-bx_support.patch) | 74
-rw-r--r-- scripts/package-build/linux-kernel/patches/kernel/0001-linkstate-ip-device-attribute.patch (renamed from packages/linux-kernel/patches/kernel/0001-linkstate-ip-device-attribute.patch) | 18
-rw-r--r-- scripts/package-build/linux-kernel/patches/kernel/0002-inotify-support-for-stackable-filesystems.patch (renamed from packages/linux-kernel/patches/kernel/0002-inotify-support-for-stackable-filesystems.patch) | 0
-rwxr-xr-x scripts/package-build/linux-kernel/sign-modules.sh | 15
-rw-r--r-- scripts/package-build/ndppd/.gitignore | 8
-rw-r--r-- scripts/package-build/ndppd/patches/0001-skip-route-table-if-there-is-no-auto-rule.patch | 83
-rw-r--r-- scripts/package-build/ndppd/patches/0002-set-vyos-version.patch | 25
-rw-r--r-- scripts/package-build/ndppd/patches/ndppd/0001-skip-route-table-if-there-is-no-auto-rule.patch (renamed from packages/ndppd/patches/0001-skip-route-table-if-there-is-no-auto-rule.patch) | 0
-rw-r--r-- scripts/package-build/ndppd/patches/ndppd/0002-set-vyos-version.patch (renamed from packages/ndppd/patches/0002-set-vyos-version.patch) | 0
-rw-r--r-- scripts/package-build/net-snmp/.gitignore | 7
-rw-r--r-- scripts/package-build/net-snmp/patches/add-linux-6.7-compatibility-parsing.patch | 119
-rw-r--r-- scripts/package-build/net-snmp/patches/net-snmp/add-linux-6.7-compatibility-parsing.patch (renamed from packages/net-snmp/patches/add-linux-6.7-compatibility-parsing.patch) | 0
-rw-r--r-- scripts/package-build/netfilter/.gitignore | 6
l--------- [-rwxr-xr-x] scripts/package-build/netfilter/build.py | 190
-rw-r--r-- scripts/package-build/node_exporter/.gitignore | 1
l--------- scripts/package-build/node_exporter/build.py | 1
-rw-r--r-- scripts/package-build/node_exporter/package.toml | 21
-rw-r--r-- scripts/package-build/opennhrp/package.toml | 21
-rw-r--r-- scripts/package-build/openvpn-otp/.gitignore | 7
-rw-r--r-- scripts/package-build/openvpn-otp/package.toml | 2
-rw-r--r-- scripts/package-build/owamp/.gitignore | 7
-rw-r--r-- scripts/package-build/pam_tacplus/.gitignore | 7
-rw-r--r-- scripts/package-build/pam_tacplus/package.toml | 19
-rw-r--r-- scripts/package-build/pmacct/.gitignore | 7
-rw-r--r-- scripts/package-build/pmacct/patches/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch | 49
-rw-r--r-- scripts/package-build/pmacct/patches/pmacct/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch (renamed from packages/pmacct/patches/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch) | 0
-rw-r--r-- scripts/package-build/podman/.gitignore | 8
-rw-r--r-- scripts/package-build/podman/package.toml | 2
-rw-r--r-- scripts/package-build/pyhumps/.gitignore | 8
-rw-r--r-- scripts/package-build/radvd/.gitignore | 7
-rw-r--r-- scripts/package-build/radvd/package.toml | 2
-rw-r--r-- scripts/package-build/strongswan/.gitignore | 7
-rwxr-xr-x scripts/package-build/strongswan/build-vici.sh | 21
-rw-r--r-- scripts/package-build/strongswan/package.toml | 2
-rw-r--r-- scripts/package-build/strongswan/patches/0001-charon-add-optional-source-and-remote-overrides-for-.patch | 579
-rw-r--r-- scripts/package-build/strongswan/patches/0002-vici-send-certificates-for-ike-sa-events.patch | 140
-rw-r--r-- scripts/package-build/strongswan/patches/0003-vici-add-support-for-individual-sa-state-changes.patch | 159
-rw-r--r-- scripts/package-build/strongswan/patches/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch | 115
-rw-r--r-- scripts/package-build/strongswan/patches/strongswan/0001-charon-add-optional-source-and-remote-overrides-for-.patch (renamed from packages/strongswan/patches/0001-charon-add-optional-source-and-remote-overrides-for-.patch) | 0
-rw-r--r-- scripts/package-build/strongswan/patches/strongswan/0002-vici-send-certificates-for-ike-sa-events.patch (renamed from packages/strongswan/patches/0002-vici-send-certificates-for-ike-sa-events.patch) | 0
-rw-r--r-- scripts/package-build/strongswan/patches/strongswan/0003-vici-add-support-for-individual-sa-state-changes.patch (renamed from packages/strongswan/patches/0003-vici-add-support-for-individual-sa-state-changes.patch) | 0
-rw-r--r-- scripts/package-build/strongswan/patches/strongswan/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch (renamed from packages/strongswan/patches/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch) | 0
-rw-r--r-- scripts/package-build/tacacs/.gitignore | 3
l--------- scripts/package-build/tacacs/build.py | 1
-rw-r--r-- scripts/package-build/tacacs/package.toml | 24
-rw-r--r-- scripts/package-build/telegraf/.gitignore | 7
-rw-r--r-- scripts/package-build/vpp/.gitignore | 2
l--------- scripts/package-build/vpp/build.py | 1
-rw-r--r-- scripts/package-build/vpp/package.toml | 35
-rw-r--r-- scripts/package-build/vyos-1x/.gitignore | 2
l--------- scripts/package-build/vyos-1x/build.py | 1
-rw-r--r-- scripts/package-build/vyos-1x/package.toml | 11
-rw-r--r-- scripts/package-build/waagent/.gitignore | 9
-rw-r--r-- scripts/package-build/waagent/package.toml | 2
-rw-r--r-- scripts/package-build/wide-dhcpv6/.gitignore | 8
-rw-r--r-- scripts/package-build/wide-dhcpv6/patches/0023-dhcpc6-support-per-interface-client-DUIDs.patch | 230
-rw-r--r-- scripts/package-build/wide-dhcpv6/patches/0024-bind-to-single-socket.patch | 17
-rw-r--r-- scripts/package-build/wide-dhcpv6/patches/0025-option-to-prevent-ia-release.patch | 155
-rw-r--r-- scripts/package-build/wide-dhcpv6/patches/wide-dhcpv6/0023-dhcpc6-support-per-interface-client-DUIDs.patch (renamed from packages/wide-dhcpv6/patches/0023-dhcpc6-support-per-interface-client-DUIDs.patch) | 0
-rw-r--r-- scripts/package-build/wide-dhcpv6/patches/wide-dhcpv6/0024-bind-to-single-socket.patch (renamed from packages/wide-dhcpv6/patches/0024-bind-to-single-socket.patch) | 0
-rw-r--r-- scripts/package-build/wide-dhcpv6/patches/wide-dhcpv6/0025-option-to-prevent-ia-release.patch (renamed from packages/wide-dhcpv6/patches/0025-option-to-prevent-ia-release.patch) | 0
-rw-r--r-- scripts/package-build/xen-guest-agent/.gitignore | 1
l--------- scripts/package-build/xen-guest-agent/build.py | 1
-rw-r--r-- scripts/package-build/xen-guest-agent/package.toml | 34
-rwxr-xr-x scripts/utils/merge-flavors | 76
-rw-r--r-- tools/cloud-init/AWS/config.boot.default | 2
-rw-r--r-- tools/container/config.boot.default | 2
-rw-r--r-- vars/README.md | 6
-rw-r--r-- vars/buildPackage.groovy | 267
-rw-r--r-- vars/cloneAndBuild.groovy | 87
-rw-r--r-- vars/getChangeSetPath.groovy | 26
-rw-r--r-- vars/getGitBranchName.groovy | 21
-rw-r--r-- vars/getGitRepoName.groovy | 20
-rw-r--r-- vars/getGitRepoURL.groovy | 20
-rw-r--r-- vars/getJenkinsfilePath.groovy | 24
-rw-r--r-- vars/isCustomBuild.groovy | 26
-rw-r--r-- vars/isPullRequest.groovy | 21
-rw-r--r-- vars/setDescription.groovy | 42
300 files changed, 16006 insertions(+), 7557 deletions(-)
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 8c71c80d..e686e187 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,7 +1,7 @@
<!-- All PR should follow this template to allow a clean and transparent review -->
<!-- Text placed between these delimiters is considered a comment and is not rendered -->
-## Change Summary
+## Change summary
<!--- Provide a general summary of your changes in the Title above -->
## Types of changes
@@ -21,27 +21,14 @@ the box, please use [x]
<!-- optional: Link to related other tasks on Phabricator. -->
<!-- * https://vyos.dev/Txxxx -->
-## Component(s) name
-<!-- A rather incomplete list of components: ethernet, wireguard, bgp, mpls, ldp, l2tp, dhcp ... -->
-
-## Proposed changes
-<!--- Describe your changes in detail -->
-
-## How to test
-<!---
-Please describe in detail how you tested your changes. Include details of your testing
-environment, and the tests you ran. When pasting configs, logs, shell output, backtraces,
-and other large chunks of text, surround this text with triple backtics
-```
-like this
-```
--->
+## Related PR(s)
+<!-- Link here any PRs in other repositories that are required by this PR -->
## Checklist:
<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
<!--- The entire development process is outlined here: https://docs.vyos.io/en/latest/contributing/development.html -->
-- [ ] I have read the [**CONTRIBUTING**](https://github.com/vyos/vyos-build/blob/current/CONTRIBUTING.md) document
+- [ ] I have read the [**CONTRIBUTING**](https://github.com/vyos/vyos-1x/blob/current/CONTRIBUTING.md) document
- [ ] I have linked this PR to one or more Phabricator Task(s)
- [ ] My commit headlines contain a valid Task id
- [ ] My change requires a change to the documentation
diff --git a/.github/workflows/check-scripts-executable.yml b/.github/workflows/check-scripts-executable.yml
deleted file mode 100644
index 123e9895..00000000
--- a/.github/workflows/check-scripts-executable.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: "Check for Jenkins build scripts has executable bit"
-
-on:
- pull_request:
- branches:
- - current
- - circinus
- - sagitta
- - equuleus
-
-permissions:
- contents: read
-
-jobs:
- check-scripts-executable:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v3
- with:
- repository: ${{ github.repository }}
- - name: Checking scripts are executable
- run: |
- files=$(find packages/ -type f -name '*.py' -or -name '*.sh' -not -executable -print)
- if [[ -n $files ]]; then
- echo "Found files without executable bit:"
- for file in $files; do
- echo $file;
- done;
- exit 1;
- fi
- shell: bash
diff --git a/.github/workflows/cleanup-mirror-pr-branch.yml b/.github/workflows/cleanup-mirror-pr-branch.yml
new file mode 100644
index 00000000..a62e44b2
--- /dev/null
+++ b/.github/workflows/cleanup-mirror-pr-branch.yml
@@ -0,0 +1,16 @@
+name: Cleanup pr mirror branch
+
+on:
+ pull_request:
+ types: [closed]
+ branches:
+ - current
+
+permissions:
+ contents: write
+
+jobs:
+ call-delete-branch:
+ if: github.repository_owner != 'vyos'
+ uses: vyos/.github/.github/workflows/cleanup-mirror-pr-branch.yml@current
+ secrets: inherit
diff --git a/.github/workflows/mirror-pr-and-sync.yml b/.github/workflows/mirror-pr-and-sync.yml
new file mode 100644
index 00000000..120e116d
--- /dev/null
+++ b/.github/workflows/mirror-pr-and-sync.yml
@@ -0,0 +1,21 @@
+name: Create Mirror PR and Repo Sync
+on:
+ workflow_dispatch:
+ inputs:
+ sync_branch:
+ description: 'branch to sync'
+ required: true
+ type: string
+
+permissions:
+ pull-requests: write
+ contents: write
+
+jobs:
+ call-mirror-pr-and-sync:
+ if: github.repository_owner != 'vyos'
+ uses: VyOS-Networks/vyos-reusable-workflows/.github/workflows/mirror-pr-and-sync.yml@main
+ with:
+ sync_branch: ${{ inputs.sync_branch }}
+ secrets:
+ PAT: ${{ secrets.PAT }}
diff --git a/.github/workflows/repo-sync.yml b/.github/workflows/repo-sync.yml
deleted file mode 100644
index 6da2fb40..00000000
--- a/.github/workflows/repo-sync.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: Repo-sync
-
-on:
- pull_request_target:
- types:
- - closed
- branches:
- - current
- workflow_dispatch:
-
-jobs:
- trigger-sync:
- uses: vyos/.github/.github/workflows/trigger-repo-sync.yml@current
- secrets:
- REMOTE_REPO: ${{ secrets.REMOTE_REPO }}
- REMOTE_OWNER: ${{ secrets.REMOTE_OWNER }}
- PAT: ${{ secrets.PAT }}
diff --git a/.github/workflows/trigger-docker-image-build.yml b/.github/workflows/trigger-docker-image-build.yml
new file mode 100644
index 00000000..5e0b07ef
--- /dev/null
+++ b/.github/workflows/trigger-docker-image-build.yml
@@ -0,0 +1,47 @@
+name: Trigger Docker image build
+
+on:
+ pull_request_target:
+ types:
+ - closed
+ branches:
+ - current
+
+permissions:
+ packages: write
+ contents: read
+ attestations: write
+ id-token: write
+ pull-requests: read
+
+jobs:
+ track-changes:
+ if: github.event.pull_request.merged == true
+ runs-on: ubuntu-latest
+
+ env:
+ REF: main # Used for curl to trigger image build
+
+ steps:
+ - name: Checkout vyos/vyos-build repo
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.ref_name }}
+
+ - uses: dorny/paths-filter@v3
+ id: changes
+ with:
+ filters: |
+ docker-dir:
+ - 'docker/**'
+
+ - name: "Trigger Docker image build for ${{ github.ref_name }}"
+ if: ${{ steps.changes.outputs.docker-dir == 'true' }}
+ run: |
+ curl -L \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.PAT }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/${{ secrets.REMOTE_OWNER }}/${{ secrets.REMOTE_REUSE_REPO }}/actions/workflows/build-docker-image.yml/dispatches \
-d '{"ref": "${{ env.REF }}", "inputs":{"branch":"${{ github.ref_name }}", "environment":"production"}}'
\ No newline at end of file
diff --git a/.github/workflows/trigger-pr-mirror-repo-sync.yml b/.github/workflows/trigger-pr-mirror-repo-sync.yml
new file mode 100644
index 00000000..7b4a241f
--- /dev/null
+++ b/.github/workflows/trigger-pr-mirror-repo-sync.yml
@@ -0,0 +1,18 @@
+name: Trigger Mirror PR and Repo Sync
+on:
+ pull_request_target:
+ types:
+ - closed
+ branches:
+ - current
+
+permissions:
+ pull-requests: write
+ contents: write
+ issues: write
+
+jobs:
+ call-trigger-mirror-pr-repo-sync:
+ if: github.repository_owner == 'vyos' && github.event.pull_request.merged == true
+ uses: vyos/.github/.github/workflows/trigger-pr-mirror-repo-sync.yml@current
+ secrets: inherit
diff --git a/.github/workflows/trigger_rebuild_packages.yml b/.github/workflows/trigger_rebuild_packages.yml
index 2be662e8..cfaf6d5c 100644
--- a/.github/workflows/trigger_rebuild_packages.yml
+++ b/.github/workflows/trigger_rebuild_packages.yml
@@ -23,8 +23,16 @@ jobs:
with:
base: ${{ github.ref_name }}
filters: |
+ amazon-cloudwatch-agent:
+ - 'scripts/package-build/amazon-cloudwatch-agent/**'
+ amazon-ssm-agent:
+ - 'scripts/package-build/amazon-ssm-agent/**'
aws-gwlbtun:
- 'scripts/package-build/aws-gwlbtun/**'
+ blackbox_exporter:
+ - 'scripts/package-build/blackbox_exporter/**'
+ bash-completion:
+ - 'scripts/package-build/bash-completion/**'
ddclient:
- 'scripts/package-build/ddclient/**'
dropbear:
@@ -33,6 +41,8 @@ jobs:
- 'scripts/package-build/ethtool/**'
frr:
- 'scripts/package-build/frr/**'
+ frr_exporter:
+ - 'scripts/package-build/frr_exporter/**'
hostap:
- 'scripts/package-build/hostap/**'
hsflowd:
@@ -43,6 +53,10 @@ jobs:
- 'scripts/package-build/kea/**'
keepalived:
- 'scripts/package-build/keepalived/**'
+ libnss-mapuser:
+ - 'scripts/package-build/libnss-mapuser/**'
+ libpam-radius-auth:
+ - 'scripts/package-build/libpam-radius-auth/**'
linux-kernel:
- 'data/defaults.toml'
- 'scripts/package-build/linux-kernel/**'
@@ -52,8 +66,8 @@ jobs:
- 'scripts/package-build/net-snmp/**'
netfilter:
- 'scripts/package-build/netfilter/**'
- opennhrp:
- - 'scripts/package-build/opennhrp/**'
+ node_exporter:
+ - 'scripts/package-build/node_exporter/**'
openvpn-otp:
- 'scripts/package-build/openvpn-otp/**'
owamp:
@@ -70,12 +84,18 @@ jobs:
- 'scripts/package-build/radvd/**'
strongswan:
- 'scripts/package-build/strongswan/**'
+ tacacs:
+ - 'scripts/package-build/tacacs/**'
telegraf:
- 'scripts/package-build/telegraf/**'
+ vpp:
+ - 'scripts/package-build/vpp/**'
waagent:
- 'scripts/package-build/waagent/**'
wide-dhcpv6:
- 'scripts/package-build/wide-dhcpv6/**'
+ xen-guest-agent:
+ - 'scripts/package-build/xen-guest-agent/**'
- name: Trigger builds for changed packages
run: |
@@ -93,10 +113,26 @@ jobs:
}
# Trigger builds based on detected changes
+ if [ "${{ steps.changes.outputs.amazon-cloudwatch-agent }}" == "true" ]; then
+ trigger_build "amazon-cloudwatch-agent"
+ fi
+
+ if [ "${{ steps.changes.outputs.amazon-ssm-agent }}" == "true" ]; then
+ trigger_build "amazon-ssm-agent"
+ fi
+
if [ "${{ steps.changes.outputs.aws-gwlbtun }}" == "true" ]; then
trigger_build "aws-gwlbtun"
fi
+ if [ "${{ steps.changes.outputs.bash-completion }}" == "true" ]; then
+ trigger_build "bash-completion"
+ fi
+
+ if [ "${{ steps.changes.outputs.blackbox_exporter }}" == "true" ]; then
+ trigger_build "blackbox_exporter"
+ fi
+
if [ "${{ steps.changes.outputs.ddclient }}" == "true" ]; then
trigger_build "ddclient"
fi
@@ -113,6 +149,10 @@ jobs:
trigger_build "frr"
fi
+ if [ "${{ steps.changes.outputs.frr_exporter }}" == "true" ]; then
+ trigger_build "frr_exporter"
+ fi
+
if [ "${{ steps.changes.outputs.hostap }}" == "true" ]; then
trigger_build "hostap"
fi
@@ -133,6 +173,14 @@ jobs:
trigger_build "keepalived"
fi
+ if [ "${{ steps.changes.outputs.libnss-mapuser }}" == "true" ]; then
+ trigger_build "libnss-mapuser"
+ fi
+
+ if [ "${{ steps.changes.outputs.libpam-radius-auth }}" == "true" ]; then
+ trigger_build "libpam-radius-auth"
+ fi
+
if [ "${{ steps.changes.outputs.linux-kernel }}" == "true" ]; then
trigger_build "linux-kernel"
fi
@@ -149,8 +197,8 @@ jobs:
trigger_build "netfilter"
fi
- if [ "${{ steps.changes.outputs.opennhrp }}" == "true" ]; then
- trigger_build "opennhrp"
+ if [ "${{ steps.changes.outputs.node_exporter }}" == "true" ]; then
+ trigger_build "node_exporter"
fi
if [ "${{ steps.changes.outputs.openvpn-otp }}" == "true" ]; then
@@ -185,10 +233,18 @@ jobs:
trigger_build "strongswan"
fi
+ if [ "${{ steps.changes.outputs.tacacs }}" == "true" ]; then
+ trigger_build "tacacs"
+ fi
+
if [ "${{ steps.changes.outputs.telegraf }}" == "true" ]; then
trigger_build "telegraf"
fi
+ if [ "${{ steps.changes.outputs.vpp }}" == "true" ]; then
+ trigger_build "vpp"
+ fi
+
if [ "${{ steps.changes.outputs.waagent }}" == "true" ]; then
trigger_build "waagent"
fi
@@ -196,3 +252,7 @@ jobs:
if [ "${{ steps.changes.outputs.wide-dhcpv6 }}" == "true" ]; then
trigger_build "ethtool"
fi
+
+ if [ "${{ steps.changes.outputs.xen-guest-agent }}" == "true" ]; then
+ trigger_build "xen-guest-agent"
+ fi
diff --git a/.gitignore b/.gitignore
index e3724a9f..6de027c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,11 +1,12 @@
+.build/config
build/*
+config/*
*.pyc
packer_build/*
packer_cache/*
key/*
packages/*
!packages/*/
-data/live-build-config/includes.chroot/var/lib/shim-signed/mok/*
/testinstall*.img
/testinstall*.efivars
/*.qcow2
diff --git a/CODEOWNERS b/CODEOWNERS
index cf2ba0ac..0bf2e6d7 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1 +1,2 @@
-* @vyos/reviewers
+# Users from reviewers github team
+# * @vyos/reviewers
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3ff00df8..2f57a272 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -40,7 +40,7 @@ task first. Once there is an entry in Phabricator, you should reference its id
in your commit message, as shown below:
* `ddclient: T1030: auto create runtime directories`
-* `Jenkins: add current Git commit ID to build description`
+* `keepalived: T1234: do not autostart service, will be done by CLI`
If there is no [Phabricator](https://vyos.dev) reference in the
commits of your pull request, we have to ask you to amend the commit message.
diff --git a/Jenkinsfile b/Jenkinsfile
deleted file mode 100644
index 87e02ccc..00000000
--- a/Jenkinsfile
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2019-2021 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-setDescription()
-
-node('Docker') {
- stage('Setup Container') {
- script {
- // create container name on demand
- def branchName = getGitBranchName()
- // Adjust PR target branch name so we can re-map it to the proper Docker image.
- if (isPullRequest())
- branchName = env.CHANGE_TARGET.toLowerCase()
- if (branchName.equals('master'))
- branchName = 'current'
-
- env.DOCKER_IMAGE = 'vyos/vyos-build:' + branchName
-
- // Get the current UID and GID from the jenkins agent to allow use of the same UID inside Docker
- env.USR_ID = sh(returnStdout: true, script: 'id -u').toString().trim()
- env.GRP_ID = sh(returnStdout: true, script: 'id -g').toString().trim()
- env.DOCKER_ARGS = '--privileged --sysctl net.ipv6.conf.lo.disable_ipv6=0 -e GOSU_UID=' + env.USR_ID + ' -e GOSU_GID=' + env.GRP_ID
- env.BASE_VERSION = '1.5-rolling-'
- }
- }
-}
-
-pipeline {
- agent {
- docker {
- label "Docker"
- args "${env.DOCKER_ARGS}"
- image "${env.DOCKER_IMAGE}"
- alwaysPull true
- reuseNode true
- }
- }
- parameters {
- string(name: 'BUILD_BY', defaultValue: 'autobuild@vyos.net', description: 'Builder identifier (e.g. jrandomhacker@example.net)')
- string(name: 'BUILD_VERSION', defaultValue: env.BASE_VERSION + 'ISO8601-TIMESTAMP', description: 'Version number (release builds only)')
- booleanParam(name: 'BUILD_PUBLISH', defaultValue: false, description: 'Publish this build AWS S3')
- booleanParam(name: 'BUILD_SNAPSHOT', defaultValue: false, description: 'Upload image to AWS S3 snapshot bucket')
- booleanParam(name: 'TEST_SMOKETESTS', defaultValue: true, description: 'Run Smoketests after ISO build')
- booleanParam(name: 'TEST_RAID1', defaultValue: true, description: 'Perform RAID1 installation tests')
- }
- options {
- disableConcurrentBuilds()
- timeout(time: 240, unit: 'MINUTES')
- timestamps()
- buildDiscarder(logRotator(numToKeepStr: '20'))
- }
- stages {
- stage('Build ISO') {
- when {
- beforeOptions true
- beforeAgent true
- // Only run ISO image build process of explicit user request or
- // once a night triggered by the timer.
- anyOf {
- triggeredBy 'TimerTrigger'
- triggeredBy cause: "UserIdCause"
- }
- }
- environment {
- PYTHONDONTWRITEBYTECODE = 1
- }
- steps {
- script {
- // Display Git commit Id used with the Jenkinsfile on the Job "Build History" pane
- def commitId = sh(returnStdout: true, script: 'git rev-parse --short=11 HEAD').trim()
- currentBuild.description = sprintf('Git SHA1: %s', commitId[-11..-1])
-
- def CUSTOM_PACKAGES = ''
- if (params.TEST_SMOKETESTS)
- CUSTOM_PACKAGES = '--custom-package vyos-1x-smoketest'
-
- def VYOS_VERSION = params.BUILD_VERSION
- if (params.BUILD_VERSION == env.BASE_VERSION + 'ISO8601-TIMESTAMP')
- VYOS_VERSION = env.BASE_VERSION + sh(returnStdout: true, script: 'date -u +%Y%m%d%H%M').toString().trim()
-
- sh """
- sudo --preserve-env ./build-vyos-image \
- --build-by "${params.BUILD_BY}" \
- --debian-mirror http://deb.debian.org/debian/ \
- --build-type release \
- --version "${VYOS_VERSION}" ${CUSTOM_PACKAGES} generic
- """
-
- if (fileExists('build/live-image-amd64.hybrid.iso') == false) {
- error('ISO build error')
- }
- }
- }
- }
- stage('Smoketests for RAID-1 system installation') {
- when {
- expression { fileExists 'build/live-image-amd64.hybrid.iso' }
- expression { return params.TEST_RAID1 }
- }
- steps {
- sh "sudo make testraid"
- }
- }
- stage('Smoketests for TPM config encryption') {
- when {
- expression { fileExists 'build/live-image-amd64.hybrid.iso' }
- }
- steps {
- sh "sudo make testtpm"
- }
- }
- stage('Smoketests') {
- when {
- expression { return params.TEST_SMOKETESTS }
- }
- parallel {
- stage('CLI validation') {
- when {
- expression { fileExists 'build/live-image-amd64.hybrid.iso' }
- }
- steps {
- sh "sudo make test"
- }
- }
- stage('vyos-configd and arbitrary config loader') {
- when {
- expression { fileExists 'build/live-image-amd64.hybrid.iso' }
- }
- steps {
- sh "sudo make testc"
- }
- }
- }
- }
- }
- post {
- success {
- script {
- // only deploy ISO if build from official repository
- if (isCustomBuild())
- return
-
- // always store local artifacts
- archiveArtifacts artifacts: '**/build/vyos-*.iso, **/build/vyos-*.qcow2',
- allowEmptyArchive: true
-
- // only deploy ISO if requested via parameter
- if (!params.BUILD_PUBLISH)
- return
-
- files = findFiles(glob: 'build/vyos*.iso')
- // Publish ISO image to daily builds bucket
- if (files) {
- // Publish ISO image to snapshot bucket
- if (files && params.BUILD_SNAPSHOT) {
- withAWS(region: 'us-east-1', credentials: 's3-vyos-downloads-rolling-rw') {
- s3Upload(bucket: 's3-us.vyos.io', path: 'snapshot/' + params.BUILD_VERSION + '/', workingDir: 'build', includePathPattern: 'vyos*.iso',
- cacheControl: "public, max-age=2592000")
- }
- } else {
- // Publish build result to AWS S3 rolling bucket
- withAWS(region: 'us-east-1', credentials: 's3-vyos-downloads-rolling-rw') {
- s3Upload(bucket: 's3-us.vyos.io', path: 'rolling/' + getGitBranchName() + '/',
- workingDir: 'build', includePathPattern: 'vyos*.iso')
- s3Copy(fromBucket: 's3-us.vyos.io', fromPath: 'rolling/' + getGitBranchName() + '/' + files[0].name,
- toBucket: 's3-us.vyos.io', toPath: 'rolling/' + getGitBranchName() + '/vyos-rolling-latest.iso')
- }
- }
-
- // Trigger GitHub action which will re-build the static community website which
- // also holds the AWS download links to the generated ISO images
- withCredentials([string(credentialsId: 'vyos.net-build-trigger-token', variable: 'TOKEN')]) {
- sh '''
- curl -X POST --header "Accept: application/vnd.github.v3+json" \
- --header "authorization: Bearer $TOKEN" --data '{"ref": "production"}' \
- https://api.github.com/repos/vyos/community.vyos.net/actions/workflows/main.yml/dispatches
- '''
- }
- }
-
- // Publish ISO image to snapshot bucket
- if (files && params.BUILD_SNAPSHOT) {
- withAWS(region: 'us-east-1', credentials: 's3-vyos-downloads-rolling-rw') {
- s3Upload(bucket: 's3-us.vyos.io', path: 'snapshot/',
- workingDir: 'build', includePathPattern: 'vyos*.iso')
- }
- }
- }
- }
- failure {
- archiveArtifacts artifacts: '**/build/vyos-*.iso, **/build/vyos-*.qcow2',
- allowEmptyArchive: true
- }
- cleanup {
- echo 'One way or another, I have finished'
- // the 'build' directory got elevated permissions during the build
- // cdjust permissions so it can be cleaned up by the regular user
- sh 'sudo make purge'
- deleteDir() /* cleanup our workspace */
- }
- }
-}
diff --git a/Jenkinsfile.docker b/Jenkinsfile.docker
deleted file mode 100644
index 2cac4548..00000000
--- a/Jenkinsfile.docker
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2019-2021 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-setDescription()
-
-pipeline {
- agent none
- environment {
- GIT_BRANCH_NAME = getGitBranchName()
- }
- options {
- disableConcurrentBuilds()
- timeout(time: 240, unit: 'MINUTES')
- timestamps()
- buildDiscarder(logRotator(numToKeepStr: '20'))
- }
- stages {
- stage('Build containers') {
- when {
- beforeOptions true
- beforeAgent true
- // Only run ISO image build process of explicit user request or
- // once a night triggered by the timer.
- anyOf {
- changeset pattern: "**/docker/*"
- changeset pattern: "**/Jenkinsfile.docker"
- triggeredBy cause: "UserIdCause"
- }
- }
- parallel {
- stage('x86_64') {
- agent {
- label "ec2_amd64"
- }
- steps {
- script {
- DOCKER_IMAGE_AMD64 = "vyos/vyos-build:" + env.GIT_BRANCH_NAME
- sh "docker build --no-cache --tag ${DOCKER_IMAGE_AMD64} docker"
- if (! isCustomBuild()) {
- withDockerRegistry([credentialsId: "DockerHub"]) {
- sh "docker push ${DOCKER_IMAGE_AMD64}"
- }
- }
- }
- }
- }
- stage('arm64') {
- agent {
- label "ec2_arm64"
- }
- steps {
- script {
- DOCKER_IMAGE_ARM64 = "vyos/vyos-build:" + env.GIT_BRANCH_NAME + "-arm64"
- sh "docker build --no-cache --tag ${DOCKER_IMAGE_ARM64} --build-arg ARCH=arm64v8/ docker"
- if (! isCustomBuild()) {
- withDockerRegistry([credentialsId: "DockerHub"]) {
- sh "docker push ${DOCKER_IMAGE_ARM64}"
- }
- }
- }
- }
- }
- }
- }
- }
-}
diff --git a/Makefile b/Makefile
index 2ff776c0..a11e88b5 100644
--- a/Makefile
+++ b/Makefile
@@ -21,17 +21,27 @@ checkiso:
.PHONY: test
.ONESHELL:
test: checkiso
- scripts/check-qemu-install --debug --configd --match="$(MATCH)" --smoketest --uefi build/live-image-amd64.hybrid.iso $(filter-out $@,$(MAKECMDGOALS))
+ scripts/check-qemu-install --debug --configd --match="$(MATCH)" --smoketest --uefi --cpu 4 --memory 8 build/live-image-amd64.hybrid.iso $(filter-out $@,$(MAKECMDGOALS))
.PHONY: test-no-interfaces
.ONESHELL:
test-no-interfaces: checkiso
- scripts/check-qemu-install --debug --configd --match="$(MATCH)" --smoketest --uefi --no-interfaces build/live-image-amd64.hybrid.iso
+ scripts/check-qemu-install --debug --configd --smoketest --uefi --no-interfaces --cpu 4 --memory 8 build/live-image-amd64.hybrid.iso
+
+.PHONY: test-interfaces
+.ONESHELL:
+test-interfaces: checkiso
+ scripts/check-qemu-install --debug --configd --match="interfaces_" --smoketest --uefi build/live-image-amd64.hybrid.iso
+
+.PHONY: test-vpp
+.ONESHELL:
+test-vpp: checkiso
+ scripts/check-qemu-install --debug --configd --match="vpp" --smoketest --uefi --cpu 4 --memory 8 build/live-image-amd64.hybrid.iso
.PHONY: testc
.ONESHELL:
testc: checkiso
- scripts/check-qemu-install --debug --configd --configtest build/live-image-amd64.hybrid.iso $(filter-out $@,$(MAKECMDGOALS))
+ scripts/check-qemu-install --debug --configd --cpu 2 --memory 7 --configtest build/live-image-amd64.hybrid.iso $(filter-out $@,$(MAKECMDGOALS))
.PHONY: testraid
.ONESHELL:
@@ -69,7 +79,7 @@ clean:
rm -f config/binary config/bootstrap config/chroot config/common config/source
rm -f build.log
rm -f vyos-*.iso
- rm -f *.img
+ rm -f *.img *.efivars
rm -f *.xz
rm -f *.vhd
rm -f *.raw
diff --git a/README.md b/README.md
index ea67445b..0a1fd85f 100644
--- a/README.md
+++ b/README.md
@@ -33,15 +33,13 @@ There are several directories with their own purpose:
* `build/` Used for temporary files used for the build and for build artifacts
* `data/` Data required for building the ISO (e.g. boot splash/configs)
- * `packages/` This directory has two meanings. First it can hold arbitrary *.deb
- packages which will be embeded into the resulting ISO, but it also
- holds Jenkins Pipeline definitions for required VyOS packages.
+ * `packages/` This directory can hold arbitrary *.deb
+ packages which will be embeded into the resulting ISO.
Among other things those packages will be: Linux Kernel, FRR,
Netfiler...
* `scripts/` Scripts that are used for the build process
* `tools/` Scripts that are used for maintainer's tasks automation and other
purposes, but not during ISO build process
- * `vars/` Jenkins Pipeline library for reusable functions
# Building VyOS
diff --git a/data/architectures/amd64.toml b/data/architectures/amd64.toml
index e85b4158..8676ad2f 100644
--- a/data/architectures/amd64.toml
+++ b/data/architectures/amd64.toml
@@ -1,15 +1,21 @@
-additional_repositories = [
- "deb [arch=amd64] https://repo.saltproject.io/py3/debian/11/amd64/3005 bullseye main"
-]
-
# Packages added to images for x86 by default
packages = [
"grub2",
"grub-pc",
+ "vyos-drivers-realtek-r8152",
"vyos-linux-firmware",
"vyos-intel-qat",
"vyos-intel-ixgbe",
"vyos-intel-ixgbevf",
- "mlnx-ofed-kernel-modules",
- "mlnx-tools",
+ "vyos-ipt-netflow",
+ "intel-microcode",
+ "amd64-microcode"
]
+
+[additional_repositories.salt]
+ architecture = "amd64"
+ url = "https://packages.vyos.net/saltproject/debian/11/amd64/3005"
+ distribution = "bullseye"
+
+[additional_repositories.zabbix]
+ url = "https://repo.zabbix.com/zabbix/6.0/debian"
diff --git a/data/architectures/arm64.toml b/data/architectures/arm64.toml
index 228d0f3f..4d8596c6 100644
--- a/data/architectures/arm64.toml
+++ b/data/architectures/arm64.toml
@@ -1,9 +1,13 @@
-additional_repositories = [
- "deb [arch=arm64] https://repo.saltproject.io/py3/debian/11/arm64/3005 bullseye main"
-]
-
# Packages included in ARM64 images by default
packages = [
"grub-efi-arm64",
]
bootloaders = "grub-efi"
+
+[additional_repositories.salt]
+ architecture = "arm64"
+ url = "https://packages.vyos.net/saltproject/debian/11/arm64/3005"
+ distribution = "bullseye"
+
+[additional_repositories.zabbix]
+ url = "https://repo.zabbix.com/zabbix/6.0/debian-arm64"
diff --git a/data/architectures/armhf.toml b/data/architectures/armhf.toml
index de5e62f4..8cf2d763 100644
--- a/data/architectures/armhf.toml
+++ b/data/architectures/armhf.toml
@@ -1,5 +1,5 @@
additional_repositories = [
- "deb [arch=armhf] https://repo.saltproject.io/py3/debian/11/armhf/3005 bullseye main"
+ "deb [arch=armhf] https://packages.vyos.net/saltproject/debian/11/arm64/3005 bullseye main"
]
# Packages included in armhf images by default
diff --git a/data/build-flavors/README.md b/data/build-flavors/README.md
deleted file mode 100644
index 19716255..00000000
--- a/data/build-flavors/README.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# VyOS build flavors
-
-VyOS supports multiple different hardware and virtual platforms.
-Those platforms often need custom packages and may require custom
-configs. To make maintenance of existing flavors simpler
-and to allow everyone to make and maintain their own flavors,
-the build scripts support storing flavor configuration in [TOML](https://toml.io) files.
-
-Flavor files must be in `data/build-flavors`. Here's an example:
-
-```toml
-# Generic (aka "universal") ISO image
-
-image_format = "iso"
-
-# Include these packages in the image regardless of the architecture
-packages = [
- # QEMU and Xen guest tools exist for multiple architectures
- "qemu-guest-agent",
- "vyos-xe-guest-utilities",
-]
-
-[architectures.amd64]
- # Hyper-V and VMware guest tools are x86-only
- packages = ["hyperv-daemons", "vyos-1x-vmware"]
-```
-
-## Image format
-
-The `image_format` option specifies the default format to build.
-
-```toml
-image_format = "iso"
-```
-
-**Note:** currently, ISO is the only supported format,
-support for different flavors is in progress.
-
-## Including custom packages
-
-If you want the build scripts to include custom packages from repositories
-in the image, you can list them in the `packages` field.
-
-For example, this is how to include the GNU Hello package:
-
-```toml
-packages = ['hello']
-```
-
-It's possible to include packages only in images with certain build architectures
-by placing them in a subtable.
-
-If you want to include GNU Hello only in AMD64 images, do this:
-
-```toml
-[architectures.amd64]
- packages = ['hello']
-```
-
-## Including custom files
-
-You can include files inside the SquashFS filesystem by adding entries
-to the `includes_chroot` array.
-
-```toml
-[[includes_chroot]]
- path = "etc/question.txt"
- data = '''
-Can you guess how this file ended up in the image?
- '''
-
- path = "etc/answer.txt"
- data = '''
-It was in the flavor file!
- '''
-```
diff --git a/data/build-flavors/aws-iso.toml b/data/build-flavors/aws-iso.toml
deleted file mode 100644
index e13ed59a..00000000
--- a/data/build-flavors/aws-iso.toml
+++ /dev/null
@@ -1,3 +0,0 @@
-image_format = "iso"
-
-packages = ["amazon-cloudwatch-agent"]
diff --git a/data/build-flavors/azure-iso.toml b/data/build-flavors/azure-iso.toml
deleted file mode 100644
index b4774483..00000000
--- a/data/build-flavors/azure-iso.toml
+++ /dev/null
@@ -1,5 +0,0 @@
-image_format = "iso"
-
-packages = ["waagent"]
-
-
diff --git a/data/build-flavors/dell-vep1400.toml b/data/build-flavors/dell-vep1400.toml
deleted file mode 100644
index 038cbe24..00000000
--- a/data/build-flavors/dell-vep1400.toml
+++ /dev/null
@@ -1,116 +0,0 @@
-# ISO image for Dell VEP4600 devices
-
-image_format = "iso"
-
-# Replace built-in NIC naming rules with empty files
-# to prevent them from taking any effect
-
-[[includes_chroot]]
- path = "lib/systemd/network/99-default.link"
- data = ''
-
-[[includes_chroot]]
- path = "lib/udev/rules.d/65-vyatta-net.rules"
- data = ''
-
-[[includes_chroot]]
- path = "lib/udev/rules.d/75-persistent-net-generator.rules"
- data = ''
-
-# Install platform-specific link files
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth0-000.link"
- data = '''
-[Match]
-Path=pci-0000:05:00.1
-Driver=ixgbe
-
-[Link]
-Name=eth0
-Alias=SFP+1
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth1-000.link"
- data = '''
-[Match]
-Path=pci-0000:05:00.0
-Driver=ixgbe
-
-[Link]
-Name=eth1
-Alias=SFP+2
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth2-000.link"
- data = '''
-[Match]
-Path=pci-0000:02:00.2
-Driver=igb
-
-[Link]
-Name=eth2
-Alias=GE1
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth3-000.link"
- data = '''
-[Match]
-Path=pci-0000:02:00.3
-Driver=igb
-
-[Link]
-Name=eth3
-Alias=GE2
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth4-000.link"
- data = '''
-[Match]
-Path=pci-0000:02:00.0
-Driver=igb
-
-[Link]
-Name=eth4
-Alias=GE3
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth5-000.link"
- data = '''
-[Match]
-Path=pci-0000:02:00.1
-Driver=igb
-
-[Link]
-Name=eth5
-Alias=GE4
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth6-000.link"
- data = '''
-[Match]
-Path=pci-0000:07:00.1
-Driver=ixgbe
-
-[Link]
-Name=eth6
-Alias=GE5
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth7-000.link"
- data = '''
-[Match]
-Path=pci-0000:07:00.0
-Driver=ixgbe
-
-[Link]
-Name=eth7
-Alias=GE6
-'''
diff --git a/data/build-flavors/dell-vep4600.toml b/data/build-flavors/dell-vep4600.toml
deleted file mode 100644
index 50fdb4d2..00000000
--- a/data/build-flavors/dell-vep4600.toml
+++ /dev/null
@@ -1,267 +0,0 @@
-# ISO image for Dell VEP4600 devices
-
-image_format = "iso"
-
-# Replace built-in NIC naming rules with empty files
-# to prevent them from taking any effect
-
-[[includes_chroot]]
- path = "lib/systemd/network/99-default.link"
- data = ''
-
-[[includes_chroot]]
- path = "lib/udev/rules.d/65-vyatta-net.rules"
- data = ''
-
-[[includes_chroot]]
- path = "lib/udev/rules.d/75-persistent-net-generator.rules"
- data = ''
-
-# Install platform-specific link files
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth0-000.link"
- data = '''
-[Match]
-Path=pci-0000:04:00.0
-Driver=igb
-
-[Link]
-Name=eth0
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth0-001.link"
- data = '''
-[Match]
-Path=pci-0000:05:00.0
-Driver=igb
-
-[Link]
-Name=eth0
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth10-000.link"
- data = '''
-[Match]
-Path=pci-0000:65:00.3
-Driver=i40e
-
-[Link]
-Name=eth10
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth1-000.link"
- data = '''
-[Match]
-Path=pci-0000:b8:00.0
-Driver=i40e
-
-[Link]
-Name=eth1
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth1-001.link"
- data = '''
-[Match]
-Path=pci-0000:b6:00.0
-Driver=i40e
-
-[Link]
-Name=eth1
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth11-000.link"
- data = '''
-[Match]
-Path=pci-0000:17:00.0
-Driver=i40e
-
-[Link]
-Name=eth11
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth12-000.link"
- data = '''
-[Match]
-Path=pci-0000:17:00.1
-Driver=i40e
-
-[Link]
-Name=eth12
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth13-000.link"
- data = '''
-[Match]
-Path=pci-0000:17:00.2
-Driver=i40e
-
-[Link]
-Name=eth13
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth14-000.link"
- data = '''
-[Match]
-Path=pci-0000:17:00.3
-Driver=i40e
-
-[Link]
-Name=eth14
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth2-000.link"
- data = '''
-[Match]
-Path=pci-0000:b8:00.1
-Driver=i40e
-
-[Link]
-Name=eth2
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth2-001.link"
- data = '''
-[Match]
-Path=pci-0000:b6:00.1
-Driver=i40e
-
-[Link]
-Name=eth2
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth3-000.link"
- data = '''
-[Match]
-Path=pci-0000:02:00.1
-Driver=igb
-
-[Link]
-Name=eth3
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth4-000.link"
- data = '''
-[Match]
-Path=pci-0000:02:00.0
-Driver=igb
-
-[Link]
-Name=eth4
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth5-000.link"
- data = '''
-[Match]
-Path=pci-0000:02:00.3
-Driver=igb
-
-[Link]
-Name=eth5
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth6-000.link"
- data = '''
-[Match]
-Path=pci-0000:02:00.2
-Driver=igb
-
-[Link]
-Name=eth6
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth7-000.link"
- data = '''
-[Match]
-Path=pci-0000:65:00.0
-Driver=i40e
-
-[Link]
-Name=eth7
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth8-000.link"
- data = '''
-[Match]
-Path=pci-0000:65:00.1
-Driver=i40e
-
-[Link]
-Name=eth8
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-eth9-000.link"
- data = '''
-[Match]
-Path=pci-0000:65:00.2
-Driver=i40e
-
-[Link]
-Name=eth9
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-unused0-000.link"
- data = '''
-[Match]
-Path=pci-0000:b8:00.2
-Driver=i40e
-
-[Link]
-Name=unused0
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-unused0-001.link"
- data = '''
-[Match]
-Path=pci-0000:b6:00.2
-Driver=i40e
-
-[Link]
-Name=unused0
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-unused1-000.link"
- data = '''
-[Match]
-Path=pci-0000:b8:00.3
-Driver=i40e
-
-[Link]
-Name=unused1
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/10-unused1-001.link"
- data = '''
-[Match]
-Path=pci-0000:b6:00.3
-Driver=i40e
-
-[Link]
-Name=unused1
-'''
-
-[[includes_chroot]]
- path = "etc/systemd/network/99-default.link"
- data = '''
-'''
diff --git a/data/build-flavors/edgecore.toml b/data/build-flavors/edgecore.toml
deleted file mode 100644
index efea4863..00000000
--- a/data/build-flavors/edgecore.toml
+++ /dev/null
@@ -1,46 +0,0 @@
-# ISO image for EdgeCore routers
-
-image_format = "iso"
-
-# udev rules for correct ordering of onboard NICs
-[[includes_chroot]]
- path = "lib/udev/rules.d/64-vyos-SAF51015I-net.rules"
- data = '''
-ATTR{[dmi/id]board_name}!="SAF51015I-0318-EC", GOTO="end_ec_nic"
-
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:02:00.0", ENV{VYOS_IFNAME}="eth1"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:03:00.0", ENV{VYOS_IFNAME}="eth2"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:04:00.0", ENV{VYOS_IFNAME}="eth3"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:05:00.0", ENV{VYOS_IFNAME}="eth4"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:06:00.0", ENV{VYOS_IFNAME}="eth5"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:06:00.1", ENV{VYOS_IFNAME}="eth6"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:06:00.2", ENV{VYOS_IFNAME}="eth7"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:06:00.3", ENV{VYOS_IFNAME}="eth8"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:0a:00.0", ENV{VYOS_IFNAME}="eth9"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:0a:00.1", ENV{VYOS_IFNAME}="eth10"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:0b:00.0", ENV{VYOS_IFNAME}="eth11"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:0b:00.1", ENV{VYOS_IFNAME}="eth12"
-
-LABEL="end_ec_nic"
-
-'''
-
-[[includes_chroot]]
- path = "lib/udev/rules.d/64-vyos-SAF51003I-net.rules"
- data = '''
-ATTR{[dmi/id]board_name}!="SAF51003I", GOTO="end_ec_nic"
-
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:02:00.0", ENV{VYOS_IFNAME}="eth1", ATTR{ifalias}="LAN1"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:02:00.1", ENV{VYOS_IFNAME}="eth2", ATTR{ifalias}="LAN2"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:02:00.2", ENV{VYOS_IFNAME}="eth3", ATTR{ifalias}="LAN3"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:02:00.3", ENV{VYOS_IFNAME}="eth4", ATTR{ifalias}="LAN4"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:05:00.0", ENV{VYOS_IFNAME}="eth5", ATTR{ifalias}="LAN5"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:05:00.1", ENV{VYOS_IFNAME}="eth6", ATTR{ifalias}="LAN6"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:05:00.2", ENV{VYOS_IFNAME}="eth7", ATTR{ifalias}="LAN7"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:05:00.3", ENV{VYOS_IFNAME}="eth8", ATTR{ifalias}="LAN8"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:08:00.0", ENV{VYOS_IFNAME}="eth9", ATTR{ifalias}="DMZ"
-ACTION=="add", SUBSYSTEM=="net", KERNELS=="0000:08:00.1", ENV{VYOS_IFNAME}="eth10", ATTR{ifalias}="WAN"
-
-LABEL="end_ec_nic"
-
-'''
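
The deleted EdgeCore rules gate on the DMI board name so they only fire on the matching appliance; `ATTR{[dmi/id]board_name}` reads the board name from sysfs, and `KERNELS==` matches the NIC's PCI address. Both values can be checked by hand; a sketch (eth1 is only an example):

```bash
# Value compared by ATTR{[dmi/id]board_name} in the rules:
cat /sys/class/dmi/id/board_name

# PCI address matched by KERNELS== for a given NIC:
basename "$(readlink -f /sys/class/net/eth1/device)"
```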
diff --git a/data/build-flavors/generic.toml b/data/build-flavors/generic.toml
index 9bf7044d..f49a83c8 100644
--- a/data/build-flavors/generic.toml
+++ b/data/build-flavors/generic.toml
@@ -1,14 +1,3 @@
# Generic (aka "universal") ISO image
image_format = "iso"
-
-# Include these packages in the image regardless of the architecture
-packages = [
- # QEMU and Xen guest tools exist for multiple architectures
- "qemu-guest-agent",
- "vyos-xe-guest-utilities",
-]
-
-[architectures.amd64]
- # Hyper-V and VMware guest tools are x86-only
- packages = ["hyperv-daemons", "vyos-1x-vmware"]
diff --git a/data/build-flavors/xcpng.toml b/data/build-flavors/xcpng.toml
deleted file mode 100644
index 3a04177f..00000000
--- a/data/build-flavors/xcpng.toml
+++ /dev/null
@@ -1,6 +0,0 @@
-# Installation ISO for the XCP-ng virtualization platform
-
-image_format = "iso"
-
-# Include these packages in the image
-packages = ["xe-guest-utilities"]
diff --git a/data/build-types/development.toml b/data/build-types/development.toml
index f0207a6a..16ed483e 100644
--- a/data/build-types/development.toml
+++ b/data/build-types/development.toml
@@ -6,3 +6,73 @@ packages = [
"vim",
"vyos-1x-smoketest"
]
+
+[[includes_chroot]]
+ path = 'usr/share/vyos/EULA'
+ data = '''
+VyOS ROLLING RELEASE END USER LICENSE AGREEMENT
+
+PLEASE READ THIS AGREEMENT CAREFULLY BEFORE INSTALLING AND/OR USING VyOS ROLLING RELEASE.
+IF YOU DO NOT ACCEPT THE TERMS OF THIS AGREEMENT, THEN YOU MUST NOT USE VyOS ROLLING RELEASE.
+
+I. This End-User License Agreement (“Agreement”) is a legal document between you and VyOS Inc.
+(a company organized and existing under the laws of California,
+having its registered office at 12585 Kirkham Ct, Suite 1, Poway, California 92604)
+that governs your use of VyOS Rolling Release, available at vyos.io website.
+
+II. By downloading, installing and using VyOS Rolling Release you:
+- irrevocably agree to comply with all applicable laws, restrictions,
+ regulations, rules, the GNU GPL and other applicable licenses, and with this Agreement;
+- confirm you have all legal rights to enter into this Agreement
+ and your authority is not limited by any legal means;
+- undertake to promptly and unconditionally
+  (and in any case at the first request of VyOS Inc.)
+  compensate for any damage, if any is caused to VyOS Inc. by your actions;
+- ensure that any third party you grant access to the Rolling Release
+  bears the same obligations.
+  For the purpose of this Agreement such third party will also be referred to as “you”.
+
+III. VyOS Rolling Release (“Rolling Release”) are copyrighted works
+released under the terms of the GNU General Public License (GPL)
+and other licenses approved by the Open Source Initiative (www.opensource.org),
+(hereinafter, the “Public Licenses”).
+Verbatim copies of such works may be made and distributed, by anyone,
+in accordance with the terms of the GPL and the Public Licenses.
+The GPL and the Public Licenses also grant you certain rights
+to make and distribute derivative works based on the source code to Rolling Release.
+
+You can redistribute and/or modify the Rolling Release under the terms of the GPL and the Public Licenses.
+You may obtain a copy of the source code corresponding to the binaries for the Rolling Release
+from public Git repositories such as https://github.com/vyos
+
+The GPL and the Public Licenses do not grant you any right, license or interest to use “VyOS” trademarks and logos,
+that are trademarks or registered trademarks in the US, EU and other countries,
+in connection with these derivative works.
+VyOS trademarks may not be used in connection with any such derivative works
+unless that usage is explicitly and specifically permitted, in writing.
+Otherwise, You must modify the files identifiable as VyOS logos and VyOS trademarks
+so as to remove all use of images containing them.
+Note that mere deletion of these files may corrupt the Rolling Release.
+
+IV. Under no circumstances will VyOS Inc. be liable to you for any damages,
+however caused or arising in any way out of the use of,
+or inability to use, the Rolling Release.
+VyOS Inc. provides no warranty for Rolling Release.
+
+V. This Agreement comes into force upon your acceptance in the form of downloading,
+installing or using Rolling Release (whatever happens first) and remains valid until termination.
+This Agreement shall terminate immediately if you violate any applicable law,
+restriction, regulation, rule, GPL or other applicable license, or any provision of this Agreement.
+Upon termination of this Agreement you shall discontinue using the Rolling Release
+and delete it as well as all copies you made from all storage devices.
+
+VI. This Agreement may be amended by VyOS Inc. at any time and brought to your attention
+by publication on the vyos.io website; amendments enter into force immediately after such publication.
+
+VII. This Agreement, and any dispute or claim arising out of or in connection with it,
+shall be governed by, and construed in accordance with the laws of California.
+The courts of California shall have exclusive jurisdiction to settle any dispute or claim.
+
+For more information or any other query please contact VyOS Inc. at: legal@vyos.io
+
+'''
diff --git a/data/build-types/release.toml b/data/build-types/release.toml
index e69de29b..5b460be9 100644
--- a/data/build-types/release.toml
+++ b/data/build-types/release.toml
@@ -0,0 +1,441 @@
+[[includes_chroot]]
+ path = 'usr/share/vyos/EULA'
+ data = '''
+VyOS END USER LICENSE AGREEMENT
+
+PLEASE READ THIS END USER LICENSE AGREEMENT (EULA, THIS ‘AGREEMENT’) CAREFULLY
+BEFORE USING VYOS FROM US. BY USING VYOS, YOU ("YOU", "LICENSEE", "CUSTOMER")
+SIGNIFY YOUR ASSENT TO AND ACCEPTANCE OF THIS END USER LICENSE AGREEMENT AND
+ACKNOWLEDGE YOU HAVE READ AND UNDERSTAND THE TERMS. THIS AGREEMENT IS
+ENFORCEABLE AGAINST ANY PERSON OR ENTITY THAT USES THE SOFTWARE AND ANY PERSON
+OR ENTITY (E.G., SYSTEMS INTEGRATOR, CONSULTANT OR CONTRACTOR) THAT USES THE
+SOFTWARE ON ANOTHER PERSON'S OR ENTITY'S BEHALF. IF YOU DO NOT ACCEPT THE TERMS
+OF THIS AGREEMENT, THEN YOU MUST NOT USE THE SOFTWARE. THE EFFECTIVE DATE OF
+THIS AGREEMENT IS THE EARLIEST OF THE START DATE OF SERVICES STATED IN OUR
+INVOICE, PREVIOUS ACCEPTANCE OF THIS AGREEMENT (OR OUR BUSINESS PARTNER'S ORDER
+OR/AND INVOICE, PREVIOUS ACCEPTANCE OF THIS AGREEMENT) OR THE DATE THAT
+CUSTOMER HAS ACCESS AND IS ABLE TO USE OUR PRODUCTS OR SERVICES. THIS END USER
+LICENSE AGREEMENT DOES NOT COVER ANY SERVICES FROM US, OR THROUGH OUR BUSINESS
+PARTNER, OTHER THAN ACCESS TO THE SOFTWARE, SUCH AS TECHNICAL SUPPORT, UPGRADES
+OR SUPPORT SERVICES. PLEASE REVIEW YOUR SERVICES OR SUBSCRIPTION AGREEMENT(S)
+THAT YOU MAY HAVE WITH US OR OTHER AUTHORIZED VYOS SERVICES PROVIDER OR
+BUSINESS PARTNER REGARDING THE SOFTWARE AND SERVICES AND ASSOCIATED PAYMENTS.
+
+1. Definitions
+
+1.1 "We, Our, Us" means VyOS Contracting Entity defined in Section 13.
+
+1.2 "VyOS" or "Software" means VyOS software provided by Us (or authorized
+services provider or business partner) and consisting of VyOS software
+application (exclusively or along with any third-party software included
+therein or therewith) that includes or refers to this Agreement and any related
+documentation (including, without limitation, user and technical documentation,
+further explanatory written materials related to the Software, etc.), services
+(including, without limitation, SaaS, internet-based service, etc.), tool,
+application, component, object code, source code, appearance (including,
+without limitation, images, designs, fonts, etc.), structure as well as any
+modification and update thereof, regardless of the delivery mechanism.
+
+"Services" means software support services and any other services provided by
+Us, or through Our Business Partner, on a subscription basis.
+
+1.3 "Authorized Users" means employees or individual contractors to whom,
+pursuant to this Agreement, the Licensee has granted a right to access and use
+the Software with your credentials, provided that such access shall be for your
+sole benefit and in full compliance with this EULA.
+
+All Authorized Users are bound by the terms of this Agreement.
+
+1.4 "Cloud Provider" means authorized hosting partner's cloud marketplace
+platform, a company that delivers cloud computing based services, resources and
+solutions to businesses and/or offers solutions via the cloud marketplace.
+
+1.5 "Business Partner" shall mean Our authorized sales agent, partner, Cloud
+Provider reseller or distributor of the Software and Our Services authorized to
+sell Software and Services via our subscriptions. Where Customer purchases
+through or from a Business Partner, final prices and terms and conditions of
+sale will be as agreed
+between Customer and the Business Partner from which Customer makes such
+purchases; however, the terms set forth in this EULA are applicable to
+Customer's use and the performance of VyOS. Customer acknowledges that:
+(a) We may share information with the Business Partner related to Customer's
+ use and consumption of VyOS, and vice versa, for account management and
+ billing purposes;
+(b) the termination provisions below will also apply if Customer's Business
+ Partner fails to pay Us applicable fees; and
+(c) Business Partner is not authorized to make any changes to this EULA or
+ otherwise authorized to make any warranties, representations, promises or
+ commitments on Our behalf or in any way concerning the VyOS.
+
+"Business Partner's order" means the ordering document(s), issued during Your
+purchasing process by Our Business Partner in a way and manner as defined by
+Our Business Partner. Business Partner's order may describe specific Software
+and Services, Subscription(s), associated fees, payment terms, and shall be
+subject to the terms of this Agreement and EULA.
+
+1.6 "Customer", "You", "Licensee", "Your" - user of VyOS and its heirs, agents,
+successors, assigns and - for the purpose of Global subscription - its
+Affiliates.
+
+2. License Grant
+
+Subject to the following terms, We grant to You a perpetual, worldwide license
+to the Software (most of which includes multiple software components) pursuant
+to different open source and public licenses. The license agreement for each
+software component is located in the software component's source code and
+permits you to run, copy, modify, and redistribute the software component
+(subject to certain obligations in some cases), both in source code and binary
+code forms, with the exception of the images identified in Section 4 below. You
+shall either agree to the terms of each applicable public license or You must
+not install/use those components or exercise such licensed rights.
+
+This EULA pertains solely to the Software and does not limit your rights under,
+or grant you rights that supersede, the license terms of any particular
+component.
+
+2.1 Limited Modifications. For the avoidance of doubt, Licensee is permitted to
+use VyOS from Us in accordance with VyOS terms and conditions and on the
+specific quotation, purchase order and/or the subscription or customized
+agreements, if any. Any other modifications of VyOS terms and conditions won't
+be allowed, except as expressly authorized through a separate custom agreement,
+unless otherwise defined by this Agreement, specific quotation, purchase order
+and/or the subscription or customized agreements.
+
+2.2 No Unbundling. Nonetheless, the Software is designed and provided to
+Licensee solely as permitted herein. Licensee shall not unbundle or repackage
+the Software for distribution, transfer or other disposition, unless otherwise
+specified by this Agreement.
+
+3. Prohibited Use and Allowed Use
+
+3.1 Except as expressly authorized through a separate custom agreement,
+Licensee and the Authorized Users are prohibited from:
+(a) using the Software on behalf of third parties;
+(b) sublicensing, licensing, renting, leasing, lending or granting other rights
+ in the Software including rights on a membership or subscription basis;
+(c) providing use of the Software in a service bureau arrangement, outsourcing
+ or on a time sharing basis;
+(d) interfering with or disrupting the Software or systems used to provide
+    VyOS, or other connected equipment or networks;
+(e) circumventing or disclosing the user authentication or security of the
+    Software or any host, network, or account related thereto, or attempting
+    to gain unauthorized access;
+(f) storing or transmitting SPAM or malicious code;
+(g) duplicating the Software or publishing the Software for others to copy;
+(h) infringing the intellectual property rights of any entity or person; or
+(i) making any use of the Software that violates any applicable local, state,
+    national, international or foreign law or regulation.
+
+For more information about how to obtain a custom agreement, please contact us
+at: sales@vyos.io.
+
+3.2 The following uses of the Software shall be allowed:
+(a) any lab setup within the Licensee or on an Authorized User's personal
+ device, for the purpose of learning, testing, or debugging company network
+ configs, and
+(b) any use in Authorized User's personal home networks, including but not
+ limited to Internet access, corporate VPN access, learning and
+ experimentation.
+
+4. Intellectual Property Rights
+
+The Software and each of their components are owned by Us and other licensors
+and are protected under copyright law and other laws as applicable. Title to
+the Software and any component and systems, or to any copy or modification
+shall remain with Us and other licensors, subject to the applicable license.
+The "VyOS" mark, the individual Software marks, and the "VyOS" logo are
+trademarks or registered trademarks in the EU, US and other countries. Artwork
+files that feature the VyOS logo, including but not limited to boot splash
+images and user interface elements, are Our property, distributed on the "all
+rights reserved" basis. You cannot redistribute those files separately or as
+part of Software without an express permission from the copyright holder. By
+accepting this Agreement You commit not to register or request registration of
+any commercial name, domain name, email, trademark, symbol or distinctive
+sign with similar characteristics, color, typography, style or appearance, or
+that includes the word "VyOS" and/or the VyOS logo.
+
+This EULA does not permit you to distribute the Software using VyOS trademarks,
+regardless of whether the Software has been modified. You may make a commercial
+redistribution of the Software only if
+(a) permitted under a separate written agreement with Us authorizing such
+ commercial redistribution or
+(b) you remove and replace all Our occurrences and VyOS trademarks and logos.
+
+Such modifications may corrupt the Software.
+
+4.1 The Licensee grants Us a right to use its logos and trademarks for the
+purpose of displaying their Licensee status on the VyOS website, and for the
+purposes specified in VyOS Subscription Agreement. We will not claim that the
+Licensee endorses VyOS and will not publicize any details of Licensee's VyOS
+usage, network setup, or any other information not explicitly provided by the
+Licensee for public release.
+
+4.1.1 The Licensee can revoke Our right to use Licensee's trademarks and logos
+at any time, unless otherwise agreed in VyOS Subscription Agreement, or Our
+Quotation.
+
+5. Updates
+
+Along with all software update subscriptions, We provide security updates,
+hot-fixes and security advisory notifications before public disclosure
+(herein after collectively referred to as the "Updates"). You expressly
+acknowledge and agree that We have no obligation to make available and/or
+provide any Updates. All upgrades and Updates are provided by Us or through
+Our Business Partners to Licensee at Our sole discretion and are subject to
+the terms of this Agreement on a license exchange basis. Any obligations that
+We may have to support previous versions during the license term may end upon
+the availability of this update. Upgrades and Updates may be licensed to
+Licensee by Us with additional or different terms.
+
+6. Support
+
+This agreement does not automatically entitle the Licensee to any support for
+the Software provided by Us or through Our Business Partners unless otherwise
+specified in the subscription terms. For the avoidance of doubt, We have no
+liability and provide no support for any hardware or any cloud marketplace
+services provided by any Business Partner or Cloud Provider. Where available,
+maintenance and support may be purchased separately under a VyOS
+subscription that includes support services.
+
+Support for software built from source code by a party other than Us, with or
+without modifications made by the Licensee or a third party, is provided only
+through separate agreements.
+
+For more information about how to obtain a VyOS subscription that includes
+software and support services, please contact us at: sales@vyos.io.
+
+7. Term and Termination.
+
+This Agreement begins on the Effective Date and shall remain in effect until
+terminated. Termination may occur if:
+(a) Licensee fails to pay the fees associated with Our subscriptions
+ when due or otherwise materially breaches this Agreement, specific
+ quotation, purchase order and/or the subscription or customized agreements
+ and fails to remedy the breach within ten (10) days from the receipt of a
+ notification sent in writing or electronically,
+(b) Licensee's deactivation or subscription cancellation of the Software,
+(c) Licensee fails to pay the Business Partner or terminates the agreement with
+    a Business Partner, or the Business Partner fails to pay Us the applicable
+    fees for your Software and/or Services, or
+(d) We change, cease to provide or discontinue the Software at any time.
+
+Upon the occurrence of (a), (b), (c) or (d), above, We are entitled to
+terminate this Agreement. Upon termination of this Agreement for any reason,
+Licensee shall discontinue use of the Software. If you have copies of the
+Software obtained when You still had an active subscription, you can keep using
+them indefinitely as long as you comply with this Agreement and VyOS
+Subscription Agreement, in particular - with Section 4 above and provided this
+is not intended to interfere with any rights you may have from other public
+and open source licenses. Termination shall not, however, relieve either party
+of obligations incurred prior to the termination. The following Sections shall
+survive termination of this Agreement: Definitions, Intellectual Property
+Rights, Limited Warranty, Limitation of Remedies and Liability, General, Term
+and Termination, and others which by their nature are intended to survive.
+
+8. Limited Warranty
+
+Except as specifically stated in this Section 8, a separate agreement with Us,
+or a license for a particular component, to the maximum extent permitted under
+applicable law, the Software and the components are provided and licensed
+"as is" without warranty of any kind, express or implied, including the
+implied warranties of merchantability, non-infringement, integration, quiet
+enjoyment, satisfactory quality or fitness for a particular purpose. Neither
+We nor Our affiliates and Business Partners warrant that the Software will
+meet your requirements, will be uninterrupted, timely, secure; that the
+operation of the Software will be entirely error-free, appear or perform
+precisely as described in the accompanying documentation, or comply with
+regulatory requirements; that the results that may be obtained from the use of
+the Software will be effective, accurate or reliable; the quality of the
+Software will meet your expectations; or that any errors or defects in the
+Software will be corrected. This warranty extends only to the party that
+purchases subscription services for the Software from Us and/or Our affiliates
+or Our authorized Business Partner.
+
+We and Our affiliates specifically disclaim any liability with regard to any
+actions resulting from your use of the Software. Any material downloaded or
+otherwise obtained through use of the Software is accessed at your own
+discretion and risk, and you will be solely responsible for any damage to your
+computer system or loss of data that results from use of the Software. We and
+Our affiliates assume no liability for any malicious software that may be
+downloaded to your computer as a result of your use of the Software.
+
+We will not be liable for any loss that you may incur as a result of a third
+party using your password or account or account information in connection with
+the Software, either with or without your knowledge.
+
+Licensee assumes the entire cost of all necessary servicing, repair, or
+correction of problems caused by viruses or other harmful components; We
+disclaim and make no warranties or representations as to the accuracy,
+quality, reliability, suitability, completeness, truthfulness, usefulness, or
+effectiveness of the outputs, logs, reports, data, results or other information
+obtained, generated or otherwise received by Licensee from accessing and/or
+using the Software or otherwise resulting from this Agreement; and Licensee
+shall use the Software at its own risk and in no event shall We be liable to
+Licensee for any loss or damage of any kind (except personal injury or death
+resulting from Our negligence, fraud or fraudulent misrepresentation and any
+other liability that cannot be excluded by law) arising from Licensee's use of
+or inability to use the Software or from faults or defects in the Software
+whether caused by negligence or otherwise.
+
+Licensee agrees to defend, indemnify and hold Us harmless from any losses,
+liabilities, damages, actions, claims or expenses (including legal fees and
+court costs) arising or resulting from Licensee's breach of any term of this
+agreement or caused by acts or omissions performed by licensee.
+
+Some jurisdictions do not allow the exclusion of certain warranties, the
+limitation or exclusion of implied warranties, or limitations on how long an
+implied warranty may last, so the above limitations may not apply to you.
+
+9. Limitation of Remedies and Liability
+
+To the maximum extent permitted under applicable law, under no circumstances
+will We, Our affiliates, any of Our authorized Business Partner, or the
+licensor of any component provided to you under this EULA be liable to you for
+any direct, indirect, incidental, special, exemplary, punitive, or
+consequential damages (including, but not limited to, procurement of substitute
+goods or services, computer failure or malfunction, loss of data or profits,
+business interruption, etc.) however caused and on any theory of liability,
+whether in contract, strict liability, or tort (including negligence or
+otherwise) arising in any way out of the use of the software or inability to
+use the software, even if We, Our affiliates, an authorized Business Partner,
+and/or licensor are aware of or have been advised of the possibility of such
+damage. To the extent permitted by law and as the maximum aggregate liability,
+Our or Our affiliates' liability, an authorized Business Partner's liability
+or the liability of the licensor of a component provided to you under or in
+connection with this EULA will be limited to the lesser of either five hundred
+United States dollars ($500) or the fees paid by the Licensee or by Business
+Partner and received by Us for the Software and attributable to the 6 month
+period immediately preceding the first event giving rise to such liability. The
+limitations and exclusions in this section apply to the maximum extent
+permitted by applicable law in your jurisdiction. Some jurisdictions prohibit
+the exclusion or limitation of liability for incidental, consequential or
+punitive damages. Accordingly, the limitations and exclusions set forth above
+may not apply to you.
+
+10. Compliance and Export Control
+
+You understand that countries may restrict the import, use, export, re-export
+or transfer of encryption products and other controlled materials (which may
+include the Software or related technical information licensed hereunder). You
+agree to comply with export regulations by the Bureau of Industry and Security
+of the U.S. Department of Commerce and all applicable laws, restrictions and
+regulations in Your use of the Software, including but not limited to export
+restrictions of various countries that the Software may be subject to, and
+personal data protection regulations. You shall comply with these laws and
+undertake to hold Us harmless from any breach of any law or regulation, from
+any claim or litigation arising as a result of such breach, and to reimburse
+Us for any loss resulting from such breach. You will not use the Software for
+a prohibited use.
+
+10.1 Sanctions compliance. You undertake to ensure that neither You nor any
+person allowed to use the Software and the Services by You is a subject or
+target of sanctions, embargoes and restrictive measures ("Sanctions"),
+administered by the Office of Foreign Assets Control of the U.S. Department of
+the Treasury or the U.S. Department of State, the United Nations Security
+Council, the European Union, Her Majesty's Treasury of the United Kingdom,
+Department of Foreign Affairs and Trade of the Australian Federal Government,
+or other relevant sanctions authority ("Sanctioning Authorities").
+
+You undertake to comply with all the abovementioned Sanctions in all possible
+ways, to hold Us harmless, and to immediately terminate relations with any
+person that becomes (or is) a subject or target of any of the abovementioned
+Sanctions, or assists anybody to evade or violate the abovementioned Sanctions.
+
+11. Third-Party Beneficiary
+
+Licensee acknowledges and agrees that Our licensors (and/or Us if Licensee
+obtained the Software from any party other than Us) are third party
+beneficiaries of this Agreement, with the right to enforce the obligations set
+forth herein with respect to the respective technology of such licensors and/or
+Ours.
+
+12. Third-party components, contributions and software programs
+
+We do not assert any Intellectual Property Rights over:
+(a) components created by third parties that may be taken from upstream
+ sources in binary form compiled by Us from the source code;
+(b) source code and documentation of the Software, which is developed
+    collaboratively and is open to contributions by parties not affiliated with
+    Us (to such purpose, contributors give Us non-exclusive rights according
+    to the licenses of the Software and documentation);
+(c) third-party software or programs included in or provided with the
+    Software.
+
+13. General
+
+If any provision of this EULA is held to be unenforceable, the enforceability
+of the remaining provisions shall not be affected.
+
+Updates and upgrades may be licensed to Licensee by Us with additional or
+different terms.
+
+You are not allowed to transfer or assign this EULA or any rights hereunder,
+unless with Our previous written consent. Please inform Us of Your intention
+to transfer or assign in advance so We can respond accordingly. Conversely, We
+may transfer, assign, sublicense or delegate the EULA or any portions thereof,
+without restriction. We also may subcontract any performance associated with
+the Software to third parties, provided that such subcontract does not relieve
+Us of any of Our obligations under this EULA.
+
+Licensee may not sublicense, transfer or assign, whether voluntarily or by
+operation of law, any right or license in or to the Software. Any attempted
+sublicense, transfer or assignment shall be void.
+
+We may, from time to time, modify this Agreement.
+
+Licensee shall comply with all applicable laws and regulations pertaining to
+this Agreement.
+
+This Agreement, along with a VyOS Subscription Agreement, Privacy Policy and
+Terms and Conditions, any quotation, purchase order and services level
+agreement, if applicable, and any other documents deemed to be incorporated by
+reference in it, constitutes the entire agreement between the parties with
+respect to its subject matter and it supersedes all prior or contemporaneous
+agreements concerning such matter. If you order VyOS from a Business Partner,
+then any agreement that you enter into with a Business Partner is solely
+between you and a Business Partner and will not be binding on Us.
+
+In the table below, "Customer Location" refers to where Customer is located
+(as determined by Customer's business address on the invoice) and determines
+which table row applies to Customer:
+
+Customer Location*          VyOS Contracting Entity       Governing Law  Venue/Courts
+==========================  ============================  =============  ============
+North & South America       VyOS Inc                      California     Poway
+
+EEA & UK                    VyOS EMEA Operations Limited  Ireland        Cork
+(except Spain & Portugal)
+
+Spain, Andorra & Portugal   VyOS Networks Iberia SLU      Spain          Madrid
+
+Asia & Oceania              VyOS APAC Pty Ltd             Australia      Sydney
+
+Non-EEA parts of Europe,    VyOS Networks Cyprus Limited  Cyprus         Limassol
+Middle East, & Africa
+(except Andorra)
+
+*all sales via Cloud Providers are generally done by VyOS Inc., unless
+otherwise decided by Us, regardless of Customer location.
+
+References to "We", "Our", "Us" are references to the applicable VyOS
+Contracting Entity specified in the Contracting Entity Table, unless otherwise
+decided for operational purposes in the Quotation and in the invoice.
+The Services are provided by that VyOS Contracting Entity.
+
+This Agreement, and any disputes arising out of or related hereto, will be
+governed exclusively by the applicable governing law above, without giving
+effect to any of its conflicts of laws, rules or principles. The courts located
+in the applicable venue above will have exclusive jurisdiction to adjudicate
+any dispute arising out of or relating to this Agreement or its formation,
+interpretation, or enforcement. Each party hereby consents and submits to the
+exclusive jurisdiction of such courts. Before resorting to any external dispute
+resolution mechanisms, the parties agree to use their best efforts in good
+faith to settle any dispute in relation to the Agreement.
+
+We may, in our sole discretion, amend this EULA at any time by posting a
+revised version thereof on Our website and updating the "last updated"
+date on the applicable page, or by providing reasonable notice. Your continued
+use of the Software following changes to the Agreement after the effective
+date of a revised version thereof constitutes Your express acceptance of and
+the agreement to be bound by the Agreement and its future versions or updates.
+
+'''
diff --git a/data/build-types/stream.toml b/data/build-types/stream.toml
new file mode 100644
index 00000000..f0207a6a
--- /dev/null
+++ b/data/build-types/stream.toml
@@ -0,0 +1,8 @@
+packages = [
+ "gdb",
+ "strace",
+ "apt-rdepends",
+ "tshark",
+ "vim",
+ "vyos-1x-smoketest"
+]
diff --git a/data/certificates/.gitignore b/data/certificates/.gitignore
new file mode 100644
index 00000000..c996e507
--- /dev/null
+++ b/data/certificates/.gitignore
@@ -0,0 +1 @@
+*.key
diff --git a/data/defaults.toml b/data/defaults.toml
index efe6399f..662e864f 100644
--- a/data/defaults.toml
+++ b/data/defaults.toml
@@ -9,12 +9,12 @@ debian_security_mirror = "http://deb.debian.org/debian-security"
debian_archive_areas = "main contrib non-free non-free-firmware"
-vyos_mirror = "https://rolling-packages.vyos.net/current"
+vyos_mirror = "https://packages.vyos.net/repositories/current"
vyos_branch = "current"
release_train = "current"
-kernel_version = "6.6.51"
+kernel_version = "6.6.89"
kernel_flavor = "vyos"
bootloaders = "syslinux,grub-efi"
diff --git a/data/live-build-config/archives/buster.list.chroot b/data/live-build-config/archives/buster.list.chroot
deleted file mode 100644
index 06eb2dab..00000000
--- a/data/live-build-config/archives/buster.list.chroot
+++ /dev/null
@@ -1,3 +0,0 @@
-deb http://deb.debian.org/debian/ buster main non-free
-deb http://deb.debian.org/debian/ buster-updates main non-free
-deb http://security.debian.org/debian-security buster/updates main non-free
diff --git a/data/live-build-config/archives/buster.pref.chroot b/data/live-build-config/archives/buster.pref.chroot
deleted file mode 100644
index 8caa1e6d..00000000
--- a/data/live-build-config/archives/buster.pref.chroot
+++ /dev/null
@@ -1,11 +0,0 @@
-Package: bash
-Pin: release n=buster
-Pin-Priority: 600
-
-Package: bash-completion
-Pin: release n=buster
-Pin-Priority: 600
-
-Package: *
-Pin: release n=buster
-Pin-Priority: -10
diff --git a/data/live-build-config/archives/zabbix-official-repo.key.chroot b/data/live-build-config/archives/zabbix-official-repo.key.chroot
new file mode 100644
index 00000000..660c453a
--- /dev/null
+++ b/data/live-build-config/archives/zabbix-official-repo.key.chroot
Binary files differ
diff --git a/data/live-build-config/hooks/live/01-live-serial.binary b/data/live-build-config/hooks/live/01-live-serial.binary
index e138b20d..05785da7 100755
--- a/data/live-build-config/hooks/live/01-live-serial.binary
+++ b/data/live-build-config/hooks/live/01-live-serial.binary
@@ -10,22 +10,22 @@ SERIAL_CONSOLE="console=tty0 console=ttyS0,115200"
GRUB_MENUENTRY=$(sed -e '/menuentry.*hotkey.*/,/^}/!d' -e 's/--hotkey=l//g' $GRUB_PATH)
# Update KVM menuentry name
-sed -i 's/"Live system \((.*-vyos)\)"/"Live system \1 - KVM console"/' $GRUB_PATH
+sed -i 's/"Live system \((.*vyos)\)"/"Live system \1 - KVM console"/' $GRUB_PATH
# Insert serial menuentry
echo "$GRUB_MENUENTRY" | sed \
- -e 's/"Live system \((.*-vyos)\)"/"Live system \1 - Serial console"/' \
+ -e 's/"Live system \((.*vyos)\)"/"Live system \1 - Serial console"/' \
-e "s/$KVM_CONSOLE/$SERIAL_CONSOLE/g" >> $GRUB_PATH
# Live.cfg Update
ISOLINUX_MENUENTRY=$(sed -e '/label live-\(.*\)-vyos$/,/^\tappend.*/!d' $ISOLINUX_PATH)
# Update KVM menuentry name
-sed -i 's/Live system \((.*-vyos)\)/Live system \1 - KVM console/' $ISOLINUX_PATH
+sed -i 's/Live system \((.*vyos)\)/Live system \1 - KVM console/' $ISOLINUX_PATH
# Insert serial menuentry
echo "\n$ISOLINUX_MENUENTRY" | sed \
-e 's/live-\(.*\)-vyos/live-\1-vyos-serial/' \
-e '/^\tmenu default/d' \
- -e 's/Live system \((.*-vyos)\)/Live system \1 - Serial console/' \
+ -e 's/Live system \((.*vyos)\)/Live system \1 - Serial console/' \
-e "s/$KVM_CONSOLE/$SERIAL_CONSOLE/g" >> $ISOLINUX_PATH
diff --git a/data/live-build-config/hooks/live/100-remove-dropbear-keys.chroot b/data/live-build-config/hooks/live/100-remove-dropbear-keys.chroot
new file mode 100644
index 00000000..20d8a670
--- /dev/null
+++ b/data/live-build-config/hooks/live/100-remove-dropbear-keys.chroot
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Delete Dropbear SSH keys that might be generated
+# by postinst scripts
+# to prevent non-unique keys from appearing in images
+
+rm -f /etc/dropbear/dropbear_*_host_key
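
With the build-time keys removed, each installed system generates its own host keys (normally done by dropbear's init when a key file is missing). For reference, a manual regeneration might look like the following; the ed25519 key type is assumed to be supported by the shipped dropbear version:

```bash
# Recreate one Dropbear host key by hand; the init scripts normally
# do this automatically when the file is absent.
dropbearkey -t ed25519 -f /etc/dropbear/dropbear_ed25519_host_key
```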
diff --git a/data/live-build-config/hooks/live/18-enable-disable_services.chroot b/data/live-build-config/hooks/live/18-enable-disable_services.chroot
index 26c7f094..1a464404 100755
--- a/data/live-build-config/hooks/live/18-enable-disable_services.chroot
+++ b/data/live-build-config/hooks/live/18-enable-disable_services.chroot
@@ -1,6 +1,8 @@
#!/bin/sh
echo I: Disabling services
+systemctl disable syslog.service
+systemctl disable rsyslog.service
systemctl disable arpwatch.service
systemctl disable smartd.service
systemctl disable kea-ctrl-agent.service
@@ -40,7 +42,6 @@ systemctl disable snmpd.service
systemctl disable conserver-server.service
systemctl disable dropbear.service
systemctl disable fancontrol.service
-systemctl disable fastnetmon.service
systemctl disable ddclient.service
systemctl disable ocserv.service
systemctl disable tuned.service
@@ -68,6 +69,11 @@ systemctl disable dpkg-db-backup.timer
systemctl disable dpkg-db-backup.service
systemctl disable zabbix-agent2.service
systemctl disable suricata.service
+systemctl disable vyconfd.service
+systemctl disable vpp.service
+systemctl disable vyos-commitd.service
+systemctl disable netplug.service
+
echo I: Enabling services
systemctl enable vyos-hostsd.service
diff --git a/data/live-build-config/hooks/live/19-kernel_symlinks.chroot b/data/live-build-config/hooks/live/19-kernel_symlinks.chroot
index e63ca263..a7e95e0e 100755
--- a/data/live-build-config/hooks/live/19-kernel_symlinks.chroot
+++ b/data/live-build-config/hooks/live/19-kernel_symlinks.chroot
@@ -1,6 +1,9 @@
#!/bin/sh
-echo I: Creating kernel symlinks.
+echo I: Creating Linux Kernel symbolic links
cd /boot
ln -s initrd.img-* initrd.img
ln -s vmlinuz-* vmlinuz
+
+echo I: Removing Linux Kernel symbolic links to source folder
+rm -rf /lib/modules/*/build
diff --git a/data/live-build-config/hooks/live/40-init-cracklib-db.chroot b/data/live-build-config/hooks/live/40-init-cracklib-db.chroot
new file mode 100755
index 00000000..4d94b08e
--- /dev/null
+++ b/data/live-build-config/hooks/live/40-init-cracklib-db.chroot
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+CRACKLIB_DIR=/var/cache/cracklib
+CRACKLIB_DB=cracklib_dict
+
+if [ ! -f "${CRACKLIB_DIR}/${CRACKLIB_DB}.pwd" ]; then
+ echo "I: Creating the cracklib database ${CRACKLIB_DIR}/${CRACKLIB_DB}"
+ mkdir -p $CRACKLIB_DIR
+
+ /usr/sbin/create-cracklib-dict -o $CRACKLIB_DIR/$CRACKLIB_DB \
+ /usr/share/dict/cracklib-small
+fi
+
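
The dictionary built here is what cracklib-based password checks consult at runtime. A quick smoke test of the freshly created database, assuming the cracklib-check utility from cracklib-runtime is present:

```bash
# cracklib-check reads candidate passwords on stdin and reports
# whether they pass the dictionary checks.
echo 'password123' | cracklib-check
# Expected output is something like: "password123: it is based on a dictionary word"
```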
diff --git a/data/live-build-config/hooks/live/82-import-vyos-gpg-signing-key.chroot b/data/live-build-config/hooks/live/82-import-vyos-gpg-signing-key.chroot
deleted file mode 100755
index 478b88fb..00000000
--- a/data/live-build-config/hooks/live/82-import-vyos-gpg-signing-key.chroot
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-
-if ! command -v gpg &> /dev/null; then
- echo "gpg binary could not be found"
- exit 1
-fi
-
-GPG_KEY="/usr/share/vyos/keys/vyos-release.pub.asc"
-
-echo I: Import GPG key
-gpg --import ${GPG_KEY}
-exit $?
diff --git a/data/live-build-config/hooks/live/92-strip-symbols.chroot b/data/live-build-config/hooks/live/92-strip-symbols.chroot
index 704f9cb3..f44cb01d 100755
--- a/data/live-build-config/hooks/live/92-strip-symbols.chroot
+++ b/data/live-build-config/hooks/live/92-strip-symbols.chroot
@@ -15,7 +15,6 @@ STRIPCMD_UNNEEDED="strip --strip-unneeded --remove-section=.comment --remove-sec
STRIPDIR_REGULAR="
"
STRIPDIR_DEBUG="
-/usr/lib/modules
"
STRIPDIR_UNNEEDED="
/etc/hsflowd/modules
diff --git a/data/live-build-config/hooks/live/93-sb-sign-kernel.chroot b/data/live-build-config/hooks/live/93-sb-sign-kernel.chroot
new file mode 100755
index 00000000..8494a5c8
--- /dev/null
+++ b/data/live-build-config/hooks/live/93-sb-sign-kernel.chroot
@@ -0,0 +1,31 @@
+#!/bin/sh
+SIGN_FILE=$(find /usr/lib -name sign-file)
+KERNEL_KEY="/var/lib/shim-signed/mok/vyos-dev-2025-linux.key"
+KERNEL_CERT="/var/lib/shim-signed/mok/vyos-dev-2025-linux.pem"
+VMLINUZ=$(readlink /boot/vmlinuz)
+
+# All Linux Kernel modules need to be cryptographically signed
+find /lib/modules -type f -name \*.ko | while read MODULE; do
+ modinfo ${MODULE} | grep -q "signer:"
+ if [ $? != 0 ]; then
+ echo "E: Module ${MODULE} is not signed!"
+ read -n 1 -s -r -p "Press any key to continue"
+ fi
+done
+
+if [ ! -f ${KERNEL_KEY} ] && [ ! -f ${KERNEL_CERT} ]; then
+ echo "I: Signing key for Linux Kernel not found - Secure Boot not possible"
+else
+ echo "I: Signing Linux Kernel for Secure Boot"
+ sbsign --key ${KERNEL_KEY} --cert ${KERNEL_CERT} /boot/${VMLINUZ} --output /boot/${VMLINUZ}
+ sbverify --list /boot/${VMLINUZ}
+ rm -f ${KERNEL_KEY}
+fi
+
+for cert in $(ls /var/lib/shim-signed/mok/); do
+ if grep -rq "BEGIN PRIVATE KEY" /var/lib/shim-signed/mok/${cert}; then
+ echo "Found private key - bailing out"
+ exit 1
+ fi
+done
+
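
The new hook no longer signs modules itself; it only verifies that every module already carries a signature (signing is expected to happen when the kernel packages are built) and then signs the kernel image. The per-module check can be reproduced outside the hook, for example:

```bash
# List modules that lack a signature (mirrors the hook's check).
find /lib/modules -type f -name '*.ko' | while read -r module; do
    if ! modinfo "${module}" | grep -q 'signer:'; then
        echo "unsigned: ${module}"
    fi
done
```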
diff --git a/data/live-build-config/hooks/live/93-sign-kernel.chroot b/data/live-build-config/hooks/live/93-sign-kernel.chroot
deleted file mode 100755
index 031db10d..00000000
--- a/data/live-build-config/hooks/live/93-sign-kernel.chroot
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh
-SIGN_FILE=$(find /usr/lib -name sign-file)
-MOK_KEY="/var/lib/shim-signed/mok/kernel.key"
-MOK_CERT="/var/lib/shim-signed/mok/kernel.pem"
-kernel_elf=$(readlink /boot/vmlinuz)
-
-if [ ! -f ${MOK_KEY} ]; then
- echo "I: Signing key for Linux Kernel not found - Secure Boot not possible"
-else
- echo "I: Signing Linux Kernel for Secure Boot"
-
- sbsign --key $MOK_KEY --cert $MOK_CERT /boot/${kernel_elf} --output /boot/${kernel_elf}
- sbverify --list /boot/${kernel_elf}
-
- find /lib/modules -type f -name \*.ko -o -name \*.ko.xz | while read module; do
- $SIGN_FILE sha512 $MOK_KEY $MOK_CERT $module
- done
-fi
diff --git a/data/live-build-config/includes.binary/isolinux/splash.png b/data/live-build-config/includes.binary/isolinux/splash.png
index 4137d52b..f6b43d56 100644
--- a/data/live-build-config/includes.binary/isolinux/splash.png
+++ b/data/live-build-config/includes.binary/isolinux/splash.png
Binary files differ
diff --git a/data/live-build-config/includes.chroot/etc/systemd/system.conf b/data/live-build-config/includes.chroot/etc/systemd/system.conf
index 91af4090..0c30472a 100644
--- a/data/live-build-config/includes.chroot/etc/systemd/system.conf
+++ b/data/live-build-config/includes.chroot/etc/systemd/system.conf
@@ -53,3 +53,4 @@ ShowStatus=yes
#DefaultLimitNICE=
#DefaultLimitRTPRIO=
#DefaultLimitRTTIME=
+StatusUnitFormat=description
diff --git a/data/live-build-config/includes.chroot/opt/vyatta/etc/grub/default-union-grub-entry b/data/live-build-config/includes.chroot/opt/vyatta/etc/grub/default-union-grub-entry
new file mode 100644
index 00000000..49f4afc4
--- /dev/null
+++ b/data/live-build-config/includes.chroot/opt/vyatta/etc/grub/default-union-grub-entry
@@ -0,0 +1,20 @@
+menuentry "VyOS (KVM console)" {
+ linux /boot//vmlinuz boot=live quiet rootdelay=5 noautologin net.ifnames=0 biosdevname=0 udev.exec_delay=3 vyos-union=/boot/ console=ttyS0,115200 console=tty0
+ initrd /boot//initrd.img
+}
+
+menuentry "VyOS (Serial console)" {
+ linux /boot//vmlinuz boot=live quiet rootdelay=5 noautologin net.ifnames=0 biosdevname=0 udev.exec_delay=3 vyos-union=/boot/ console=tty0 console=ttyS0,115200
+ initrd /boot//initrd.img
+}
+
+menuentry "Lost password change (KVM console)" {
+ linux /boot//vmlinuz boot=live quiet rootdelay=5 noautologin net.ifnames=0 biosdevname=0 udev.exec_delay=3 vyos-union=/boot/ console=ttyS0,115200 console=tty0 init=/opt/vyatta/sbin/standalone_root_pw_reset
+ initrd /boot//initrd.img
+}
+
+menuentry "Lost password change (Serial console)" {
+ linux /boot//vmlinuz boot=live quiet rootdelay=5 noautologin net.ifnames=0 biosdevname=0 udev.exec_delay=3 vyos-union=/boot/ console=tty0 console=ttyS0,115200 init=/opt/vyatta/sbin/standalone_root_pw_reset
+ initrd /boot//initrd.img
+}
+
diff --git a/data/live-build-config/includes.chroot/usr/share/vyos/keys/vyos-release.pub.asc b/data/live-build-config/includes.chroot/usr/share/vyos/keys/vyos-release.pub.asc
deleted file mode 100644
index bf9a7aca..00000000
--- a/data/live-build-config/includes.chroot/usr/share/vyos/keys/vyos-release.pub.asc
+++ /dev/null
@@ -1,52 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v2.0.22 (GNU/Linux)
-
-mQINBFXKsiIBEACyid9PR/v56pSRG8VgQyRwvzoI7rLErZ8BCQA2WFxA6+zNy+6G
-+0E/6XAOzE+VHli+wtJpiVJwAh+wWuqzOmv9css2fdJxpMW87pJAS2i3EVVVf6ab
-wU848JYLGzc9y7gZrnT1m2fNh4MXkZBNDp780WpOZx8roZq5X+j+Y5hk5KcLiBn/
-lh9Zoh8yzrWDSXQsz0BGoAbVnLUEWyo0tcRcHuC0eLx6oNG/IHvd/+kxWB1uULHU
-SlB/6vcx56lLqgzywkmhP01050ZDyTqrFRIfrvw6gLQaWlgR3lB93txvF/sz87Il
-VblV7e6HEyVUQxedDS8ikOyzdb5r9a6Zt/j8ZPSntFNM6OcKAI7U1nDD3FVOhlVn
-7lhUiNc+/qjC+pR9CrZjr/BTWE7Zpi6/kzeH4eAkfjyALj18oC5udJDjXE5daTL3
-k9difHf74VkZm29Cy9M3zPckOZpsGiBl8YQsf+RXSBMDVYRKZ1BNNLDofm4ZGijK
-mriXcaY+VIeVB26J8m8y0zN4/ZdioJXRcy72c1KusRt8e/TsqtC9UFK05YpzRm5R
-/nwxDFYb7EdY/vHUFOmfwXLaRvyZtRJ9LwvRUAqgRbbRZg3ET/tn6JZk8hqx3e1M
-IxuskOB19t5vWyAo/TLGIFw44SErrq9jnpqgclTSRgFjcjHEm061r4vjoQARAQAB
-tDZWeU9TIE1haW50YWluZXJzIChWeU9TIFJlbGVhc2UpIDxtYWludGFpbmVyc0B2
-eW9zLm5ldD6JAjgEEwECACIFAlXKsiICGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
-AheAAAoJEP0iAoWg/m1+xbgP+QEDYZi5dA4IPY+vU1L95Bavju2m2o35TSUDPg5B
-jfAGuhbsNUceU+l/yUlxjpKEmvshyW3GHR5QzUaKGup/ZDBo1CBxZNhpSlFida2E
-KAYTx4vHk3MRXcntiAj/hIJwRtzCUp5UQIqHoU8dmHoHOkKEP+zhJuR6E2s+WwDr
-nTwE6eRa0g/AHY+chj2Je6flpPm2CKoTfUE7a2yBBU3wPq3rGtsQgVxPAxHRZz7A
-w4AjH3NM1Uo3etuiDnGkJAuoKKb1J4X3w2QlbwlR4cODLKhJXHIufwaGtRwEin9S
-1l2bL8V3gy2Hv3D2t9TQZuR5NUHsibJRXLSa8WnSCcc6Bij5aqfdpYB+YvKH/rIm
-GvYPmLZDfKGkx0JE4/qtfFjiPJ5VE7BxNyliEw/rnQsxWAGPqLlL61SD8w5jGkw3
-CinwO3sccTVcPz9b6A1RsbBVhTJJX5lcPn1lkOEVwQ7l8bRhOKCMe0P53qEDcLCd
-KcXNnAFbVes9u+kfUQ4oxS0G2JS9ISVNmune+uv+JR7KqSdOuRYlyXA9uTjgWz4y
-Cs7RS+CpkJFqrqOtS1rmuDW9Ea4PA8ygGlisM5d/AlVkniHz/2JYtgetiLCj9mfE
-MzQpgnldNSPumKqJ3wwmCNisE+lXQ5UXCaoaeqF/qX1ykybQn41LQ+0xT5Uvy7sL
-9IwGuQINBFXKsiIBEACg2mP3QYkXdgWTK5JyTGyttE6bDC9uqsK8dc1J66Tjd5Ly
-Be0amO+88GHXa0o5Smwk2QNoxsRR41G/D/eAeGsuOEYnePROEr3tcLnDjo4KLgQ+
-H69zRPn77sdP3A34Jgp+QIzByJWM7Cnim31quQP3qal2QdpGJcT/jDJWdticN76a
-Biaz+HN13LyvZM+DWhUDttbjAJc+TEwF9YzIrU+3AzkTRDWkRh4kNIQxjlpNzvho
-9V75riVqg2vtgPwttPEhOLb0oMzy4ADdfezrfVvvMb4M4kY9npu4MlSkNTM97F/I
-QKy90JuSUIjE05AO+PDXJF4Fd5dcpmukLV/2nV0WM2LAERpJUuAgkZN6pNUFVISR
-+nSfgR7wvqeDY9NigHrJqJbSEgaBUs6RTk5hait2wnNKLJajlu3aQ2/QfRT/kG3h
-ClKUz3Ju7NCURmFE6mfsdsVrlIsEjHr/dPbXRswXgC9FLlXpWgAEDYi9Wdxxz8o9
-JDWrVYdKRGG+OpLFh8AP6QL3YnZF+p1oxGUQ5ugXauAJ9YS55pbzaUFP8oOO2P1Q
-BeYnKRs1GcMI8KWtE/fze9C9gZ7Dqju7ZFEyllM4v3lzjhT8muMSAhw41J22mSx6
-VRkQVRIAvPDFES45IbB6EEGhDDg4pD2az8Q7i7Uc6/olEmpVONSOZEEPsQe/2wAR
-AQABiQIfBBgBAgAJBQJVyrIiAhsMAAoJEP0iAoWg/m1+niUQAKTxwJ9PTAfB+XDk
-3qH3n+T49O2wP3fhBI0EGhJp9Xbx29G7qfEeqcQm69/qSq2/0HQOc+w/g8yy71jA
-6rPuozCraoN7Im09rQ2NqIhPK/1w5ZvgNVC0NtcMigX9MiSARePKygAHOPHtrhyO
-rJQyu8E3cV3VRT4qhqIqXs8Ydc9vL3ZrJbhcHQuSLdZxM1k+DahCJgwWabDCUizm
-sVP3epAP19FP8sNtHi0P1LC0kq6/0qJot+4iBiRwXMervCD5ExdOm2ugvSgghdYN
-BikFHvmsCxbZAQjykQ6TMn+vkmcEz4fGAn4L7Nx4paKEtXaAFO8TJmFjOlGUthEm
-CtHDKjCTh9WV4pwG2WnXuACjnJcs6LcK377EjWU25H4y1ff+NDIUg/DWfSS85iIc
-UgkOlQO6HJy0O96L5uxn7VJpXNYFa20lpfTVZv7uu3BC3RW/FyOYsGtSiUKYq6cb
-CMxGTfFxGeynwIlPRlH68BqH6ctR/mVdo+5UIWsChSnNd1GreIEI6p2nBk3mc7jZ
-7pTEHpjarwOjs/S/lK+vLW53CSFimmW4lw3MwqiyAkxl0tHAT7QMHH9Rgw2HF/g6
-XD76fpFdMT856dsuf+j2uuJFlFe5B1fERBzeU18MxML0VpDmGFEaxxypfACeI/iu
-8vzPzaWHhkOkU8/J/Ci7+vNtUOZb
-=Ld8S
------END PGP PUBLIC KEY BLOCK-----
diff --git a/data/live-build-config/includes.chroot/var/lib/shim-signed/mok/README.md b/data/live-build-config/includes.chroot/var/lib/shim-signed/mok/README.md
deleted file mode 100644
index 5a6edbba..00000000
--- a/data/live-build-config/includes.chroot/var/lib/shim-signed/mok/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Secure Boot
-
-## CA
-
-Create Certificate Authority used for Kernel signing. CA is loaded into the
-Machine Owner Key store on the target system.
-
-```bash
-openssl req -new -x509 -newkey rsa:2048 -keyout MOK.key -outform DER -out MOK.der -days 36500 -subj "/CN=VyOS Secure Boot CA/" -nodes
-openssl x509 -inform der -in MOK.der -out MOK.pem
-```
-
-## Kernel Module Signing Key
-
-We do not make use of ephemeral keys for Kernel module signing. Instead a key
-is generated and signed by the VyOS Secure Boot CA which signs all the Kernel
-modules during ISO assembly if present.
-
-```bash
-openssl req -newkey rsa:2048 -keyout kernel.key -out kernel.csr -subj "/CN=VyOS Secure Boot Signer 2024 - linux/" -nodes
-openssl x509 -req -in kernel.csr -CA MOK.pem -CAkey MOK.key -CAcreateserial -out kernel.pem -days 730 -sha256
-```
diff --git a/data/live-build-config/package-lists/vyos-base.list.chroot b/data/live-build-config/package-lists/vyos-base.list.chroot
index 4ccc7f76..b20c2962 100644
--- a/data/live-build-config/package-lists/vyos-base.list.chroot
+++ b/data/live-build-config/package-lists/vyos-base.list.chroot
@@ -1,6 +1,4 @@
debconf
-gpgv
-gnupg
vyos-1x
vyos-user-utils
zstd
diff --git a/data/live-build-config/rootfs/excludes b/data/live-build-config/rootfs/excludes
index a5fe41e5..558e637b 100644
--- a/data/live-build-config/rootfs/excludes
+++ b/data/live-build-config/rootfs/excludes
@@ -44,7 +44,8 @@ usr/games/*
usr/local/games/*
# T5511: We do not need any caches on the system (will be recreated when needed).
-var/cache/*
+# T7278: We need the directory created by python3-cracklib for password checks
+var/cache/!(cracklib)
# T5511: We do not need any log-files on the system (will be recreated when needed).
var/log/*.log
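
The `!(cracklib)` exclude relies on extended globbing: everything under var/cache except the cracklib directory stays excluded. A minimal demonstration of the pattern's semantics in bash with extglob enabled (live-build's exclude handling is assumed to interpret the pattern the same way):

```bash
# extglob demo: !(cracklib) matches every entry except "cracklib".
shopt -s extglob
mkdir -p /tmp/demo/var/cache/{apt,man,cracklib}
ls -d /tmp/demo/var/cache/!(cracklib)
# -> /tmp/demo/var/cache/apt  /tmp/demo/var/cache/man
```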
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 498dd69a..1b73ca66 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,4 +1,4 @@
-# Copyright (C) 2018-2024 VyOS maintainers and contributors
+# Copyright (C) 2018-2025 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# in order to easily export images built to the "external" world
@@ -18,7 +18,7 @@
# This Dockerfile is installable on both x86, x86-64, armhf and arm64 systems
ARG ARCH=
-FROM ${ARCH}debian:bookworm
+FROM ${ARCH}debian:bookworm-slim
RUN grep "VERSION_ID" /etc/os-release || (echo 'VERSION_ID="12"' >> /etc/os-release)
@@ -103,11 +103,14 @@ RUN apt-get update && apt-get install -y \
python3-flake8 \
python3-autopep8 \
python3-tomli \
+ python3-tomli-w \
yq \
debootstrap \
live-build \
gdisk \
- dosfstools
+ sbsigntool \
+ dosfstools \
+ kpartx
# Packages for TPM test
RUN apt-get update && apt-get install -y swtpm
@@ -136,7 +139,7 @@ RUN dpkg-reconfigure ca-certificates; \
# Installing OCAML needed to compile libvyosconfig
RUN curl https://raw.githubusercontent.com/ocaml/opam/master/shell/install.sh \
--output /tmp/opam_install.sh --retry 10 --retry-delay 5 && \
- sed -i 's/read BINDIR/BINDIR=""/' /tmp/opam_install.sh && sh /tmp/opam_install.sh && \
+ sed -i 's/read_tty BINDIR/BINDIR=""/' /tmp/opam_install.sh && sh /tmp/opam_install.sh && \
opam init --root=/opt/opam --comp=${OCAML_VERSION} --disable-sandboxing --no-setup
RUN eval $(opam env --root=/opt/opam --set-root) && \
@@ -152,23 +155,12 @@ RUN eval $(opam env --root=/opt/opam --set-root) && opam install -y \
fileutils \
xml-light
-# Build VyConf which is required to build libvyosconfig
-RUN eval $(opam env --root=/opt/opam --set-root) && \
- opam pin add vyos1x-config https://github.com/vyos/vyos1x-config.git#fc327ecd76 -y
-
# Packages needed for libvyosconfig
RUN apt-get update && apt-get install -y \
quilt \
libpcre3-dev \
libffi-dev
-# Build libvyosconfig
-RUN eval $(opam env --root=/opt/opam --set-root) && \
- git clone https://github.com/vyos/libvyosconfig.git /tmp/libvyosconfig && \
- cd /tmp/libvyosconfig && git checkout c6141d97 && \
- dpkg-buildpackage -uc -us -tc -b && \
- dpkg -i /tmp/libvyosconfig0_*_$(dpkg-architecture -qDEB_HOST_ARCH).deb
-
# Packages needed for open-vmdk
RUN apt-get update && apt-get install -y \
zlib1g-dev
@@ -207,7 +199,6 @@ RUN wget https://salsa.debian.org/klausenbusk-guest/debootstrap/commit/a9a603b17
sudo dpkg -i ../debootstrap*.deb
# Packages needed for Linux Kernel
-# gnupg2 is required by Jenkins for the TAR verification
# cmake required by accel-ppp
RUN apt-get update && apt-get install -y \
cmake \
@@ -247,6 +238,7 @@ RUN pip install --break-system-packages \
iproute2 \
libzmq3-dev \
procps \
+ protobuf-compiler \
python3 \
python3-setuptools \
python3-inotify \
@@ -264,24 +256,21 @@ RUN pip install --break-system-packages \
python3-netaddr \
python3-paramiko \
python3-passlib \
+ python3-protobuf \
python3-tabulate \
python3-zmq \
pylint \
quilt \
- whois
+ whois \
+ python3-cracklib
-# Go required for validators and vyos-xe-guest-utilities
-RUN GO_VERSION_INSTALL="1.21.3" ; \
+# Go required to build telegraf and prometheus exporters
+RUN GO_VERSION_INSTALL="1.23.2" ; \
wget -O /tmp/go${GO_VERSION_INSTALL}.linux-amd64.tar.gz https://go.dev/dl/go${GO_VERSION_INSTALL}.linux-$(dpkg-architecture -qDEB_HOST_ARCH).tar.gz ; \
tar -C /opt -xzf /tmp/go*.tar.gz && \
rm /tmp/go*.tar.gz
RUN echo "export PATH=/opt/go/bin:$PATH" >> /etc/bash.bashrc
-# Packages needed for opennhrp
-RUN apt-get update && apt-get install -y \
- libc-ares-dev \
- libev-dev
-
# Packages needed for Qemu test-suite
# This is for now only supported on i386 and amd64 platforms
RUN if dpkg-architecture -ii386 || dpkg-architecture -iamd64; then \
@@ -297,7 +286,6 @@ RUN if dpkg-architecture -ii386 || dpkg-architecture -iamd64; then \
# This is only supported on i386 and amd64 platforms
RUN if dpkg-architecture -ii386 || dpkg-architecture -iamd64; then \
apt-get update && apt-get install -y \
- kpartx \
parted \
udev \
grub-pc \
@@ -381,9 +369,8 @@ RUN sed "s/^%sudo.*/%sudo\tALL=(ALL) NOPASSWD:ALL/g" -i /etc/sudoers && \
echo "vyos_bld\tALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers && \
chmod a+s /usr/sbin/useradd /usr/sbin/groupadd
-# Ensure sure all users have access to our OCAM and Go installation
-RUN echo "$(opam env --root=/opt/opam --set-root)" >> /etc/skel/.bashrc && \
- echo "export PATH=/opt/go/bin:\$PATH" >> /etc/skel/.bashrc
+# Ensure all users have access to Go
+RUN echo "export PATH=/opt/go/bin:\$PATH" >> /etc/skel/.bashrc
# Raise upper limit for UID when working in an Active Directory integrated
# environment. This solves the warning: vyos_bld's uid 1632000007 outside of the
diff --git a/docker/patches/live-build/0001-save-package-info.patch b/docker/patches/live-build/0001-save-package-info.patch
index abb4308d..3ce22fe1 100644
--- a/docker/patches/live-build/0001-save-package-info.patch
+++ b/docker/patches/live-build/0001-save-package-info.patch
@@ -1,33 +1,36 @@
-From 3ddf0e979c352f2917be6a949fb8dd37b5c9f2b7 Mon Sep 17 00:00:00 2001
+From 9dacc8bf99310b2216be24a42f2c0475080cf039 Mon Sep 17 00:00:00 2001
From: khramshinr <khramshinr@gmail.com>
-Date: Wed, 28 Aug 2024 14:38:15 +0600
+Date: Thu, 24 Oct 2024 14:22:57 +0600
Subject: [PATCH] T6684: new Debian package repo snapshot logic
-save information about all installed packages and teir source repo, including temporary packages
+Save information about all installed packages and their source repo, including temporary packages
+Added functionality to store version information for temporarily installed packages.
---
- functions/packages.sh | 7 +++++++
+ functions/packages.sh | 9 +++++++++
scripts/build/chroot | 6 ++++++
scripts/build/clean | 2 +-
- 3 files changed, 14 insertions(+), 1 deletion(-)
+ 3 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/functions/packages.sh b/functions/packages.sh
-index 9e25df5f3..4d1fa695e 100755
+index 2481edc25..a6c2c1e8d 100755
--- a/functions/packages.sh
+++ b/functions/packages.sh
-@@ -60,6 +60,13 @@ Install_packages ()
+@@ -60,6 +60,15 @@ Install_packages ()
Chroot chroot "aptitude install --without-recommends ${APTITUDE_OPTIONS} ${_LB_PACKAGES}"
;;
esac
+
+ # save information about all temporarily installed packages and source repos
+ for PACKAGE in ${_LB_PACKAGES}; do
-+ Chroot chroot "apt-cache policy ${PACKAGE}" | sed -n '/\*\*\*/,$p' | grep -P 'http:|https:' -m 1 | awk -v pkg="${PACKAGE}" '{print $2" "$3" "pkg}' >> chroot.packages.all.info
++ INSTALLED_VERSION=$(Chroot chroot "apt-cache policy ${PACKAGE}" | grep 'Installed:' | awk '{print $2}')
++ Chroot chroot "apt-cache policy ${PACKAGE}" | sed -n '/\*\*\*/,$p' | grep -P 'http:|https:' -m 1 | \
++ awk -v pkg="${PACKAGE}" -v version="${INSTALLED_VERSION}" '{print $2" "$3" "pkg" "version}' >> chroot.packages.all.info
+
+ done
+
unset _LB_PACKAGES # Can clear this now
}
-
+
diff --git a/scripts/build/chroot b/scripts/build/chroot
index a0aa10be0..700762e78 100755
--- a/scripts/build/chroot
@@ -35,7 +38,7 @@ index a0aa10be0..700762e78 100755
@@ -48,6 +48,12 @@ for _PASS in install live; do
fi
done
-
+
+# save information about all installed packages and source repos
+Chroot chroot "dpkg-query -W" | while read PACKAGE; do
+ Chroot chroot "apt-cache policy ${PACKAGE}" | sed -n '/\*\*\*/,$p' | grep -P 'http:|https:' -m 1 | awk -v pkg="${PACKAGE}" '{print $2" "$3" "pkg}' >> chroot.packages.all.info
@@ -50,11 +53,12 @@ index 6549fc635..4376d7525 100755
--- a/scripts/build/clean
+++ b/scripts/build/clean
@@ -159,7 +159,7 @@ if [ "${RM_CHROOT}" = "true" ]; then
-
+
rm -rf chroot chroot.tmp
-
+
- rm -f chroot.packages.live chroot.packages.install
+ rm -f chroot.packages.live chroot.packages.install chroot.packages.all.info
rm -f chroot.files
+
+ rm -f "$(Installed_tmp_packages_file)"
- rm -f "$(Installed_tmp_packages_file)" \ No newline at end of file
diff --git a/packages/.gitignore b/packages/.gitignore
index db1547bf..33662f55 100644
--- a/packages/.gitignore
+++ b/packages/.gitignore
@@ -1,6 +1 @@
-*.tar.gz
-*.deb
-*.dsc
-*.buildinfo
-*.changes
-*.git
+/*
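The simplified pattern above ignores every untracked path under packages/ instead of enumerating build artifacts; files already under version control are unaffected. An illustrative check (assuming standard git semantics):

git check-ignore -v packages/anything
# packages/.gitignore:1:/*    packages/anything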
diff --git a/packages/aws-gateway-load-balancer-tunnel-handler/.gitignore b/packages/aws-gateway-load-balancer-tunnel-handler/.gitignore
deleted file mode 100644
index a3e428dc..00000000
--- a/packages/aws-gateway-load-balancer-tunnel-handler/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-aws-gwlbtun/
diff --git a/packages/aws-gateway-load-balancer-tunnel-handler/Jenkinsfile b/packages/aws-gateway-load-balancer-tunnel-handler/Jenkinsfile
deleted file mode 100644
index cbf5a407..00000000
--- a/packages/aws-gateway-load-balancer-tunnel-handler/Jenkinsfile
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2023 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def timestamp = new Date().format('yyyyMMddhhmmss')
-def commit_id = 'f78058a'
-
-def pkgList = [
- ['name': "aws-gwlbtun-${timestamp}-${commit_id}",
- 'scmCommit': commit_id,
- 'scmUrl': 'https://github.com/aws-samples/aws-gateway-load-balancer-tunnel-handler',
- 'buildCmd': "../build.py --package aws-gwlbtun --version ${timestamp}-${commit_id}"],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('aws-gateway-load-balancer-tunnel-handler', pkgList, null, true, "**/packages/aws-gateway-load-balancer-tunnel-handler/**")
diff --git a/packages/aws-gateway-load-balancer-tunnel-handler/build.py b/packages/aws-gateway-load-balancer-tunnel-handler/build.py
deleted file mode 100755
index a8c75aab..00000000
--- a/packages/aws-gateway-load-balancer-tunnel-handler/build.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python3
-
-from argparse import ArgumentParser
-from pathlib import Path
-from subprocess import run
-
-
-def prepare_package() -> None:
- """Prepare a package
- """
- install_file = Path('./debian/install')
- install_data = 'obj-*-linux-gnu/gwlbtun usr/sbin'
- install_file.touch()
- install_file.write_text(install_data)
-
-
-def build_package(package_name: str, package_ver: str) -> bool:
- """Build a package using commands from external file
-
- Args:
- package_name (str): package name
- package_ver (str): package version
-
- Returns:
- bool: build status
- """
- # prepare sources
- debmake_cmd = [
- 'debmake', '-e', 'support@vyos.io', '-f', 'VyOS Support', '-p',
- package_name, '-u', package_ver, '-t'
- ]
- run(debmake_cmd)
-
- prepare_package()
-
- # build a package
- run('debuild')
-
- return True
-
-
-# build a package
-if __name__ == '__main__':
- # prepare argument parser
- arg_parser = ArgumentParser()
- arg_parser.add_argument('--package',
- required=True,
- help='Package name to build')
- arg_parser.add_argument('--version',
- required=True,
- help='Version for the package')
- args = arg_parser.parse_args()
-
- if not build_package(args.package, args.version):
- exit(1)
-
- exit()
diff --git a/packages/ddclient/.gitignore b/packages/ddclient/.gitignore
deleted file mode 100644
index 600e4cd1..00000000
--- a/packages/ddclient/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-ddclient/
diff --git a/packages/ddclient/Jenkinsfile b/packages/ddclient/Jenkinsfile
deleted file mode 100644
index b297db47..00000000
--- a/packages/ddclient/Jenkinsfile
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2023-2024 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'ddclient',
- 'scmCommit': 'debian/3.11.2-1',
- 'scmUrl': 'https://salsa.debian.org/debian/ddclient',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; dpkg-buildpackage -uc -us -tc -b'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('ddclient', pkgList, null, null, "**/packages/ddclient/**")
diff --git a/packages/dropbear/.gitignore b/packages/dropbear/.gitignore
deleted file mode 100644
index 3f3a2a1c..00000000
--- a/packages/dropbear/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-dropbear/
diff --git a/packages/dropbear/Jenkinsfile b/packages/dropbear/Jenkinsfile
deleted file mode 100644
index 539d7578..00000000
--- a/packages/dropbear/Jenkinsfile
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2022-2024 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'dropbear',
- 'scmCommit': 'debian/2022.83-1+deb12u1',
- 'scmUrl': 'https://salsa.debian.org/debian/dropbear.git',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('dropbear', pkgList, null, true, "**/packages/dropbear/**")
diff --git a/packages/dropbear/build.sh b/packages/dropbear/build.sh
deleted file mode 100755
index 9376fa7a..00000000
--- a/packages/dropbear/build.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-SRC=dropbear
-if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-PATCH_DIR=${CWD}/patches
-if [ -d $PATCH_DIR ]; then
- for patch in $(ls ${PATCH_DIR})
- do
- echo "I: Apply patch: ${patch} to main repository"
- cp ${PATCH_DIR}/${patch} ${SRC}/debian/patches/
- echo ${patch} >> ${SRC}/debian/patches/series
- done
-fi
-
-cd ${SRC}
-
-echo "I: Installing build dependencies"
-sudo apt-get install -y libpam0g-dev
-
-echo "I: Build Debian Package"
-dpkg-buildpackage -uc -us -tc -b
diff --git a/packages/ethtool/.gitignore b/packages/ethtool/.gitignore
deleted file mode 100644
index 5967d5de..00000000
--- a/packages/ethtool/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-ethtool/
diff --git a/packages/ethtool/Jenkinsfile b/packages/ethtool/Jenkinsfile
deleted file mode 100644
index bddd3b63..00000000
--- a/packages/ethtool/Jenkinsfile
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2024 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'ethtool',
- 'scmCommit': 'debian/1%6.6-1',
- 'scmUrl': 'https://salsa.debian.org/kernel-team/ethtool',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; dpkg-buildpackage -uc -us -tc -b'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('ethtool', pkgList, null, true, "**/packages/ethtool/**")
diff --git a/packages/frr/.gitignore b/packages/frr/.gitignore
deleted file mode 100644
index 8afd14e8..00000000
--- a/packages/frr/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-frr/
-rtrlib/
-libyang/
diff --git a/packages/frr/Jenkinsfile b/packages/frr/Jenkinsfile
deleted file mode 100644
index 441b1681..00000000
--- a/packages/frr/Jenkinsfile
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2020-2023 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'libyang',
- 'scmCommit': 'v2.1.148',
- 'scmUrl': 'https://github.com/CESNET/libyang.git',
- 'buildCmd': 'pipx run apkg build -i && find pkg/pkgs -type f -name *.deb -exec mv -t .. {} +'],
- ['name': 'rtrlib',
- 'scmCommit': 'v0.8.0',
- 'scmUrl': 'https://github.com/rtrlib/rtrlib.git',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; dpkg-buildpackage -uc -us -tc -b'],
- ['name': 'frr',
- 'scmCommit': 'stable/9.1',
- 'scmUrl': 'https://github.com/FRRouting/frr.git',
- 'buildCmd': 'sudo dpkg -i ../*.deb; sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; cd ..; ./build-frr.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('FRRouting', pkgList, null, true, "**/packages/frr/**")
diff --git a/packages/frr/build-frr.sh b/packages/frr/build-frr.sh
deleted file mode 100755
index 7171a883..00000000
--- a/packages/frr/build-frr.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-FRR_SRC=frr
-
-if [ ! -d ${FRR_SRC} ]; then
- echo "FRR source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-# VyOS requires some small FRR Patches - apply them here
-# It's easier to habe them here and make use of the upstream
-# repository instead of maintaining a full Fork.
-# Saving time/resources is essential :-)
-cd ${FRR_SRC}
-
-PATCH_DIR=${CWD}/patches
-if [ -d $PATCH_DIR ]; then
- echo "I: Apply FRRouting patches not in main repository:"
- for patch in $(ls ${PATCH_DIR})
- do
- if [ -z "$(git config --list | grep -e user.name -e user.email)" ]; then
- # if git user.name and user.email is not set, -c sets temorary user.name and
- # user.email variables as these is not set in the build container by default.
- OPTS="-c user.name=VyOS-CI -c user.email=maintainers@vyos.io"
- fi
- git $OPTS am --committer-date-is-author-date ${PATCH_DIR}/${patch}
- done
-fi
-
-echo "I: Ensure Debian build dependencies are met"
-sudo apt-get -y install chrpath gawk install-info libcap-dev libjson-c-dev librtr-dev
-sudo apt-get -y install libpam-dev libprotobuf-c-dev libpython3-dev:native python3-sphinx:native libsnmp-dev protobuf-c-compiler python3-dev:native texinfo lua5.3
-
-# Build Debian FRR package
-echo "I: Build Debian FRR Package"
-# extract "real" git commit for FRR version identifier
-dch -v "$(git describe | cut -c5-)" "VyOS build - FRR"
-dpkg-buildpackage -us -uc -tc -b -Ppkg.frr.rtrlib,pkg.frr.lua
diff --git a/packages/hostap/.gitignore b/packages/hostap/.gitignore
deleted file mode 100644
index d0133c0d..00000000
--- a/packages/hostap/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-hostap/
-wpa/
diff --git a/packages/hostap/Jenkinsfile b/packages/hostap/Jenkinsfile
deleted file mode 100644
index 7eeff1bb..00000000
--- a/packages/hostap/Jenkinsfile
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2022-2023 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'wpa',
- 'scmCommit': 'debian/2%2.10-12',
- 'scmUrl': 'https://salsa.debian.org/debian/wpa',
- 'buildCmd': '/bin/true'],
- ['name': 'hostap',
- 'scmCommit': 'e7172e26d',
- 'scmUrl': 'git://w1.fi/srv/git/hostap.git',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('hostap', pkgList, null, true, "**/packages/hostap/**")
diff --git a/packages/hostap/build.sh b/packages/hostap/build.sh
deleted file mode 100755
index c356672a..00000000
--- a/packages/hostap/build.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-CWD=$(pwd)
-set -e
-
-SRC=hostap
-SRC_DEB=wpa
-
-if [ ! -d ${SRC} ]; then
- echo "${SRC} directory does not exists, please 'git clone'"
- exit 1
-fi
-if [ ! -d ${SRC_DEB} ]; then
- echo "${SRC_DEB} directory does not exists, please 'git clone'"
- exit 1
-fi
-
-echo "I: Copy Debian build instructions"
-cp -a ${SRC_DEB}/debian ${SRC}
-# Preserve Debian's default of allowing TLSv1.0 and legacy renegotiation for
-# compatibility with networks that use legacy crypto
-cat > ${SRC}/debian/patches/series << EOF
-allow-tlsv1.patch
-allow-legacy-renegotiation.patch
-EOF
-
-# Build Debian package
-cd ${SRC}
-
-echo "I: Ensure Debian build dependencies are met"
-sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends" -Ppkg.wpa.nogui,noudeb
-
-echo "I: Create new Debian Package version"
-version="$(git describe --tags | tr _ .)"
-dch -v ${version:7} "New version to support AES-GCM-256 for MACsec" -b
-
-echo "I: Build Debian hostap Package"
-DEB_CPPFLAGS_SET="-Wno-use-after-free -Wno-deprecated-declarations" \
- dpkg-buildpackage -us -uc -tc -b -Ppkg.wpa.nogui,noudeb
diff --git a/packages/hsflowd/.gitignore b/packages/hsflowd/.gitignore
deleted file mode 100644
index b3786b97..00000000
--- a/packages/hsflowd/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-host-sflow/
diff --git a/packages/hsflowd/Jenkinsfile b/packages/hsflowd/Jenkinsfile
deleted file mode 100644
index bb9dd68d..00000000
--- a/packages/hsflowd/Jenkinsfile
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2023 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-// NOTE: we can build with -d as the libbpf dependency is installed manually
-// and not via a DEB package
-def pkgList = [
- ['name': 'host-sflow',
- 'scmCommit': 'v2.0.55-1',
- 'scmUrl': 'https://github.com/sflow/host-sflow.git',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('hsflowd', pkgList, null, true, "**/packages/hsflowd/**")
diff --git a/packages/hsflowd/build.sh b/packages/hsflowd/build.sh
deleted file mode 100755
index 0b00c998..00000000
--- a/packages/hsflowd/build.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-CWD=$(pwd)
-set -e
-
-SRC=host-sflow
-
-if [ ! -d ${SRC} ]; then
- echo "source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-cd ${SRC}
-
-echo "I: Ensure Debian build dependencies are met"
-sudo apt-get install -y libpcap0.8-dev
-
-# Build hsflowd
-# make deb FEATURES="NFLOG PCAP TCP DOCKER KVM OVS DBUS SYSTEMD DROPMON PSAMPLE DENT CONTAINERD"
-echo "I: Build VyOS hsflowd Package"
-make deb FEATURES="PCAP DROPMON DBUS"
-
-# hsflowd builds ARM package as aarch64 extension, rename to arm64
-for file in *.deb ; do mv $file ${file//aarch64/arm64} || true ; done
-
-# Do not confuse *.deb upload logic by removing build in debian packages ...
-# ugly but works
-find src -name "*.deb" -type f -exec rm {} \;
diff --git a/packages/isc-dhcp/.gitignore b/packages/isc-dhcp/.gitignore
deleted file mode 100644
index d2c5a2db..00000000
--- a/packages/isc-dhcp/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-isc-dhcp/
diff --git a/packages/isc-dhcp/Jenkinsfile b/packages/isc-dhcp/Jenkinsfile
deleted file mode 100644
index 02af15d9..00000000
--- a/packages/isc-dhcp/Jenkinsfile
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2024 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-// NOTE: we can build with -d as the libbpf dependency is installed manually
-// and not via a DEB package
-def pkgList = [
- ['name': 'isc-dhcp',
- 'scmCommit': 'debian/4.4.3-P1-4',
- 'scmUrl': 'https://salsa.debian.org/debian/isc-dhcp',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('isc-dhcp', pkgList, null, true, "**/packages/isc-dhcp/**")
diff --git a/packages/isc-dhcp/build.sh b/packages/isc-dhcp/build.sh
deleted file mode 100755
index 0d206153..00000000
--- a/packages/isc-dhcp/build.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-SRC=isc-dhcp
-if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-cd ${SRC}
-PATCH_DIR=${CWD}/patches
-for patch in $(ls ${PATCH_DIR})
-do
- echo "I: Copy patch: ${PATCH_DIR}/${patch}"
- cp ${PATCH_DIR}/${patch} debian/patches/${patch}
- echo ${patch} >> debian/patches/series
-done
-
-echo "I: Build Debian Package"
-dpkg-buildpackage -uc -us -tc -b -d
diff --git a/packages/kea/.gitignore b/packages/kea/.gitignore
deleted file mode 100644
index 8a9161fe..00000000
--- a/packages/kea/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-isc-kea/
diff --git a/packages/kea/Jenkinsfile b/packages/kea/Jenkinsfile
deleted file mode 100644
index d5b20040..00000000
--- a/packages/kea/Jenkinsfile
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2023 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-// NOTE: we can build with -d as the libbpf dependency is installed manually
-// and not via a DEB package
-def pkgList = [
- ['name': 'isc-kea',
- 'scmCommit': 'debian/2.4.1-1',
- 'scmUrl': 'https://salsa.debian.org/debian/isc-kea',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('ISC Kea', pkgList, null, true, "**/packages/kea/**")
diff --git a/packages/kea/build.sh b/packages/kea/build.sh
deleted file mode 100755
index ec46d293..00000000
--- a/packages/kea/build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-SRC=isc-kea
-if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-cd ${SRC}
-PATCH_DIR=${CWD}/patches
-for patch in $(ls ${PATCH_DIR})
-do
- echo "I: Apply patch: ${PATCH_DIR}/${patch}"
- patch -p1 < ${PATCH_DIR}/${patch}
-done
-
-echo "I: Build Debian Package"
-dpkg-buildpackage -uc -us -tc -b -d
diff --git a/packages/keepalived/.gitignore b/packages/keepalived/.gitignore
deleted file mode 100644
index 9503bdbd..00000000
--- a/packages/keepalived/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-keepalived/
diff --git a/packages/keepalived/Jenkinsfile b/packages/keepalived/Jenkinsfile
deleted file mode 100644
index 0d886751..00000000
--- a/packages/keepalived/Jenkinsfile
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2023 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def package_name = 'keepalived'
-
-def pkgList = [
- ['name': "${package_name}",
- 'scmCommit': 'debian/1%2.2.8-1',
- 'scmUrl': 'https://salsa.debian.org/debian/pkg-keepalived.git',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; ../build.py'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage("${package_name}", pkgList, null, true, "**/packages/${package_name}/**")
diff --git a/packages/keepalived/build.py b/packages/keepalived/build.py
deleted file mode 100755
index 04f4791b..00000000
--- a/packages/keepalived/build.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python3
-
-from pathlib import Path
-from shutil import copy as copy_file
-from subprocess import run
-
-
-# copy patches
-def apply_deb_patches() -> None:
- """Apply patches to sources directory
- """
- patches_dir = Path('../patches')
- current_dir: str = Path.cwd().as_posix()
- if patches_dir.exists():
- patches_list = list(patches_dir.iterdir())
- patches_list.sort()
- Path(f'{current_dir}/debian/patches').mkdir(parents=True, exist_ok=True)
- series_file = Path(f'{current_dir}/debian/patches/series')
- series_data = ''
- for patch_file in patches_list:
- print(f'Applying patch: {patch_file.name}')
- copy_file(patch_file, f'{current_dir}/debian/patches/')
- if series_file.exists():
- series_data: str = series_file.read_text()
- series_data = f'{series_data}\n{patch_file.name}'
- series_file.write_text(series_data)
-
-
-def build_package() -> bool:
- """Build a package
-
- Returns:
- bool: build status
- """
- build_cmd: list[str] = ['dpkg-buildpackage', '-uc', '-us', '-tc', '-b']
- build_status: int = run(build_cmd).returncode
-
- if build_status:
- return False
- return True
-
-
-# build a package
-if __name__ == '__main__':
- apply_deb_patches()
-
- if not build_package():
- exit(1)
-
- exit()
diff --git a/packages/keepalived/patches/0001-vrrp-Set-sysctl-arp_ignore-to-1-on-IPv6-VMACs.patch b/packages/keepalived/patches/0001-vrrp-Set-sysctl-arp_ignore-to-1-on-IPv6-VMACs.patch
deleted file mode 100644
index b099dc7b..00000000
--- a/packages/keepalived/patches/0001-vrrp-Set-sysctl-arp_ignore-to-1-on-IPv6-VMACs.patch
+++ /dev/null
@@ -1,129 +0,0 @@
-From af4aa758c3512bec8233549e138b03741c5404f9 Mon Sep 17 00:00:00 2001
-From: Quentin Armitage <quentin@armitage.org.uk>
-Date: Sat, 14 Oct 2023 15:37:19 +0100
-Subject: [PATCH] vrrp: Set sysctl arp_ignore to 1 on IPv6 VMACs
-
-Setting arp_ignore to 1 ensures that the VMAC interface does not respond
-to ARP requests for IPv4 addresses not configured on the VMAC.
-
-Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
----
- keepalived/include/vrrp_if_config.h | 2 +-
- keepalived/vrrp/vrrp_if_config.c | 28 ++++++++++++++++++++--------
- keepalived/vrrp/vrrp_vmac.c | 5 ++---
- 3 files changed, 23 insertions(+), 12 deletions(-)
-
-diff --git a/keepalived/include/vrrp_if_config.h b/keepalived/include/vrrp_if_config.h
-index 35465cd..c35e56e 100644
---- a/keepalived/include/vrrp_if_config.h
-+++ b/keepalived/include/vrrp_if_config.h
-@@ -34,7 +34,7 @@ extern void set_promote_secondaries(interface_t*);
- extern void reset_promote_secondaries(interface_t*);
- #ifdef _HAVE_VRRP_VMAC_
- extern void restore_rp_filter(void);
--extern void set_interface_parameters(const interface_t*, interface_t*);
-+extern void set_interface_parameters(const interface_t*, interface_t*, sa_family_t);
- extern void reset_interface_parameters(interface_t*);
- extern void link_set_ipv6(const interface_t*, bool);
- #endif
-diff --git a/keepalived/vrrp/vrrp_if_config.c b/keepalived/vrrp/vrrp_if_config.c
-index cfce7e2..fbfd34c 100644
---- a/keepalived/vrrp/vrrp_if_config.c
-+++ b/keepalived/vrrp/vrrp_if_config.c
-@@ -81,6 +81,11 @@ static sysctl_opts_t vmac_sysctl[] = {
- { 0, 0}
- };
-
-+static sysctl_opts_t vmac_sysctl_6[] = {
-+ { IPV4_DEVCONF_ARP_IGNORE, 1 },
-+ { 0, 0}
-+};
-+
- #endif
- #endif
-
-@@ -216,11 +221,14 @@ netlink_set_interface_flags(unsigned ifindex, const sysctl_opts_t *sys_opts)
-
- #ifdef _HAVE_VRRP_VMAC_
- static inline int
--netlink_set_interface_parameters(const interface_t *ifp, interface_t *base_ifp)
-+netlink_set_interface_parameters(const interface_t *ifp, interface_t *base_ifp, sa_family_t family)
- {
-- if (netlink_set_interface_flags(ifp->ifindex, vmac_sysctl))
-+ if (netlink_set_interface_flags(ifp->ifindex, family == AF_INET6 ? vmac_sysctl_6 : vmac_sysctl))
- return -1;
-
-+ if (family == AF_INET6)
-+ return 0;
-+
- /* If the underlying interface is a MACVLAN that has been moved into
- * a separate network namespace from the parent, we can't access the
- * parent. */
-@@ -271,9 +279,9 @@ netlink_reset_interface_parameters(const interface_t* ifp)
- }
-
- static inline void
--set_interface_parameters_devconf(const interface_t *ifp, interface_t *base_ifp)
-+set_interface_parameters_devconf(const interface_t *ifp, interface_t *base_ifp, sa_family_t family)
- {
-- if (netlink_set_interface_parameters(ifp, base_ifp))
-+ if (netlink_set_interface_parameters(ifp, base_ifp, family))
- log_message(LOG_INFO, "Unable to set parameters for %s", ifp->ifname);
- }
-
-@@ -310,11 +318,15 @@ reset_promote_secondaries_devconf(interface_t *ifp)
-
- #ifdef _HAVE_VRRP_VMAC_
- static inline void
--set_interface_parameters_sysctl(const interface_t *ifp, interface_t *base_ifp)
-+set_interface_parameters_sysctl(const interface_t *ifp, interface_t *base_ifp, sa_family_t family)
- {
- unsigned val;
-
- set_sysctl("net/ipv4/conf", ifp->ifname, "arp_ignore", 1);
-+
-+ if (family == AF_INET6)
-+ return;
-+
- set_sysctl("net/ipv4/conf", ifp->ifname, "accept_local", 1);
- set_sysctl("net/ipv4/conf", ifp->ifname, "rp_filter", 0);
-
-@@ -524,15 +536,15 @@ restore_rp_filter(void)
- }
-
- void
--set_interface_parameters(const interface_t *ifp, interface_t *base_ifp)
-+set_interface_parameters(const interface_t *ifp, interface_t *base_ifp, sa_family_t family)
- {
- if (all_rp_filter == UINT_MAX)
- clear_rp_filter();
-
- #ifdef _HAVE_IPV4_DEVCONF_
-- set_interface_parameters_devconf(ifp, base_ifp);
-+ set_interface_parameters_devconf(ifp, base_ifp, family);
- #else
-- set_interface_parameters_sysctl(ifp, base_ifp);
-+ set_interface_parameters_sysctl(ifp, base_ifp, family);
- #endif
- }
-
-diff --git a/keepalived/vrrp/vrrp_vmac.c b/keepalived/vrrp/vrrp_vmac.c
-index e5ff0e9..021953a 100644
---- a/keepalived/vrrp/vrrp_vmac.c
-+++ b/keepalived/vrrp/vrrp_vmac.c
-@@ -407,10 +407,9 @@ netlink_link_add_vmac(vrrp_t *vrrp, const interface_t *old_interface)
- if (!ifp->ifindex)
- return false;
-
-- if (vrrp->family == AF_INET && create_interface) {
-+ if (create_interface) {
- /* Set the necessary kernel parameters to make macvlans work for us */
--// If this saves current base_ifp's settings, we need to be careful if multiple VMACs on same i/f
-- set_interface_parameters(ifp, ifp->base_ifp);
-+ set_interface_parameters(ifp, ifp->base_ifp, vrrp->family);
- }
-
- #ifdef _WITH_FIREWALL_
---
-2.34.1
-
diff --git a/packages/linux-kernel/.gitignore b/packages/linux-kernel/.gitignore
deleted file mode 100644
index a3e9257c..00000000
--- a/packages/linux-kernel/.gitignore
+++ /dev/null
@@ -1,27 +0,0 @@
-/linux
-/wireguard
-/wireguard-linux-compat
-/accel-ppp
-/intel-qat
-/linux-firmware
-/vyos-drivers-intel*
-/vyos-drivers-realtek*
-/ovpn-dco
-/nat-rtsp
-/jool*
-/qat*
-/QAT*
-*.tar.xz
-/*.postinst
-
-# Intel Driver source
-i40e-*/
-igb-*/
-ixgbe-*/
-ixgbevf-*/
-vyos-intel-*/
-vyos-linux-firmware*/
-kernel-vars
-r8152-*.tar.bz2
-/MLNX_OFED_SRC*
-/vyos-mellanox-ofed*
diff --git a/packages/linux-kernel/Jenkinsfile b/packages/linux-kernel/Jenkinsfile
deleted file mode 100644
index c354200e..00000000
--- a/packages/linux-kernel/Jenkinsfile
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (C) 2020-2024 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easy exprort images built to "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- // The Linux Kernel
- ['name': 'kernel',
- 'buildCmd': '''
- # all scripts must be executed one level above ...
- cd ..
-
- # read the required Kernel version
- KERNEL_VER=\$(cat ../../data/defaults.toml | tomlq -r .kernel_version)
- gpg2 --locate-keys torvalds@kernel.org gregkh@kernel.org
- curl -OL https://www.kernel.org/pub/linux/kernel/v6.x/linux-${KERNEL_VER}.tar.xz
- curl -OL https://www.kernel.org/pub/linux/kernel/v6.x/linux-${KERNEL_VER}.tar.sign
- xz -cd linux-${KERNEL_VER}.tar.xz | gpg2 --verify linux-${KERNEL_VER}.tar.sign -
- if [ $? -ne 0 ]; then
- exit 1
- fi
-
- # Unpack Kernel source
- tar xf linux-${KERNEL_VER}.tar.xz
- ln -s linux-${KERNEL_VER} linux
- # ... Build Kernel
- ./build-kernel.sh
- '''],
-
- // Firmware
- ['name': 'linux-firmware', 'scmCommit': '20240610',
- 'scmUrl': 'https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git',
- 'buildCmd': 'cd ..; ./build-linux-firmware.sh'],
-
- // Accel-PPP
- ['name': 'accel-ppp', 'scmCommit': '1.13.0',
- 'scmUrl': 'https://github.com/accel-ppp/accel-ppp.git',
- 'buildCmd': 'cd ..; pwd; ls -al; ./build-accel-ppp.sh'],
-
- // Intel QAT
- ['name': 'qat', 'buildCmd': 'cd ..; ./build-intel-qat.sh'],
-
- // Intel IXGBE
- ['name': 'ixgbe', 'buildCmd': 'cd ..; ./build-intel-ixgbe.sh'],
-
- // Intel IXGBEVF
- ['name': 'ixgbevf', 'buildCmd': 'cd ..; ./build-intel-ixgbevf.sh'],
-
- // Mellanox OFED
- ['name': 'ofed', 'buildCmd': 'cd ..; sudo ./build-mellanox-ofed.sh'],
-
- // Jool
- ['name': 'jool', 'buildCmd': 'cd ..; ./build-jool.py'],
-
- // OpenVPN DCO
- ['name': 'ovpn-dco','scmCommit': 'v0.2.20231117',
- 'scmUrl': 'https://github.com/OpenVPN/ovpn-dco',
- 'buildCmd': 'cd ..; ./build-openvpn-dco.sh'],
-
- // RTSP netfilter helper
- ['name': 'nat-rtsp', 'scmCommit': '475af0a',
- 'scmUrl': 'https://github.com/maru-sama/rtsp-linux.git',
- 'buildCmd': 'cd ..; ./build-nat-rtsp.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('Kernel', pkgList, null, true, "**/packages/linux-kernel/**")
diff --git a/packages/linux-kernel/README.md b/packages/linux-kernel/README.md
deleted file mode 100644
index ee9a5175..00000000
--- a/packages/linux-kernel/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# About
-
-VyOS runs on a custom Linux Kernel (which is 4.19) at the time of this writing.
-This repository holds a Jenkins Pipeline which is used to build the Custom
-Kernel (x86_64/amd64 at the moment) and all required out-of tree modules.
-
-VyOS does not utilize the build in Intel Kernel drivers for its NICs as those
-Kernels sometimes lack features e.g. configurable receive-side-scaling queues.
-On the other hand we ship additional not mainlined features as WireGuard VPN.
-
-## Kernel
-
-The Kernel is build from the vanilla repositories hosted at https://git.kernel.org.
-VyOS requires two additional patches to work which are stored in the patches/kernel
-folder.
-
-### Config
-
-The Kernel configuration used is [x86_64_vyos_defconfig](x86_64_vyos_defconfig)
-which will be copied on demand during the Pipeline run into the `arch/x86/configs`i
-direcotry of the Kernel source tree.
-
-Other configurations can be added in the future easily.
-
-### Modules
-
-VyOS utilizes several Out-of-Tree modules (e.g. WireGuard, Accel-PPP and Intel
-network interface card drivers). Module source code is retrieved from the
-upstream repository and - when needed - patched so it can be build using this
-pipeline.
-
-In the past VyOS maintainers had a fork of the Linux Kernel, WireGuard and
-Accel-PPP. This is fine but increases maintenance effort. By utilizing vanilla
-repositories upgrading to new versions is very easy - only the branch/commit/tag
-used when cloning the repository via [Jenkinsfile](Jenkinsfile) needs to be
-adjusted.
diff --git a/packages/linux-kernel/build-accel-ppp.sh b/packages/linux-kernel/build-accel-ppp.sh
deleted file mode 100755
index 1685ff8d..00000000
--- a/packages/linux-kernel/build-accel-ppp.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-KERNEL_VAR_FILE=${CWD}/kernel-vars
-
-ACCEL_SRC=${CWD}/accel-ppp
-if [ ! -d ${ACCEL_SRC} ]; then
- echo "Accel-PPP source not found"
- exit 1
-fi
-
-if [ ! -f ${KERNEL_VAR_FILE} ]; then
- echo "Kernel variable file '${KERNEL_VAR_FILE}' does not exist, run ./build_kernel.sh first"
- exit 1
-fi
-
-PATCH_DIR=${CWD}/patches/accel-ppp
-if [ -d $PATCH_DIR ]; then
- cd ${ACCEL_SRC}
- for patch in $(ls ${PATCH_DIR})
- do
- echo "I: Apply patch: ${PATCH_DIR}/${patch}"
- patch -p1 < ${PATCH_DIR}/${patch}
- done
-fi
-
-. ${KERNEL_VAR_FILE}
-mkdir -p ${ACCEL_SRC}/build
-cd ${ACCEL_SRC}/build
-
-echo "I: Build Accel-PPP Debian package"
-cmake -DBUILD_IPOE_DRIVER=TRUE \
- -DBUILD_VLAN_MON_DRIVER=TRUE \
- -DCMAKE_INSTALL_PREFIX=/usr \
- -DKDIR=${KERNEL_DIR} \
- -DLUA=5.3 \
- -DMODULES_KDIR=${KERNEL_VERSION}${KERNEL_SUFFIX} \
- -DCPACK_TYPE=Debian12 ..
-make
-cpack -G DEB
-
-# rename resulting Debian package according git description
-mv accel-ppp*.deb ${CWD}/accel-ppp_$(git describe --always --tags)_$(dpkg --print-architecture).deb
diff --git a/packages/linux-kernel/build-intel-ixgbe.sh b/packages/linux-kernel/build-intel-ixgbe.sh
deleted file mode 100755
index ab44f551..00000000
--- a/packages/linux-kernel/build-intel-ixgbe.sh
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-KERNEL_VAR_FILE=${CWD}/kernel-vars
-
-if ! dpkg-architecture -iamd64; then
- echo "Intel ixgbe is only buildable on amd64 platforms"
- exit 0
-fi
-
-if [ ! -f ${KERNEL_VAR_FILE} ]; then
- echo "Kernel variable file '${KERNEL_VAR_FILE}' does not exist, run ./build_kernel.sh first"
- exit 1
-fi
-
-. ${KERNEL_VAR_FILE}
-
-url="https://sourceforge.net/projects/e1000/files/ixgbe%20stable/5.20.3/ixgbe-5.20.3.tar.gz"
-
-cd ${CWD}
-
-DRIVER_FILE=$(basename ${url} | sed -e s/tar_0/tar/)
-DRIVER_DIR="${DRIVER_FILE%.tar.gz}"
-DRIVER_NAME="ixgbe"
-DRIVER_VERSION=$(echo ${DRIVER_DIR} | awk -F${DRIVER_NAME} '{print $2}' | sed 's/^-//')
-DRIVER_VERSION_EXTRA=""
-
-# Build up Debian related variables required for packaging
-DEBIAN_ARCH=$(dpkg --print-architecture)
-DEBIAN_DIR="${CWD}/vyos-intel-${DRIVER_NAME}_${DRIVER_VERSION}_${DEBIAN_ARCH}"
-DEBIAN_CONTROL="${DEBIAN_DIR}/DEBIAN/control"
-DEBIAN_POSTINST="${CWD}/vyos-intel-ixgbe.postinst"
-
-# Fetch Intel driver source from SourceForge
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-curl -L -o ${DRIVER_FILE} ${url}
-if [ "$?" -ne "0" ]; then
- exit 1
-fi
-
-# Unpack archive
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-mkdir -p ${DRIVER_DIR}
-tar -C ${DRIVER_DIR} --strip-components=1 -xf ${DRIVER_FILE}
-
-cd ${DRIVER_DIR}/src
-if [ -z $KERNEL_DIR ]; then
- echo "KERNEL_DIR not defined"
- exit 1
-fi
-
-# See https://lore.kernel.org/lkml/f90837d0-810e-5772-7841-28d47c44d260@intel.com/
-echo "I: remove pci_enable_pcie_error_reporting() code no longer present in Kernel"
-sed -i '/.*pci_disable_pcie_error_reporting(pdev);/d' ixgbe_main.c
-sed -i '/.*pci_enable_pcie_error_reporting(pdev);/d' ixgbe_main.c
-
-# See https://vyos.dev/T6155
-echo "I: always enable allow_unsupported_sfp for all NICs by default"
-patch -l -p1 < ../../patches/ixgbe/allow_unsupported_sfp.patch
-
-# See https://vyos.dev/T6162
-echo "I: add 1000BASE-BX support"
-patch -l -p1 < ../../patches/ixgbe/add_1000base-bx_support.patch
-
-echo "I: Compile Kernel module for Intel ${DRIVER_NAME} driver"
-make KSRC=${KERNEL_DIR} INSTALL_MOD_PATH=${DEBIAN_DIR} INSTALL_FW_PATH=${DEBIAN_DIR} -j $(getconf _NPROCESSORS_ONLN) install
-
-if [ "x$?" != "x0" ]; then
- exit 1
-fi
-
-if [ -f ${DEBIAN_DIR}.deb ]; then
- rm ${DEBIAN_DIR}.deb
-fi
-
-# build Debian package
-echo "I: Building Debian package vyos-intel-${DRIVER_NAME}"
-cd ${CWD}
-
-# delete non required files which are also present in the kernel package
-# und thus lead to duplicated files
-find ${DEBIAN_DIR} -name "modules.*" | xargs rm -f
-
-echo "#!/bin/sh" > ${DEBIAN_POSTINST}
-echo "/sbin/depmod -a ${KERNEL_VERSION}${KERNEL_SUFFIX}" >> ${DEBIAN_POSTINST}
-
-fpm --input-type dir --output-type deb --name vyos-intel-${DRIVER_NAME} \
- --version ${DRIVER_VERSION} --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "Vendor based driver for Intel ${DRIVER_NAME}" \
- --depends linux-image-${KERNEL_VERSION}${KERNEL_SUFFIX} \
- --license "GPL2" -C ${DEBIAN_DIR} --after-install ${DEBIAN_POSTINST}
-
-echo "I: Cleanup ${DRIVER_NAME} source"
-cd ${CWD}
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-if [ -d ${DEBIAN_DIR} ]; then
- rm -rf ${DEBIAN_DIR}
-fi
-if [ -f ${DEBIAN_POSTINST} ]; then
- rm -f ${DEBIAN_POSTINST}
-fi
diff --git a/packages/linux-kernel/build-intel-ixgbevf.sh b/packages/linux-kernel/build-intel-ixgbevf.sh
deleted file mode 100755
index 39803852..00000000
--- a/packages/linux-kernel/build-intel-ixgbevf.sh
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-KERNEL_VAR_FILE=${CWD}/kernel-vars
-
-if ! dpkg-architecture -iamd64; then
- echo "Intel ixgbevf is only buildable on amd64 platforms"
- exit 0
-fi
-
-if [ ! -f ${KERNEL_VAR_FILE} ]; then
- echo "Kernel variable file '${KERNEL_VAR_FILE}' does not exist, run ./build_kernel.sh first"
- exit 1
-fi
-
-. ${KERNEL_VAR_FILE}
-
-url="https://sourceforge.net/projects/e1000/files/ixgbevf%20stable/4.18.9/ixgbevf-4.18.9.tar.gz"
-
-cd ${CWD}
-
-DRIVER_FILE=$(basename ${url} | sed -e s/tar_0/tar/)
-DRIVER_DIR="${DRIVER_FILE%.tar.gz}"
-DRIVER_NAME="ixgbevf"
-DRIVER_VERSION=$(echo ${DRIVER_DIR} | awk -F${DRIVER_NAME} '{print $2}' | sed 's/^-//')
-DRIVER_VERSION_EXTRA=""
-
-# Build up Debian related variables required for packaging
-DEBIAN_ARCH=$(dpkg --print-architecture)
-DEBIAN_DIR="${CWD}/vyos-intel-${DRIVER_NAME}_${DRIVER_VERSION}_${DEBIAN_ARCH}"
-DEBIAN_CONTROL="${DEBIAN_DIR}/DEBIAN/control"
-DEBIAN_POSTINST="${CWD}/vyos-intel-ixgbevf.postinst"
-
-# Fetch Intel driver source from SourceForge
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-curl -L -o ${DRIVER_FILE} ${url}
-if [ "$?" -ne "0" ]; then
- exit 1
-fi
-
-# Unpack archive
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-mkdir -p ${DRIVER_DIR}
-tar -C ${DRIVER_DIR} --strip-components=1 -xf ${DRIVER_FILE}
-
-cd ${DRIVER_DIR}/src
-if [ -z $KERNEL_DIR ]; then
- echo "KERNEL_DIR not defined"
- exit 1
-fi
-
-# See https://lore.kernel.org/lkml/f90837d0-810e-5772-7841-28d47c44d260@intel.com/
-echo "I: remove pci_enable_pcie_error_reporting() code no longer present in Kernel"
-sed -i '/.*pci_disable_pcie_error_reporting(pdev);/d' ixgbevf_main.c
-sed -i '/.*pci_enable_pcie_error_reporting(pdev);/d' ixgbevf_main.c
-
-echo "I: Compile Kernel module for Intel ${DRIVER_NAME} driver"
-make KSRC=${KERNEL_DIR} INSTALL_MOD_PATH=${DEBIAN_DIR} INSTALL_FW_PATH=${DEBIAN_DIR} -j $(getconf _NPROCESSORS_ONLN) install
-
-if [ "x$?" != "x0" ]; then
- exit 1
-fi
-
-if [ -f ${DEBIAN_DIR}.deb ]; then
- rm ${DEBIAN_DIR}.deb
-fi
-
-# build Debian package
-echo "I: Building Debian package vyos-intel-${DRIVER_NAME}"
-cd ${CWD}
-
-# delete non required files which are also present in the kernel package
-# und thus lead to duplicated files
-find ${DEBIAN_DIR} -name "modules.*" | xargs rm -f
-
-echo "#!/bin/sh" > ${DEBIAN_POSTINST}
-echo "/sbin/depmod -a ${KERNEL_VERSION}${KERNEL_SUFFIX}" >> ${DEBIAN_POSTINST}
-
-fpm --input-type dir --output-type deb --name vyos-intel-${DRIVER_NAME} \
- --version ${DRIVER_VERSION} --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "Vendor based driver for Intel ${DRIVER_NAME}" \
- --depends linux-image-${KERNEL_VERSION}${KERNEL_SUFFIX} \
- --license "GPL2" -C ${DEBIAN_DIR} --after-install ${DEBIAN_POSTINST}
-
-echo "I: Cleanup ${DRIVER_NAME} source"
-cd ${CWD}
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-if [ -d ${DEBIAN_DIR} ]; then
- rm -rf ${DEBIAN_DIR}
-fi
-if [ -f ${DEBIAN_POSTINST} ]; then
- rm -f ${DEBIAN_POSTINST}
-fi
diff --git a/packages/linux-kernel/build-intel-qat.sh b/packages/linux-kernel/build-intel-qat.sh
deleted file mode 100755
index 5b0e023f..00000000
--- a/packages/linux-kernel/build-intel-qat.sh
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-KERNEL_VAR_FILE=${CWD}/kernel-vars
-
-if ! dpkg-architecture -iamd64; then
- echo "Intel-QAT is only buildable on amd64 platforms"
- exit 0
-fi
-
-if [ ! -f ${KERNEL_VAR_FILE} ]; then
- echo "Kernel variable file '${KERNEL_VAR_FILE}' does not exist, run ./build_kernel.sh first"
- exit 1
-fi
-
-. ${KERNEL_VAR_FILE}
-
-url="https://dev.packages.vyos.net/source-mirror/QAT.L.4.24.0-00005.tar.gz"
-
-cd ${CWD}
-
-DRIVER_FILE=$(basename ${url} | sed -e s/tar_0/tar/)
-DRIVER_DIR="${DRIVER_FILE%.tar.gz}"
-DRIVER_NAME="QAT"
-DRIVER_NAME_EXTRA="L."
-DRIVER_VERSION=$(echo ${DRIVER_DIR} | awk -F${DRIVER_NAME} '{print $2}' | awk -F${DRIVER_NAME_EXTRA} '{print $2}')
-DRIVER_VERSION_EXTRA="-0"
-
-# Build up Debian related variables required for packaging
-DEBIAN_ARCH=$(dpkg --print-architecture)
-DEBIAN_DIR="${CWD}/vyos-intel-${DRIVER_NAME}_${DRIVER_VERSION}${DRIVER_VERSION_EXTRA}_${DEBIAN_ARCH}"
-DEBIAN_CONTROL="${DEBIAN_DIR}/DEBIAN/control"
-DEBIAN_POSTINST="${CWD}/vyos-intel-qat.postinst"
-
-# Fetch Intel driver source from SourceForge
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-curl -L -o ${DRIVER_FILE} ${url}
-if [ "$?" -ne "0" ]; then
- exit 1
-fi
-
-# Unpack archive
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-mkdir -p ${DRIVER_DIR}
-tar -C ${DRIVER_DIR} -xf ${DRIVER_FILE}
-
-cd ${DRIVER_DIR}
-if [ -z $KERNEL_DIR ]; then
- echo "KERNEL_DIR not defined"
- exit 1
-fi
-
-echo "I: Compile Kernel module for Intel ${DRIVER_NAME} driver"
-mkdir -p \
- ${DEBIAN_DIR}/lib/firmware \
- ${DEBIAN_DIR}/usr/sbin \
- ${DEBIAN_DIR}/usr/lib/x86_64-linux-gnu \
- ${DEBIAN_DIR}/etc/init.d
-KERNEL_SOURCE_ROOT=${KERNEL_DIR} ./configure --enable-kapi --enable-qat-lkcf
-make -j $(getconf _NPROCESSORS_ONLN) all
-make INSTALL_MOD_PATH=${DEBIAN_DIR} INSTALL_FW_PATH=${DEBIAN_DIR} \
- qat-driver-install adf-ctl-all
-
-if [ "x$?" != "x0" ]; then
- exit 1
-fi
-
-cp quickassist/qat/fw/*.bin ${DEBIAN_DIR}/lib/firmware
-cp build/*.so ${DEBIAN_DIR}/usr/lib/x86_64-linux-gnu
-cp build/adf_ctl ${DEBIAN_DIR}/usr/sbin
-cp quickassist/build_system/build_files/qat_service ${DEBIAN_DIR}/etc/init.d
-cp build/usdm_drv.ko ${DEBIAN_DIR}/lib/modules/${KERNEL_VERSION}${KERNEL_SUFFIX}/updates/drivers
-chmod 644 ${DEBIAN_DIR}/lib/firmware/*
-chmod 755 ${DEBIAN_DIR}/etc/init.d/* ${DEBIAN_DIR}/usr/local/bin/*
-
-if [ -f ${DEBIAN_DIR}.deb ]; then
- rm ${DEBIAN_DIR}.deb
-fi
-
-# build Debian package
-echo "I: Building Debian package vyos-intel-${DRIVER_NAME}"
-cd ${CWD}
-
-# delete non required files which are also present in the kernel package
-# und thus lead to duplicated files
-find ${DEBIAN_DIR} -name "modules.*" | xargs rm -f
-
-echo "#!/bin/sh" > ${DEBIAN_POSTINST}
-echo "/sbin/depmod -a ${KERNEL_VERSION}${KERNEL_SUFFIX}" >> ${DEBIAN_POSTINST}
-
-fpm --input-type dir --output-type deb --name vyos-intel-${DRIVER_NAME} \
- --version ${DRIVER_VERSION}${DRIVER_VERSION_EXTRA} --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "Vendor based driver for Intel ${DRIVER_NAME}" \
- --depends linux-image-${KERNEL_VERSION}${KERNEL_SUFFIX} \
- --license "GPL2" -C ${DEBIAN_DIR} --after-install ${DEBIAN_POSTINST}
-
-echo "I: Cleanup ${DRIVER_NAME} source"
-cd ${CWD}
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-if [ -d ${DEBIAN_DIR} ]; then
- rm -rf ${DEBIAN_DIR}
-fi
-if [ -f ${DEBIAN_POSTINST} ]; then
- rm -f ${DEBIAN_POSTINST}
-fi
diff --git a/packages/linux-kernel/build-kernel.sh b/packages/linux-kernel/build-kernel.sh
deleted file mode 100755
index f7b0c597..00000000
--- a/packages/linux-kernel/build-kernel.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/bash
-CWD=$(pwd)
-KERNEL_SRC=linux
-
-set -e
-
-if [ ! -d ${KERNEL_SRC} ]; then
- echo "Linux Kernel source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-echo "I: Copy Kernel config (x86_64_vyos_defconfig) to Kernel Source"
-cp -rv arch/ ${KERNEL_SRC}/
-
-cd ${KERNEL_SRC}
-
-echo "I: clean modified files"
-git reset --hard HEAD
-
-KERNEL_VERSION=$(make kernelversion)
-KERNEL_SUFFIX=-$(awk -F "= " '/kernel_flavor/ {print $2}' ../../../data/defaults.toml | tr -d \")
-KERNEL_CONFIG=arch/x86/configs/vyos_defconfig
-
-# VyOS requires some small Kernel Patches - apply them here
-# It's easier to habe them here and make use of the upstream
-# repository instead of maintaining a full Kernel Fork.
-# Saving time/resources is essential :-)
-PATCH_DIR=${CWD}/patches/kernel
-for patch in $(ls ${PATCH_DIR})
-do
- echo "I: Apply Kernel patch: ${PATCH_DIR}/${patch}"
- patch -p1 < ${PATCH_DIR}/${patch}
-done
-
-TRUSTED_KEYS_FILE=trusted_keys.pem
-# start with empty key file
-echo -n "" > $TRUSTED_KEYS_FILE
-CERTS=$(find ../../../data/live-build-config/includes.chroot/var/lib/shim-signed/mok -name "*.pem" -type f)
-if [ ! -z "${CERTS}" ]; then
- # add known public keys to Kernel certificate chain
- for file in $CERTS; do
- cat $file >> $TRUSTED_KEYS_FILE
- done
-
- # Force Kernel module signing and embed public keys
- echo "CONFIG_MODULE_SIG_FORMAT=y" >> $KERNEL_CONFIG
- echo "CONFIG_MODULE_SIG=y" >> $KERNEL_CONFIG
- echo "CONFIG_MODULE_SIG_FORCE=y" >> $KERNEL_CONFIG
- echo "# CONFIG_MODULE_SIG_ALL is not set" >> $KERNEL_CONFIG
- echo "CONFIG_MODULE_SIG_SHA512=y" >> $KERNEL_CONFIG
- echo "CONFIG_MODULE_SIG_HASH=\"sha512\"" >> $KERNEL_CONFIG
- echo "CONFIG_MODULE_SIG_KEY=\"\"" >> $KERNEL_CONFIG
- echo "CONFIG_MODULE_SIG_KEY_TYPE_RSA=y" >> $KERNEL_CONFIG
- echo "CONFIG_SYSTEM_TRUSTED_KEYS=\"$TRUSTED_KEYS_FILE\"" >> $KERNEL_CONFIG
-fi
-
-echo "I: make vyos_defconfig"
-# Select Kernel configuration - currently there is only one
-make vyos_defconfig
-
-echo "I: Generate environment file containing Kernel variable"
-cat << EOF >${CWD}/kernel-vars
-#!/bin/sh
-export KERNEL_VERSION=${KERNEL_VERSION}
-export KERNEL_SUFFIX=${KERNEL_SUFFIX}
-export KERNEL_DIR=${CWD}/${KERNEL_SRC}
-EOF
-
-echo "I: Build Debian Kernel package"
-touch .scmversion
-make bindeb-pkg BUILD_TOOLS=1 LOCALVERSION=${KERNEL_SUFFIX} KDEB_PKGVERSION=${KERNEL_VERSION}-1 -j $(getconf _NPROCESSORS_ONLN)
-
-cd $CWD
-if [[ $? == 0 ]]; then
- for package in $(ls linux-*.deb)
- do
- ln -sf linux-kernel/$package ..
- done
-fi
diff --git a/packages/linux-kernel/build-linux-firmware.sh b/packages/linux-kernel/build-linux-firmware.sh
deleted file mode 100755
index 2b1fa7b7..00000000
--- a/packages/linux-kernel/build-linux-firmware.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/bash
-
-# All selected drivers are then precomfiled "make drivers/foo/bar.i" and we grep for
-# the magic word "UNIQUE_ID_firmware" which identifies firmware files.
-
-CWD=$(pwd)
-LINUX_SRC="linux"
-LINUX_FIRMWARE="linux-firmware"
-KERNEL_VAR_FILE=${CWD}/kernel-vars
-
-if [ ! -d ${LINUX_SRC} ]; then
- echo "Kernel source missing"
- exit 1
-fi
-
-if [ ! -d ${LINUX_FIRMWARE} ]; then
- echo "Linux firmware repository missing"
- exit 1
-fi
-
-. ${KERNEL_VAR_FILE}
-
-result=()
-# Retrieve firmware blobs from source files
-FW_FILES=$(find ${LINUX_SRC}/debian/linux-image/lib/modules/${KERNEL_VERSION}${KERNEL_SUFFIX}/kernel/drivers/net -name *.ko | xargs modinfo | grep "^firmware:" | awk '{print $2}')
-
-# Debian package will use the descriptive Git commit as version
-GIT_COMMIT=$(cd ${CWD}/${LINUX_FIRMWARE}; git describe --always)
-VYOS_FIRMWARE_NAME="vyos-linux-firmware"
-VYOS_FIRMWARE_DIR="${VYOS_FIRMWARE_NAME}_${GIT_COMMIT}-0_all"
-if [ -d ${VYOS_FIRMWARE_DIR} ]; then
- # remove Debian package folder and deb file from previous runs
- rm -rf ${VYOS_FIRMWARE_DIR}*
-fi
-mkdir -p ${VYOS_FIRMWARE_DIR}
-
-# Install firmware files to build directory
-LINUX_FIRMWARE_BUILD_DIR="${LINUX_FIRMWARE}_${GIT_COMMIT}"
-
-if [ -d ${LINUX_FIRMWARE_BUILD_DIR} ]; then
- rm -rf "${LINUX_FIRMWARE_BUILD_DIR}"
-fi
-
-mkdir -p "${LINUX_FIRMWARE_BUILD_DIR}"
-
-(
- cd ${LINUX_FIRMWARE}
- ./copy-firmware.sh "${CWD}/${LINUX_FIRMWARE_BUILD_DIR}"
-)
-
-# Copy firmware file from linux firmware build directory into
-# assembly folder for the vyos-firmware package
-SED_REPLACE="s@${CWD}/${LINUX_FIRMWARE}/@@"
-for FILE_PATTERN in ${FW_FILES}; do
- find "${LINUX_FIRMWARE_BUILD_DIR}" -path "*/${FILE_PATTERN}" -print0 | while IFS= read -r -d $'\0' FILE; do
- TARGET="$(echo "${FILE}" | sed "s/${LINUX_FIRMWARE_BUILD_DIR}\///g")"
- TARGET_DIR="${VYOS_FIRMWARE_DIR}/lib/firmware/$(dirname "${TARGET}")"
- # If file is a symlink install the symlink target as well
- if [ -h "${FILE}" ]; then
- if [ ! -f "${TARGET_DIR}/$(basename "${TARGET}")" ]; then
- if [ -f "${LINUX_FIRMWARE_BUILD_DIR}/${TARGET}" ]; then
- mkdir -p "${TARGET_DIR}"
-
- echo "I: install firmware: ${TARGET}"
- cp "${CWD}/${LINUX_FIRMWARE_BUILD_DIR}/${TARGET}" "${TARGET_DIR}"
- # If the symlink target lives in a folder this script does not otherwise
- # cover, create that folder and copy the real file along with it
- if [ -L "${LINUX_FIRMWARE_BUILD_DIR}/${TARGET}" ]; then
- REALPATH_TARGET=$(realpath --relative-to="${CWD}/${LINUX_FIRMWARE_BUILD_DIR}" "${CWD}/${LINUX_FIRMWARE_BUILD_DIR}/${TARGET}")
- REALPATH_TARGET_DIR="${VYOS_FIRMWARE_DIR}/lib/firmware/$(dirname "${REALPATH_TARGET}")"
- mkdir -p "${REALPATH_TARGET_DIR}"
- echo "I: install firmware: ${REALPATH_TARGET}"
- cp "${CWD}/${LINUX_FIRMWARE_BUILD_DIR}/${REALPATH_TARGET}" "${REALPATH_TARGET_DIR}"
- fi
- else
- echo "I: firmware file not found: ${TARGET}"
- fi
- fi
- fi
-
- if [ -f "${FILE}" ]; then
- mkdir -p "${TARGET_DIR}"
- echo "I: install firmware: ${TARGET}"
- cp -P "${CWD}/${LINUX_FIRMWARE_BUILD_DIR}/${TARGET}" "${TARGET_DIR}"
- else
- echo "I: firmware file not found: ${TARGET}"
- fi
- done
-done
-
-echo "I: Create linux-firmware package"
-rm -f ${VYOS_FIRMWARE_NAME}_*.deb
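-# fpm assembles the package directly from the staged directory tree; no
-# debian/ packaging boilerplate is required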
-fpm --input-type dir --output-type deb --name ${VYOS_FIRMWARE_NAME} \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "Binary firmware for various drivers in the Linux kernel" \
- --architecture all --version ${GIT_COMMIT} --deb-compression gz -C ${VYOS_FIRMWARE_DIR}
-
-rm -rf "${LINUX_FIRMWARE_BUILD_DIR}"
-rm -rf ${VYOS_FIRMWARE_DIR}
diff --git a/packages/linux-kernel/build-nat-rtsp.sh b/packages/linux-kernel/build-nat-rtsp.sh
deleted file mode 100755
index 40018cfb..00000000
--- a/packages/linux-kernel/build-nat-rtsp.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-KERNEL_VAR_FILE=${CWD}/kernel-vars
-
-SRC=${CWD}/nat-rtsp
-if [ ! -d ${SRC} ]; then
- echo "nat-rtsp source not found"
- exit 1
-fi
-
-if [ ! -f ${KERNEL_VAR_FILE} ]; then
- echo "Kernel variable file '${KERNEL_VAR_FILE}' does not exist, run ./build_kernel.sh first"
- exit 1
-fi
-
-. ${KERNEL_VAR_FILE}
-
-cd ${SRC} && make KERNELDIR=$KERNEL_DIR
-
-# Copy binary to package directory
-DEBIAN_DIR=tmp/lib/modules/${KERNEL_VERSION}${KERNEL_SUFFIX}/extra
-mkdir -p ${DEBIAN_DIR}
-cp nf_conntrack_rtsp.ko nf_nat_rtsp.ko ${DEBIAN_DIR}
-
-DEBIAN_POSTINST="${CWD}/vyos-nat-rtsp.postinst"
-echo "#!/bin/sh" > ${DEBIAN_POSTINST}
-echo "/sbin/depmod -a ${KERNEL_VERSION}${KERNEL_SUFFIX}" >> ${DEBIAN_POSTINST}
-
-# Build Debian Package
-fpm --input-type dir --output-type deb --name nat-rtsp \
- --version $(git describe --tags --always) --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "Connection tracking and NAT support for RTSP" \
- --depends linux-image-${KERNEL_VERSION}${KERNEL_SUFFIX} \
- --after-install ${DEBIAN_POSTINST} \
- --license "GPL2" --chdir tmp
-
-mv *.deb ..
-
-if [ -f ${DEBIAN_POSTINST} ]; then
- rm -f ${DEBIAN_POSTINST}
-fi
diff --git a/packages/linux-kernel/build-openvpn-dco.sh b/packages/linux-kernel/build-openvpn-dco.sh
deleted file mode 100755
index fd427825..00000000
--- a/packages/linux-kernel/build-openvpn-dco.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-KERNEL_VAR_FILE=${CWD}/kernel-vars
-
-SRC=${CWD}/ovpn-dco
-if [ ! -d ${SRC} ]; then
- echo "OpenVPN DCO source not found"
- exit 1
-fi
-
-if [ ! -f ${KERNEL_VAR_FILE} ]; then
- echo "Kernel variable file '${KERNEL_VAR_FILE}' does not exist, run ./build_kernel.sh first"
- exit 1
-fi
-
-. ${KERNEL_VAR_FILE}
-
-cd ${SRC} && make KERNEL_SRC=$KERNEL_DIR
-
-# Copy binary to package directory
-DEBIAN_DIR=tmp/lib/modules/${KERNEL_VERSION}${KERNEL_SUFFIX}/extra
-mkdir -p ${DEBIAN_DIR}
-cp drivers/net/ovpn-dco/ovpn-dco-v2.ko ${DEBIAN_DIR}
-
-# Build Debian Package
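-# "git describe" output carries a leading "v" (tag style vX.Y.Z); sed strips
-# it so the Debian version field starts with a digit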
-fpm --input-type dir --output-type deb --name openvpn-dco \
- --version $(git describe | sed s/^v//) --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "OpenVPN Data Channel Offload" \
- --depends linux-image-${KERNEL_VERSION}${KERNEL_SUFFIX} \
- --license "GPL2" --chdir tmp
-
-mv *.deb ..
diff --git a/packages/ndppd/.gitignore b/packages/ndppd/.gitignore
deleted file mode 100644
index 0f24798d..00000000
--- a/packages/ndppd/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-ndppd/
diff --git a/packages/ndppd/Jenkinsfile b/packages/ndppd/Jenkinsfile
deleted file mode 100644
index f112ae38..00000000
--- a/packages/ndppd/Jenkinsfile
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2020-2021 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-// NOTE: we can build with -d as the libbpf dependency is installed manually
-// and not via a DEB package
-def pkgList = [
- ['name': 'ndppd',
- 'scmCommit': 'debian/0.2.5-6',
- 'scmUrl': 'https://salsa.debian.org/debian/ndppd',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('ndppd', pkgList, null, true, "**/packages/ndppd/**")
diff --git a/packages/ndppd/build.sh b/packages/ndppd/build.sh
deleted file mode 100755
index 223cf52b..00000000
--- a/packages/ndppd/build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-SRC=ndppd
-if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-cd ${SRC}
-PATCH_DIR=${CWD}/patches
-for patch in $(ls ${PATCH_DIR})
-do
- echo "I: Apply patch: ${PATCH_DIR}/${patch}"
- patch -p1 < ${PATCH_DIR}/${patch}
-done
-
-echo "I: Build Debian Package"
-dpkg-buildpackage -uc -us -tc -b -d
diff --git a/packages/net-snmp/.gitignore b/packages/net-snmp/.gitignore
deleted file mode 100644
index 3f41bbac..00000000
--- a/packages/net-snmp/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-net-snmp/
diff --git a/packages/net-snmp/Jenkinsfile b/packages/net-snmp/Jenkinsfile
deleted file mode 100644
index a2b21ce5..00000000
--- a/packages/net-snmp/Jenkinsfile
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2024 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'net-snmp',
- 'scmCommit': 'debian/5.9.4+dfsg-1',
- 'scmUrl': 'https://salsa.debian.org/debian/net-snmp',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('net-snmp', pkgList, null, true, "**/packages/net-snmp/**")
diff --git a/packages/net-snmp/build.sh b/packages/net-snmp/build.sh
deleted file mode 100755
index ebaeb6eb..00000000
--- a/packages/net-snmp/build.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-SRC=net-snmp
-
-if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-cd ${SRC}
-
-PATCH_DIR=${CWD}/patches
-if [ -d $PATCH_DIR ]; then
- echo "I: Apply SNMP patches not in main repository:"
- for patch in $(ls ${PATCH_DIR})
- do
- cp ${PATCH_DIR}/${patch} debian/patches
- echo ${patch} >> debian/patches/series
- done
-fi
-
-echo "I: Build Debian net-snmp Package"
-# We need "|| true" to fix an issue wioth the make system
-#make[2]: Leaving directory '/vyos/vyos-build/packages/net-snmp/net-snmp/snmplib'
-#making clean in /vyos/vyos-build/packages/net-snmp/net-snmp/agent
-#make[2]: Entering directory '/vyos/vyos-build/packages/net-snmp/net-snmp/agent'
-#make[2]: *** No rule to make target 'clean'. Stop.
-dpkg-buildpackage -us -uc -tc -b || true
diff --git a/packages/netfilter/.gitignore b/packages/netfilter/.gitignore
deleted file mode 100644
index 8518afb9..00000000
--- a/packages/netfilter/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/pkg-libnftnl/
-/pkg-nftables/
-
diff --git a/packages/netfilter/Jenkinsfile b/packages/netfilter/Jenkinsfile
deleted file mode 100644
index 45fc6ed8..00000000
--- a/packages/netfilter/Jenkinsfile
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2023 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- // libnftnl
- ['name': 'pkg-libnftnl',
- 'scmCommit': 'debian/1.2.6-2',
- 'scmUrl': 'https://salsa.debian.org/pkg-netfilter-team/pkg-libnftnl.git',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; dpkg-buildpackage -uc -us -tc -b'],
-
- // nftables
- ['name': 'pkg-nftables',
- 'scmCommit': 'debian/1.0.9-1',
- 'scmUrl': 'https://salsa.debian.org/pkg-netfilter-team/pkg-nftables.git',
- 'buildCmd': '''sudo dpkg -i ../libnftnl*.deb;
- sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends";
- ../build.py'''],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('Netfilter', pkgList, null, true)
diff --git a/packages/netfilter/build.py b/packages/netfilter/build.py
deleted file mode 100755
index 2851a679..00000000
--- a/packages/netfilter/build.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-
-from pathlib import Path
-from shutil import copy as copy_file
-from subprocess import run
-
-
-# copy patches
-def apply_deb_patches() -> None:
- """Apply patches to sources directory
- """
- package_dir: str = Path.cwd().name
- current_dir: str = Path.cwd().as_posix()
- patches_dir = Path(f'../patches/{package_dir}')
- patches_dir_dst = Path(f'{current_dir}/debian/patches')
- if not patches_dir_dst.exists():
- patches_dir_dst.mkdir(parents = True)
- if patches_dir.exists():
- patches_list = list(patches_dir.iterdir())
- patches_list.sort()
- series_file = Path(f'{patches_dir_dst.as_posix()}/series')
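- # debian/patches/series is the quilt index: every filename appended here
- # is applied in listed order during the package build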
- if series_file.exists():
- series_data: str = series_file.read_text()
- else:
- series_data = ''
- for patch_file in patches_list:
- print(f'Applying patch: {patch_file.name}')
- copy_file(patch_file, f'{patches_dir_dst.as_posix()}')
- series_data = f'{series_data}\n{patch_file.name}'
- series_file.write_text(series_data)
-
-
-def build_package() -> bool:
- """Build a package
- Returns:
- bool: build status
- """
- build_cmd: list[str] = ['dpkg-buildpackage', '-uc', '-us', '-tc', '-b']
- build_status: int = run(build_cmd).returncode
-
- if build_status:
- return False
- return True
-
-
-# build a package
-if __name__ == '__main__':
- apply_deb_patches()
-
- if not build_package():
- exit(1)
-
- exit()
-
diff --git a/packages/netfilter/patches/pkg-nftables/0001-meta-fix-hour-decoding.patch b/packages/netfilter/patches/pkg-nftables/0001-meta-fix-hour-decoding.patch
deleted file mode 100644
index dd466f1a..00000000
--- a/packages/netfilter/patches/pkg-nftables/0001-meta-fix-hour-decoding.patch
+++ /dev/null
@@ -1,118 +0,0 @@
-From d392ddf243dcbf8a34726c777d2c669b1e8bfa85 Mon Sep 17 00:00:00 2001
-From: Florian Westphal <fw@strlen.de>
-Date: Thu, 2 Nov 2023 15:34:13 +0100
-Subject: meta: fix hour decoding when timezone offset is negative
-
-Brian Davidson says:
-
- meta hour rules don't display properly after being created when the
- hour is on or after 00:00 UTC. The netlink debug looks correct for
- seconds past midnight UTC, but displaying the rules looks like an
- overflow or a byte order problem. I am in UTC-0400, so today, 20:00
- and later exhibits the problem, while 19:00 and earlier hours are
- fine.
-
-meta.c only ever worked when the delta to UTC is positive.
-We need to add in case the second counter turns negative after
-offset adjustment.
-
-Also add a test case for this.
-
-Fixes: f8f32deda31d ("meta: Introduce new conditions 'time', 'day' and 'hour'")
-Reported-by: Brian Davidson <davidson.brian@gmail.com>
-Signed-off-by: Florian Westphal <fw@strlen.de>
----
- src/meta.c | 11 ++++-
- .../shell/testcases/listing/dumps/meta_time.nodump | 0
- tests/shell/testcases/listing/meta_time | 52 ++++++++++++++++++++++
- 3 files changed, 61 insertions(+), 2 deletions(-)
- create mode 100644 tests/shell/testcases/listing/dumps/meta_time.nodump
- create mode 100755 tests/shell/testcases/listing/meta_time
-
-diff --git a/src/meta.c b/src/meta.c
-index b578d5e2..7846aefe 100644
---- a/src/meta.c
-+++ b/src/meta.c
-@@ -495,9 +495,16 @@ static void hour_type_print(const struct expr *expr, struct output_ctx *octx)
-
- /* Obtain current tm, so that we can add tm_gmtoff */
- ts = time(NULL);
-- if (ts != ((time_t) -1) && localtime_r(&ts, &cur_tm))
-- seconds = (seconds + cur_tm.tm_gmtoff) % SECONDS_PER_DAY;
-+ if (ts != ((time_t) -1) && localtime_r(&ts, &cur_tm)) {
-+ int32_t adj = seconds + cur_tm.tm_gmtoff;
-
-+ if (adj < 0)
-+ adj += SECONDS_PER_DAY;
-+ else if (adj >= SECONDS_PER_DAY)
-+ adj -= SECONDS_PER_DAY;
-+
-+ seconds = adj;
-+ }
- minutes = seconds / 60;
- seconds %= 60;
- hours = minutes / 60;
-diff --git a/tests/shell/testcases/listing/dumps/meta_time.nodump b/tests/shell/testcases/listing/dumps/meta_time.nodump
-new file mode 100644
-index 00000000..e69de29b
-diff --git a/tests/shell/testcases/listing/meta_time b/tests/shell/testcases/listing/meta_time
-new file mode 100755
-index 00000000..a9761998
---- /dev/null
-+++ b/tests/shell/testcases/listing/meta_time
-@@ -0,0 +1,52 @@
-+#!/bin/bash
-+
-+set -e
-+
-+TMP1=$(mktemp)
-+TMP2=$(mktemp)
-+
-+cleanup()
-+{
-+ rm -f "$TMP1"
-+ rm -f "$TMP2"
-+}
-+
-+check_decode()
-+{
-+ TZ=$1 $NFT list chain t c | grep meta > "$TMP2"
-+ diff -u "$TMP1" "$TMP2"
-+}
-+
-+trap cleanup EXIT
-+
-+$NFT -f - <<EOF
-+table t {
-+ chain c {
-+ }
-+}
-+EOF
-+
-+for i in $(seq -w 0 23); do
-+ TZ=UTC $NFT add rule t c meta hour "$i:00"-"$i:59"
-+done
-+
-+# Check decoding in UTC, this mirrors 1:1 what should have been added.
-+for i in $(seq 0 23); do
-+ printf "\t\tmeta hour \"%02d:%02d\"-\"%02d:%02d\"\n" $i 0 $i 59 >> "$TMP1"
-+done
-+
-+check_decode UTC
-+
-+printf "\t\tmeta hour \"%02d:%02d\"-\"%02d:%02d\"\n" 23 0 23 59 > "$TMP1"
-+for i in $(seq 0 22); do
-+ printf "\t\tmeta hour \"%02d:%02d\"-\"%02d:%02d\"\n" $i 0 $i 59 >> "$TMP1"
-+done
-+check_decode UTC+1
-+
-+printf "\t\tmeta hour \"%02d:%02d\"-\"%02d:%02d\"\n" 1 0 1 59 > "$TMP1"
-+for i in $(seq 2 23); do
-+ printf "\t\tmeta hour \"%02d:%02d\"-\"%02d:%02d\"\n" $i 0 $i 59 >> "$TMP1"
-+done
-+printf "\t\tmeta hour \"%02d:%02d\"-\"%02d:%02d\"\n" 0 0 0 59 >> "$TMP1"
-+
-+check_decode UTC-1
---
-cgit v1.2.3
-
diff --git a/packages/opennhrp/.gitignore b/packages/opennhrp/.gitignore
deleted file mode 100644
index cc02948b..00000000
--- a/packages/opennhrp/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-opennhrp/
diff --git a/packages/opennhrp/Jenkinsfile b/packages/opennhrp/Jenkinsfile
deleted file mode 100644
index 637bcecb..00000000
--- a/packages/opennhrp/Jenkinsfile
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2021 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'opennhrp',
- 'scmCommit': '613277f',
- 'scmUrl': 'https://git.code.sf.net/p/opennhrp/code',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('opennhrp', pkgList, null, true, "**/packages/opennhrp/**")
diff --git a/packages/opennhrp/build.sh b/packages/opennhrp/build.sh
deleted file mode 100755
index e12d4765..00000000
--- a/packages/opennhrp/build.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-SRC=opennhrp
-if [ ! -d $SRC ]; then
- echo "source directory $SRC does not exist!"
- echo "$ git clone https://git.code.sf.net/p/opennhrp/code opennhrp"
- exit 1
-fi
-cd $SRC
-
-INSTALL_DIR=debian
-if [ -d $INSTALL_DIR ]; then
- rm -rf $INSTALL_DIR
-fi
-
-make clean
-make
-
-install --directory debian/etc debian/usr/sbin
-install --mode 0644 etc/racoon-ph1dead.sh debian/etc
-install --mode 0644 etc/racoon-ph1down.sh debian/etc
-install --strip --mode 0755 nhrp/opennhrp debian/usr/sbin
-install --strip --mode 0755 nhrp/opennhrpctl debian/usr/sbin
-
-# dpkg rejects "'Version' field value 'v0.14-20-g613277f': version number does not start with digit",
-# so "cut" strips the leading character from the version string
-fpm --input-type dir --output-type deb --name opennhrp \
- --version $(git describe --always | cut -c2-) --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "NBMA Next Hop Resolution Protocol daemon" \
- --license "MIT" -C $INSTALL_DIR --package ..
diff --git a/packages/openvpn-otp/.gitignore b/packages/openvpn-otp/.gitignore
deleted file mode 100644
index 91d40208..00000000
--- a/packages/openvpn-otp/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-openvpn-otp/
diff --git a/packages/openvpn-otp/Jenkinsfile b/packages/openvpn-otp/Jenkinsfile
deleted file mode 100644
index 9e0de629..00000000
--- a/packages/openvpn-otp/Jenkinsfile
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2020-2021 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-// NOTE: we can build with -d as the libbpf dependency is installed manually
-// and not via a DEB package
-def pkgList = [
- ['name': 'openvpn-otp',
- 'scmCommit': 'master',
- 'scmUrl': 'https://github.com/evgeny-gridasov/openvpn-otp',
- 'buildCmd': 'cd ..; ./build-openvpn-otp.sh'],
-]
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('openvpn-otp', pkgList, null, true, "**/packages/openvpn-otp/**")
diff --git a/packages/openvpn-otp/build-openvpn-otp.sh b/packages/openvpn-otp/build-openvpn-otp.sh
deleted file mode 100755
index 6870db64..00000000
--- a/packages/openvpn-otp/build-openvpn-otp.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-set -e
-CWD=$(pwd)
-SRC=openvpn-otp
-
-if [ ! -d ${SRC} ]; then
- echo "source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-for pkt in debhelper libssl-dev openvpn
-do
- # Test inside "if" so that "set -e" does not abort the script on a missing
- # package before we can report which one is required
- if ! dpkg -s $pkt >/dev/null 2>&1; then
- echo "Package $pkt not installed - required"
- exit 1
- fi
-done
-
-# Build instructions as per https://github.com/evgeny-gridasov/openvpn-otp/blob/master/README.md
-cd ${SRC}
-./autogen.sh
-./configure --prefix=/usr
-make
-
-# install
-mkdir -p usr/lib/openvpn
-cp src/.libs/openvpn-otp.so usr/lib/openvpn
-
-fpm --input-type dir --output-type deb --name openvpn-otp \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "OpenVPN OTP Authentication support." \
- --depends openvpn --architecture $(dpkg --print-architecture) \
- --version $(git describe --tags --always | cut -c2-) --deb-compression gz usr
-
-cp *.deb ${CWD}
diff --git a/packages/owamp/.gitignore b/packages/owamp/.gitignore
deleted file mode 100644
index 0826a5ef..00000000
--- a/packages/owamp/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-owamp/
diff --git a/packages/owamp/Jenkinsfile b/packages/owamp/Jenkinsfile
deleted file mode 100644
index 995c8c4c..00000000
--- a/packages/owamp/Jenkinsfile
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2022 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-// NOTE: we can build with -d as the libbpf dependency is installed manually
-// and not via a DEB package
-def pkgList = [
- ['name': 'owamp',
- 'scmCommit': 'v4.4.6',
- 'scmUrl': 'https://github.com/perfsonar/owamp',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('owamp', pkgList, null, true, "**/packages/owamp/**")
diff --git a/packages/owamp/build.sh b/packages/owamp/build.sh
deleted file mode 100755
index 8c99f02d..00000000
--- a/packages/owamp/build.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh -x
-CWD=$(pwd)
-set -e
-
-SRC=owamp
-
-if [ ! -d ${SRC} ]; then
- echo "source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-cd ${SRC}
-echo "I: Retrieve version information from Git"
-# Build owamp-client owamp-server twamp-client twamp-server
-echo "I: Build VyOS owamp Packages"
-dpkg-buildpackage -us -uc -tc -b
diff --git a/packages/pam_tacplus/.gitignore b/packages/pam_tacplus/.gitignore
deleted file mode 100644
index 4c18b4a2..00000000
--- a/packages/pam_tacplus/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-pam_tacplus/
-pam_tacplus-debian/
diff --git a/packages/pam_tacplus/Jenkinsfile b/packages/pam_tacplus/Jenkinsfile
deleted file mode 100644
index 89a5d529..00000000
--- a/packages/pam_tacplus/Jenkinsfile
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2022 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'pam_tacplus-debian',
- 'scmCommit': '50c6fd7',
- 'scmUrl': 'https://github.com/kravietz/pam_tacplus-debian',
- 'buildCmd': '/bin/true'],
- ['name': 'pam_tacplus',
- 'scmCommit': '4f91b0d',
- 'scmUrl': 'https://github.com/kravietz/pam_tacplus',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('pam_tacplus', pkgList, null, true, "**/packages/pam_tacplus/**")
diff --git a/packages/pam_tacplus/build.sh b/packages/pam_tacplus/build.sh
deleted file mode 100755
index 2aa69014..00000000
--- a/packages/pam_tacplus/build.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-SRC=pam_tacplus
-if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-cd ${SRC}
-cp -a ../pam_tacplus-debian debian
-rm -f debian/compat
-
-sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"
-dpkg-buildpackage -uc -us -tc -b -d
diff --git a/packages/pmacct/.gitignore b/packages/pmacct/.gitignore
deleted file mode 100644
index c2274717..00000000
--- a/packages/pmacct/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-pmacct/
diff --git a/packages/pmacct/Jenkinsfile b/packages/pmacct/Jenkinsfile
deleted file mode 100644
index 19af2c9b..00000000
--- a/packages/pmacct/Jenkinsfile
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2023 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def package_name = 'pmacct'
-// "sudo apt-get remove git -y" is necessary for solving this issue https://vyos.dev/T5663
-def pkgList = [
- ['name': "${package_name}",
- 'scmCommit': 'debian/1.7.7-1',
- 'scmUrl': 'https://salsa.debian.org/debian/pmacct.git',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; sudo apt-get remove git -y; ../build.py'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage("${package_name}", pkgList, null, true, "**/packages/pmacct/**")
diff --git a/packages/pmacct/build.py b/packages/pmacct/build.py
deleted file mode 100755
index 0f666392..00000000
--- a/packages/pmacct/build.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python3
-
-from pathlib import Path
-from shutil import copy as copy_file
-from subprocess import run
-
-
-# copy patches
-def apply_deb_patches() -> None:
- """Apply patches to sources directory
- """
- patches_dir = Path('../patches')
- current_dir: str = Path.cwd().as_posix()
- if patches_dir.exists():
- patches_list = list(patches_dir.iterdir())
- patches_list.sort()
- series_file = Path(f'{current_dir}/debian/patches/series')
- series_data = ''
- for patch_file in patches_list:
- print(f'Applying patch: {patch_file.name}')
- copy_file(patch_file, f'{current_dir}/debian/patches/')
- if series_file.exists():
- series_data: str = series_file.read_text()
- series_data = f'{series_data}\n{patch_file.name}'
- series_file.write_text(series_data)
-
-
-def build_package() -> bool:
- """Build a package
-
- Returns:
- bool: build status
- """
- build_cmd: list[str] = ['dpkg-buildpackage', '-uc', '-us', '-tc', '-b']
- build_status: int = run(build_cmd).returncode
-
- if build_status:
- return False
- return True
-
-
-# build a package
-if __name__ == '__main__':
- apply_deb_patches()
-
- if not build_package():
- exit(1)
-
- exit()
diff --git a/packages/podman/.gitignore b/packages/podman/.gitignore
deleted file mode 100644
index 1055abad..00000000
--- a/packages/podman/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-podman/
diff --git a/packages/podman/Jenkinsfile b/packages/podman/Jenkinsfile
deleted file mode 100644
index cc787565..00000000
--- a/packages/podman/Jenkinsfile
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2024 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'podman',
- 'scmCommit': 'v4.9.5',
- 'scmUrl': 'https://github.com/containers/podman',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('podman', pkgList, null, true, "**/packages/podman/**")
diff --git a/packages/podman/build.sh b/packages/podman/build.sh
deleted file mode 100755
index 251f8a48..00000000
--- a/packages/podman/build.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-export PATH=/opt/go/bin:$PATH
-
-SRC=podman
-if [ ! -d $SRC ]; then
- echo "source directory $SRC does not exist!"
- exit 1
-fi
-
-sudo apt-get install -y libseccomp-dev libgpgme-dev
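-# development headers needed to compile podman: libseccomp (syscall
-# filtering) and GPGME (image signature verification)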
-
-cd $SRC
-
-echo "I: installing dependencies"
-make install.tools
-echo "I: building podman"
-make podman-release
-
-tar xf podman-release-$(dpkg --print-architecture).tar.gz
-# retrieve version number from podman archive folder: podman-v4.9.5/
-# remove leading podman string
-VERSION=$(ls -d podman-v* | cut -c9-)
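-# e.g. folder "podman-v4.9.5" yields VERSION=4.9.5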
-
-fpm --input-type dir --output-type deb --name podman \
- --version $VERSION --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "Engine to run OCI-based containers in Pods" \
- --depends conmon --depends crun --depends netavark --depends libgpgme11 \
- --depends fuse-overlayfs --depends golang-github-containers-common \
- --license "Apache License 2.0" -C podman-v$VERSION --package ..
-
diff --git a/packages/pyhumps/.gitignore b/packages/pyhumps/.gitignore
deleted file mode 100644
index 0cf480fa..00000000
--- a/packages/pyhumps/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-humps/
diff --git a/packages/pyhumps/Jenkinsfile b/packages/pyhumps/Jenkinsfile
deleted file mode 100644
index 523ffdde..00000000
--- a/packages/pyhumps/Jenkinsfile
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2022 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
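-// stdeb's bdist_deb command builds a Debian package straight from the Python
-// setup.py project; the resulting .deb is copied out of deb_dist/ afterwards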
-def pkgList = [
- ['name': 'humps',
- 'scmCommit': 'v3.8.0',
- 'scmUrl': 'https://github.com/nficano/humps.git',
- 'buildCmd': 'python setup.py --command-packages=stdeb.command bdist_deb; cp deb_dist/*.deb ..'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('humps', pkgList, null, false, "**/packages/pyhumps/**")
diff --git a/packages/radvd/.gitignore b/packages/radvd/.gitignore
deleted file mode 100644
index 142acc2d..00000000
--- a/packages/radvd/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-radvd/
-*.deb
diff --git a/packages/radvd/Jenkinsfile b/packages/radvd/Jenkinsfile
deleted file mode 100644
index 6b4f4a0b..00000000
--- a/packages/radvd/Jenkinsfile
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2024 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'radvd',
- 'scmCommit': 'f2de4764559',
- 'scmUrl': 'https://github.com/radvd-project/radvd',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('radvd', pkgList, null, true, "**/packages/radvd/**")
diff --git a/packages/radvd/build.sh b/packages/radvd/build.sh
deleted file mode 100755
index 36057e27..00000000
--- a/packages/radvd/build.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-SRC=radvd
-if [ ! -d $SRC ]; then
- echo "source directory $SRC does not exist!"
- echo "$ git clone https://github.com/radvd-project/radvd"
- exit 1
-fi
-cd $SRC
-
-INSTALL_DIR=debian
-if [ -d $INSTALL_DIR ]; then
- rm -rf $INSTALL_DIR
-fi
-
-./autogen.sh
-./configure
-make
-
-install --directory debian/lib/systemd/system debian/usr/sbin
-install --mode 0644 radvd.service debian/lib/systemd/system
-install --strip --mode 0755 radvd debian/usr/sbin
-
-# dpkg rejects "'Version' field value 'v0.14-20-g613277f': version number does not start with digit",
-# so "cut" strips the leading character from the version string
-fpm --input-type dir --output-type deb --name radvd \
- --version $(git describe --always | cut -c2- | tr _ -) --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "RADVD router advertisement daemon" \
- --license "RADVD" -C $INSTALL_DIR --package ..
diff --git a/packages/strongswan/.gitignore b/packages/strongswan/.gitignore
deleted file mode 100644
index c6b0d6f0..00000000
--- a/packages/strongswan/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-strongswan/
diff --git a/packages/strongswan/Jenkinsfile b/packages/strongswan/Jenkinsfile
deleted file mode 100644
index d79941d7..00000000
--- a/packages/strongswan/Jenkinsfile
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2021-2023 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'strongswan',
- 'scmCommit': 'debian/5.9.11-2',
- 'scmUrl': 'https://salsa.debian.org/debian/strongswan.git',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('strongswan', pkgList, null, true, "**/packages/strongswan/**")
diff --git a/packages/strongswan/build.sh b/packages/strongswan/build.sh
deleted file mode 100755
index c0dab4d5..00000000
--- a/packages/strongswan/build.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-# Explicitly listed build dependencies, as auto-generating and installing them would fail :/
-sudo apt-get install -y bison \
- bzip2 \
- debhelper-compat \
- dh-apparmor \
- dpkg-dev \
- flex \
- gperf \
- libiptc-dev \
- libcap-dev \
- libcurl3-dev \
- libgcrypt20-dev \
- libgmp3-dev \
- libkrb5-dev \
- libldap2-dev \
- libnm-dev \
- libpam0g-dev \
- libsqlite3-dev \
- libssl-dev \
- libsystemd-dev \
- libtool \
- libtss2-dev \
- libxml2-dev \
- pkg-config \
- po-debconf \
- systemd \
- tzdata
-
-SRC=strongswan
-if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-PATCH_DIR=${CWD}/patches
-if [ -d $PATCH_DIR ]; then
- for patch in $(ls ${PATCH_DIR})
- do
- echo "I: Apply patch: ${patch} to main repository"
- cp ${PATCH_DIR}/${patch} ${SRC}/debian/patches/
- echo ${patch} >> ${SRC}/debian/patches/series
- done
-fi
-
-cd ${SRC}
-
-echo "I: bump version"
-dch -v "5.9.11-2+vyos0" "Patchset for DMVPN support" -b
-
-echo "I: Build Debian Package"
-dpkg-buildpackage -uc -us -tc -b -d
diff --git a/packages/telegraf/.gitignore b/packages/telegraf/.gitignore
deleted file mode 100644
index 7e284c4f..00000000
--- a/packages/telegraf/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-telegraf/
diff --git a/packages/telegraf/Jenkinsfile b/packages/telegraf/Jenkinsfile
deleted file mode 100644
index 873d3842..00000000
--- a/packages/telegraf/Jenkinsfile
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2020-2021 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-// NOTE: we can build with -d as the libbpf dependency is installed manually
-// and not via a DEB package
-def pkgList = [
- ['name': 'telegraf',
- 'scmCommit': 'v1.28.3',
- 'scmUrl': 'https://github.com/influxdata/telegraf.git',
- 'buildCmd': 'cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('telegraf', pkgList, null, false, "**/packages/telegraf/**")
diff --git a/packages/telegraf/build.sh b/packages/telegraf/build.sh
deleted file mode 100755
index 2ba511d0..00000000
--- a/packages/telegraf/build.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-BUILD_ARCH=$(dpkg-architecture -qDEB_TARGET_ARCH)
-
-SRC=telegraf
-if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-PLUGIN_DIR=${CWD}/plugins
-
-echo "I: Selecting Input plugins"
-cp ${PLUGIN_DIR}/inputs/all/all.go ${SRC}/plugins/inputs/all/all.go
-
-echo "I: Selecting Output plugins"
-cp ${PLUGIN_DIR}/outputs/all/all.go ${SRC}/plugins/outputs/all/all.go
-
-echo "I: Build Debian ${BUILD_ARCH} package"
-cd ${SRC}
-export PATH=/opt/go/bin:$PATH
-
-# Generate default telegraf config
-go run ./cmd/telegraf config > etc/telegraf.conf
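-# LDFLAGS=-w omits DWARF debug information from the Go binary to reduce its size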
-LDFLAGS=-w make "${BUILD_ARCH}.deb"
diff --git a/packages/telegraf/plugins/inputs/all/all.go b/packages/telegraf/plugins/inputs/all/all.go
deleted file mode 100644
index 8265681b..00000000
--- a/packages/telegraf/plugins/inputs/all/all.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package all
-
-import (
- //Blank imports for plugins to register themselves
- _ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue"
- _ "github.com/influxdata/telegraf/plugins/inputs/bond"
- _ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
- _ "github.com/influxdata/telegraf/plugins/inputs/chrony"
- _ "github.com/influxdata/telegraf/plugins/inputs/conntrack"
- _ "github.com/influxdata/telegraf/plugins/inputs/cpu"
- _ "github.com/influxdata/telegraf/plugins/inputs/disk"
- _ "github.com/influxdata/telegraf/plugins/inputs/diskio"
- _ "github.com/influxdata/telegraf/plugins/inputs/disque"
- _ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
- _ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
- _ "github.com/influxdata/telegraf/plugins/inputs/docker"
- _ "github.com/influxdata/telegraf/plugins/inputs/docker_log"
- _ "github.com/influxdata/telegraf/plugins/inputs/ethtool"
- _ "github.com/influxdata/telegraf/plugins/inputs/exec"
- _ "github.com/influxdata/telegraf/plugins/inputs/execd"
- _ "github.com/influxdata/telegraf/plugins/inputs/file"
- _ "github.com/influxdata/telegraf/plugins/inputs/filecount"
- _ "github.com/influxdata/telegraf/plugins/inputs/filestat"
- _ "github.com/influxdata/telegraf/plugins/inputs/fireboard"
- _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
- _ "github.com/influxdata/telegraf/plugins/inputs/hugepages"
- _ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
- _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener"
- _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener"
- _ "github.com/influxdata/telegraf/plugins/inputs/intel_pmu"
- _ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat"
- _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt"
- _ "github.com/influxdata/telegraf/plugins/inputs/internal"
- _ "github.com/influxdata/telegraf/plugins/inputs/internet_speed"
- _ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
- _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
- _ "github.com/influxdata/telegraf/plugins/inputs/ipset"
- _ "github.com/influxdata/telegraf/plugins/inputs/iptables"
- _ "github.com/influxdata/telegraf/plugins/inputs/ipvs"
- _ "github.com/influxdata/telegraf/plugins/inputs/kernel"
- _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat"
- _ "github.com/influxdata/telegraf/plugins/inputs/mdstat"
- _ "github.com/influxdata/telegraf/plugins/inputs/mem"
- _ "github.com/influxdata/telegraf/plugins/inputs/net"
- _ "github.com/influxdata/telegraf/plugins/inputs/netstat"
- _ "github.com/influxdata/telegraf/plugins/inputs/nstat"
- _ "github.com/influxdata/telegraf/plugins/inputs/ping"
- _ "github.com/influxdata/telegraf/plugins/inputs/powerdns_recursor"
- _ "github.com/influxdata/telegraf/plugins/inputs/processes"
- _ "github.com/influxdata/telegraf/plugins/inputs/procstat"
- _ "github.com/influxdata/telegraf/plugins/inputs/sensors"
- _ "github.com/influxdata/telegraf/plugins/inputs/sflow"
- _ "github.com/influxdata/telegraf/plugins/inputs/slab"
- _ "github.com/influxdata/telegraf/plugins/inputs/smart"
- _ "github.com/influxdata/telegraf/plugins/inputs/snmp"
- _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
- _ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap"
- _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
- _ "github.com/influxdata/telegraf/plugins/inputs/socketstat"
- _ "github.com/influxdata/telegraf/plugins/inputs/syslog"
- _ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
- _ "github.com/influxdata/telegraf/plugins/inputs/system"
- _ "github.com/influxdata/telegraf/plugins/inputs/systemd_units"
- _ "github.com/influxdata/telegraf/plugins/inputs/tail"
- _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
- _ "github.com/influxdata/telegraf/plugins/inputs/temp"
- _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
- _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
- _ "github.com/influxdata/telegraf/plugins/inputs/wireguard"
- _ "github.com/influxdata/telegraf/plugins/inputs/wireless"
- _ "github.com/influxdata/telegraf/plugins/inputs/x509_cert"
-)
diff --git a/packages/telegraf/plugins/outputs/all/all.go b/packages/telegraf/plugins/outputs/all/all.go
deleted file mode 100644
index 49f7e63d..00000000
--- a/packages/telegraf/plugins/outputs/all/all.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package all
-
-import (
- //Blank imports for plugins to register themselves
- _ "github.com/influxdata/telegraf/plugins/outputs/azure_data_explorer"
- _ "github.com/influxdata/telegraf/plugins/outputs/http"
- _ "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2"
- _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client"
-)
diff --git a/packages/waagent/Jenkinsfile b/packages/waagent/Jenkinsfile
deleted file mode 100644
index 79415d71..00000000
--- a/packages/waagent/Jenkinsfile
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2024 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def package_name = 'waagent'
-def pkgList = [
- ['name': "${package_name}",
- 'scmCommit': 'debian/2.9.1.1-2',
- 'scmUrl': 'https://salsa.debian.org/cloud-team/waagent.git',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; sudo apt-get install --yes --no-install-recommends dpkg-source-gitarchive; ../build.py'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage("${package_name}", pkgList, null, false, "**/packages/waagent/**")
diff --git a/packages/waagent/build.py b/packages/waagent/build.py
deleted file mode 100755
index 04f4791b..00000000
--- a/packages/waagent/build.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python3
-
-from pathlib import Path
-from shutil import copy as copy_file
-from subprocess import run
-
-
-# copy patches
-def apply_deb_patches() -> None:
- """Apply patches to sources directory
- """
- patches_dir = Path('../patches')
- current_dir: str = Path.cwd().as_posix()
- if patches_dir.exists():
- patches_list = list(patches_dir.iterdir())
- patches_list.sort()
- Path(f'{current_dir}/debian/patches').mkdir(parents=True, exist_ok=True)
- series_file = Path(f'{current_dir}/debian/patches/series')
- series_data = ''
- for patch_file in patches_list:
- print(f'Applying patch: {patch_file.name}')
- copy_file(patch_file, f'{current_dir}/debian/patches/')
- if series_file.exists():
- series_data: str = series_file.read_text()
- series_data = f'{series_data}\n{patch_file.name}'
- series_file.write_text(series_data)
-
-
-def build_package() -> bool:
- """Build a package
-
- Returns:
- bool: build status
- """
- build_cmd: list[str] = ['dpkg-buildpackage', '-uc', '-us', '-tc', '-b']
- build_status: int = run(build_cmd).returncode
-
- if build_status:
- return False
- return True
-
-
-# build a package
-if __name__ == '__main__':
- apply_deb_patches()
-
- if not build_package():
- exit(1)
-
- exit()
diff --git a/packages/wide-dhcpv6/.gitignore b/packages/wide-dhcpv6/.gitignore
deleted file mode 100644
index 5fd2ad64..00000000
--- a/packages/wide-dhcpv6/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-wide-dhcpv6/
diff --git a/packages/wide-dhcpv6/Jenkinsfile b/packages/wide-dhcpv6/Jenkinsfile
deleted file mode 100644
index 83954d97..00000000
--- a/packages/wide-dhcpv6/Jenkinsfile
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2020-2021 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-@NonCPS
-
-// Using a version specifier library, use 'current' branch. The underscore (_)
-// is not a typo! You need this underscore if the line immediately after the
-// @Library annotation is not an import statement!
-@Library('vyos-build@current')_
-
-def pkgList = [
- ['name': 'wide-dhcpv6',
- 'scmCommit': 'debian/20080615-23',
- 'scmUrl': 'https://salsa.debian.org/debian/wide-dhcpv6',
- 'buildCmd': 'sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"; cd ..; ./build.sh'],
-]
-
-// Start package build using library function from https://github.com/vyos/vyos-build
-buildPackage('wide-dhcpv6', pkgList, null, true, "**/packages/wide-dhcpv6/**")
diff --git a/packages/wide-dhcpv6/build.sh b/packages/wide-dhcpv6/build.sh
deleted file mode 100755
index 87cab92b..00000000
--- a/packages/wide-dhcpv6/build.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-set -e
-
-SRC=wide-dhcpv6
-if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
- exit 1
-fi
-
-PATCH_DIR=${CWD}/patches
-if [ -d $PATCH_DIR ]; then
- for patch in $(ls ${PATCH_DIR})
- do
- echo "I: Apply patch: ${patch} to main repository"
- cp ${PATCH_DIR}/${patch} ${SRC}/debian/patches/
- echo ${patch} >> ${SRC}/debian/patches/series
- done
-fi
-
-cd ${SRC}
-echo "I: Build Debian Package"
-dpkg-buildpackage -uc -us -tc -b
diff --git a/scripts/check-qemu-install b/scripts/check-qemu-install
index dfb772d8..ab6e1b1f 100755
--- a/scripts/check-qemu-install
+++ b/scripts/check-qemu-install
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2019-2024, VyOS maintainers and contributors
+# Copyright (C) 2019-2025, VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -44,6 +44,7 @@ import logging
import re
import tomli
import shutil
+import json
from io import BytesIO
from datetime import datetime
@@ -94,12 +95,24 @@ parser.add_argument('--sbtest', help='Execute Secure Boot tests',
action='store_true', default=False)
parser.add_argument('--qemu-cmd', help='Only generate QEMU launch command',
action='store_true', default=False)
+parser.add_argument('--cpu', help='Set number of QEMU vCPUs', type=int, default=2)
+parser.add_argument('--memory', help='Set QEMU memory in GiB', type=int, default=4)
args = parser.parse_args()
+# This is what we requested the build to contain
with open('data/defaults.toml', 'rb') as f:
vyos_defaults = tomli.load(f)
+# This is what we got from the build
+manifest_file = 'build/manifest.json'
+if os.path.isfile(manifest_file):
+ with open(manifest_file, 'rb') as f:
+ manifest = json.load(f)
+
+ vyos_version = manifest['build_config']['version']
+ vyos_codename = manifest['build_config']['release_train']
+
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
@@ -121,13 +134,6 @@ class StreamToLogger(object):
def flush(self):
pass
-def get_half_cpus():
- """ return 1/2 of the numbers of available CPUs """
- cpu = os.cpu_count()
- if cpu > 1:
- cpu /= 2
- return int(cpu)
-
OVMF_CODE = '/usr/share/OVMF/OVMF_CODE_4M.secboot.fd'
OVMF_VARS_TMP = args.disk.replace('.img', '.efivars')
if args.sbtest:
@@ -162,17 +168,15 @@ def get_qemu_cmd(name, enable_uefi, disk_img, raid=None, iso_img=None, tpm=False
f' -device ahci,id=achi0' \
f' -device ide-cd,bus=achi0.0,drive=drive-cd1,id=cd1,bootindex=10'
- # test using half of the available CPUs on the system
- cpucount = get_half_cpus()
-
- macbase = '52:54:00:00:00'
+ # RFC 7042 section 2.1.2: MAC address range reserved for documentation
+ macbase = '00:00:5E:00:53'
cmd = f'qemu-system-x86_64 \
-name "{name}" \
- -smp {cpucount},sockets=1,cores={cpucount},threads=1 \
+ -smp {args.cpu},sockets=1,cores={args.cpu},threads=1 \
-cpu host \
-machine {machine},accel=kvm \
{uefi} \
- -m 4G \
+ -m {args.memory}G \
-vga none \
-nographic \
{vga} {vnc}\
@@ -357,7 +361,7 @@ if args.qemu_cmd:
os.system(tmp)
exit(0)
-test_timeout = 3 *3600 # 3 hours (in seconds)
+test_timeout = 5 * 3600 # 5 hours (in seconds)
tpm_process = None
try:
# Start TPM emulator
@@ -385,14 +389,26 @@ try:
toggleUEFISecureBoot(c)
try:
- c.expect('Automatic boot in', timeout=10)
- c.sendline('')
+ c.expect('Welcome to GRUB', timeout=10)
+ c.send(KEY_DOWN)
+ c.send(KEY_DOWN)
+ c.send(KEY_RETURN)
except pexpect.TIMEOUT:
log.warning('Did not find GRUB countdown window, ignoring')
loginVM(c, log)
#################################################
+ # Check for no private key contents within the image
+ #################################################
+ msg = 'Found private key - bailing out'
+ c.sendline(f'if sudo grep -rq "BEGIN PRIVATE KEY" /var/lib/shim-signed/mok; then echo {msg}; exit 1; fi')
+ tmp = c.expect([f'\n{msg}', op_mode_prompt])
+ if tmp == 0:
+ log.error(msg)
+ exit(1)
+
+ #################################################
# Installing into VyOS system
#################################################
log.info('Starting installer')
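The private-key scan added above uses a standard pexpect idiom: run one shell command that prints a unique marker only on failure, then expect() a list where index 0 is the marker and index 1 is the normal prompt, so the returned index tells you which arrived first. A stripped-down sketch against a plain local shell (the prompt pattern and the /tmp/mok path are illustrative):

# Sketch: marker-based failure detection with pexpect
import pexpect

child = pexpect.spawn('bash', ['--norc'], encoding='utf-8')
prompt = r'\$'
child.expect(prompt)

msg = 'Found private key - bailing out'
child.sendline(f'if grep -rq "BEGIN PRIVATE KEY" /tmp/mok 2>/dev/null; then echo {msg}; fi')
idx = child.expect([f'\n{msg}', prompt])  # 0 -> marker seen, 1 -> clean prompt
if idx == 0:
    raise SystemExit(msg)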
@@ -544,6 +560,33 @@ try:
c.sendline('systemd-detect-virt')
c.expect('kvm')
c.expect(op_mode_prompt)
+ c.sendline('show system cpu')
+ c.expect(op_mode_prompt)
+ c.sendline('show system memory')
+ c.expect(op_mode_prompt)
+ c.sendline('show version all | grep -e "vpp" -e "vyos-1x"')
+ c.expect(op_mode_prompt)
+
+ #################################################
+ # Verify /etc/os-release via lsb_release
+ #################################################
+ c.sendline('lsb_release --short --id 2>/dev/null')
+ c.expect('VyOS')
+ if os.path.isfile(manifest_file):
+ c.sendline('lsb_release --short --release 2>/dev/null')
+ c.expect(vyos_version)
+ c.sendline('lsb_release --short --codename 2>/dev/null')
+ c.expect(vyos_codename)
+
+ # Ensure ephemeral key is loaded
+ vyos_kernel_key = 'VyOS build time autogenerated kernel key'
+ c.sendline(f'show log kernel | match "{vyos_kernel_key}"')
+ c.expect(f'.*{vyos_kernel_key}.*')
+ c.expect(op_mode_prompt)
+
+ # Inform smoketest about this environment
+ c.sendline('touch /tmp/vyos.smoketests.hint')
+ c.expect(op_mode_prompt)
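The lsb_release checks in this hunk close the loop between build and test: the manifest written at build time records the requested version and release train, and the booted system must report the same values. In isolation (the manifest content below is invented for illustration):

# Sketch: validate image-reported values against build/manifest.json
import json

manifest = json.loads('{"build_config": {"version": "1.5-rolling-202501", "release_train": "current"}}')
vyos_version = manifest['build_config']['version']
vyos_codename = manifest['build_config']['release_train']

# In the real test these two come from `lsb_release --short --release/--codename` inside the VM
reported_release, reported_codename = '1.5-rolling-202501', 'current'
assert reported_release == vyos_version and reported_codename == vyos_codename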
#################################################
# Executing test-suite
@@ -558,7 +601,7 @@ try:
def verify_config():
# Verify encrypted config is loaded
c.sendline('show config commands | cat')
- c.expect('set system option performance \'latency\'')
+ c.expect('set system option performance \'network-latency\'')
c.expect('set system option reboot-on-panic')
c.expect(op_mode_prompt)
@@ -595,7 +638,7 @@ try:
log.info('Adding nodes for encrypted config test')
c.sendline('configure')
c.expect(cfg_mode_prompt)
- c.sendline('set system option performance latency')
+ c.sendline('set system option performance network-latency')
c.expect(cfg_mode_prompt)
c.sendline('set system option reboot-on-panic')
c.expect(cfg_mode_prompt)
@@ -755,7 +798,7 @@ try:
if args.match:
# Remove tests that we don't want to run
match_str = '-o '.join([f'-name "test_*{name}*.py" ' for name in args.match.split("|")]).strip()
- c.sendline(f'sudo find /usr/libexec/vyos/tests/smoke/cli/test_* -type f ! \( {match_str} \) -delete')
+ c.sendline(f'sudo find /usr/libexec/vyos/tests/smoke -maxdepth 2 -type f -name test_* ! \( {match_str} \) -delete')
c.expect(op_mode_prompt)
if args.no_interfaces:
# remove interface tests as they consume a lot of time
@@ -846,7 +889,7 @@ except pexpect.exceptions.ExceptionPexpect:
EXCEPTION = 1
except Exception:
- log.error('Unknown error occured while VyOS!')
+    log.error('Unknown error occurred!')
traceback.print_exc()
EXCEPTION = 1
diff --git a/scripts/image-build/build-vyos-image b/scripts/image-build/build-vyos-image
index 80b4d61d..3275c5de 100755
--- a/scripts/image-build/build-vyos-image
+++ b/scripts/image-build/build-vyos-image
@@ -25,6 +25,7 @@ import copy
import uuid
import glob
import json
+import base64
import shutil
import argparse
import datetime
@@ -62,7 +63,7 @@ except Exception as e:
# Checkout vyos-1x under build directory
try:
branch_name = build_defaults['vyos_branch']
- url_vyos_1x = 'https://github.com/vyos/vyos-1x'
+ url_vyos_1x = os.getenv('VYOS1X_REPO_URL', default='https://github.com/vyos/vyos-1x')
path_vyos_1x = os.path.join(defaults.BUILD_DIR, 'vyos-1x')
try:
repo_vyos_1x = git.Repo.clone_from(url_vyos_1x, path_vyos_1x, no_checkout=True)
@@ -191,9 +192,10 @@ if __name__ == "__main__":
'pbuilder-debian-mirror': ('Debian repository mirror for pbuilder env bootstrap', None),
'vyos-mirror': ('VyOS package mirror', None),
'build-type': ('Build type, release or development', lambda x: x in ['release', 'development']),
- 'version': ('Version number (release builds only)', None),
+ 'version': ('Version string', None),
'build-comment': ('Optional build comment', None),
- 'build-hook-opts': ('Custom options for the post-build hook', None)
+ 'build-hook-opts': ('Custom options for the post-build hook', None),
+ 'bootloaders': ('Bootloaders to include in the image', None)
}
# Create the option parser
@@ -259,28 +261,24 @@ if __name__ == "__main__":
pre_build_config = merge_defaults(args, defaults=pre_build_config, skip_none=True)
# Some fixup for mirror settings.
- # The idea is: if --debian-mirror is specified but --pbuilder-debian-mirror is not,
- # use the --debian-mirror value for both lb and pbuilder bootstrap
- if pre_build_config['debian_mirror'] is None or pre_build_config['debian_security_mirror'] is None:
- print("E: debian_mirror and debian_security_mirror cannot be empty")
+ # The idea is: if --debian-mirror is specified
+ # but --pbuilder-debian-mirror or --debian-security-mirror are not,
+ # use the --debian-mirror value for those
+ if pre_build_config['debian_mirror'] is None:
+ print("E: debian_mirror must be specified")
sys.exit(1)
if pre_build_config['pbuilder_debian_mirror'] is None:
- args['pbuilder_debian_mirror'] = pre_build_config['pbuilder_debian_mirror'] = pre_build_config['debian_mirror']
-
- # Version can only be set for release builds,
- # for dev builds it hardly makes any sense
- if pre_build_config['build_type'] == 'development':
- if args['version'] is not None:
- print("E: Version can only be set for release builds")
- print("Use --build-type=release option if you want to set version number")
- sys.exit(1)
+ pre_build_config['pbuilder_debian_mirror'] = pre_build_config['debian_mirror']
+
+ if pre_build_config['debian_security_mirror'] is None:
+ pre_build_config['debian_security_mirror'] = pre_build_config['debian_mirror']
# Validate characters in version name
- if 'version' in args and args['version'] != None:
+ if args.get('version'):
allowed = string.ascii_letters + string.digits + '.' + '-' + '+'
if not set(args['version']) <= set(allowed):
- print(f'Version contained illegal character(s), allowed: {allowed}')
+ print(f'Version string contains illegal character(s), allowed: {allowed}')
sys.exit(1)
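The validation above is a set-inclusion test: the version string passes iff every character it contains belongs to the allowed alphabet. Standalone:

# Sketch: set-based character whitelist for version strings
import string

allowed = set(string.ascii_letters + string.digits + '.' + '-' + '+')

def is_valid_version(version: str) -> bool:
    return set(version) <= allowed  # subset test: no character falls outside the whitelist

print(is_valid_version('1.5-rolling+test'))  # True
print(is_valid_version('1.5 beta'))          # False: the space is not allowed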
## Inject some useful hardcoded options
@@ -306,6 +304,11 @@ if __name__ == "__main__":
build_config = merge_defaults(flavor_config, defaults=build_config)
build_config = merge_defaults(args, defaults=build_config, skip_none=True)
+ # If Debian mirror is specified explicitly but Debian security mirror is not,
+ # assume that the user wants to use that mirror for security updates as well.
+ if (args['debian_mirror'] is not None) and (args['debian_security_mirror'] is None):
+ build_config['debian_security_mirror'] = args['debian_mirror']
+
## Rename and merge some fields for simplicity
## E.g. --custom-packages is for the user, but internally
## it's added to the same package list as everything else
@@ -326,6 +329,10 @@ if __name__ == "__main__":
print("E: image format is not specified in the build flavor file")
sys.exit(1)
+ ## Override bootloaders if specified
+ if args['bootloaders'] is not None:
+ build_config['bootloaders'] = args['bootloaders']
+
## Add default boot settings if needed
if "boot_settings" not in build_config:
build_config["boot_settings"] = defaults.boot_settings
@@ -337,6 +344,17 @@ if __name__ == "__main__":
if type(build_config["image_format"]) != list:
build_config["image_format"] = [ build_config["image_format"] ]
+    ## If the user didn't explicitly specify what extensions the build artifacts should have,
+ ## assume that the list is the same as image formats.
+ ## One case when it's not the same is when a custom build hook is used
+ ## to build a format that our build script doesn't support natively.
+ if not has_nonempty_key(build_config, "artifact_format"):
+ build_config["artifact_format"] = build_config["image_format"]
+ else:
+ # If the option is there, also make it list if it's a scalar
+ if type(build_config["artifact_format"]) != list:
+ build_config["artifact_format"] = [ build_config["artifact_format"] ]
+
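Both image_format and artifact_format go through the same scalar-to-list normalization so downstream code can always iterate. The pattern, extracted (using isinstance rather than the script's type() comparison, with the same effect):

# Sketch: accept a scalar or a list, always return a list
def as_list(value):
    return value if isinstance(value, list) else [value]

print(as_list('iso'))             # ['iso']
print(as_list(['raw', 'qcow2']))  # already a list, returned unchanged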
## Dump the complete config if the user enabled debug mode
if debug:
import json
@@ -349,6 +367,11 @@ if __name__ == "__main__":
shutil.copytree("data/live-build-config/", lb_config_dir)
os.makedirs(lb_config_dir, exist_ok=True)
+ ## Secure Boot - Copy public Keys to image
+ sb_certs = 'data/certificates'
+ if os.path.isdir(sb_certs):
+ shutil.copytree(sb_certs, f'{lb_config_dir}/includes.chroot/var/lib/shim-signed/mok')
+
    # Switch to the build directory; this is crucial for the live-build work
    # because the effective build config files etc. are there.
#
@@ -401,8 +424,10 @@ if __name__ == "__main__":
build_git = ""
git_branch = ""
- # Create the build version string
- if build_config['build_type'] == 'development':
+ # Create the build version string, if it's not explicitly given
+ if build_config.get('version'):
+ version = build_config['version']
+ else:
try:
if not git_branch:
raise ValueError("git branch could not be determined")
@@ -417,14 +442,8 @@ if __name__ == "__main__":
except Exception as e:
print("W: Could not build a version string specific to git branch, falling back to default: {0}".format(str(e)))
version = "999.{0}".format(build_timestamp)
- else:
- # Release build, use the version from ./configure arguments
- version = build_config['version']
- if build_config['build_type'] == 'development':
- lts_build = False
- else:
- lts_build = True
+ build_config['version'] = version
version_data = {
'version': version,
@@ -436,7 +455,7 @@ if __name__ == "__main__":
'build_branch': git_branch,
'release_train': build_config['release_train'],
'architecture': build_config['architecture'],
- 'lts_build': lts_build,
+ 'build_type': build_config['build_type'],
'build_comment': build_config['build_comment'],
'bugtracker_url': build_config['bugtracker_url'],
'documentation_url': build_config['documentation_url'],
@@ -446,19 +465,18 @@ if __name__ == "__main__":
    # Multi-line strings need to be un-indented so they don't have leading
    # whitespace in the resulting file
- os_release = f"""
- PRETTY_NAME="VyOS {version} ({build_config['release_train']})"
- NAME="VyOS"
- VERSION_ID="{version}"
- VERSION="{version} ({build_config['release_train']})"
- VERSION_CODENAME={build_defaults['debian_distribution']}
- ID=vyos
- BUILD_ID="{build_git}"
- HOME_URL="{build_defaults['website_url']}"
- SUPPORT_URL="{build_defaults['support_url']}"
- BUG_REPORT_URL="{build_defaults['bugtracker_url']}"
- DOCUMENTATION_URL="{build_config['documentation_url']}"
- """
+ os_release = f"""PRETTY_NAME="VyOS {version} ({build_config['release_train']})"
+NAME="VyOS"
+VERSION_ID="{version}"
+VERSION="{version} ({build_config['release_train']})"
+VERSION_CODENAME={build_defaults['release_train']}
+ID=vyos
+BUILD_ID="{build_git}"
+HOME_URL="{build_defaults['website_url']}"
+SUPPORT_URL="{build_defaults['support_url']}"
+BUG_REPORT_URL="{build_defaults['bugtracker_url']}"
+DOCUMENTATION_URL="{build_config['documentation_url']}"
+"""
# Reminder: all paths relative to the build dir, not to the repository root
chroot_includes_dir = defaults.CHROOT_INCLUDES_DIR
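The os-release rewrite above is about f-string mechanics: a triple-quoted literal keeps its source indentation verbatim, so the old indented template produced leading whitespace in every line of the generated os-release file. Flush-left literals avoid that; textwrap.dedent is the usual alternative when you would rather keep the source indented. A sketch:

# Sketch: indented triple-quoted strings keep their whitespace
import textwrap

version = '1.5'
indented = f"""
    NAME="VyOS"
    VERSION_ID="{version}"
"""
print(repr(indented.splitlines()[1]))              # '    NAME="VyOS"' -- the indent survives verbatim
print(textwrap.dedent(indented).lstrip(), end='')  # NAME="VyOS" / VERSION_ID="1.5", flush left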
@@ -478,8 +496,8 @@ if __name__ == "__main__":
print("Version: {0}".format(version), file=f)
    # Define variables that influence the welcome message on boot
- os.makedirs(os.path.join(chroot_includes_dir, 'usr/lib/'), exist_ok=True)
- with open(os.path.join(chroot_includes_dir, 'usr/lib/os-release'), 'w') as f:
+ os.makedirs(os.path.join(chroot_includes_dir, 'etc/'), exist_ok=True)
+ with open(os.path.join(chroot_includes_dir, 'etc/os-release'), 'w') as f:
print(os_release, file=f)
## Clean up earlier build state and artifacts
@@ -498,8 +516,9 @@ if __name__ == "__main__":
## Create live-build configuration files
# Add the additional repositories to package lists
- print("I: Setting up additional APT entries")
+ print("I: Setting up VyOS repository APT entries")
vyos_repo_entry = "deb {vyos_mirror} {vyos_branch} main\n".format(**build_config)
+ vyos_repo_entry += "deb-src {vyos_mirror} {vyos_branch} main\n".format(**build_config)
apt_file = defaults.VYOS_REPO_FILE
@@ -511,10 +530,36 @@ if __name__ == "__main__":
f.write(vyos_repo_entry)
# Add custom APT entries
+ print("I: Setting up additional APT entries")
if build_config.get('additional_repositories', False):
- build_config['custom_apt_entry'] += build_config['additional_repositories']
+ for r in build_config['additional_repositories']:
+ repo_data = build_config['additional_repositories'][r]
+
+ url = repo_data.get('url', None)
+ arch = repo_data.get('architecture', None)
+ distro = repo_data.get('distribution', build_config['debian_distribution'])
+ components = repo_data.get('components', 'main')
- if build_config.get('custom_apt_entry', False):
+ if not url:
+ print(f'E: repository {r} does not specify URL')
+ sys.exit(1)
+
+ if arch:
+ arch_string = f'[arch={arch}]'
+ else:
+ arch_string = ''
+
+ entry = f'deb {arch_string} {url} {distro} {components}'
+ build_config['custom_apt_entry'].append(entry)
+
+ if not repo_data.get('no_source', False):
+ src_entry = f'deb-src {url} {distro} {components}'
+ build_config['custom_apt_entry'].append(src_entry)
+
+ if repo_data.get('key', None):
+ build_config['custom_apt_keys'].append({'name': r, 'key': repo_data['key']})
+
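Each additional_repositories table carries a url plus optional architecture, distribution, components, no_source, and key fields. How one parsed entry (shown as the dict tomli would hand back; the URL is invented) becomes APT source lines:

# Sketch: hypothetical additional_repositories entry -> sources.list lines
repo_data = {'url': 'https://repo.example.org/debian', 'architecture': 'amd64'}

distro = repo_data.get('distribution', 'bookworm')  # real code falls back to the build's Debian distribution
components = repo_data.get('components', 'main')
arch = repo_data.get('architecture', None)
arch_string = f'[arch={arch}]' if arch else ''

entries = [f"deb {arch_string} {repo_data['url']} {distro} {components}"]
if not repo_data.get('no_source', False):
    entries.append(f"deb-src {repo_data['url']} {distro} {components}")
print('\n'.join(entries))
# deb [arch=amd64] https://repo.example.org/debian bookworm main
# deb-src https://repo.example.org/debian bookworm main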
+ if build_config.get('custom_apt_entry', []):
custom_apt_file = defaults.CUSTOM_REPO_FILE
entries = "\n".join(build_config['custom_apt_entry'])
if debug:
@@ -525,11 +570,13 @@ if __name__ == "__main__":
f.write("\n")
# Add custom APT keys
- if has_nonempty_key(build_config, 'custom_apt_key'):
+ if has_nonempty_key(build_config, 'custom_apt_keys'):
key_dir = defaults.ARCHIVES_DIR
- for k in build_config['custom_apt_key']:
- dst_name = '{0}.key.chroot'.format(os.path.basename(k))
- shutil.copy(k, os.path.join(key_dir, dst_name))
+ for k in build_config['custom_apt_keys']:
+ dst_name = '{0}.key.chroot'.format(k['name'])
+ with open(os.path.join(key_dir, dst_name), 'bw') as f:
+ key_data = base64.b64decode(k['key'])
+ f.write(key_data)
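A key now travels inside the build configuration as base64 text and is materialized as a binary <name>.key.chroot file; the round-trip in miniature (placeholder bytes, not a real key):

# Sketch: base64-embedded APT key -> .key.chroot file
import base64, os, tempfile

k = {'name': 'example', 'key': base64.b64encode(b'placeholder key bytes').decode()}
key_dir = tempfile.mkdtemp()
dst = os.path.join(key_dir, f"{k['name']}.key.chroot")
with open(dst, 'bw') as f:
    f.write(base64.b64decode(k['key']))
print(open(dst, 'rb').read())  # b'placeholder key bytes'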
# Add custom packages
if has_nonempty_key(build_config, 'packages'):
@@ -548,6 +595,15 @@ if __name__ == "__main__":
with open(file_path, 'w') as f:
f.write(i["data"])
+ if has_nonempty_key(build_config, "includes_binary"):
+ for i in build_config["includes_binary"]:
+ file_path = os.path.join(binary_includes_dir, i["path"])
+ if debug:
+ print(f"D: Creating binary image include file: {file_path}")
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ with open(file_path, 'w') as f:
+ f.write(i["data"])
+
## Create the default config
## Technically it's just another includes.chroot entry,
## but it's special enough to warrant making it easier for flavor writers
@@ -560,35 +616,37 @@ if __name__ == "__main__":
## Configure live-build
lb_config_tmpl = jinja2.Template("""
lb config noauto \
+ --no-color \
--apt-indices false \
- --apt-options "--yes -oAPT::Get::allow-downgrades=true" \
+ --apt-options "--yes" \
--apt-recommends false \
- --architecture {{architecture}} \
- --archive-areas {{debian_archive_areas}} \
+ --architecture "{{architecture}}" \
+ --archive-areas "{{debian_archive_areas}}" \
--backports true \
--binary-image iso-hybrid \
--bootappend-live "boot=live components hostname=vyos username=live nopersistence noautologin nonetworking union=overlay console=ttyS0,115200 console=tty0 net.ifnames=0 biosdevname=0" \
--bootappend-live-failsafe "live components memtest noapic noapm nodma nomce nolapic nomodeset nosmp nosplash vga=normal console=ttyS0,115200 console=tty0 net.ifnames=0 biosdevname=0" \
- --bootloaders {{bootloaders}} \
- --checksums 'sha256 md5' \
+ --bootloaders "{{bootloaders}}" \
+ --checksums "sha256" \
--chroot-squashfs-compression-type "{{squashfs_compression_type}}" \
--debian-installer none \
--debootstrap-options "--variant=minbase --exclude=isc-dhcp-client,isc-dhcp-common,ifupdown --include=apt-utils,ca-certificates,gnupg2,linux-kbuild-6.1" \
- --distribution {{debian_distribution}} \
+ --distribution "{{debian_distribution}}" \
--firmware-binary false \
--firmware-chroot false \
--iso-application "VyOS" \
--iso-publisher "{{build_by}}" \
--iso-volume "VyOS" \
- --linux-flavours {{kernel_flavor}} \
- --linux-packages linux-image-{{kernel_version}} \
- --mirror-binary {{debian_mirror}} \
- --mirror-binary-security {{debian_security_mirror}} \
- --mirror-bootstrap {{debian_mirror}} \
- --mirror-chroot {{debian_mirror}} \
- --mirror-chroot-security {{debian_security_mirror}} \
+ --linux-flavours "{{kernel_flavor}}" \
+ --linux-packages "linux-image-{{kernel_version}}" \
+ --mirror-binary "{{debian_mirror}}" \
+ --mirror-binary-security "{{debian_security_mirror}}" \
+ --mirror-bootstrap "{{debian_mirror}}" \
+ --mirror-chroot "{{debian_mirror}}" \
+ --mirror-chroot-security "{{debian_security_mirror}}" \
--security true \
- --updates true
+ --updates true \
+ --utc-time true
"${@}"
""")
@@ -631,11 +689,14 @@ Pin-Priority: 600
# Copy the image
shutil.copy("live-image-{0}.hybrid.iso".format(build_config["architecture"]), iso_file)
+ # Add the image to the manifest
+ manifest['artifacts'].append(iso_file)
+
# If the flavor has `image_format = "iso"`, then the work is done.
# If not, build additional flavors from the ISO.
if build_config["image_format"] != ["iso"]:
# For all non-iso formats, we always build a raw image first
- raw_image = raw_image.create_raw_image(build_config, iso_file, "tmp/")
+ version_data, raw_image = raw_image.create_raw_image(build_config, iso_file, "tmp/")
manifest['artifacts'].append(raw_image)
    # If there are other formats in the flavor, the assumption is that
@@ -665,9 +726,24 @@ Pin-Priority: 600
hook_opts = build_config["build_hook_opts"]
else:
hook_opts = ""
- custom_image = rc_cmd(f"./build_hook {raw_image} {build_config['version']} \
- {build_config['architecture']} {hook_opts}")
+ build_hook_command = f"./build_hook {raw_image} {version_data['version']} \
+ {build_config['architecture']} {hook_opts}"
+ print(f'I: executing build hook command: {build_hook_command}')
+ custom_image = rc_cmd(build_hook_command)
manifest['artifacts'].append(custom_image)
+ # Filter out unwanted files from the artifact list
+ # and leave only those the user specified
+ # in either `artifact_format` or `image_format`.
+ #
+ # For example, with `image_format = "raw"`,
+    # the ISO image is just an intermediate object, not a target artifact.
+
+    # os.path.splitext returns the extension with a leading dot,
+    # so we strip the dot, hence the [1:]
+ is_artifact = lambda f: os.path.splitext(f)[-1][1:] in build_config['artifact_format']
+
+ manifest['artifacts'] = list(filter(is_artifact, manifest['artifacts']))
+
with open('manifest.json', 'w') as f:
f.write(json.dumps(manifest))
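The filter keeps an artifact only when its extension (sans dot) is listed in artifact_format, which is how an intermediate ISO drops out of a raw-only build. Standalone:

# Sketch: extension-based artifact filtering
import os

build_config = {'artifact_format': ['raw']}
artifacts = ['vyos-1.5.iso', 'vyos-1.5.raw']  # file names invented for illustration

is_artifact = lambda f: os.path.splitext(f)[-1][1:] in build_config['artifact_format']
print(list(filter(is_artifact, artifacts)))  # ['vyos-1.5.raw'] -- the ISO was only intermediate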
diff --git a/scripts/image-build/defaults.py b/scripts/image-build/defaults.py
index a0c5c8bf..29a6d59f 100644
--- a/scripts/image-build/defaults.py
+++ b/scripts/image-build/defaults.py
@@ -35,7 +35,7 @@ boot_settings: dict[str, str] = {
# Hardcoded default values
HARDCODED_BUILD = {
'custom_apt_entry': [],
- 'custom_apt_key': [],
+ 'custom_apt_keys': [],
'custom_package': [],
'reuse_iso': None,
'disk_size': 10,
diff --git a/scripts/image-build/raw_image.py b/scripts/image-build/raw_image.py
index dedb6f5e..d850eead 100644
--- a/scripts/image-build/raw_image.py
+++ b/scripts/image-build/raw_image.py
@@ -210,4 +210,4 @@ def create_raw_image(build_config, iso_file, work_dir):
install_image(con, version)
install_grub(con, version)
- return raw_file
+ return (version_data, raw_file)
diff --git a/scripts/package-build/opennhrp/.gitignore b/scripts/package-build/.gitignore
index 65d0752b..a1b8b226 100644
--- a/scripts/package-build/opennhrp/.gitignore
+++ b/scripts/package-build/.gitignore
@@ -1,6 +1,8 @@
-opennhrp/
*.buildinfo
*.build
*.changes
*.deb
+*.udeb
*.dsc
+*.tar.gz
+*.tar.xz
diff --git a/scripts/package-build/amazon-cloudwatch-agent/.gitignore b/scripts/package-build/amazon-cloudwatch-agent/.gitignore
new file mode 100644
index 00000000..5eb3e42a
--- /dev/null
+++ b/scripts/package-build/amazon-cloudwatch-agent/.gitignore
@@ -0,0 +1 @@
+/amazon-cloudwatch-agent/
diff --git a/scripts/package-build/opennhrp/build.py b/scripts/package-build/amazon-cloudwatch-agent/build.py
index 3c76af73..3c76af73 120000
--- a/scripts/package-build/opennhrp/build.py
+++ b/scripts/package-build/amazon-cloudwatch-agent/build.py
diff --git a/scripts/package-build/amazon-cloudwatch-agent/package.toml b/scripts/package-build/amazon-cloudwatch-agent/package.toml
new file mode 100644
index 00000000..120a17f9
--- /dev/null
+++ b/scripts/package-build/amazon-cloudwatch-agent/package.toml
@@ -0,0 +1,14 @@
+[[packages]]
+name = "amazon-cloudwatch-agent"
+commit_id = "v1.300050.0"
+scm_url = "https://github.com/aws/amazon-cloudwatch-agent"
+
+build_cmd = """
+
+make clean test check_secrets amazon-cloudwatch-agent-linux package-deb
+ARCH=$(dpkg --print-architecture)
+TAG=$(git describe --tags --abbrev=0)
+COMMIT=$(git rev-parse --short HEAD)
+cp ./build/bin/linux/${ARCH}/*.deb ../amazon-cloudwatch-agent_${TAG}_${COMMIT}_${ARCH}.deb
+
+"""
diff --git a/scripts/package-build/amazon-ssm-agent/.gitignore b/scripts/package-build/amazon-ssm-agent/.gitignore
new file mode 100644
index 00000000..78fa9ab9
--- /dev/null
+++ b/scripts/package-build/amazon-ssm-agent/.gitignore
@@ -0,0 +1 @@
+/amazon-ssm-agent/
diff --git a/scripts/package-build/pam_tacplus/build.py b/scripts/package-build/amazon-ssm-agent/build.py
index 3c76af73..3c76af73 120000
--- a/scripts/package-build/pam_tacplus/build.py
+++ b/scripts/package-build/amazon-ssm-agent/build.py
diff --git a/scripts/package-build/amazon-ssm-agent/package.toml b/scripts/package-build/amazon-ssm-agent/package.toml
new file mode 100644
index 00000000..ecd2fdf6
--- /dev/null
+++ b/scripts/package-build/amazon-ssm-agent/package.toml
@@ -0,0 +1,16 @@
+[[packages]]
+name = "amazon-ssm-agent"
+commit_id = "3.3.1311.0"
+scm_url = "https://github.com/aws/amazon-ssm-agent"
+
+build_cmd = """
+
+ARCH=$(dpkg --print-architecture)
+TAG=$(git describe --tags --abbrev=0)
+COMMIT=$(git rev-parse --short HEAD)
+
+make build-linux
+make package-deb
+cp ./bin/debian_${ARCH}/*.deb ../amazon-ssm-agent_${TAG}_${COMMIT}_${ARCH}.deb
+
+"""
diff --git a/scripts/package-build/aws-gwlbtun/.gitignore b/scripts/package-build/aws-gwlbtun/.gitignore
index 0fe7946f..dab49f62 100644
--- a/scripts/package-build/aws-gwlbtun/.gitignore
+++ b/scripts/package-build/aws-gwlbtun/.gitignore
@@ -1,8 +1 @@
-aws-gwlbtun*/
-*.tar.gz
-*.tar.xz
-*.deb
-*.dsc
-*.buildinfo
-*.build
-*.changes \ No newline at end of file
+/aws-gwlbtun*/
diff --git a/scripts/package-build/bash-completion/.gitignore b/scripts/package-build/bash-completion/.gitignore
new file mode 100644
index 00000000..73e9d517
--- /dev/null
+++ b/scripts/package-build/bash-completion/.gitignore
@@ -0,0 +1 @@
+/bash-completion/
diff --git a/scripts/package-build/bash-completion/build.py b/scripts/package-build/bash-completion/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/bash-completion/build.py
@@ -0,0 +1 @@
+../build.py \ No newline at end of file
diff --git a/scripts/package-build/bash-completion/package.toml b/scripts/package-build/bash-completion/package.toml
new file mode 100644
index 00000000..49667429
--- /dev/null
+++ b/scripts/package-build/bash-completion/package.toml
@@ -0,0 +1,12 @@
+# VyOS CLI requires an older version of bash-completion to work
+
+[[packages]]
+name = "bash-completion"
+commit_id = "debian/2.8-6"
+scm_url = "https://salsa.debian.org/debian/bash-completion"
+
+build_cmd = """
+
+# Build deb
+dpkg-buildpackage -b -us -uc
+"""
diff --git a/scripts/package-build/blackbox_exporter/.gitignore b/scripts/package-build/blackbox_exporter/.gitignore
new file mode 100644
index 00000000..435e791f
--- /dev/null
+++ b/scripts/package-build/blackbox_exporter/.gitignore
@@ -0,0 +1 @@
+/blackbox_exporter/
diff --git a/scripts/package-build/blackbox_exporter/build.py b/scripts/package-build/blackbox_exporter/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/blackbox_exporter/build.py
@@ -0,0 +1 @@
+../build.py \ No newline at end of file
diff --git a/scripts/package-build/blackbox_exporter/build.sh b/scripts/package-build/blackbox_exporter/build.sh
new file mode 100755
index 00000000..127c03be
--- /dev/null
+++ b/scripts/package-build/blackbox_exporter/build.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+CWD=$(pwd)
+set -e
+
+BUILD_ARCH=$(dpkg-architecture -qDEB_TARGET_ARCH)
+
+SRC="blackbox_exporter"
+if [ ! -d ${SRC} ]; then
+ echo "Source directory does not exist, please 'git clone'"
+ exit 1
+fi
+
+cd $SRC
+
+mkdir -p debian
+
+echo "I: Create $SRC/debian/control"
+cat <<EOF > debian/control
+Source: blackbox-exporter
+Section: net
+Priority: optional
+Maintainer: VyOS Package Maintainers <maintainers@vyos.net>
+Build-Depends: debhelper-compat (= 13)
+Standards-Version: 4.5.1
+Homepage: https://github.com/prometheus/blackbox_exporter
+
+Package: blackbox-exporter
+Architecture: ${BUILD_ARCH}
+Depends: \${shlibs:Depends}, \${misc:Depends}
+Description: The blackbox exporter allows blackbox probing of endpoints over HTTP, HTTPS, DNS, TCP, ICMP and gRPC.
+EOF
+
+echo "I: Create $SRC/debian/changelog"
+cat <<EOF > debian/changelog
+blackbox-exporter (0.26.0) UNRELEASED; urgency=medium
+
+ * Upstream package
+
+ -- VyOS Maintainers <maintainers@vyos.io> Thu, 26 Sep 2024 12:35:47 +0000
+EOF
+
+echo "I: Create $SRC/debian/rules"
+cat <<EOF > debian/rules
+#!/usr/bin/make -f
+
+clean:
+ @# Do nothing
+
+build:
+ @# Do nothing
+
+binary:
+ mkdir -p debian/blackbox-exporter
+ mkdir -p debian/blackbox-exporter/usr/sbin
+ mkdir -p debian/blackbox-exporter/run/blackbox_exporter
+ cp blackbox_exporter debian/blackbox-exporter/usr/sbin/blackbox_exporter
+ dh_gencontrol
+ dh_builddeb
+EOF
+chmod +x debian/rules
+
+echo "I: Build blackbox_exporter"
+go build
+
+echo "I: Build Debian Package"
+dpkg-buildpackage -uc -us -tc -b -d
diff --git a/scripts/package-build/blackbox_exporter/package.toml b/scripts/package-build/blackbox_exporter/package.toml
new file mode 100644
index 00000000..a59a3fdd
--- /dev/null
+++ b/scripts/package-build/blackbox_exporter/package.toml
@@ -0,0 +1,5 @@
+[[packages]]
+name = "blackbox_exporter"
+commit_id = "v0.26.0"
+scm_url = "https://github.com/prometheus/blackbox_exporter"
+build_cmd = "cd ..; y | ./build.sh"
diff --git a/scripts/package-build/build.py b/scripts/package-build/build.py
index 99180e17..9c1df7b3 100755
--- a/scripts/package-build/build.py
+++ b/scripts/package-build/build.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
#
-# Copyright (C) 2024 VyOS maintainers and contributors
+# Copyright (C) 2024-2025 VyOS maintainers and contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 or later as
@@ -58,7 +58,6 @@ def apply_patches(repo_dir: Path, patch_dir: Path) -> None:
series.write(patch.name + '\n')
print(f"I: Applied patch: {patch.name}")
-
def prepare_package(repo_dir: Path, install_data: str) -> None:
"""Prepare a package"""
if not install_data:
@@ -75,12 +74,11 @@ def prepare_package(repo_dir: Path, install_data: str) -> None:
raise
-def build_package(package: list, dependencies: list, patch_dir: Path) -> None:
+def build_package(package: list, patch_dir: Path) -> None:
"""Build a package from the repository
Args:
package (list): List of Packages from toml
- dependencies (list): List of additional dependencies
patch_dir (Path): Directory containing patches
"""
repo_name = package['name']
@@ -94,12 +92,51 @@ def build_package(package: list, dependencies: list, patch_dir: Path) -> None:
# Check out the specific commit
run(['git', 'checkout', package['commit_id']], cwd=repo_dir, check=True)
- # Ensure dependencies
- ensure_dependencies(dependencies)
-
- # Apply patches if any
- if (repo_dir / 'patches'):
- apply_patches(repo_dir, patch_dir)
+ # The `pre_build_hook` is an optional configuration defined in `package.toml`.
+ # It executes after the repository is checked out and before the build process begins.
+ # This hook allows you to perform preparatory tasks, such as creating directories,
+ # copying files, or running custom scripts/commands.
+ #
+ # Usage:
+ # - Single command:
+ # pre_build_hook = "echo 'Hello Pre-Build-Hook'"
+ #
+ # - Multi-line commands:
+ # pre_build_hook = """
+ # mkdir -p ../hello/vyos
+ # mkdir -p ../vyos
+ # cp example.txt ../vyos
+ # """
+ #
+ # - Combination of commands and scripts:
+ # pre_build_hook = "ls -l; ./script.sh"
+ pre_build_hook = package.get('pre_build_hook', '')
+ if pre_build_hook:
+ try:
+ print(f'I: execute pre_build_hook for the package "{repo_name}"')
+ run(pre_build_hook, cwd=repo_dir, check=True, shell=True)
+ except CalledProcessError as e:
+ print(e)
+ print(f"I: pre_build_hook failed for the {repo_name}")
+ raise
+
+ # Apply patches if the 'apply_patches' key is set to True (default) in the package configuration
+ # This allows skipping patch application for specific packages when desired
+ #
+ # Usage:
+ # apply_patches = false
+ #
+ # Default to True if the key is missing
+ if package.get('apply_patches', True):
+ # Check if the 'patches' directory exists in the repository
+        if (repo_dir / 'patches').exists():
+ apply_patches(repo_dir, patch_dir / repo_name)
+
+ # Sanitize the commit ID and build a tarball for the package
+ commit_id_sanitized = package['commit_id'].replace('/', '_')
+ tarball_name = f"{repo_name}_{commit_id_sanitized}.tar.gz"
+ run(['tar', '-czf', tarball_name, '-C', str(repo_dir.parent), repo_name], check=True)
+ print(f"I: Tarball created: {tarball_name}")
# Prepare the package if required
if package.get('prepare_package', False):
@@ -171,11 +208,14 @@ if __name__ == '__main__':
packages = config['packages']
patch_dir = Path(args.patch_dir)
- for package in packages:
- dependencies = package.get('dependencies', {}).get('packages', [])
+ # Load global dependencies
+ global_dependencies = config.get('dependencies', {}).get('packages', [])
+ if global_dependencies:
+ ensure_dependencies(global_dependencies)
+ for package in packages:
# Build the package
- build_package(package, dependencies, patch_dir)
+ build_package(package, patch_dir)
# Clean up build dependency packages after build
cleanup_build_deps(Path(package['name']))
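Taken together, the build.py changes move dependencies into a single top-level [dependencies] table (installed once, before any package builds) and add two per-package knobs, pre_build_hook and apply_patches. A hypothetical package.toml exercising all three, parsed here with tomli for illustration (package name and URL are invented):

# Sketch: package.toml shape after this change
import tomli

config = tomli.loads('''
[[packages]]
name = "example-pkg"
commit_id = "v1.0.0"
scm_url = "https://github.com/example/example-pkg"
pre_build_hook = "mkdir -p ../vyos"
apply_patches = false
build_cmd = "dpkg-buildpackage -uc -us -b"

[dependencies]
packages = ["libpam0g-dev"]
''')

print(config['dependencies']['packages'])        # global deps, installed once
for package in config['packages']:
    print(package.get('pre_build_hook', ''))     # runs after checkout, before the build
    print(package.get('apply_patches', True))    # False here -> patches/ is skipped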
diff --git a/scripts/package-build/ddclient/.gitignore b/scripts/package-build/ddclient/.gitignore
index aeb8af66..17d0b753 100644
--- a/scripts/package-build/ddclient/.gitignore
+++ b/scripts/package-build/ddclient/.gitignore
@@ -1,7 +1 @@
-ddclient/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
+/ddclient/
diff --git a/scripts/package-build/dropbear/.gitignore b/scripts/package-build/dropbear/.gitignore
index 6e8cff9c..58c2ff3d 100644
--- a/scripts/package-build/dropbear/.gitignore
+++ b/scripts/package-build/dropbear/.gitignore
@@ -1,7 +1 @@
-dropbear/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
+/dropbear/
diff --git a/scripts/package-build/dropbear/package.toml b/scripts/package-build/dropbear/package.toml
index cbb885ee..a00aad3d 100644
--- a/scripts/package-build/dropbear/package.toml
+++ b/scripts/package-build/dropbear/package.toml
@@ -3,5 +3,5 @@ name = "dropbear"
commit_id = "debian/2022.83-1+deb12u1"
scm_url = "https://salsa.debian.org/debian/dropbear.git"
-[packages.dependencies]
-packages = ["libpam0g-dev"] \ No newline at end of file
+[dependencies]
+packages = ["libpam0g-dev"]
diff --git a/scripts/package-build/dropbear/patches/0001-Enable-PAM-support.patch b/scripts/package-build/dropbear/patches/0001-Enable-PAM-support.patch
deleted file mode 100644
index fa6cf620..00000000
--- a/scripts/package-build/dropbear/patches/0001-Enable-PAM-support.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From 861bfb53de5909e25a952a83654c63de61af02b5 Mon Sep 17 00:00:00 2001
-From: Christian Breunig <christian@breunig.cc>
-Date: Sun, 28 May 2023 15:45:32 +0200
-Subject: [PATCH] Enable PAM support
-
----
- debian/control | 1 +
- debian/rules | 2 +-
- default_options.h | 4 ++--
- 3 files changed, 4 insertions(+), 3 deletions(-)
-
-diff --git a/debian/control b/debian/control
-index 77ea036..b252b97 100644
---- a/debian/control
-+++ b/debian/control
-@@ -6,6 +6,7 @@ Build-Depends: debhelper,
- debhelper-compat (= 13),
- libtomcrypt-dev (>= 1.18.2~),
- libtommath-dev (>= 1.2.0~),
-+ libpam0g-dev,
- libz-dev
- Rules-Requires-Root: no
- Standards-Version: 4.6.1
-diff --git a/debian/rules b/debian/rules
-index 7dab64c..ce11aa4 100755
---- a/debian/rules
-+++ b/debian/rules
-@@ -24,7 +24,7 @@ endif
- dh $@
-
- override_dh_auto_configure:
-- dh_auto_configure -- --disable-bundled-libtom \
-+ dh_auto_configure -- --disable-bundled-libtom --enable-pam \
- CC='$(CC)' CFLAGS='$(CFLAGS)' $(CONFFLAGS)
-
- execute_before_dh_auto_build:
-diff --git a/default_options.h b/default_options.h
-index 5132775..e7d274c 100644
---- a/default_options.h
-+++ b/default_options.h
-@@ -223,7 +223,7 @@ group1 in Dropbear server too */
-
- /* Authentication Types - at least one required.
- RFC Draft requires pubkey auth, and recommends password */
--#define DROPBEAR_SVR_PASSWORD_AUTH 1
-+#define DROPBEAR_SVR_PASSWORD_AUTH 0
-
- /* Note: PAM auth is quite simple and only works for PAM modules which just do
- * a simple "Login: " "Password: " (you can edit the strings in svr-authpam.c).
-@@ -231,7 +231,7 @@ group1 in Dropbear server too */
- * but there's an interface via a PAM module. It won't work for more complex
- * PAM challenge/response.
- * You can't enable both PASSWORD and PAM. */
--#define DROPBEAR_SVR_PAM_AUTH 0
-+#define DROPBEAR_SVR_PAM_AUTH 1
-
- /* ~/.ssh/authorized_keys authentication.
- * You must define DROPBEAR_SVR_PUBKEY_AUTH in order to use plugins. */
---
-2.30.2
-
diff --git a/packages/dropbear/patches/0001-Enable-PAM-support.patch b/scripts/package-build/dropbear/patches/dropbear/0001-Enable-PAM-support.patch
index fa6cf620..fa6cf620 100644
--- a/packages/dropbear/patches/0001-Enable-PAM-support.patch
+++ b/scripts/package-build/dropbear/patches/dropbear/0001-Enable-PAM-support.patch
diff --git a/scripts/package-build/ethtool/.gitignore b/scripts/package-build/ethtool/.gitignore
index f964bd07..16adf9e5 100644
--- a/scripts/package-build/ethtool/.gitignore
+++ b/scripts/package-build/ethtool/.gitignore
@@ -1,7 +1 @@
-ethtool/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
+/ethtool/
diff --git a/scripts/package-build/ethtool/package.toml b/scripts/package-build/ethtool/package.toml
index 9468ed82..ec22a06c 100644
--- a/scripts/package-build/ethtool/package.toml
+++ b/scripts/package-build/ethtool/package.toml
@@ -1,4 +1,4 @@
[[packages]]
name = "ethtool"
-commit_id = "debian/1%6.6-1"
+commit_id = "debian/1%6.10-1"
scm_url = "https://salsa.debian.org/kernel-team/ethtool"
diff --git a/scripts/package-build/frr/.gitignore b/scripts/package-build/frr/.gitignore
index 590895c0..93dfaca8 100644
--- a/scripts/package-build/frr/.gitignore
+++ b/scripts/package-build/frr/.gitignore
@@ -1,8 +1,3 @@
-frr/
-rtrlib/
-libyang/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/frr/
+/rtrlib/
+/libyang/
diff --git a/scripts/package-build/frr/package.toml b/scripts/package-build/frr/package.toml
index 48d51ae6..8ff35777 100644
--- a/scripts/package-build/frr/package.toml
+++ b/scripts/package-build/frr/package.toml
@@ -1,6 +1,6 @@
[[packages]]
name = "libyang"
-commit_id = "v2.1.148"
+commit_id = "v3.4.2"
scm_url = "https://github.com/CESNET/libyang.git"
build_cmd = "pipx run apkg build -i && find pkg/pkgs -type f -name *.deb -exec mv -t .. {} +"
@@ -8,20 +8,20 @@ build_cmd = "pipx run apkg build -i && find pkg/pkgs -type f -name *.deb -exec m
name = "rtrlib"
commit_id = "v0.8.0"
scm_url = "https://github.com/rtrlib/rtrlib.git"
-build_cmd = "sudo mk-build-deps --install --tool 'apt-get --yes --no-install-recommends'; dpkg-buildpackage -uc -us -tc -b"
[[packages]]
name = "frr"
-commit_id = "stable/9.1"
+commit_id = "stable/10.2"
scm_url = "https://github.com/FRRouting/frr.git"
-build_cmd = "sudo dpkg -i ../*.deb; sudo dpkg-buildpackage -us -uc -tc -b -Ppkg.frr.rtrlib,pkg.frr.lua"
+build_cmd = "sudo dpkg -i ../*.deb; dpkg-buildpackage -us -uc -tc -b -Ppkg.frr.rtrlib,pkg.frr.lua"
-[packages.dependencies]
+[dependencies]
packages = [
"chrpath",
"gawk",
"install-info",
"libcap-dev",
+ "libc-ares-dev",
"libjson-c-dev",
"librtr-dev",
"libpam-dev",
@@ -32,5 +32,6 @@ packages = [
"protobuf-c-compiler",
"python3-dev:native",
"texinfo",
- "lua5.3"
+ "lua5.3",
+ "doxygen"
]
diff --git a/scripts/package-build/frr/patches/frr/0001-Enable-PCRE2-in-Debian-package-builds.patch b/scripts/package-build/frr/patches/frr/0001-Enable-PCRE2-in-Debian-package-builds.patch
new file mode 100644
index 00000000..545e7d5e
--- /dev/null
+++ b/scripts/package-build/frr/patches/frr/0001-Enable-PCRE2-in-Debian-package-builds.patch
@@ -0,0 +1,24 @@
+From 21800432167ac022c01772df993efca8d4969b38 Mon Sep 17 00:00:00 2001
+From: Daniil Baturin <daniil@baturin.org>
+Date: Wed, 6 Nov 2024 15:58:10 +0000
+Subject: [PATCH] Enable PCRE2 in Debian package builds
+
+---
+ debian/rules | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/debian/rules b/debian/rules
+index 43e5d7e61..1f971ab22 100755
+--- a/debian/rules
++++ b/debian/rules
+@@ -69,6 +69,7 @@ override_dh_auto_configure:
+ --enable-vty-group=frrvty \
+ --enable-configfile-mask=0640 \
+ --enable-logfile-mask=0640 \
++ --enable-pcre2posix \
+ # end
+
+ override_dh_auto_install:
+--
+2.47.0
+
diff --git a/scripts/package-build/frr/patches/frr/0001-ldpd-Option-for-disabled-LDP-hello-message-during-TC.patch b/scripts/package-build/frr/patches/frr/0001-ldpd-Option-for-disabled-LDP-hello-message-during-TC.patch
new file mode 100644
index 00000000..67f85d01
--- /dev/null
+++ b/scripts/package-build/frr/patches/frr/0001-ldpd-Option-for-disabled-LDP-hello-message-during-TC.patch
@@ -0,0 +1,176 @@
+From 945eff42df61982585011fa8427050c74ca90c6b Mon Sep 17 00:00:00 2001
+From: Andrii Melnychenko <a.melnychenko@vyos.io>
+Date: Mon, 17 Mar 2025 13:25:20 +0100
+Subject: [PATCH 1/1] ldpd: Option for disabled LDP hello message during TCP
+
+Added option "disable-establish-hello" that disables
+sending additional LDP hello multicast messages during
+TCP session establishment.
+This option is enabled per interface: "(config-ldp-af-if)".
+
+Signed-off-by: Andrii Melnychenko <a.melnychenko@vyos.io>
+---
+ ldpd/interface.c | 2 ++
+ ldpd/ldp_vty.h | 1 +
+ ldpd/ldp_vty_cmds.c | 11 +++++++++++
+ ldpd/ldp_vty_conf.c | 32 ++++++++++++++++++++++++++++++++
+ ldpd/ldpd.c | 1 +
+ ldpd/ldpd.h | 1 +
+ ldpd/neighbor.c | 5 +++--
+ 7 files changed, 51 insertions(+), 2 deletions(-)
+
+diff --git a/ldpd/interface.c b/ldpd/interface.c
+index f0e70cbac..6fccd4af5 100644
+--- a/ldpd/interface.c
++++ b/ldpd/interface.c
+@@ -63,11 +63,13 @@ if_new(const char *name)
+ iface->ipv4.af = AF_INET;
+ iface->ipv4.iface = iface;
+ iface->ipv4.enabled = 0;
++ iface->ipv4.disable_establish_hello = 0;
+
+ /* ipv6 */
+ iface->ipv6.af = AF_INET6;
+ iface->ipv6.iface = iface;
+ iface->ipv6.enabled = 0;
++ iface->ipv6.disable_establish_hello = 0;
+
+ return (iface);
+ }
+diff --git a/ldpd/ldp_vty.h b/ldpd/ldp_vty.h
+index 5c83d1c56..196d05c93 100644
+--- a/ldpd/ldp_vty.h
++++ b/ldpd/ldp_vty.h
+@@ -24,6 +24,7 @@ int ldp_vty_allow_broken_lsp(struct vty *, const char *);
+ int ldp_vty_address_family (struct vty *, const char *, const char *);
+ int ldp_vty_disc_holdtime(struct vty *, const char *, enum hello_type, long);
+ int ldp_vty_disc_interval(struct vty *, const char *, enum hello_type, long);
++int ldp_vty_disable_establish_hello(struct vty *, const char *);
+ int ldp_vty_targeted_hello_accept(struct vty *, const char *, const char *);
+ int ldp_vty_nbr_session_holdtime(struct vty *, const char *, struct in_addr, long);
+ int ldp_vty_af_session_holdtime(struct vty *, const char *, long);
+diff --git a/ldpd/ldp_vty_cmds.c b/ldpd/ldp_vty_cmds.c
+index e046ae996..d6c36c35b 100644
+--- a/ldpd/ldp_vty_cmds.c
++++ b/ldpd/ldp_vty_cmds.c
+@@ -122,6 +122,15 @@ DEFPY (ldp_discovery_link_interval,
+ return (ldp_vty_disc_interval(vty, no, HELLO_LINK, interval));
+ }
+
++DEFPY (ldp_disable_establish_hello,
++ ldp_disable_establish_hello_cmd,
++ "[no] disable-establish-hello",
++ NO_STR
++ "Disable sending additional LDP hello message on establishing LDP tcp connection\n")
++{
++ return ldp_vty_disable_establish_hello(vty, no);
++}
++
+ DEFPY (ldp_discovery_targeted_interval,
+ ldp_discovery_targeted_interval_cmd,
+ "[no] discovery targeted-hello interval (1-65535)$interval",
+@@ -866,9 +875,11 @@ ldp_vty_init (void)
+
+ install_element(LDP_IPV4_IFACE_NODE, &ldp_discovery_link_holdtime_cmd);
+ install_element(LDP_IPV4_IFACE_NODE, &ldp_discovery_link_interval_cmd);
++ install_element(LDP_IPV4_IFACE_NODE, &ldp_disable_establish_hello_cmd);
+
+ install_element(LDP_IPV6_IFACE_NODE, &ldp_discovery_link_holdtime_cmd);
+ install_element(LDP_IPV6_IFACE_NODE, &ldp_discovery_link_interval_cmd);
++ install_element(LDP_IPV6_IFACE_NODE, &ldp_disable_establish_hello_cmd);
+
+ install_element(LDP_L2VPN_NODE, &ldp_bridge_cmd);
+ install_element(LDP_L2VPN_NODE, &ldp_mtu_cmd);
+diff --git a/ldpd/ldp_vty_conf.c b/ldpd/ldp_vty_conf.c
+index ffff67683..56ad071c8 100644
+--- a/ldpd/ldp_vty_conf.c
++++ b/ldpd/ldp_vty_conf.c
+@@ -119,6 +119,8 @@ ldp_af_iface_config_write(struct vty *vty, int af)
+ ia->hello_interval != 0)
+ vty_out (vty, " discovery hello interval %u\n",
+ ia->hello_interval);
++ if (ia->disable_establish_hello)
++ vty_out (vty, " disable-establish-hello\n");
+
+ vty_out (vty, " exit\n");
+ }
+@@ -632,6 +634,36 @@ ldp_vty_disc_interval(struct vty *vty, const char *negate,
+ return (CMD_SUCCESS);
+ }
+
++int
++ldp_vty_disable_establish_hello(struct vty *vty,
++ const char *negate)
++{
++ struct iface *iface;
++ struct iface_af *ia;
++ int af;
++
++ switch (vty->node) {
++ case LDP_IPV4_IFACE_NODE:
++ case LDP_IPV6_IFACE_NODE:
++ af = ldp_vty_get_af(vty);
++ iface = VTY_GET_CONTEXT(iface);
++ VTY_CHECK_CONTEXT(iface);
++
++ ia = iface_af_get(iface, af);
++ if (negate)
++ ia->disable_establish_hello = 0;
++ else
++ ia->disable_establish_hello = 1;
++
++ ldp_config_apply(vty, vty_conf);
++ break;
++ default:
++ fatalx("ldp_vty_disable_establish_hello: unexpected node");
++ }
++
++ return (CMD_SUCCESS);
++}
++
+ int
+ ldp_vty_targeted_hello_accept(struct vty *vty, const char *negate,
+ const char *acl_from_str)
+diff --git a/ldpd/ldpd.c b/ldpd/ldpd.c
+index 4d38fdcd0..9a5667c26 100644
+--- a/ldpd/ldpd.c
++++ b/ldpd/ldpd.c
+@@ -1604,6 +1604,7 @@ merge_iface_af(struct iface_af *ia, struct iface_af *xi)
+ }
+ ia->hello_holdtime = xi->hello_holdtime;
+ ia->hello_interval = xi->hello_interval;
++ ia->disable_establish_hello = xi->disable_establish_hello;
+ }
+
+ static void
+diff --git a/ldpd/ldpd.h b/ldpd/ldpd.h
+index ad831a6ea..40a1e8c3c 100644
+--- a/ldpd/ldpd.h
++++ b/ldpd/ldpd.h
+@@ -332,6 +332,7 @@ struct iface_af {
+ struct event *hello_timer;
+ uint16_t hello_holdtime;
+ uint16_t hello_interval;
++ int disable_establish_hello;
+ };
+
+ struct iface_ldp_sync {
+diff --git a/ldpd/neighbor.c b/ldpd/neighbor.c
+index 2596c7948..00a809186 100644
+--- a/ldpd/neighbor.c
++++ b/ldpd/neighbor.c
+@@ -630,8 +630,9 @@ nbr_establish_connection(struct nbr *nbr)
+ * an adjacency as well.
+ */
+ RB_FOREACH(adj, nbr_adj_head, &nbr->adj_tree)
+- send_hello(adj->source.type, adj->source.link.ia,
+- adj->source.target);
++ if (!(adj->source.type == HELLO_LINK && adj->source.link.ia->disable_establish_hello))
++ send_hello(adj->source.type, adj->source.link.ia,
++ adj->source.target);
+
+ if (connect(nbr->fd, &remote_su.sa, sockaddr_len(&remote_su.sa)) == -1) {
+ if (errno == EINPROGRESS) {
+--
+2.43.0
+
diff --git a/scripts/package-build/frr/patches/frr/0003-Clear-Babel-Config-On-Stop.patch b/scripts/package-build/frr/patches/frr/0003-Clear-Babel-Config-On-Stop.patch
new file mode 100644
index 00000000..fea45891
--- /dev/null
+++ b/scripts/package-build/frr/patches/frr/0003-Clear-Babel-Config-On-Stop.patch
@@ -0,0 +1,29 @@
+From c3c70e87b040233263b9594d14582dfedfecc92e Mon Sep 17 00:00:00 2001
+From: Yaroslav Kholod <y.kholod@vyos.io>
+Date: Wed, 18 Dec 2024 11:48:29 +0200
+Subject: [PATCH] #17413: Clean babeld config on stop
+
+---
+ babeld/babeld.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/babeld/babeld.c b/babeld/babeld.c
+index b562f0b70..6f1a9a3d7 100644
+--- a/babeld/babeld.c
++++ b/babeld/babeld.c
+@@ -304,6 +304,12 @@ void babel_clean_routing_process(void)
+ flush_all_routes();
+ babel_interface_close_all();
+
++ /* Clean babel config */
++ diversity_kind = DIVERSITY_NONE;
++ diversity_factor = BABEL_DEFAULT_DIVERSITY_FACTOR;
++ resend_delay = BABEL_DEFAULT_RESEND_DELAY;
++ smoothing_half_life = BABEL_DEFAULT_SMOOTHING_HALF_LIFE;
++
+ /* cancel events */
+ event_cancel(&babel_routing_process->t_read);
+ event_cancel(&babel_routing_process->t_update);
+--
+2.43.0
+
diff --git a/scripts/package-build/frr_exporter/.gitignore b/scripts/package-build/frr_exporter/.gitignore
new file mode 100644
index 00000000..aee4cba5
--- /dev/null
+++ b/scripts/package-build/frr_exporter/.gitignore
@@ -0,0 +1 @@
+/frr_exporter/
diff --git a/scripts/package-build/frr_exporter/build.py b/scripts/package-build/frr_exporter/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/frr_exporter/build.py
@@ -0,0 +1 @@
+../build.py \ No newline at end of file
diff --git a/scripts/package-build/frr_exporter/package.toml b/scripts/package-build/frr_exporter/package.toml
new file mode 100644
index 00000000..607c4c68
--- /dev/null
+++ b/scripts/package-build/frr_exporter/package.toml
@@ -0,0 +1,22 @@
+[[packages]]
+name = "frr_exporter"
+commit_id = "v1.5.0"
+scm_url = "https://github.com/tynany/frr_exporter"
+
+build_cmd = """
+
+# Create the install directory
+mkdir -p debian/usr/sbin
+make setup_promu
+go build
+
+# Move the frr_exporter binary to the install directory
+mv frr_exporter debian/usr/sbin
+
+# Build the Debian package
+fpm --input-type dir --output-type deb --name frr-exporter \
+ --version $(git describe --tags --always | cut -c2-) --deb-compression gz \
+ --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
+ --description "Prometheus exporter for FRR" \
+ --license Apache-2.0 -C debian --package ..
+"""
diff --git a/scripts/package-build/hostap/.gitignore b/scripts/package-build/hostap/.gitignore
index f9c7eb32..1a2c97d8 100644
--- a/scripts/package-build/hostap/.gitignore
+++ b/scripts/package-build/hostap/.gitignore
@@ -1,7 +1,2 @@
-hostap/
-wpa/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/hostap/
+/wpa/
diff --git a/scripts/package-build/hsflowd/.gitignore b/scripts/package-build/hsflowd/.gitignore
index d0964b29..aebf1d06 100644
--- a/scripts/package-build/hsflowd/.gitignore
+++ b/scripts/package-build/hsflowd/.gitignore
@@ -1,6 +1 @@
-host-sflow/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/host-sflow/
diff --git a/scripts/package-build/hsflowd/package.toml b/scripts/package-build/hsflowd/package.toml
index 75d320a7..823b0db3 100644
--- a/scripts/package-build/hsflowd/package.toml
+++ b/scripts/package-build/hsflowd/package.toml
@@ -1,8 +1,8 @@
[[packages]]
name = "host-sflow"
-commit_id = "v2.0.55-1"
+commit_id = "v2.1.11-5"
scm_url = "https://github.com/sflow/host-sflow.git"
-build_cmd = "make deb FEATURES='PCAP DROPMON DBUS'"
+build_cmd = "make deb FEATURES='PCAP DROPMON DBUS PSAMPLE VPP'"
-[packages.dependencies]
+[dependencies]
packages = ["libpcap0.8-dev"]
diff --git a/scripts/package-build/isc-dhcp/.gitignore b/scripts/package-build/isc-dhcp/.gitignore
index 66d17cc8..41aa96b8 100644
--- a/scripts/package-build/isc-dhcp/.gitignore
+++ b/scripts/package-build/isc-dhcp/.gitignore
@@ -1,7 +1 @@
-isc-dhcp/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
+/isc-dhcp/
diff --git a/scripts/package-build/isc-dhcp/package.toml b/scripts/package-build/isc-dhcp/package.toml
index 76a0e4a1..f07e71e3 100644
--- a/scripts/package-build/isc-dhcp/package.toml
+++ b/scripts/package-build/isc-dhcp/package.toml
@@ -3,5 +3,5 @@ name = "isc-dhcp"
commit_id = "debian/4.4.3-P1-4"
scm_url = "https://salsa.debian.org/debian/isc-dhcp"
-[packages.dependencies]
+[dependencies]
packages = ["libpam0g-dev"]
diff --git a/scripts/package-build/isc-dhcp/patches/0001-Add-support-for-raw-IP-interface-type.patch b/scripts/package-build/isc-dhcp/patches/0001-Add-support-for-raw-IP-interface-type.patch
deleted file mode 100644
index c13569ad..00000000
--- a/scripts/package-build/isc-dhcp/patches/0001-Add-support-for-raw-IP-interface-type.patch
+++ /dev/null
@@ -1,248 +0,0 @@
-From 8d9e8ace96ad9e2dba9f2d4069228dee5daf6772 Mon Sep 17 00:00:00 2001
-From: Loic Poulain <loic.poulain@linaro.org>
-Date: Mon, 2 Nov 2020 06:42:12 -0500
-Subject: [PATCH 1/4] Add support for raw IP interface type
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Under linux some devices can expose raw IP interfaces, such as WWAN
-modems. In that case IP data is not encapsulated in any lower level
-protocol.
-
-dhclient does not support this currently and this patch adds support
-for such pure IP interfaces.
-
-The original patch comes from Bjørn Mork on Network-Manage mailing list:
-https://mail.gnome.org/archives/networkmanager-list/2015-December/msg00044.html
-
----
- common/bpf.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++-
- common/lpf.c | 59 +++++++++++++++++++++++++++++++++++++-----------
- common/packet.c | 7 ++++++
- includes/dhcp.h | 1 +
- 4 files changed, 113 insertions(+), 14 deletions(-)
-
-diff --git a/common/bpf.c b/common/bpf.c
-index 658e5db..0c08574 100644
---- a/common/bpf.c
-+++ b/common/bpf.c
-@@ -198,6 +198,34 @@ struct bpf_insn dhcp_bpf_filter [] = {
- BPF_STMT (BPF_RET + BPF_K, 0),
- };
-
-+int dhcp_bpf_filter_len = sizeof dhcp_bpf_filter / sizeof (struct bpf_insn);
-+
-+struct bpf_insn dhcp_bpf_pureip_filter [] = {
-+ /* Make sure it's a UDP packet... */
-+ BPF_STMT (BPF_LD + BPF_B + BPF_ABS, 9),
-+ BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 0, 6),
-+
-+ /* Make sure this isn't a fragment... */
-+ BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 6),
-+ BPF_JUMP(BPF_JMP + BPF_JSET + BPF_K, 0x1fff, 4, 0),
-+
-+ /* Get the IP header length... */
-+ BPF_STMT (BPF_LDX + BPF_B + BPF_MSH, 0),
-+
-+ /* Make sure it's to the right port... */
-+ BPF_STMT (BPF_LD + BPF_H + BPF_IND, 2),
-+ BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, 37, 0, 1), /* patch */
-+
-+ /* If we passed all the tests, ask for the whole packet. */
-+ BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
-+
-+ /* Otherwise, drop it. */
-+ BPF_STMT(BPF_RET+BPF_K, 0),
-+};
-+
-+int dhcp_bpf_pureip_filter_len =
-+ sizeof dhcp_bpf_pureip_filter / sizeof (struct bpf_insn);
-+
- #if defined(RELAY_PORT)
- /*
- * For relay port extension
-@@ -235,13 +263,43 @@ struct bpf_insn dhcp_bpf_relay_filter [] = {
-
- int dhcp_bpf_relay_filter_len =
- sizeof dhcp_bpf_relay_filter / sizeof (struct bpf_insn);
-+
-+struct bpf_insn dhcp_bpf_pureip_relay_filter [] = {
-+ /* Make sure it's a UDP packet... */
-+ BPF_STMT (BPF_LD + BPF_B + BPF_ABS, 9),
-+ BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 0, 8),
-+
-+ /* Make sure this isn't a fragment... */
-+ BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 6),
-+ BPF_JUMP(BPF_JMP + BPF_JSET + BPF_K, 0x1fff, 6, 0),
-+
-+ /* Get the IP header length... */
-+ BPF_STMT (BPF_LDX + BPF_B + BPF_MSH, 0),
-+
-+ /* Make sure it's to the right port... */
-+ BPF_STMT (BPF_LD + BPF_H + BPF_IND, 16),
-+ BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, 37, 2, 0), /* patch */
-+
-+ /* relay can have an alternative port... */
-+ BPF_STMT (BPF_LD + BPF_H + BPF_IND, 16),
-+ BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, 37, 0, 1), /* patch */
-+
-+ /* If we passed all the tests, ask for the whole packet. */
-+ BPF_STMT (BPF_RET + BPF_K, (u_int)-1),
-+
-+ /* Otherwise, drop it. */
-+ BPF_STMT (BPF_RET + BPF_K, 0),
-+};
-+
-+int dhcp_bpf_pureip_relay_filter_len =
-+ sizeof dhcp_bpf_pureip_relay_filter / sizeof (struct bpf_insn);
-+
- #endif
-
- #if defined (DEC_FDDI)
- struct bpf_insn *bpf_fddi_filter = NULL;
- #endif
-
--int dhcp_bpf_filter_len = sizeof dhcp_bpf_filter / sizeof (struct bpf_insn);
- #if defined (HAVE_TR_SUPPORT)
- struct bpf_insn dhcp_bpf_tr_filter [] = {
- /* accept all token ring packets due to variable length header */
-diff --git a/common/lpf.c b/common/lpf.c
-index bb8822a..d8f34a4 100644
---- a/common/lpf.c
-+++ b/common/lpf.c
-@@ -177,9 +177,15 @@ void if_deregister_send (info)
- extern struct sock_filter dhcp_bpf_filter [];
- extern int dhcp_bpf_filter_len;
-
-+extern struct sock_filter dhcp_bpf_pureip_filter [];
-+extern int dhcp_bpf_pureip_filter_len;
-+
- #if defined(RELAY_PORT)
- extern struct sock_filter dhcp_bpf_relay_filter [];
- extern int dhcp_bpf_relay_filter_len;
-+
-+extern struct sock_filter dhcp_bpf_pureip_relay_filter [];
-+extern int dhcp_bpf_pureip_relay_filter_len;
- #endif
-
- #if defined (HAVE_TR_SUPPORT)
-@@ -249,31 +255,52 @@ void if_deregister_receive (info)
- static void lpf_gen_filter_setup (info)
- struct interface_info *info;
- {
-+ int pure_ip = info -> hw_address.hbuf [0] == HTYPE_PUREIP;
- struct sock_fprog p;
-
- memset(&p, 0, sizeof(p));
-
-- /* Set up the bpf filter program structure. This is defined in
-- bpf.c */
-- p.len = dhcp_bpf_filter_len;
-- p.filter = dhcp_bpf_filter;
-+ /* Set up the bpf filter program structure and patch port(s).
-+ *
-+ * This is defined in bpf.c, XXX changes to filter program may
-+ * require changes to the insn number(s) used below! XXX
-+ */
-+
-+ if (pure_ip) {
-+ p.len = dhcp_bpf_pureip_filter_len;
-+ p.filter = dhcp_bpf_pureip_filter;
-+
-+ /* patch port */
-+ dhcp_bpf_pureip_filter [6].k = ntohs (local_port);
-+ } else {
-+ p.len = dhcp_bpf_filter_len;
-+ p.filter = dhcp_bpf_filter;
-+
-+ /* patch port */
-+ dhcp_bpf_filter [8].k = ntohs (local_port);
-+ }
-
-- /* Patch the server port into the LPF program...
-- XXX changes to filter program may require changes
-- to the insn number(s) used below! XXX */
- #if defined(RELAY_PORT)
-- if (relay_port) {
-- /*
-- * If user defined relay UDP port, we need to filter
-- * also on the user UDP port.
-- */
-+ /*
-+ * If user defined relay UDP port, we need to filter
-+ * also on the user UDP port.
-+ */
-+ if (relay_port && pure_ip) {
-+ p.len = dhcp_bpf_pureip_relay_filter_len;
-+ p.filter = dhcp_bpf_pureip_relay_filter;
-+
-+ /* patch ports */
-+ dhcp_bpf_pureip_relay_filter [6].k = ntohs (local_port);
-+ dhcp_bpf_pureip_relay_filter [8].k = ntohs (relay_port);
-+ } else if (relay_port) {
- p.len = dhcp_bpf_relay_filter_len;
- p.filter = dhcp_bpf_relay_filter;
-
-+ /* patch ports */
-+ dhcp_bpf_relay_filter [8].k = ntohs (local_port);
- dhcp_bpf_relay_filter [10].k = ntohs (relay_port);
- }
- #endif
-- dhcp_bpf_filter [8].k = ntohs (local_port);
-
- if (setsockopt (info -> rfdesc, SOL_SOCKET, SO_ATTACH_FILTER, &p,
- sizeof p) < 0) {
-@@ -578,6 +605,12 @@ get_hw_addr(const char *name, struct hardware *hw) {
- hw->hbuf[3] = 0xbe;
- hw->hbuf[4] = 0xef;
- break;
-+#endif
-+#ifdef ARPHRD_RAWIP
-+ case ARPHRD_RAWIP:
-+ hw->hlen = 1;
-+ hw->hbuf[0] = HTYPE_PUREIP;
-+ break;
- #endif
- default:
- log_fatal("Unsupported device type %ld for \"%s\"",
-diff --git a/common/packet.c b/common/packet.c
-index 49795c4..6745db7 100644
---- a/common/packet.c
-+++ b/common/packet.c
-@@ -119,6 +119,10 @@ void assemble_hw_header (interface, buf, bufix, to)
- case HTYPE_INFINIBAND:
- log_error("Attempt to assemble hw header for infiniband");
- break;
-+ case HTYPE_PUREIP:
-+ /* Nothing to do, there is no hw header */
-+ *bufix = 0;
-+ break;
- case HTYPE_ETHER:
- default:
- assemble_ethernet_header(interface, buf, bufix, to);
-@@ -219,6 +223,9 @@ ssize_t decode_hw_header (interface, buf, bufix, from)
- case HTYPE_INFINIBAND:
- log_error("Attempt to decode hw header for infiniband");
- return (0);
-+ case HTYPE_PUREIP:
-+ /* Nothing to do, there is no hw header */
-+ return 0;
- case HTYPE_ETHER:
- default:
- return (decode_ethernet_header(interface, buf, bufix, from));
-diff --git a/includes/dhcp.h b/includes/dhcp.h
-index d519821..75be1fb 100644
---- a/includes/dhcp.h
-+++ b/includes/dhcp.h
-@@ -76,6 +76,7 @@ struct dhcp_packet {
- #define HTYPE_IEEE802 6 /* IEEE 802.2 Token Ring... */
- #define HTYPE_FDDI 8 /* FDDI... */
- #define HTYPE_INFINIBAND 32 /* IP over Infiniband */
-+#define HTYPE_PUREIP 35 /* Pure IP */
- #define HTYPE_IPMP 255 /* IPMP - random hw address - there
- * is no standard for this so we
- * just steal a type */
---
-2.39.2
-
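The patch removed above is the first of four that taught isc-dhcp's Linux packet-filter (LPF) path about "pure IP" interfaces: devices that carry no link-layer header, so the BPF program matches from the start of the IP header and the UDP port constant lands at a different instruction index (dhcp_bpf_pureip_filter[6] instead of dhcp_bpf_filter[8]). The underlying mechanism is plain SO_ATTACH_FILTER. The following minimal sketch is illustrative only, not ISC or VyOS code; it assumes IPv4 without IP options, whereas the real filter indexes the port relative to the parsed header length:

    /* Toy classic-BPF filter: accept only UDP datagrams to one port on a
     * packet socket whose frames begin directly with the IPv4 header. */
    #include <linux/filter.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <cstdint>

    static struct sock_filter pureip_udp_port_filter[] = {
        /* Load the IP protocol byte (offset 9 in the IPv4 header). */
        BPF_STMT(BPF_LD + BPF_B + BPF_ABS, 9),
        /* Not UDP? Jump three insns ahead to the drop. */
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 0, 3),
        /* Load the UDP destination port (20-byte header + 2). */
        BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 22),
        /* The 'k' operand below is patched at runtime, exactly as
         * lpf.c patches dhcp_bpf_pureip_filter[6].k. */
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 67, 0, 1),
        BPF_STMT(BPF_RET + BPF_K, 0xFFFFFFFFu),  /* accept whole packet */
        BPF_STMT(BPF_RET + BPF_K, 0),            /* drop */
    };

    /* fd is an already-open packet socket; port is in network byte
     * order, matching how isc-dhcp stores local_port. */
    int attach_pureip_filter(int fd, uint16_t port) {
        struct sock_fprog prog;
        pureip_udp_port_filter[3].k = ntohs(port);  /* patch the port in */
        prog.len = sizeof(pureip_udp_port_filter) / sizeof(pureip_udp_port_filter[0]);
        prog.filter = pureip_udp_port_filter;
        return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
    }

The runtime patching is needed because classic BPF programs are static instruction arrays and the listen port is only known at startup; it is also why the XXX comments above warn that renumbering filter instructions silently breaks the hard-coded patch offsets.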
diff --git a/scripts/package-build/isc-dhcp/patches/0002-Checkpoint-improved-patch.patch b/scripts/package-build/isc-dhcp/patches/0002-Checkpoint-improved-patch.patch
deleted file mode 100644
index 60b693f6..00000000
--- a/scripts/package-build/isc-dhcp/patches/0002-Checkpoint-improved-patch.patch
+++ /dev/null
@@ -1,170 +0,0 @@
-From e67d1b6b4178f412084459c4cb7e54a8c0019bd2 Mon Sep 17 00:00:00 2001
-From: Francis Dupont <fdupont@isc.org>
-Date: Fri, 6 Nov 2020 10:46:09 +0100
-Subject: [PATCH 2/4] Checkpoint: improved patch
-
----
- common/bpf.c | 10 +++---
- common/lpf.c | 89 +++++++++++++++++++++++++++++++++++-----------------
- 2 files changed, 65 insertions(+), 34 deletions(-)
-
-diff --git a/common/bpf.c b/common/bpf.c
-index 0c08574..30dcaa5 100644
---- a/common/bpf.c
-+++ b/common/bpf.c
-@@ -214,13 +214,13 @@ struct bpf_insn dhcp_bpf_pureip_filter [] = {
-
- /* Make sure it's to the right port... */
- BPF_STMT (BPF_LD + BPF_H + BPF_IND, 2),
-- BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, 37, 0, 1), /* patch */
-+ BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, 67, 0, 1), /* patch */
-
- /* If we passed all the tests, ask for the whole packet. */
-- BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
-+ BPF_STMT(BPF_RET + BPF_K, (u_int)-1),
-
- /* Otherwise, drop it. */
-- BPF_STMT(BPF_RET+BPF_K, 0),
-+ BPF_STMT(BPF_RET + BPF_K, 0),
- };
-
- int dhcp_bpf_pureip_filter_len =
-@@ -278,11 +278,11 @@ struct bpf_insn dhcp_bpf_pureip_relay_filter [] = {
-
- /* Make sure it's to the right port... */
- BPF_STMT (BPF_LD + BPF_H + BPF_IND, 16),
-- BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, 37, 2, 0), /* patch */
-+ BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, 67, 2, 0), /* patch */
-
- /* relay can have an alternative port... */
- BPF_STMT (BPF_LD + BPF_H + BPF_IND, 16),
-- BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, 37, 0, 1), /* patch */
-+ BPF_JUMP (BPF_JMP + BPF_JEQ + BPF_K, 67, 0, 1), /* patch */
-
- /* If we passed all the tests, ask for the whole packet. */
- BPF_STMT (BPF_RET + BPF_K, (u_int)-1),
-diff --git a/common/lpf.c b/common/lpf.c
-index d8f34a4..75609f5 100644
---- a/common/lpf.c
-+++ b/common/lpf.c
-@@ -221,6 +221,9 @@ void if_register_receive (info)
- lpf_tr_filter_setup (info);
- else
- #endif
-+ if (info -> hw_address.hbuf [0] == HTYPE_PUREIP)
-+ lpf_pureip_filter_setup (info);
-+ else
- lpf_gen_filter_setup (info);
-
- if (!quiet_interface_discovery)
-@@ -255,50 +258,78 @@ void if_deregister_receive (info)
- static void lpf_gen_filter_setup (info)
- struct interface_info *info;
- {
-- int pure_ip = info -> hw_address.hbuf [0] == HTYPE_PUREIP;
- struct sock_fprog p;
-
- memset(&p, 0, sizeof(p));
-
-- /* Set up the bpf filter program structure and patch port(s).
-- *
-- * This is defined in bpf.c, XXX changes to filter program may
-- * require changes to the insn number(s) used below! XXX
-- */
-+ /* Set up the bpf filter program structure. This is defined in
-+ bpf.c */
-+ p.len = dhcp_bpf_filter_len;
-+ p.filter = dhcp_bpf_filter;
-+
-+ dhcp_bpf_filter [8].k = ntohs (local_port);
-
-- if (pure_ip) {
-- p.len = dhcp_bpf_pureip_filter_len;
-- p.filter = dhcp_bpf_pureip_filter;
-+ /* Patch the server port into the LPF program...
-+ XXX changes to filter program may require changes
-+ to the insn number(s) used below! XXX */
-+#if defined(RELAY_PORT)
-+ if (relay_port) {
-+ /*
-+ * If user defined relay UDP port, we need to filter
-+ * also on the user UDP port.
-+ */
-+ p.len = dhcp_bpf_relay_filter_len;
-+ p.filter = dhcp_bpf_relay_filter;
-
-- /* patch port */
-- dhcp_bpf_pureip_filter [6].k = ntohs (local_port);
-- } else {
-- p.len = dhcp_bpf_filter_len;
-- p.filter = dhcp_bpf_filter;
-+ dhcp_bpf_relay_filter [8].k = ntohs (local_port);
-+ dhcp_bpf_relay_filter [10].k = ntohs (relay_port);
-+ }
-+#endif
-
-- /* patch port */
-- dhcp_bpf_filter [8].k = ntohs (local_port);
-+ if (setsockopt (info -> rfdesc, SOL_SOCKET, SO_ATTACH_FILTER, &p,
-+ sizeof p) < 0) {
-+ if (errno == ENOPROTOOPT || errno == EPROTONOSUPPORT ||
-+ errno == ESOCKTNOSUPPORT || errno == EPFNOSUPPORT ||
-+ errno == EAFNOSUPPORT) {
-+ log_error ("socket: %m - make sure");
-+ log_error ("CONFIG_PACKET (Packet socket) %s",
-+ "and CONFIG_FILTER");
-+ log_error ("(Socket Filtering) are enabled %s",
-+ "in your kernel");
-+ log_fatal ("configuration!");
-+ }
-+ log_fatal ("Can't install packet filter program: %m");
- }
-+}
-+
-+static void lpf_pureip_gen_filter_setup (info)
-+ struct interface_info *info;
-+{
-+ struct sock_fprog p;
-+
-+ memset(&p, 0, sizeof(p));
-+
-+ /* Set up the bpf filter program structure. This is defined in
-+ bpf.c */
-+ p.len = dhcp_bpf_pureip_filter_len;
-+ p.filter = dhcp_bpf_pureip_filter;
-+
-+ dhcp_bpf_pureip_filter [6].k = ntohs (local_port);
-
-+ /* Patch the server port into the LPF program...
-+ XXX changes to filter program may require changes
-+ to the insn number(s) used below! XXX */
- #if defined(RELAY_PORT)
-- /*
-- * If user defined relay UDP port, we need to filter
-- * also on the user UDP port.
-- */
-- if (relay_port && pure_ip) {
-+ if (relay_port) {
-+ /*
-+ * If user defined relay UDP port, we need to filter
-+ * also on the user UDP port.
-+ */
- p.len = dhcp_bpf_pureip_relay_filter_len;
- p.filter = dhcp_bpf_pureip_relay_filter;
-
-- /* patch ports */
- dhcp_bpf_pureip_relay_filter [6].k = ntohs (local_port);
- dhcp_bpf_pureip_relay_filter [8].k = ntohs (relay_port);
-- } else if (relay_port) {
-- p.len = dhcp_bpf_relay_filter_len;
-- p.filter = dhcp_bpf_relay_filter;
--
-- /* patch ports */
-- dhcp_bpf_relay_filter [8].k = ntohs (local_port);
-- dhcp_bpf_relay_filter [10].k = ntohs (relay_port);
- }
- #endif
-
---
-2.39.2
-
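Besides giving pure IP its own lpf_pureip_gen_filter_setup(), this checkpoint revision fixes the comparison constant in the new filters from 37 to 67, the DHCP server port (the operand is overwritten at runtime anyway, but the default now matches its purpose). With RELAY_PORT enabled the filter has to accept either destination port; a sketch of that two-comparison jump chain, under the same simplifying assumptions as the sketch further up (illustrative only, not the ISC code):

    /* Accept UDP to the server port *or* a user-configured relay port;
     * both 67 placeholders would be patched at runtime as in lpf.c. */
    static struct sock_filter pureip_relay_filter[] = {
        BPF_STMT(BPF_LD + BPF_B + BPF_ABS, 9),           /* IP protocol */
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 0, 5),
        BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 22),          /* UDP dst port */
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 67, 2, 0),   /* server port -> accept */
        BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 22),          /* reload dst port */
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 67, 0, 1),   /* relay port -> accept */
        BPF_STMT(BPF_RET + BPF_K, 0xFFFFFFFFu),          /* accept */
        BPF_STMT(BPF_RET + BPF_K, 0),                    /* drop */
    };

Each BPF_JUMP encodes jt/jf offsets relative to the next instruction, converging on the shared accept and drop returns; that relative encoding is exactly why inserting or removing an instruction forces the [6]/[8]/[10] patch indices in lpf.c to be re-derived.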
diff --git a/scripts/package-build/isc-dhcp/patches/0003-fix-compilation-errors.patch b/scripts/package-build/isc-dhcp/patches/0003-fix-compilation-errors.patch
deleted file mode 100644
index c66e0c7c..00000000
--- a/scripts/package-build/isc-dhcp/patches/0003-fix-compilation-errors.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From 58e0d3317795987b2f1ca788645196d0e3543f88 Mon Sep 17 00:00:00 2001
-From: Adam Smith <zero1three@gmail.com>
-Date: Tue, 23 Jan 2024 21:47:00 -0500
-Subject: [PATCH 3/4] fix compilation errors
-
----
- common/lpf.c | 5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
-diff --git a/common/lpf.c b/common/lpf.c
-index 75609f5..1561d71 100644
---- a/common/lpf.c
-+++ b/common/lpf.c
-@@ -195,6 +195,7 @@ static void lpf_tr_filter_setup (struct interface_info *);
- #endif
-
- static void lpf_gen_filter_setup (struct interface_info *);
-+static void lpf_pureip_gen_filter_setup (struct interface_info *);
-
- void if_register_receive (info)
- struct interface_info *info;
-@@ -215,14 +216,13 @@ void if_register_receive (info)
- }
- #endif
-
--
- #if defined (HAVE_TR_SUPPORT)
- if (info -> hw_address.hbuf [0] == HTYPE_IEEE802)
- lpf_tr_filter_setup (info);
- else
- #endif
- if (info -> hw_address.hbuf [0] == HTYPE_PUREIP)
-- lpf_pureip_filter_setup (info);
-+ lpf_pureip_gen_filter_setup (info);
- else
- lpf_gen_filter_setup (info);
-
-@@ -349,6 +349,7 @@ static void lpf_pureip_gen_filter_setup (info)
- }
- }
-
-+
- #if defined (HAVE_TR_SUPPORT)
- static void lpf_tr_filter_setup (info)
- struct interface_info *info;
---
-2.39.2
-
diff --git a/scripts/package-build/isc-dhcp/patches/0004-add-support-for-ARPHRD_NONE-interface-type.patch b/scripts/package-build/isc-dhcp/patches/0004-add-support-for-ARPHRD_NONE-interface-type.patch
deleted file mode 100644
index 32089b4d..00000000
--- a/scripts/package-build/isc-dhcp/patches/0004-add-support-for-ARPHRD_NONE-interface-type.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From fd96a11b31cd05aae450ec65fde0b5c6e0b718c2 Mon Sep 17 00:00:00 2001
-From: Adam Smith <zero1three@gmail.com>
-Date: Tue, 23 Jan 2024 22:35:54 -0500
-Subject: [PATCH 4/4] add support for ARPHRD_NONE interface type
-
----
- common/lpf.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/common/lpf.c b/common/lpf.c
-index 1561d71..f7e84b1 100644
---- a/common/lpf.c
-+++ b/common/lpf.c
-@@ -643,6 +643,12 @@ get_hw_addr(const char *name, struct hardware *hw) {
- hw->hlen = 1;
- hw->hbuf[0] = HTYPE_PUREIP;
- break;
-+#endif
-+#ifdef ARPHRD_NONE
-+ case ARPHRD_NONE:
-+ hw->hlen = 1;
-+ hw->hbuf[0] = HTYPE_PUREIP;
-+ break;
- #endif
- default:
- log_fatal("Unsupported device type %ld for \"%s\"",
---
-2.39.2
-
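Taken together, the four deleted patches implement one feature: DHCP on interfaces that expose no hardware address, which the kernel reports as ARPHRD_RAWIP or, for tun- and WireGuard-style devices, ARPHRD_NONE. A condensed, hypothetical rendering of the resulting get_hw_addr() mapping follows (struct and lengths are simplified stand-ins; HTYPE_PUREIP's value 35 comes from the dhcp.h hunk in patch 0001):

    #include <net/if_arp.h>
    #include <cstdio>

    #define HTYPE_ETHER   1
    #define HTYPE_PUREIP 35            /* added to dhcp.h by patch 0001 */

    struct hardware {                  /* simplified stand-in */
        unsigned hlen;
        unsigned char hbuf[17];
    };

    static bool classify(unsigned short arphrd, struct hardware *hw) {
        switch (arphrd) {
        case ARPHRD_ETHER:
            hw->hlen = 7;              /* type byte + MAC octets, copied
                                          from the kernel in the real code */
            hw->hbuf[0] = HTYPE_ETHER;
            return true;
    #ifdef ARPHRD_RAWIP
        case ARPHRD_RAWIP:             /* raw-IP devices (patch 0001) */
    #endif
    #ifdef ARPHRD_NONE
        case ARPHRD_NONE:              /* tun/WireGuard devices (patch 0004) */
    #endif
            hw->hlen = 1;              /* only the type byte: no address */
            hw->hbuf[0] = HTYPE_PUREIP;
            return true;
        default:
            return false;              /* isc-dhcp calls log_fatal() here */
        }
    }

    int main() {
        struct hardware hw{};
        if (classify(ARPHRD_NONE, &hw))
            std::printf("htype=%u hlen=%u\n", hw.hbuf[0], hw.hlen);
    }

Both cases are #ifdef-guarded because the two ARPHRD constants entered the kernel headers at different times; storing only the type byte (hlen = 1) gives the rest of the daemon a well-formed, empty pseudo address to work with.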
diff --git a/packages/isc-dhcp/patches/0001-Add-support-for-raw-IP-interface-type.patch b/scripts/package-build/isc-dhcp/patches/isc-dhcp/0001-Add-support-for-raw-IP-interface-type.patch
index c13569ad..c13569ad 100644
--- a/packages/isc-dhcp/patches/0001-Add-support-for-raw-IP-interface-type.patch
+++ b/scripts/package-build/isc-dhcp/patches/isc-dhcp/0001-Add-support-for-raw-IP-interface-type.patch
diff --git a/packages/isc-dhcp/patches/0002-Checkpoint-improved-patch.patch b/scripts/package-build/isc-dhcp/patches/isc-dhcp/0002-Checkpoint-improved-patch.patch
index 60b693f6..60b693f6 100644
--- a/packages/isc-dhcp/patches/0002-Checkpoint-improved-patch.patch
+++ b/scripts/package-build/isc-dhcp/patches/isc-dhcp/0002-Checkpoint-improved-patch.patch
diff --git a/packages/isc-dhcp/patches/0003-fix-compilation-errors.patch b/scripts/package-build/isc-dhcp/patches/isc-dhcp/0003-fix-compilation-errors.patch
index c66e0c7c..c66e0c7c 100644
--- a/packages/isc-dhcp/patches/0003-fix-compilation-errors.patch
+++ b/scripts/package-build/isc-dhcp/patches/isc-dhcp/0003-fix-compilation-errors.patch
diff --git a/packages/isc-dhcp/patches/0004-add-support-for-ARPHRD_NONE-interface-type.patch b/scripts/package-build/isc-dhcp/patches/isc-dhcp/0004-add-support-for-ARPHRD_NONE-interface-type.patch
index 32089b4d..32089b4d 100644
--- a/packages/isc-dhcp/patches/0004-add-support-for-ARPHRD_NONE-interface-type.patch
+++ b/scripts/package-build/isc-dhcp/patches/isc-dhcp/0004-add-support-for-ARPHRD_NONE-interface-type.patch
diff --git a/scripts/package-build/kea/.gitignore b/scripts/package-build/kea/.gitignore
index 1f9d42c9..70219f63 100644
--- a/scripts/package-build/kea/.gitignore
+++ b/scripts/package-build/kea/.gitignore
@@ -1,7 +1 @@
-isc-kea/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
+/isc-kea/
diff --git a/scripts/package-build/kea/package.toml b/scripts/package-build/kea/package.toml
index 0bfce21e..872be441 100644
--- a/scripts/package-build/kea/package.toml
+++ b/scripts/package-build/kea/package.toml
@@ -1,4 +1,4 @@
[[packages]]
name = "isc-kea"
-commit_id = "debian/2.4.1-3"
+commit_id = "debian/2.6.1-2"
scm_url = "https://salsa.debian.org/debian/isc-kea"
diff --git a/scripts/package-build/kea/patches/isc-kea/0001-Add-multithreading-test-mode.patch b/scripts/package-build/kea/patches/isc-kea/0001-Add-multithreading-test-mode.patch
new file mode 100644
index 00000000..981e6f1d
--- /dev/null
+++ b/scripts/package-build/kea/patches/isc-kea/0001-Add-multithreading-test-mode.patch
@@ -0,0 +1,135 @@
+From cb2b064162e2d5bf09331c619abf76a40130ade1 Mon Sep 17 00:00:00 2001
+From: sarthurdev <s.arthur@vyos.io>
+Date: Wed, 2 Apr 2025 08:48:48 +0000
+Subject: [PATCH 1/2] Add multithreading test mode
+
+---
+ src/bin/dhcp4/json_config_parser.cc | 4 ++++
+ src/bin/dhcp6/json_config_parser.cc | 6 +++++-
+ src/lib/config/cmd_http_listener.cc | 3 +++
+ src/lib/tcp/mt_tcp_listener_mgr.cc | 3 +++
+ src/lib/util/multi_threading_mgr.cc | 3 ++-
+ src/lib/util/multi_threading_mgr.h | 19 +++++++++++++++++++
+ 6 files changed, 36 insertions(+), 2 deletions(-)
+
+diff --git a/src/bin/dhcp4/json_config_parser.cc b/src/bin/dhcp4/json_config_parser.cc
+index c2e34c5..1350816 100644
+--- a/src/bin/dhcp4/json_config_parser.cc
++++ b/src/bin/dhcp4/json_config_parser.cc
+@@ -718,6 +718,10 @@ configureDhcp4Server(Dhcpv4Srv& server, isc::data::ConstElementPtr config_set,
+ LOG_DEBUG(dhcp4_logger, DBG_DHCP4_COMMAND, DHCP4_CONFIG_START)
+ .arg(server.redactConfig(config_set)->str());
+
++ if (check_only) {
++ MultiThreadingMgr::instance().setTestMode(true);
++ }
++
+ auto answer = processDhcp4Config(config_set);
+
+ int status_code = CONTROL_RESULT_SUCCESS;
+diff --git a/src/bin/dhcp6/json_config_parser.cc b/src/bin/dhcp6/json_config_parser.cc
+index 671d69a..a74a568 100644
+--- a/src/bin/dhcp6/json_config_parser.cc
++++ b/src/bin/dhcp6/json_config_parser.cc
+@@ -850,6 +850,10 @@ configureDhcp6Server(Dhcpv6Srv& server, isc::data::ConstElementPtr config_set,
+ LOG_DEBUG(dhcp6_logger, DBG_DHCP6_COMMAND, DHCP6_CONFIG_START)
+ .arg(server.redactConfig(config_set)->str());
+
++ if (check_only) {
++ MultiThreadingMgr::instance().setTestMode(true);
++ }
++
+ auto answer = processDhcp6Config(config_set);
+
+ int status_code = CONTROL_RESULT_SUCCESS;
+@@ -953,7 +957,7 @@ configureDhcp6Server(Dhcpv6Srv& server, isc::data::ConstElementPtr config_set,
+ // configuration. This will add created subnets and option values into
+ // the server's configuration.
+ // This operation should be exception safe but let's make sure.
+- if (status_code == CONTROL_RESULT_SUCCESS && (!check_only || extra_checks)) {
++ if (status_code == CONTROL_RESULT_SUCCESS && !check_only) {
+ try {
+
+ // Setup the command channel.
+diff --git a/src/lib/config/cmd_http_listener.cc b/src/lib/config/cmd_http_listener.cc
+index 9dfea59..394806e 100644
+--- a/src/lib/config/cmd_http_listener.cc
++++ b/src/lib/config/cmd_http_listener.cc
+@@ -40,6 +40,9 @@ CmdHttpListener::~CmdHttpListener() {
+
+ void
+ CmdHttpListener::start() {
++ if (MultiThreadingMgr::instance().isTestMode()) {
++ return;
++ }
+ // We must be in multi-threading mode.
+ if (!MultiThreadingMgr::instance().getMode()) {
+ isc_throw(InvalidOperation, "CmdHttpListener cannot be started"
+diff --git a/src/lib/tcp/mt_tcp_listener_mgr.cc b/src/lib/tcp/mt_tcp_listener_mgr.cc
+index e880284..4680717 100644
+--- a/src/lib/tcp/mt_tcp_listener_mgr.cc
++++ b/src/lib/tcp/mt_tcp_listener_mgr.cc
+@@ -40,6 +40,9 @@ MtTcpListenerMgr::~MtTcpListenerMgr() {
+
+ void
+ MtTcpListenerMgr::start() {
++ if (MultiThreadingMgr::instance().isTestMode()) {
++ return;
++ }
+ // We must be in multi-threading mode.
+ if (!MultiThreadingMgr::instance().getMode()) {
+ isc_throw(InvalidOperation, "MtTcpListenerMgr cannot be started"
+diff --git a/src/lib/util/multi_threading_mgr.cc b/src/lib/util/multi_threading_mgr.cc
+index d1526b9..cab284d 100644
+--- a/src/lib/util/multi_threading_mgr.cc
++++ b/src/lib/util/multi_threading_mgr.cc
+@@ -14,7 +14,8 @@ namespace isc {
+ namespace util {
+
+ MultiThreadingMgr::MultiThreadingMgr()
+- : enabled_(false), critical_section_count_(0), thread_pool_size_(0) {
++ : enabled_(false), test_mode_(false), critical_section_count_(0),
++ thread_pool_size_(0) {
+ }
+
+ MultiThreadingMgr::~MultiThreadingMgr() {
+diff --git a/src/lib/util/multi_threading_mgr.h b/src/lib/util/multi_threading_mgr.h
+index e86c488..f3da67b 100644
+--- a/src/lib/util/multi_threading_mgr.h
++++ b/src/lib/util/multi_threading_mgr.h
+@@ -154,6 +154,22 @@ public:
+ /// @param enabled The new mode.
+ void setMode(bool enabled);
+
++ /// @brief Sets or clears the test mode for @c MultiThreadingMgr.
++ ///
++ /// @param test_mode A flag which indicates that the @c MultiThreadingMgr is
++ /// in the test mode (if true), or not (if false).
++ void setTestMode(const bool test_mode) {
++ test_mode_ = test_mode;
++ }
++
++ /// @brief Checks if the @c MultiThreadingMgr is in the test mode.
++ ///
++ /// @return true if the @c MultiThreadingMgr is in the test mode, false
++ /// otherwise.
++ bool isTestMode() const {
++ return (test_mode_);
++ }
++
+ /// @brief Enter critical section.
+ ///
+ /// When entering @ref MultiThreadingCriticalSection, increment internal
+@@ -308,6 +324,9 @@ private:
+ /// otherwise.
+ bool enabled_;
+
++ /// @brief Indicates if the @c MultiThreadingMgr is in the test mode.
++ bool test_mode_;
++
+ /// @brief The critical section count.
+ ///
+ /// In case the configuration is applied within a
+--
+2.39.5
+
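This first Kea patch exists so that configuration test runs (check_only, as used by kea-dhcp4/kea-dhcp6 -t) stay side-effect free under multi-threading: the parser raises a new test-mode flag on the MultiThreadingMgr singleton, and the command HTTP and TCP listeners return early from start() instead of binding sockets and spawning worker threads. A compilable toy model of the guard follows; the names are abbreviated from the patch and nothing here is Kea's real implementation:

    #include <iostream>
    #include <stdexcept>

    class MultiThreadingMgr {              // stand-in for Kea's singleton
    public:
        static MultiThreadingMgr& instance() {
            static MultiThreadingMgr mgr;
            return mgr;
        }
        void setTestMode(bool test_mode) { test_mode_ = test_mode; }
        bool isTestMode() const { return test_mode_; }
        void setMode(bool enabled) { enabled_ = enabled; }
        bool getMode() const { return enabled_; }
    private:
        MultiThreadingMgr() = default;
        bool enabled_ = false;
        bool test_mode_ = false;           // the flag the patch introduces
    };

    class CmdHttpListener {                // mimics the patched start() guard
    public:
        void start() {
            if (MultiThreadingMgr::instance().isTestMode()) {
                return;                    // config check: open no sockets
            }
            if (!MultiThreadingMgr::instance().getMode()) {
                throw std::runtime_error("cannot start in single-threaded mode");
            }
            std::cout << "listener started\n";
        }
    };

    int main() {
        bool check_only = true;            // as if invoked with -t
        if (check_only) {
            MultiThreadingMgr::instance().setTestMode(true);
        }
        CmdHttpListener listener;
        listener.start();                  // silently a no-op in test mode
    }

The related hunk in dhcp6/json_config_parser.cc tightens the same idea: the command channel is now set up only when !check_only, dropping the earlier "|| extra_checks" escape hatch, so a pure config test never opens the control socket either.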
diff --git a/scripts/package-build/kea/patches/isc-kea/0002-Add-ping_check-hook-library.patch b/scripts/package-build/kea/patches/isc-kea/0002-Add-ping_check-hook-library.patch
new file mode 100644
index 00000000..c2f172ca
--- /dev/null
+++ b/scripts/package-build/kea/patches/isc-kea/0002-Add-ping_check-hook-library.patch
@@ -0,0 +1,13277 @@
+From 6f198a187195a7fa4ad2cf9d147532bd64724f65 Mon Sep 17 00:00:00 2001
+From: sarthurdev <965089+sarthurdev@users.noreply.github.com>
+Date: Mon, 24 Mar 2025 19:38:34 +0100
+Subject: [PATCH] Add ping_check hook library
+
+---
+ configure.ac | 3 +
+ src/hooks/dhcp/Makefile.am | 2 +-
+ src/hooks/dhcp/ping_check/Doxyfile | 2568 +++++++++++++++++
+ src/hooks/dhcp/ping_check/Makefile.am | 104 +
+ src/hooks/dhcp/ping_check/config_cache.cc | 107 +
+ src/hooks/dhcp/ping_check/config_cache.h | 146 +
+ src/hooks/dhcp/ping_check/icmp_endpoint.h | 134 +
+ src/hooks/dhcp/ping_check/icmp_msg.cc | 112 +
+ src/hooks/dhcp/ping_check/icmp_msg.h | 223 ++
+ src/hooks/dhcp/ping_check/icmp_socket.h | 359 +++
+ .../dhcp/ping_check/libloadtests/.gitignore | 1 +
+ .../dhcp/ping_check/libloadtests/Makefile.am | 60 +
+ .../libloadtests/load_unload_unittests.cc | 107 +
+ .../dhcp/ping_check/libloadtests/meson.build | 21 +
+ .../ping_check/libloadtests/run_unittests.cc | 19 +
+ src/hooks/dhcp/ping_check/meson.build | 41 +
+ src/hooks/dhcp/ping_check/ping_channel.cc | 466 +++
+ src/hooks/dhcp/ping_check/ping_channel.h | 371 +++
+ src/hooks/dhcp/ping_check/ping_check.dox | 44 +
+ .../dhcp/ping_check/ping_check_callouts.cc | 240 ++
+ .../dhcp/ping_check/ping_check_config.cc | 98 +
+ src/hooks/dhcp/ping_check/ping_check_config.h | 134 +
+ src/hooks/dhcp/ping_check/ping_check_log.cc | 17 +
+ src/hooks/dhcp/ping_check/ping_check_log.h | 23 +
+ .../dhcp/ping_check/ping_check_messages.cc | 99 +
+ .../dhcp/ping_check/ping_check_messages.h | 50 +
+ .../dhcp/ping_check/ping_check_messages.mes | 229 ++
+ src/hooks/dhcp/ping_check/ping_check_mgr.cc | 798 +++++
+ src/hooks/dhcp/ping_check/ping_check_mgr.h | 436 +++
+ src/hooks/dhcp/ping_check/ping_context.cc | 237 ++
+ src/hooks/dhcp/ping_check/ping_context.h | 280 ++
+ .../dhcp/ping_check/ping_context_store.cc | 144 +
+ .../dhcp/ping_check/ping_context_store.h | 240 ++
+ src/hooks/dhcp/ping_check/tests/.gitignore | 1 +
+ src/hooks/dhcp/ping_check/tests/Makefile.am | 70 +
+ .../tests/config_cache_unittests.cc | 245 ++
+ .../tests/icmp_endpoint_unittests.cc | 44 +
+ .../ping_check/tests/icmp_msg_unittests.cc | 172 ++
+ .../ping_check/tests/icmp_socket_unittests.cc | 380 +++
+ src/hooks/dhcp/ping_check/tests/meson.build | 21 +
+ .../tests/ping_channel_unittests.cc | 821 ++++++
+ .../tests/ping_check_config_unittests.cc | 287 ++
+ .../tests/ping_check_mgr_unittests.cc | 1878 ++++++++++++
+ .../tests/ping_context_store_unittests.cc | 467 +++
+ .../tests/ping_context_unittests.cc | 146 +
+ .../dhcp/ping_check/tests/ping_test_utils.h | 396 +++
+ .../dhcp/ping_check/tests/run_unittests.cc | 19 +
+ src/hooks/dhcp/ping_check/version.cc | 17 +
+ 48 files changed, 12876 insertions(+), 1 deletion(-)
+ create mode 100644 src/hooks/dhcp/ping_check/Doxyfile
+ create mode 100644 src/hooks/dhcp/ping_check/Makefile.am
+ create mode 100644 src/hooks/dhcp/ping_check/config_cache.cc
+ create mode 100644 src/hooks/dhcp/ping_check/config_cache.h
+ create mode 100644 src/hooks/dhcp/ping_check/icmp_endpoint.h
+ create mode 100644 src/hooks/dhcp/ping_check/icmp_msg.cc
+ create mode 100644 src/hooks/dhcp/ping_check/icmp_msg.h
+ create mode 100644 src/hooks/dhcp/ping_check/icmp_socket.h
+ create mode 100644 src/hooks/dhcp/ping_check/libloadtests/.gitignore
+ create mode 100644 src/hooks/dhcp/ping_check/libloadtests/Makefile.am
+ create mode 100644 src/hooks/dhcp/ping_check/libloadtests/load_unload_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/libloadtests/meson.build
+ create mode 100644 src/hooks/dhcp/ping_check/libloadtests/run_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/meson.build
+ create mode 100644 src/hooks/dhcp/ping_check/ping_channel.cc
+ create mode 100644 src/hooks/dhcp/ping_check/ping_channel.h
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check.dox
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_callouts.cc
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_config.cc
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_config.h
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_log.cc
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_log.h
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_messages.cc
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_messages.h
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_messages.mes
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_mgr.cc
+ create mode 100644 src/hooks/dhcp/ping_check/ping_check_mgr.h
+ create mode 100644 src/hooks/dhcp/ping_check/ping_context.cc
+ create mode 100644 src/hooks/dhcp/ping_check/ping_context.h
+ create mode 100644 src/hooks/dhcp/ping_check/ping_context_store.cc
+ create mode 100644 src/hooks/dhcp/ping_check/ping_context_store.h
+ create mode 100644 src/hooks/dhcp/ping_check/tests/.gitignore
+ create mode 100644 src/hooks/dhcp/ping_check/tests/Makefile.am
+ create mode 100644 src/hooks/dhcp/ping_check/tests/config_cache_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/tests/icmp_endpoint_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/tests/icmp_msg_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/tests/icmp_socket_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/tests/meson.build
+ create mode 100644 src/hooks/dhcp/ping_check/tests/ping_channel_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/tests/ping_check_config_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/tests/ping_check_mgr_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/tests/ping_context_store_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/tests/ping_context_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/tests/ping_test_utils.h
+ create mode 100644 src/hooks/dhcp/ping_check/tests/run_unittests.cc
+ create mode 100644 src/hooks/dhcp/ping_check/version.cc
+
+diff --git a/configure.ac b/configure.ac
+index cc1b31af71..23c8eefb81 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1582,6 +1582,9 @@ AC_CONFIG_FILES([src/hooks/dhcp/lease_cmds/tests/Makefile])
+ AC_CONFIG_FILES([src/hooks/dhcp/mysql_cb/Makefile])
+ AC_CONFIG_FILES([src/hooks/dhcp/mysql_cb/libloadtests/Makefile])
+ AC_CONFIG_FILES([src/hooks/dhcp/mysql_cb/tests/Makefile])
++AC_CONFIG_FILES([src/hooks/dhcp/ping_check/Makefile])
++AC_CONFIG_FILES([src/hooks/dhcp/ping_check/libloadtests/Makefile])
++AC_CONFIG_FILES([src/hooks/dhcp/ping_check/tests/Makefile])
+ AC_CONFIG_FILES([src/hooks/dhcp/pgsql_cb/Makefile])
+ AC_CONFIG_FILES([src/hooks/dhcp/pgsql_cb/libloadtests/Makefile])
+ AC_CONFIG_FILES([src/hooks/dhcp/pgsql_cb/tests/Makefile])
+diff --git a/src/hooks/dhcp/Makefile.am b/src/hooks/dhcp/Makefile.am
+index 1b77976424..806e310a17 100644
+--- a/src/hooks/dhcp/Makefile.am
++++ b/src/hooks/dhcp/Makefile.am
+@@ -8,4 +8,4 @@ if HAVE_PGSQL
+ SUBDIRS += pgsql_cb
+ endif
+
+-SUBDIRS += run_script stat_cmds user_chk
++SUBDIRS += run_script stat_cmds user_chk ping_check
+diff --git a/src/hooks/dhcp/ping_check/Doxyfile b/src/hooks/dhcp/ping_check/Doxyfile
+new file mode 100644
+index 0000000000..7c8554b557
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/Doxyfile
+@@ -0,0 +1,2568 @@
++# Doxyfile 1.9.1
++
++# This file describes the settings to be used by the documentation system
++# doxygen (www.doxygen.org) for a project.
++#
++# All text after a double hash (##) is considered a comment and is placed in
++# front of the TAG it is preceding.
++#
++# All text after a single hash (#) is considered a comment and will be ignored.
++# The format is:
++# TAG = value [value, ...]
++# For lists, items can also be appended using:
++# TAG += value [value, ...]
++# Values that contain spaces should be placed between quotes (\" \").
++
++#---------------------------------------------------------------------------
++# Project related configuration options
++#---------------------------------------------------------------------------
++
++# This tag specifies the encoding used for all characters in the configuration
++# file that follow. The default is UTF-8 which is also the encoding used for all
++# text before the first occurrence of this tag. Doxygen uses libiconv (or the
++# iconv built into libc) for the transcoding. See
++# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
++# The default value is: UTF-8.
++
++DOXYFILE_ENCODING = UTF-8
++
++# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
++# double-quotes, unless you are using Doxywizard) that should identify the
++# project for which the documentation is generated. This name is used in the
++# title of most generated pages and in a few other places.
++# The default value is: My Project.
++
++PROJECT_NAME = "Kea Ping Check Hooks Library"
++
++# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
++# could be handy for archiving the generated documentation or if some version
++# control system is used.
++
++PROJECT_NUMBER =
++
++# Using the PROJECT_BRIEF tag one can provide an optional one line description
++# for a project that appears at the top of each page and should give viewer a
++# quick idea about the purpose of the project. Keep the description short.
++
++PROJECT_BRIEF =
++
++# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
++# in the documentation. The maximum height of the logo should not exceed 55
++# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
++# the logo to the output directory.
++
++PROJECT_LOGO = ../../../../../doc/images/kea-logo-100x70.png
++
++# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
++# into which the generated documentation will be written. If a relative path is
++# entered, it will be relative to the location where doxygen was started. If
++# left blank the current directory will be used.
++
++OUTPUT_DIRECTORY = html
++
++# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
++# directories (in 2 levels) under the output directory of each output format and
++# will distribute the generated files over these directories. Enabling this
++# option can be useful when feeding doxygen a huge amount of source files, where
++# putting all generated files in the same directory would otherwise causes
++# performance problems for the file system.
++# The default value is: NO.
++
++CREATE_SUBDIRS = YES
++
++# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
++# characters to appear in the names of generated files. If set to NO, non-ASCII
++# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
++# U+3044.
++# The default value is: NO.
++
++ALLOW_UNICODE_NAMES = NO
++
++# The OUTPUT_LANGUAGE tag is used to specify the language in which all
++# documentation generated by doxygen is written. Doxygen will use this
++# information to generate all constant output in the proper language.
++# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
++# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
++# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
++# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
++# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
++# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
++# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
++# Ukrainian and Vietnamese.
++# The default value is: English.
++
++OUTPUT_LANGUAGE = English
++
++# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
++# documentation generated by doxygen is written. Doxygen will use this
++# information to generate all generated output in the proper direction.
++# Possible values are: None, LTR, RTL and Context.
++# The default value is: None.
++
++OUTPUT_TEXT_DIRECTION = None
++
++# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
++# descriptions after the members that are listed in the file and class
++# documentation (similar to Javadoc). Set to NO to disable this.
++# The default value is: YES.
++
++BRIEF_MEMBER_DESC = YES
++
++# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
++# description of a member or function before the detailed description
++#
++# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
++# brief descriptions will be completely suppressed.
++# The default value is: YES.
++
++REPEAT_BRIEF = YES
++
++# This tag implements a quasi-intelligent brief description abbreviator that is
++# used to form the text in various listings. Each string in this list, if found
++# as the leading text of the brief description, will be stripped from the text
++# and the result, after processing the whole list, is used as the annotated
++# text. Otherwise, the brief description is used as-is. If left blank, the
++# following values are used ($name is automatically replaced with the name of
++# the entity):The $name class, The $name widget, The $name file, is, provides,
++# specifies, contains, represents, a, an and the.
++
++ABBREVIATE_BRIEF =
++
++# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
++# doxygen will generate a detailed section even if there is only a brief
++# description.
++# The default value is: NO.
++
++ALWAYS_DETAILED_SEC = NO
++
++# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
++# inherited members of a class in the documentation of that class as if those
++# members were ordinary class members. Constructors, destructors and assignment
++# operators of the base classes will not be shown.
++# The default value is: NO.
++
++INLINE_INHERITED_MEMB = NO
++
++# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
++# before files name in the file list and in the header files. If set to NO the
++# shortest path that makes the file name unique will be used
++# The default value is: YES.
++
++FULL_PATH_NAMES = NO
++
++# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
++# Stripping is only done if one of the specified strings matches the left-hand
++# part of the path. The tag can be used to show relative paths in the file list.
++# If left blank the directory from which doxygen is run is used as the path to
++# strip.
++#
++# Note that you can specify absolute paths here, but also relative paths, which
++# will be relative from the directory where doxygen is started.
++# This tag requires that the tag FULL_PATH_NAMES is set to YES.
++
++STRIP_FROM_PATH =
++
++# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
++# path mentioned in the documentation of a class, which tells the reader which
++# header file to include in order to use a class. If left blank only the name of
++# the header file containing the class definition is used. Otherwise one should
++# specify the list of include paths that are normally passed to the compiler
++# using the -I flag.
++
++STRIP_FROM_INC_PATH =
++
++# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
++# less readable) file names. This can be useful is your file systems doesn't
++# support long names like on DOS, Mac, or CD-ROM.
++# The default value is: NO.
++
++SHORT_NAMES = NO
++
++# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
++# first line (until the first dot) of a Javadoc-style comment as the brief
++# description. If set to NO, the Javadoc-style will behave just like regular Qt-
++# style comments (thus requiring an explicit @brief command for a brief
++# description.)
++# The default value is: NO.
++
++JAVADOC_AUTOBRIEF = YES
++
++# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
++# such as
++# /***************
++# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
++# Javadoc-style will behave just like regular comments and it will not be
++# interpreted by doxygen.
++# The default value is: NO.
++
++JAVADOC_BANNER = NO
++
++# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
++# line (until the first dot) of a Qt-style comment as the brief description. If
++# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
++# requiring an explicit \brief command for a brief description.)
++# The default value is: NO.
++
++QT_AUTOBRIEF = NO
++
++# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
++# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
++# a brief description. This used to be the default behavior. The new default is
++# to treat a multi-line C++ comment block as a detailed description. Set this
++# tag to YES if you prefer the old behavior instead.
++#
++# Note that setting this tag to YES also means that rational rose comments are
++# not recognized any more.
++# The default value is: NO.
++
++MULTILINE_CPP_IS_BRIEF = NO
++
++# By default Python docstrings are displayed as preformatted text and doxygen's
++# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
++# doxygen's special commands can be used and the contents of the docstring
++# documentation blocks is shown as doxygen documentation.
++# The default value is: YES.
++
++PYTHON_DOCSTRING = YES
++
++# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
++# documentation from any documented member that it re-implements.
++# The default value is: YES.
++
++INHERIT_DOCS = YES
++
++# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
++# page for each member. If set to NO, the documentation of a member will be part
++# of the file/class/namespace that contains it.
++# The default value is: NO.
++
++SEPARATE_MEMBER_PAGES = NO
++
++# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
++# uses this value to replace tabs by spaces in code fragments.
++# Minimum value: 1, maximum value: 16, default value: 4.
++
++TAB_SIZE = 4
++
++# This tag can be used to specify a number of aliases that act as commands in
++# the documentation. An alias has the form:
++# name=value
++# For example adding
++# "sideeffect=@par Side Effects:\n"
++# will allow you to put the command \sideeffect (or @sideeffect) in the
++# documentation, which will result in a user-defined paragraph with heading
++# "Side Effects:". You can put \n's in the value part of an alias to insert
++# newlines (in the resulting output). You can put ^^ in the value part of an
++# alias to insert a newline as if a physical newline was in the original file.
++# When you need a literal { or } or , in the value part of an alias you have to
++# escape them by means of a backslash (\), this can lead to conflicts with the
++# commands \{ and \} for these it is advised to use the version @{ and @} or use
++# a double escape (\\{ and \\})
++
++ALIASES =
++
++# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
++# only. Doxygen will then generate output that is more tailored for C. For
++# instance, some of the names that are used will be different. The list of all
++# members will be omitted, etc.
++# The default value is: NO.
++
++OPTIMIZE_OUTPUT_FOR_C = NO
++
++# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
++# Python sources only. Doxygen will then generate output that is more tailored
++# for that language. For instance, namespaces will be presented as packages,
++# qualified scopes will look different, etc.
++# The default value is: NO.
++
++OPTIMIZE_OUTPUT_JAVA = NO
++
++# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
++# sources. Doxygen will then generate output that is tailored for Fortran.
++# The default value is: NO.
++
++OPTIMIZE_FOR_FORTRAN = NO
++
++# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
++# sources. Doxygen will then generate output that is tailored for VHDL.
++# The default value is: NO.
++
++OPTIMIZE_OUTPUT_VHDL = NO
++
++# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
++# sources only. Doxygen will then generate output that is more tailored for that
++# language. For instance, namespaces will be presented as modules, types will be
++# separated into more groups, etc.
++# The default value is: NO.
++
++OPTIMIZE_OUTPUT_SLICE = NO
++
++# Doxygen selects the parser to use depending on the extension of the files it
++# parses. With this tag you can assign which parser to use for a given
++# extension. Doxygen has a built-in mapping, but you can override or extend it
++# using this tag. The format is ext=language, where ext is a file extension, and
++# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
++# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
++# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
++# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser
++# tries to guess whether the code is fixed or free formatted code, this is the
++# default for Fortran type files). For instance to make doxygen treat .inc files
++# as Fortran files (default is PHP), and .f files as C (default is Fortran),
++# use: inc=Fortran f=C.
++#
++# Note: For files without extension you can use no_extension as a placeholder.
++#
++# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
++# the files are not read by doxygen. When specifying no_extension you should add
++# * to the FILE_PATTERNS.
++#
++# Note see also the list of default file extension mappings.
++
++EXTENSION_MAPPING =
++
++# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
++# according to the Markdown format, which allows for more readable
++# documentation. See https://daringfireball.net/projects/markdown/ for details.
++# The output of markdown processing is further processed by doxygen, so you can
++# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
++# case of backward compatibilities issues.
++# The default value is: YES.
++
++MARKDOWN_SUPPORT = YES
++
++# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
++# to that level are automatically included in the table of contents, even if
++# they do not have an id attribute.
++# Note: This feature currently applies only to Markdown headings.
++# Minimum value: 0, maximum value: 99, default value: 5.
++# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
++
++TOC_INCLUDE_HEADINGS = 5
++
++# When enabled doxygen tries to link words that correspond to documented
++# classes, or namespaces to their corresponding documentation. Such a link can
++# be prevented in individual cases by putting a % sign in front of the word or
++# globally by setting AUTOLINK_SUPPORT to NO.
++# The default value is: YES.
++
++AUTOLINK_SUPPORT = YES
++
++# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
++# to include (a tag file for) the STL sources as input, then you should set this
++# tag to YES in order to let doxygen match functions declarations and
++# definitions whose arguments contain STL classes (e.g. func(std::string);
++# versus func(std::string) {}). This also make the inheritance and collaboration
++# diagrams that involve STL classes more complete and accurate.
++# The default value is: NO.
++
++BUILTIN_STL_SUPPORT = YES
++
++# If you use Microsoft's C++/CLI language, you should set this option to YES to
++# enable parsing support.
++# The default value is: NO.
++
++CPP_CLI_SUPPORT = NO
++
++# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
++# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
++# will parse them like normal C++ but will assume all classes use public instead
++# of private inheritance when no explicit protection keyword is present.
++# The default value is: NO.
++
++SIP_SUPPORT = NO
++
++# For Microsoft's IDL there are propget and propput attributes to indicate
++# getter and setter methods for a property. Setting this option to YES will make
++# doxygen to replace the get and set methods by a property in the documentation.
++# This will only work if the methods are indeed getting or setting a simple
++# type. If this is not the case, or you want to show the methods anyway, you
++# should set this option to NO.
++# The default value is: YES.
++
++IDL_PROPERTY_SUPPORT = YES
++
++# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
++# tag is set to YES then doxygen will reuse the documentation of the first
++# member in the group (if any) for the other members of the group. By default
++# all members of a group must be documented explicitly.
++# The default value is: NO.
++
++DISTRIBUTE_GROUP_DOC = NO
++
++# If one adds a struct or class to a group and this option is enabled, then also
++# any nested class or struct is added to the same group. By default this option
++# is disabled and one has to add nested compounds explicitly via \ingroup.
++# The default value is: NO.
++
++GROUP_NESTED_COMPOUNDS = NO
++
++# Set the SUBGROUPING tag to YES to allow class member groups of the same type
++# (for instance a group of public functions) to be put as a subgroup of that
++# type (e.g. under the Public Functions section). Set it to NO to prevent
++# subgrouping. Alternatively, this can be done per class using the
++# \nosubgrouping command.
++# The default value is: YES.
++
++SUBGROUPING = YES
++
++# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
++# are shown inside the group in which they are included (e.g. using \ingroup)
++# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
++# and RTF).
++#
++# Note that this feature does not work in combination with
++# SEPARATE_MEMBER_PAGES.
++# The default value is: NO.
++
++INLINE_GROUPED_CLASSES = NO
++
++# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
++# with only public data fields or simple typedef fields will be shown inline in
++# the documentation of the scope in which they are defined (i.e. file,
++# namespace, or group documentation), provided this scope is documented. If set
++# to NO, structs, classes, and unions are shown on a separate page (for HTML and
++# Man pages) or section (for LaTeX and RTF).
++# The default value is: NO.
++
++INLINE_SIMPLE_STRUCTS = NO
++
++# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
++# enum is documented as struct, union, or enum with the name of the typedef. So
++# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
++# with name TypeT. When disabled the typedef will appear as a member of a file,
++# namespace, or class. And the struct will be named TypeS. This can typically be
++# useful for C code in case the coding convention dictates that all compound
++# types are typedef'ed and only the typedef is referenced, never the tag name.
++# The default value is: NO.
++
++TYPEDEF_HIDES_STRUCT = NO
++
++# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
++# cache is used to resolve symbols given their name and scope. Since this can be
++# an expensive process and often the same symbol appears multiple times in the
++# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
++# doxygen will become slower. If the cache is too large, memory is wasted. The
++# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
++# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
++# symbols. At the end of a run doxygen will report the cache usage and suggest
++# the optimal cache size from a speed point of view.
++# Minimum value: 0, maximum value: 9, default value: 0.
++
++LOOKUP_CACHE_SIZE = 0
++
++# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use
++# during processing. When set to 0 doxygen will based this on the number of
++# cores available in the system. You can set it explicitly to a value larger
++# than 0 to get more control over the balance between CPU load and processing
++# speed. At this moment only the input processing can be done using multiple
++# threads. Since this is still an experimental feature the default is set to 1,
++# which effectively disables parallel processing. Please report any issues you
++# encounter. Generating dot graphs in parallel is controlled by the
++# DOT_NUM_THREADS setting.
++# Minimum value: 0, maximum value: 32, default value: 1.
++
++NUM_PROC_THREADS = 1
++
++#---------------------------------------------------------------------------
++# Build related configuration options
++#---------------------------------------------------------------------------
++
++# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
++# documentation are documented, even if no documentation was available. Private
++# class members and static file members will be hidden unless the
++# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
++# Note: This will also disable the warnings about undocumented members that are
++# normally produced when WARNINGS is set to YES.
++# The default value is: NO.
++
++EXTRACT_ALL = YES
++
++# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
++# be included in the documentation.
++# The default value is: NO.
++
++EXTRACT_PRIVATE = NO
++
++# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
++# methods of a class will be included in the documentation.
++# The default value is: NO.
++
++EXTRACT_PRIV_VIRTUAL = NO
++
++# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
++# scope will be included in the documentation.
++# The default value is: NO.
++
++EXTRACT_PACKAGE = NO
++
++# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
++# included in the documentation.
++# The default value is: NO.
++
++EXTRACT_STATIC = NO
++
++# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
++# locally in source files will be included in the documentation. If set to NO,
++# only classes defined in header files are included. Does not have any effect
++# for Java sources.
++# The default value is: YES.
++
++EXTRACT_LOCAL_CLASSES = YES
++
++# This flag is only useful for Objective-C code. If set to YES, local methods,
++# which are defined in the implementation section but not in the interface are
++# included in the documentation. If set to NO, only methods in the interface are
++# included.
++# The default value is: NO.
++
++EXTRACT_LOCAL_METHODS = NO
++
++# If this flag is set to YES, the members of anonymous namespaces will be
++# extracted and appear in the documentation as a namespace called
++# 'anonymous_namespace{file}', where file will be replaced with the base name of
++# the file that contains the anonymous namespace. By default anonymous namespace
++# are hidden.
++# The default value is: NO.
++
++EXTRACT_ANON_NSPACES = NO
++
++# If this flag is set to YES, the name of an unnamed parameter in a declaration
++# will be determined by the corresponding definition. By default unnamed
++# parameters remain unnamed in the output.
++# The default value is: YES.
++
++RESOLVE_UNNAMED_PARAMS = YES
++
++# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
++# undocumented members inside documented classes or files. If set to NO these
++# members will be included in the various overviews, but no documentation
++# section is generated. This option has no effect if EXTRACT_ALL is enabled.
++# The default value is: NO.
++
++HIDE_UNDOC_MEMBERS = NO
++
++# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
++# undocumented classes that are normally visible in the class hierarchy. If set
++# to NO, these classes will be included in the various overviews. This option
++# has no effect if EXTRACT_ALL is enabled.
++# The default value is: NO.
++
++HIDE_UNDOC_CLASSES = NO
++
++# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
++# declarations. If set to NO, these declarations will be included in the
++# documentation.
++# The default value is: NO.
++
++HIDE_FRIEND_COMPOUNDS = NO
++
++# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
++# documentation blocks found inside the body of a function. If set to NO, these
++# blocks will be appended to the function's detailed documentation block.
++# The default value is: NO.
++
++HIDE_IN_BODY_DOCS = NO
++
++# The INTERNAL_DOCS tag determines if documentation that is typed after a
++# \internal command is included. If the tag is set to NO then the documentation
++# will be excluded. Set it to YES to include the internal documentation.
++# The default value is: NO.
++
++INTERNAL_DOCS = NO
++
++# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
++# able to match the capabilities of the underlying filesystem. In case the
++# filesystem is case sensitive (i.e. it supports files in the same directory
++# whose names only differ in casing), the option must be set to YES to properly
++# deal with such files in case they appear in the input. For filesystems that
++# are not case sensitive the option should be be set to NO to properly deal with
++# output files written for symbols that only differ in casing, such as for two
++# classes, one named CLASS and the other named Class, and to also support
++# references to files without having to specify the exact matching casing. On
++# Windows (including Cygwin) and MacOS, users should typically set this option
++# to NO, whereas on Linux or other Unix flavors it should typically be set to
++# YES.
++# The default value is: system dependent.
++
++CASE_SENSE_NAMES = YES
++
++# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
++# their full class and namespace scopes in the documentation. If set to YES, the
++# scope will be hidden.
++# The default value is: NO.
++
++HIDE_SCOPE_NAMES = NO
++
++# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
++# append additional text to a page's title, such as Class Reference. If set to
++# YES the compound reference will be hidden.
++# The default value is: NO.
++
++HIDE_COMPOUND_REFERENCE= NO
++
++# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
++# the files that are included by a file in the documentation of that file.
++# The default value is: YES.
++
++SHOW_INCLUDE_FILES = YES
++
++# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
++# grouped member an include statement to the documentation, telling the reader
++# which file to include in order to use the member.
++# The default value is: NO.
++
++SHOW_GROUPED_MEMB_INC = NO
++
++# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
++# files with double quotes in the documentation rather than with sharp brackets.
++# The default value is: NO.
++
++FORCE_LOCAL_INCLUDES = NO
++
++# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
++# documentation for inline members.
++# The default value is: YES.
++
++INLINE_INFO = YES
++
++# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
++# (detailed) documentation of file and class members alphabetically by member
++# name. If set to NO, the members will appear in declaration order.
++# The default value is: YES.
++
++SORT_MEMBER_DOCS = YES
++
++# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
++# descriptions of file, namespace and class members alphabetically by member
++# name. If set to NO, the members will appear in declaration order. Note that
++# this will also influence the order of the classes in the class list.
++# The default value is: NO.
++
++SORT_BRIEF_DOCS = YES
++
++# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
++# (brief and detailed) documentation of class members so that constructors and
++# destructors are listed first. If set to NO the constructors will appear in the
++# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
++# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
++# member documentation.
++# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
++# detailed member documentation.
++# The default value is: NO.
++
++SORT_MEMBERS_CTORS_1ST = YES
++
++# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
++# of group names into alphabetical order. If set to NO the group names will
++# appear in their defined order.
++# The default value is: NO.
++
++SORT_GROUP_NAMES = YES
++
++# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
++# fully-qualified names, including namespaces. If set to NO, the class list will
++# be sorted only by class name, not including the namespace part.
++# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
++# Note: This option applies only to the class list, not to the alphabetical
++# list.
++# The default value is: NO.
++
++SORT_BY_SCOPE_NAME = NO
++
++# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
++# type resolution of all parameters of a function it will reject a match between
++# the prototype and the implementation of a member function even if there is
++# only one candidate or it is obvious which candidate to choose by doing a
++# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
++# accept a match between prototype and implementation in such cases.
++# The default value is: NO.
++
++STRICT_PROTO_MATCHING = NO
++
++# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
++# list. This list is created by putting \todo commands in the documentation.
++# The default value is: YES.
++
++GENERATE_TODOLIST = YES
++
++# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
++# list. This list is created by putting \test commands in the documentation.
++# The default value is: YES.
++
++GENERATE_TESTLIST = YES
++
++# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
++# list. This list is created by putting \bug commands in the documentation.
++# The default value is: YES.
++
++GENERATE_BUGLIST = YES
++
++# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
++# the deprecated list. This list is created by putting \deprecated commands in
++# the documentation.
++# The default value is: YES.
++
++GENERATE_DEPRECATEDLIST= YES
++
++# The ENABLED_SECTIONS tag can be used to enable conditional documentation
++# sections, marked by \if <section_label> ... \endif and \cond <section_label>
++# ... \endcond blocks.
++
++ENABLED_SECTIONS =
++
++# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
++# initial value of a variable or macro / define can have for it to appear in the
++# documentation. If the initializer consists of more lines than specified here
++# it will be hidden. Use a value of 0 to hide initializers completely. The
++# appearance of the value of individual variables and macros / defines can be
++# controlled using \showinitializer or \hideinitializer command in the
++# documentation regardless of this setting.
++# Minimum value: 0, maximum value: 10000, default value: 30.
++
++MAX_INITIALIZER_LINES = 30
++
++# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
++# the bottom of the documentation of classes and structs. If set to YES, the
++# list will mention the files that were used to generate the documentation.
++# The default value is: YES.
++
++SHOW_USED_FILES = YES
++
++# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
++# will remove the Files entry from the Quick Index and from the Folder Tree View
++# (if specified).
++# The default value is: YES.
++
++SHOW_FILES = YES
++
++# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
++# page. This will remove the Namespaces entry from the Quick Index and from the
++# Folder Tree View (if specified).
++# The default value is: YES.
++
++SHOW_NAMESPACES = YES
++
++# The FILE_VERSION_FILTER tag can be used to specify a program or script that
++# doxygen should invoke to get the current version for each file (typically from
++# the version control system). Doxygen will invoke the program by executing (via
++# popen()) the command:
++#
++# <command> <input-file>
++#
++# where <command> is the value of the FILE_VERSION_FILTER tag, and <input-file>
++# is the name of an input file provided by doxygen. Whatever the program writes
++# to standard output is used as the file version. For an example see the
++# documentation.
++
++FILE_VERSION_FILTER =
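++
++# A hypothetical illustration (commented out, not used by this configuration):
++# in a git checkout, a filter along these lines would report the short hash of
++# the last commit touching each file as its version. Doxygen appends the input
++# file name to the command, so the trailing -- separates it from the git options:
++#
++# FILE_VERSION_FILTER = "git log -n 1 --pretty=format:%h --"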
++
++# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
++# by doxygen. The layout file controls the global structure of the generated
++# output files in an output format independent way. To create the layout file
++# that represents doxygen's defaults, run doxygen with the -l option. You can
++# optionally specify a file name after the option; if omitted, DoxygenLayout.xml
++# will be used as the name of the layout file.
++#
++# Note that if you run doxygen from a directory containing a file called
++# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
++# tag is left empty.
++
++LAYOUT_FILE =
++
++# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
++# the reference definitions. This must be a list of .bib files. The .bib
++# extension is automatically appended if omitted. This requires the bibtex tool
++# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
++# For LaTeX the style of the bibliography can be controlled using
++# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
++# search path. See also \cite for info on how to create references.
++
++CITE_BIB_FILES =
++
++#---------------------------------------------------------------------------
++# Configuration options related to warning and progress messages
++#---------------------------------------------------------------------------
++
++# The QUIET tag can be used to turn on/off the messages that are generated to
++# standard output by doxygen. If QUIET is set to YES this implies that the
++# messages are off.
++# The default value is: NO.
++
++QUIET = YES
++
++# The WARNINGS tag can be used to turn on/off the warning messages that are
++# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
++# this implies that the warnings are on.
++#
++# Tip: Turn warnings on while writing the documentation.
++# The default value is: YES.
++
++WARNINGS = YES
++
++# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
++# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
++# will automatically be disabled.
++# The default value is: YES.
++
++WARN_IF_UNDOCUMENTED = YES
++
++# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
++# potential errors in the documentation, such as not documenting some parameters
++# in a documented function, or documenting parameters that don't exist or using
++# markup commands wrongly.
++# The default value is: YES.
++
++WARN_IF_DOC_ERROR = YES
++
++# The WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
++# are documented, but have no documentation for their parameters or return
++# value. If set to NO, doxygen will only warn about wrong or incomplete
++# parameter documentation, but not about the absence of documentation. If
++# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
++# The default value is: NO.
++
++WARN_NO_PARAMDOC = NO
++
++# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
++# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
++# then doxygen will continue running as if the WARN_AS_ERROR tag is set to NO,
++# but
++# at the end of the doxygen process doxygen will return with a non-zero status.
++# Possible values are: NO, YES and FAIL_ON_WARNINGS.
++# The default value is: NO.
++
++WARN_AS_ERROR = NO
++
++# The WARN_FORMAT tag determines the format of the warning messages that doxygen
++# can produce. The string should contain the $file, $line, and $text tags, which
++# will be replaced by the file and line number from which the warning originated
++# and the warning text. Optionally the format may contain $version, which will
++# be replaced by the version of the file (if it could be obtained via
++# FILE_VERSION_FILTER).
++# The default value is: $file:$line: $text.
++
++WARN_FORMAT = "$file:$line: $text"
++
++# The WARN_LOGFILE tag can be used to specify a file to which warning and error
++# messages should be written. If left blank the output is written to standard
++# error (stderr).
++
++WARN_LOGFILE =
++
++#---------------------------------------------------------------------------
++# Configuration options related to the input files
++#---------------------------------------------------------------------------
++
++# The INPUT tag is used to specify the files and/or directories that contain
++# documented source files. You may enter file names like myfile.cpp or
++# directories like /usr/src/myproject. Separate the files or directories with
++# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING.
++# Note: If this tag is empty the current directory is searched.
++
++INPUT =
++
++# This tag can be used to specify the character encoding of the source files
++# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
++# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
++# documentation (see:
++# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
++# The default value is: UTF-8.
++
++INPUT_ENCODING = UTF-8
++
++# If the value of the INPUT tag contains directories, you can use the
++# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
++# *.h) to filter out the source-files in the directories.
++#
++# Note that for custom extensions or not directly supported extensions you also
++# need to set EXTENSION_MAPPING for the extension otherwise the files are not
++# read by doxygen.
++#
++# Note the list of default checked file patterns might differ from the list of
++# default file extension mappings.
++#
++# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
++# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
++# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
++# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
++# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl,
++# *.ucf, *.qsf and *.ice.
++
++FILE_PATTERNS = *.c \
++ *.cc \
++ *.h \
++ *.hpp \
++ *.dox
++
++# The RECURSIVE tag can be used to specify whether or not subdirectories should
++# be searched for input files as well.
++# The default value is: NO.
++
++RECURSIVE = NO
++
++# The EXCLUDE tag can be used to specify files and/or directories that should be
++# excluded from the INPUT source files. This way you can easily exclude a
++# subdirectory from a directory tree whose root is specified with the INPUT tag.
++#
++# Note that relative paths are relative to the directory from which doxygen is
++# run.
++
++EXCLUDE =
++
++# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
++# directories that are symbolic links (a Unix file system feature) are excluded
++# from the input.
++# The default value is: NO.
++
++EXCLUDE_SYMLINKS = NO
++
++# If the value of the INPUT tag contains directories, you can use the
++# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
++# certain files from those directories.
++#
++# Note that the wildcards are matched against the file with absolute path, so to
++# exclude all test directories for example use the pattern */test/*
++
++EXCLUDE_PATTERNS =
++
++# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
++# (namespaces, classes, functions, etc.) that should be excluded from the
++# output. The symbol name can be a fully qualified name, a word, or if the
++# wildcard * is used, a substring. Examples: ANamespace, AClass,
++# AClass::ANamespace, ANamespace::*Test
++#
++# Note that the wildcards are matched against the file with absolute path, so to
++# exclude all test directories use the pattern */test/*
++
++EXCLUDE_SYMBOLS =
++
++# The EXAMPLE_PATH tag can be used to specify one or more files or directories
++# that contain example code fragments that are included (see the \include
++# command).
++
++EXAMPLE_PATH =
++
++# If the value of the EXAMPLE_PATH tag contains directories, you can use the
++# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
++# *.h) to filter out the source-files in the directories. If left blank all
++# files are included.
++
++EXAMPLE_PATTERNS =
++
++# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
++# searched for input files to be used with the \include or \dontinclude commands
++# irrespective of the value of the RECURSIVE tag.
++# The default value is: NO.
++
++EXAMPLE_RECURSIVE = NO
++
++# The IMAGE_PATH tag can be used to specify one or more files or directories
++# that contain images that are to be included in the documentation (see the
++# \image command).
++
++IMAGE_PATH = ../../../../../doc/images
++
++# The INPUT_FILTER tag can be used to specify a program that doxygen should
++# invoke to filter for each input file. Doxygen will invoke the filter program
++# by executing (via popen()) the command:
++#
++# <filter> <input-file>
++#
++# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
++# name of an input file. Doxygen will then use the output that the filter
++# program writes to standard output. If FILTER_PATTERNS is specified, this tag
++# will be ignored.
++#
++# Note that the filter must not add or remove lines; it is applied before the
++# code is scanned, but not when the output code is generated. If lines are added
++# or removed, the anchors will not be placed correctly.
++#
++# Note that for custom extensions or not directly supported extensions you also
++# need to set EXTENSION_MAPPING for the extension otherwise the files are not
++# properly processed by doxygen.
++
++INPUT_FILTER =
++
++# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
++# basis. Doxygen will compare the file name with each pattern and apply the
++# filter if there is a match. The filters are a list of the form: pattern=filter
++# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
++# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
++# patterns match the file name, INPUT_FILTER is applied.
++#
++# Note that for custom extensions or not directly supported extensions you also
++# need to set EXTENSION_MAPPING for the extension otherwise the files are not
++# properly processed by doxygen.
++
++FILTER_PATTERNS =
++
++# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
++# INPUT_FILTER) will also be used to filter the input files that are used for
++# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
++# The default value is: NO.
++
++FILTER_SOURCE_FILES = NO
++
++# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
++# pattern. A pattern will override the setting for FILTER_PATTERNS (if any) and
++# it is also possible to disable source filtering for a specific pattern using
++# *.ext= (so without naming a filter).
++# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
++
++FILTER_SOURCE_PATTERNS =
++
++# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
++# is part of the input, its contents will be placed on the main page
++# (index.html). This can be useful if you have a project on, for instance,
++# GitHub
++# and want to reuse the introduction page also for the doxygen output.
++
++USE_MDFILE_AS_MAINPAGE =
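++
++# For illustration only (commented out, assuming a README.md that is also listed
++# in INPUT):
++#
++# USE_MDFILE_AS_MAINPAGE = README.md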
++
++#---------------------------------------------------------------------------
++# Configuration options related to source browsing
++#---------------------------------------------------------------------------
++
++# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
++# generated. Documented entities will be cross-referenced with these sources.
++#
++# Note: To get rid of all source code in the generated output, make sure that
++# also VERBATIM_HEADERS is set to NO.
++# The default value is: NO.
++
++SOURCE_BROWSER = YES
++
++# Setting the INLINE_SOURCES tag to YES will include the body of functions,
++# classes and enums directly into the documentation.
++# The default value is: NO.
++
++INLINE_SOURCES = NO
++
++# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
++# special comment blocks from generated source code fragments. Normal C, C++ and
++# Fortran comments will always remain visible.
++# The default value is: YES.
++
++STRIP_CODE_COMMENTS = YES
++
++# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
++# entity all documented functions referencing it will be listed.
++# The default value is: NO.
++
++REFERENCED_BY_RELATION = YES
++
++# If the REFERENCES_RELATION tag is set to YES then for each documented function
++# all documented entities called/used by that function will be listed.
++# The default value is: NO.
++
++REFERENCES_RELATION = YES
++
++# If the REFERENCES_LINK_SOURCE tag is set to YES and the SOURCE_BROWSER tag is
++# set to YES then the hyperlinks from functions in REFERENCES_RELATION and
++# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
++# link to the documentation.
++# The default value is: YES.
++
++REFERENCES_LINK_SOURCE = YES
++
++# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
++# source code will show a tooltip with additional information such as prototype,
++# brief description and links to the definition and documentation. Since this
++# will make the HTML file larger and loading of large files a bit slower, you
++# can opt to disable this feature.
++# The default value is: YES.
++# This tag requires that the tag SOURCE_BROWSER is set to YES.
++
++SOURCE_TOOLTIPS = YES
++
++# If the USE_HTAGS tag is set to YES then the references to source code will
++# point to the HTML generated by the htags(1) tool instead of doxygen built-in
++# source browser. The htags tool is part of GNU's global source tagging system
++# (see https://www.gnu.org/software/global/global.html). You will need version
++# 4.8.6 or higher.
++#
++# To use it do the following:
++# - Install the latest version of global
++# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
++# - Make sure the INPUT points to the root of the source tree
++# - Run doxygen as normal
++#
++# Doxygen will invoke htags (and that will in turn invoke gtags), so these
++# tools must be available from the command line (i.e. in the search path).
++#
++# The result: instead of the source browser generated by doxygen, the links to
++# source code will now point to the output of htags.
++# The default value is: NO.
++# This tag requires that the tag SOURCE_BROWSER is set to YES.
++
++USE_HTAGS = NO
++
++# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
++# verbatim copy of the header file for each class for which an include is
++# specified. Set to NO to disable this.
++# See also: Section \class.
++# The default value is: YES.
++
++VERBATIM_HEADERS = YES
++
++#---------------------------------------------------------------------------
++# Configuration options related to the alphabetical class index
++#---------------------------------------------------------------------------
++
++# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
++# compounds will be generated. Enable this if the project contains a lot of
++# classes, structs, unions or interfaces.
++# The default value is: YES.
++
++ALPHABETICAL_INDEX = YES
++
++# In case all classes in a project start with a common prefix, all classes will
++# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
++# can be used to specify a prefix (or a list of prefixes) that should be ignored
++# while generating the index headers.
++# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
++
++IGNORE_PREFIX =
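++
++# A hypothetical example (commented out): if most classes shared a common vy_
++# prefix, the following would index a class vy_router under "R" rather than "V":
++#
++# IGNORE_PREFIX = vy_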
++
++#---------------------------------------------------------------------------
++# Configuration options related to the HTML output
++#---------------------------------------------------------------------------
++
++# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output.
++# The default value is: YES.
++
++GENERATE_HTML = YES
++
++# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
++# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
++# it.
++# The default directory is: html.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_OUTPUT = ../html
++
++# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
++# generated HTML page (for example: .htm, .php, .asp).
++# The default value is: .html.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_FILE_EXTENSION = .html
++
++# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
++# each generated HTML page. If the tag is left blank doxygen will generate a
++# standard header.
++#
++# For the HTML output to be valid, the header file must include any scripts and
++# style sheets that doxygen needs, which depend on the configuration options
++# used (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start
++# with a default header using
++# doxygen -w html new_header.html new_footer.html new_stylesheet.css
++# YourConfigFile
++# and then modify the file new_header.html. See also section "Doxygen usage"
++# for information on how to generate the default header that doxygen normally
++# uses.
++# Note: The header is subject to change so you typically have to regenerate the
++# default header when upgrading to a newer version of doxygen. For a description
++# of the possible markers and block names see the documentation.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_HEADER =
++
++# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
++# generated HTML page. If the tag is left blank doxygen will generate a standard
++# footer. See HTML_HEADER for more information on how to generate a default
++# footer and what special commands can be used inside the footer. See also
++# section "Doxygen usage" for information on how to generate the default footer
++# that doxygen normally uses.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_FOOTER =
++
++# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
++# sheet that is used by each HTML page. It can be used to fine-tune the look of
++# the HTML output. If left blank doxygen will generate a default style sheet.
++# See also section "Doxygen usage" for information on how to generate the style
++# sheet that doxygen normally uses.
++# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
++# it is more robust and this tag (HTML_STYLESHEET) will in the future become
++# obsolete.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_STYLESHEET =
++
++# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
++# cascading style sheets that are included after the standard style sheets
++# created by doxygen. Using this option one can overrule certain style aspects.
++# This is preferred over using HTML_STYLESHEET since it does not replace the
++# standard style sheet and is therefore more robust against future updates.
++# Doxygen will copy the style sheet files to the output directory.
++# Note: The order of the extra style sheet files is of importance (e.g. the last
++# style sheet in the list overrules the setting of the previous ones in the
++# list). For an example see the documentation.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_EXTRA_STYLESHEET =
++
++# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
++# other source files which should be copied to the HTML output directory. Note
++# that these files will be copied to the base HTML output directory. Use the
++# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
++# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
++# files will be copied as-is; there are no commands or markers available.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_EXTRA_FILES =
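++
++# A hypothetical example (commented out): copy a logo next to the generated
++# pages and reference it from a custom HTML_HEADER via the $relpath^ marker,
++# e.g. <img src="$relpath^logo.png" alt="logo"/>:
++#
++# HTML_EXTRA_FILES = logo.png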
++
++# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
++# will adjust the colors in the style sheet and background images according to
++# this color. Hue is specified as an angle on a colorwheel, see
++# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
++# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
++# purple, and 360 is red again.
++# Minimum value: 0, maximum value: 359, default value: 220.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_COLORSTYLE_HUE = 148
++
++# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
++# in the HTML output. For a value of 0 the output will use grayscales only. A
++# value of 255 will produce the most vivid colors.
++# Minimum value: 0, maximum value: 255, default value: 100.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_COLORSTYLE_SAT = 93
++
++# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
++# luminance component of the colors in the HTML output. Values below 100
++# gradually make the output lighter, whereas values above 100 make the output
++# darker. The value divided by 100 is the actual gamma applied, so 80 represents
++# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
++# change the gamma.
++# Minimum value: 40, maximum value: 240, default value: 80.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_COLORSTYLE_GAMMA = 80
++
++# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
++# page will contain the date and time when the page was generated. Setting this
++# to YES can help to show when doxygen was last run and thus if the
++# documentation is up to date.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_TIMESTAMP = YES
++
++# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
++# documentation will contain a main index with vertical navigation menus that
++# are dynamically created via JavaScript. If disabled, the navigation index will
++# consists of multiple levels of tabs that are statically embedded in every HTML
++# page. Disable this option to support browsers that do not have JavaScript,
++# like the Qt help browser.
++# The default value is: YES.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_DYNAMIC_MENUS = YES
++
++# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
++# documentation will contain sections that can be hidden and shown after the
++# page has loaded.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_DYNAMIC_SECTIONS = YES
++
++# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
++# shown in the various tree structured indices initially; the user can expand
++# and collapse entries dynamically later on. Doxygen will expand the tree to
++# such a level that at most the specified number of entries are visible (unless
++# a fully collapsed tree already exceeds this amount). So setting the number of
++# entries 1 will produce a full collapsed tree by default. 0 is a special value
++# representing an infinite number of entries and will result in a full expanded
++# tree by default.
++# Minimum value: 0, maximum value: 9999, default value: 100.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_INDEX_NUM_ENTRIES = 100
++
++# If the GENERATE_DOCSET tag is set to YES, additional index files will be
++# generated that can be used as input for Apple's Xcode 3 integrated development
++# environment (see:
++# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
++# create a documentation set, doxygen will generate a Makefile in the HTML
++# output directory. Running make will produce the docset in that directory and
++# running make install will install the docset in
++# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
++# startup. See
++# https://developer.apple.com/library/archive/featuredarticles/DoxygenXcode/_index.html
++# for more information.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++GENERATE_DOCSET = NO
++
++# This tag determines the name of the docset feed. A documentation feed provides
++# an umbrella under which multiple documentation sets from a single provider
++# (such as a company or product suite) can be grouped.
++# The default value is: Doxygen generated docs.
++# This tag requires that the tag GENERATE_DOCSET is set to YES.
++
++DOCSET_FEEDNAME = "Doxygen generated docs"
++
++# This tag specifies a string that should uniquely identify the documentation
++# set bundle. This should be a reverse domain-name style string, e.g.
++# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
++# The default value is: org.doxygen.Project.
++# This tag requires that the tag GENERATE_DOCSET is set to YES.
++
++DOCSET_BUNDLE_ID = org.doxygen.Project
++
++# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
++# the documentation publisher. This should be a reverse domain-name style
++# string, e.g. com.mycompany.MyDocSet.documentation.
++# The default value is: org.doxygen.Publisher.
++# This tag requires that the tag GENERATE_DOCSET is set to YES.
++
++DOCSET_PUBLISHER_ID = org.doxygen.Publisher
++
++# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
++# The default value is: Publisher.
++# This tag requires that the tag GENERATE_DOCSET is set to YES.
++
++DOCSET_PUBLISHER_NAME = Publisher
++
++# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
++# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
++# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
++# (see:
++# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows.
++#
++# The HTML Help Workshop contains a compiler that can convert all HTML output
++# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
++# files are now used as the Windows 98 help format, and will replace the old
++# Windows help format (.hlp) on all Windows platforms in the future. Compressed
++# HTML files also contain an index and a table of contents, and support
++# searching for words in the documentation. The HTML Help Workshop also
++# contains a viewer for
++# compressed HTML files.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++GENERATE_HTMLHELP = NO
++
++# The CHM_FILE tag can be used to specify the file name of the resulting .chm
++# file. You can add a path in front of the file if the result should not be
++# written to the html output directory.
++# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
++
++CHM_FILE =
++
++# The HHC_LOCATION tag can be used to specify the location (absolute path
++# including file name) of the HTML help compiler (hhc.exe). If non-empty,
++# doxygen will try to run the HTML help compiler on the generated index.hhp.
++# The file has to be specified with full path.
++# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
++
++HHC_LOCATION =
++
++# The GENERATE_CHI flag controls whether a separate .chi index file is generated
++# (YES) or included in the main .chm file (NO).
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
++
++GENERATE_CHI = NO
++
++# The CHM_INDEX_ENCODING tag is used to encode the HtmlHelp index (hhk), content
++# (hhc) and project file content.
++# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
++
++CHM_INDEX_ENCODING =
++
++# The BINARY_TOC flag controls whether a binary table of contents is generated
++# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
++# enables the Previous and Next buttons.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
++
++BINARY_TOC = NO
++
++# The TOC_EXPAND flag can be set to YES to add extra items for group members to
++# the table of contents of the HTML help documentation and to the tree view.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
++
++TOC_EXPAND = NO
++
++# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
++# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
++# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
++# (.qch) of the generated HTML documentation.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++GENERATE_QHP = NO
++
++# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
++# the file name of the resulting .qch file. The path specified is relative to
++# the HTML output folder.
++# This tag requires that the tag GENERATE_QHP is set to YES.
++
++QCH_FILE =
++
++# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
++# Project output. For more information please see Qt Help Project / Namespace
++# (see:
++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
++# The default value is: org.doxygen.Project.
++# This tag requires that the tag GENERATE_QHP is set to YES.
++
++QHP_NAMESPACE =
++
++# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
++# Help Project output. For more information please see Qt Help Project / Virtual
++# Folders (see:
++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
++# The default value is: doc.
++# This tag requires that the tag GENERATE_QHP is set to YES.
++
++QHP_VIRTUAL_FOLDER = doc
++
++# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
++# filter to add. For more information please see Qt Help Project / Custom
++# Filters (see:
++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
++# This tag requires that the tag GENERATE_QHP is set to YES.
++
++QHP_CUST_FILTER_NAME =
++
++# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
++# custom filter to add. For more information please see Qt Help Project / Custom
++# Filters (see:
++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
++# This tag requires that the tag GENERATE_QHP is set to YES.
++
++QHP_CUST_FILTER_ATTRS =
++
++# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
++# project's filter section matches. For more information please see Qt Help
++# Project / Filter Attributes (see:
++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
++# This tag requires that the tag GENERATE_QHP is set to YES.
++
++QHP_SECT_FILTER_ATTRS =
++
++# The QHG_LOCATION tag can be used to specify the location (absolute path
++# including file name) of Qt's qhelpgenerator. If non-empty, doxygen will try to
++# run qhelpgenerator on the generated .qhp file.
++# This tag requires that the tag GENERATE_QHP is set to YES.
++
++QHG_LOCATION =
++
++# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
++# generated that, together with the HTML files, form an Eclipse help plugin. To
++# install this plugin and make it available under the help contents menu in
++# Eclipse, the contents of the directory containing the HTML and XML files need
++# to be copied into the plugins directory of Eclipse. The name of the directory
++# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
++# After copying, Eclipse needs to be restarted before the help appears.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++GENERATE_ECLIPSEHELP = NO
++
++# A unique identifier for the Eclipse help plugin. When installing the plugin
++# the directory name containing the HTML and XML files should also have this
++# name. Each documentation set should have its own identifier.
++# The default value is: org.doxygen.Project.
++# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
++
++ECLIPSE_DOC_ID = org.doxygen.Project
++
++# If you want full control over the layout of the generated HTML pages it might
++# be necessary to disable the index and replace it with your own. The
++# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
++# of each HTML page. A value of NO enables the index and the value YES disables
++# it. Since the tabs in the index contain the same information as the navigation
++# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++DISABLE_INDEX = NO
++
++# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
++# structure should be generated to display hierarchical information. If the tag
++# value is set to YES, a side panel will be generated containing a tree-like
++# index structure (just like the one that is generated for HTML Help). For this
++# to work a browser that supports JavaScript, DHTML, CSS and frames is required
++# (i.e. any modern browser). Windows users are probably better off using the
++# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
++# further fine-tune the look of the index. As an example, the default style
++# sheet generated by doxygen has an example that shows how to put an image at
++# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
++# the same information as the tab index, you could consider setting
++# DISABLE_INDEX to YES when enabling this option.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++GENERATE_TREEVIEW = YES
++
++# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
++# doxygen will group on one line in the generated HTML documentation.
++#
++# Note that a value of 0 will completely suppress the enum values from appearing
++# in the overview section.
++# Minimum value: 0, maximum value: 20, default value: 4.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++ENUM_VALUES_PER_LINE = 4
++
++# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
++# to set the initial width (in pixels) of the frame in which the tree is shown.
++# Minimum value: 0, maximum value: 1500, default value: 250.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++TREEVIEW_WIDTH = 180
++
++# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
++# external symbols imported via tag files in a separate window.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++EXT_LINKS_IN_WINDOW = NO
++
++# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
++# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
++# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
++# the HTML output. These images will generally look nicer at scaled resolutions.
++# Possible values are: png (the default) and svg (looks nicer but requires the
++# pdf2svg or inkscape tool).
++# The default value is: png.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++HTML_FORMULA_FORMAT = png
++
++# Use this tag to change the font size of LaTeX formulas included as images in
++# the HTML documentation. When you change the font size after a successful
++# doxygen run you need to manually remove any form_*.png images from the HTML
++# output directory to force them to be regenerated.
++# Minimum value: 8, maximum value: 50, default value: 10.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++FORMULA_FONTSIZE = 10
++
++# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
++# generated for formulas are transparent PNGs. Transparent PNGs are not
++# supported properly for IE 6.0, but are supported on all modern browsers.
++#
++# Note that when changing this option you need to delete any form_*.png files in
++# the HTML output directory before the changes take effect.
++# The default value is: YES.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++FORMULA_TRANSPARENT = YES
++
++# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
++# to create new LaTeX commands to be used in formulas as building blocks. See
++# the section "Including formulas" for details.
++
++FORMULA_MACROFILE =
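++
++# For illustration only: a macro file could define building blocks such as
++#
++# \newcommand{\E}[1]{\mathrm{E}\left[#1\right]}
++#
++# which formulas in the documentation could then reference as \E{X}.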
++
++# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
++# https://www.mathjax.org) which uses client side JavaScript for the rendering
++# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
++# installed or if you want the formulas to look prettier in the HTML output.
++# When
++# enabled you may also need to install MathJax separately and configure the path
++# to it using the MATHJAX_RELPATH option.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++USE_MATHJAX = NO
++
++# When MathJax is enabled you can set the default output format to be used for
++# the MathJax output. See the MathJax site (see:
++# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
++# Possible values are: HTML-CSS (which is slower, but has the best
++# compatibility), NativeMML (i.e. MathML) and SVG.
++# The default value is: HTML-CSS.
++# This tag requires that the tag USE_MATHJAX is set to YES.
++
++MATHJAX_FORMAT = HTML-CSS
++
++# When MathJax is enabled you need to specify the location relative to the HTML
++# output directory using the MATHJAX_RELPATH option. The destination directory
++# should contain the MathJax.js script. For instance, if the mathjax directory
++# is located at the same level as the HTML output directory, then
++# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
++# Content Delivery Network so you can quickly see the result without installing
++# MathJax. However, it is strongly recommended to install a local copy of
++# MathJax from https://www.mathjax.org before deployment.
++# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
++# This tag requires that the tag USE_MATHJAX is set to YES.
++
++MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
++
++# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
++# extension names that should be enabled during MathJax rendering. For example
++# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
++# This tag requires that the tag USE_MATHJAX is set to YES.
++
++MATHJAX_EXTENSIONS =
++
++# The MATHJAX_CODEFILE tag can be used to specify a file with JavaScript pieces
++# of code that will be used on startup of MathJax. See the MathJax site
++# (see:
++# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
++# example see the documentation.
++# This tag requires that the tag USE_MATHJAX is set to YES.
++
++MATHJAX_CODEFILE =
++
++# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
++# the HTML output. The underlying search engine uses javascript and DHTML and
++# should work on any modern browser. Note that when using HTML help
++# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
++# there is already a search function so this one should typically be disabled.
++# For large projects the JavaScript-based search engine can be slow; in that
++# case enabling SERVER_BASED_SEARCH may provide a better solution. It is
++# possible to
++# search using the keyboard; to jump to the search box use <access key> + S
++# (what the <access key> is depends on the OS and browser, but it is typically
++# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
++# key> to jump into the search results window, the results can be navigated
++# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
++# the search. The filter options can be selected when the cursor is inside the
++# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
++# to select a filter and <Enter> or <escape> to activate or cancel the filter
++# option.
++# The default value is: YES.
++# This tag requires that the tag GENERATE_HTML is set to YES.
++
++SEARCHENGINE = NO
++
++# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
++# implemented using a web server instead of a web client using JavaScript. There
++# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
++# setting. When disabled, doxygen will generate a PHP script for searching and
++# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
++# and searching needs to be provided by external tools. See the section
++# "External Indexing and Searching" for details.
++# The default value is: NO.
++# This tag requires that the tag SEARCHENGINE is set to YES.
++
++SERVER_BASED_SEARCH = NO
++
++# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
++# script for searching. Instead the search results are written to an XML file
++# which needs to be processed by an external indexer. Doxygen will invoke an
++# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
++# search results.
++#
++# Doxygen ships with an example indexer (doxyindexer) and search engine
++# (doxysearch.cgi) which are based on the open source search engine library
++# Xapian (see:
++# https://xapian.org/).
++#
++# See the section "External Indexing and Searching" for details.
++# The default value is: NO.
++# This tag requires that the tag SEARCHENGINE is set to YES.
++
++EXTERNAL_SEARCH = NO
++
++# The SEARCHENGINE_URL should point to a search engine hosted by a web server
++# which will return the search results when EXTERNAL_SEARCH is enabled.
++#
++# Doxygen ships with an example indexer (doxyindexer) and search engine
++# (doxysearch.cgi) which are based on the open source search engine library
++# Xapian (see:
++# https://xapian.org/). See the section "External Indexing and Searching" for
++# details.
++# This tag requires that the tag SEARCHENGINE is set to YES.
++
++SEARCHENGINE_URL =
++
++# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
++# search data is written to a file for indexing by an external tool. With the
++# SEARCHDATA_FILE tag the name of this file can be specified.
++# The default file is: searchdata.xml.
++# This tag requires that the tag SEARCHENGINE is set to YES.
++
++SEARCHDATA_FILE = searchdata.xml
++
++# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
++# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
++# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
++# projects and redirect the results back to the right project.
++# This tag requires that the tag SEARCHENGINE is set to YES.
++
++EXTERNAL_SEARCH_ID =
++
++# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
++# projects other than the one defined by this configuration file, but that are
++# all added to the same external search index. Each project needs to have a
++# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
++# a project to a relative location where the documentation can be found. The
++# format is:
++# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
++# This tag requires that the tag SEARCHENGINE is set to YES.
++
++EXTRA_SEARCH_MAPPINGS =
++
++#---------------------------------------------------------------------------
++# Configuration options related to the LaTeX output
++#---------------------------------------------------------------------------
++
++# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
++# The default value is: YES.
++
++GENERATE_LATEX = NO
++
++# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
++# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
++# it.
++# The default directory is: latex.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_OUTPUT = latex
++
++# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
++# invoked.
++#
++# Note that when not enabling USE_PDFLATEX the default is latex; when enabling
++# USE_PDFLATEX the default is pdflatex, and if in the latter case latex is
++# chosen this is overwritten by pdflatex. For specific output languages the
++# default may have been set differently; this depends on the implementation of
++# the output language.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_CMD_NAME = latex
++
++# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
++# index for LaTeX.
++# Note: This tag is used in the Makefile / make.bat.
++# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
++# (.tex).
++# The default file is: makeindex.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++MAKEINDEX_CMD_NAME = makeindex
++
++# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
++# generate index for LaTeX. In case there is no backslash (\) as first character
++# it will be automatically added in the LaTeX code.
++# Note: This tag is used in the generated output file (.tex).
++# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
++# The default value is: makeindex.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_MAKEINDEX_CMD = makeindex
++
++# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
++# documents. This may be useful for small projects and may help to save some
++# trees in general.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++COMPACT_LATEX = NO
++
++# The PAPER_TYPE tag can be used to set the paper type that is used by the
++# printer.
++# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
++# 14 inches) and executive (7.25 x 10.5 inches).
++# The default value is: a4.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++PAPER_TYPE = a4
++
++# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
++# that should be included in the LaTeX output. The package can be specified just
++# by its name or with the correct syntax as to be used with the LaTeX
++# \usepackage command. To get the times font for instance you can specify:
++# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
++# To use the option intlimits with the amsmath package you can specify:
++# EXTRA_PACKAGES=[intlimits]{amsmath}
++# If left blank no extra packages will be included.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++EXTRA_PACKAGES =
++
++# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
++# generated LaTeX document. The header should contain everything until the first
++# chapter. If it is left blank doxygen will generate a standard header. See
++# section "Doxygen usage" for information on how to let doxygen write the
++# default header to a separate file.
++#
++# Note: Only use a user-defined header if you know what you are doing! The
++# following commands have a special meaning inside the header: $title,
++# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
++# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
++# string; for the replacement values of the other commands the user is referred
++# to HTML_HEADER.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_HEADER =
++
++# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
++# generated LaTeX document. The footer should contain everything after the last
++# chapter. If it is left blank doxygen will generate a standard footer. See
++# LATEX_HEADER for more information on how to generate a default footer and what
++# special commands can be used inside the footer.
++#
++# Note: Only use a user-defined footer if you know what you are doing!
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_FOOTER =
++
++# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
++# LaTeX style sheets that are included after the standard style sheets created
++# by doxygen. Using this option one can overrule certain style aspects. Doxygen
++# will copy the style sheet files to the output directory.
++# Note: The order of the extra style sheet files is of importance (e.g. the last
++# style sheet in the list overrules the setting of the previous ones in the
++# list).
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_EXTRA_STYLESHEET =
++
++# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
++# other source files which should be copied to the LATEX_OUTPUT output
++# directory. Note that the files will be copied as-is; there are no commands or
++# markers available.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_EXTRA_FILES =
++
++# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
++# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
++# contain links (just like the HTML output) instead of page references. This
++# makes the output suitable for online browsing using a PDF viewer.
++# The default value is: YES.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++PDF_HYPERLINKS = NO
++
++# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as
++# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
++# files. Set this option to YES to get higher quality PDF documentation.
++#
++# See also section LATEX_CMD_NAME for selecting the engine.
++# The default value is: YES.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++USE_PDFLATEX = NO
++
++# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
++# command to the generated LaTeX files. This will instruct LaTeX to keep running
++# if errors occur, instead of asking the user for help. This option is also used
++# when generating formulas in HTML.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_BATCHMODE = NO
++
++# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
++# index chapters (such as File Index, Compound Index, etc.) in the output.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_HIDE_INDICES = NO
++
++# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
++# code with syntax highlighting in the LaTeX output.
++#
++# Note that which sources are shown also depends on other settings such as
++# SOURCE_BROWSER.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_SOURCE_CODE = NO
++
++# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
++# bibliography, e.g. plainnat, or ieeetr. See
++# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
++# The default value is: plain.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_BIB_STYLE = plain
++
++# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
++# page will contain the date and time when the page was generated. Setting this
++# to NO can help when comparing the output of multiple runs.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_TIMESTAMP = NO
++
++# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
++# path from which the emoji images will be read. If a relative path is entered,
++# it will be relative to the LATEX_OUTPUT directory. If left blank the
++# LATEX_OUTPUT directory will be used.
++# This tag requires that the tag GENERATE_LATEX is set to YES.
++
++LATEX_EMOJI_DIRECTORY =
++
++#---------------------------------------------------------------------------
++# Configuration options related to the RTF output
++#---------------------------------------------------------------------------
++
++# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
++# RTF output is optimized for Word 97 and may not look too pretty with other RTF
++# readers/editors.
++# The default value is: NO.
++
++GENERATE_RTF = NO
++
++# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
++# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
++# it.
++# The default directory is: rtf.
++# This tag requires that the tag GENERATE_RTF is set to YES.
++
++RTF_OUTPUT = rtf
++
++# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
++# documents. This may be useful for small projects and may help to save some
++# trees in general.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_RTF is set to YES.
++
++COMPACT_RTF = NO
++
++# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
++# contain hyperlink fields. The RTF file will contain links (just like the HTML
++# output) instead of page references. This makes the output suitable for online
++# browsing using Word or other Word-compatible readers that support those
++# fields.
++#
++# Note: WordPad (write) and others do not support links.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_RTF is set to YES.
++
++RTF_HYPERLINKS = NO
++
++# Load stylesheet definitions from file. Syntax is similar to doxygen's
++# configuration file, i.e. a series of assignments. You only have to provide
++# replacements; missing definitions are set to their default value.
++#
++# See also section "Doxygen usage" for information on how to generate the
++# default style sheet that doxygen normally uses.
++# This tag requires that the tag GENERATE_RTF is set to YES.
++
++RTF_STYLESHEET_FILE =
++
++# Set optional variables used in the generation of an RTF document. Syntax is
++# similar to doxygen's configuration file. A template extensions file can be
++# generated using doxygen -e rtf extensionFile.
++# This tag requires that the tag GENERATE_RTF is set to YES.
++
++RTF_EXTENSIONS_FILE =
++
++# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
++# with syntax highlighting in the RTF output.
++#
++# Note that which sources are shown also depends on other settings such as
++# SOURCE_BROWSER.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_RTF is set to YES.
++
++RTF_SOURCE_CODE = NO
++
++#---------------------------------------------------------------------------
++# Configuration options related to the man page output
++#---------------------------------------------------------------------------
++
++# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
++# classes and files.
++# The default value is: NO.
++
++GENERATE_MAN = NO
++
++# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
++# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
++# it. A directory man3 will be created inside the directory specified by
++# MAN_OUTPUT.
++# The default directory is: man.
++# This tag requires that the tag GENERATE_MAN is set to YES.
++
++MAN_OUTPUT = man
++
++# The MAN_EXTENSION tag determines the extension that is added to the generated
++# man pages. In case the manual section does not start with a number, the number
++# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
++# optional.
++# The default value is: .3.
++# This tag requires that the tag GENERATE_MAN is set to YES.
++
++MAN_EXTENSION = .3
++
++# The MAN_SUBDIR tag determines the name of the directory created within
++# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
++# MAN_EXTENSION with the initial . removed.
++# This tag requires that the tag GENERATE_MAN is set to YES.
++
++MAN_SUBDIR =
++
++# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
++# will generate one additional man file for each entity documented in the real
++# man page(s). These additional files only source the real man page, but without
++# them the man command would be unable to find the correct page.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_MAN is set to YES.
++
++MAN_LINKS = NO
++
++#---------------------------------------------------------------------------
++# Configuration options related to the XML output
++#---------------------------------------------------------------------------
++
++# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
++# captures the structure of the code including all documentation.
++# The default value is: NO.
++
++GENERATE_XML = NO
++
++# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
++# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
++# it.
++# The default directory is: xml.
++# This tag requires that the tag GENERATE_XML is set to YES.
++
++XML_OUTPUT = xml
++
++# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
++# listings (including syntax highlighting and cross-referencing information) to
++# the XML output. Note that enabling this will significantly increase the size
++# of the XML output.
++# The default value is: YES.
++# This tag requires that the tag GENERATE_XML is set to YES.
++
++XML_PROGRAMLISTING = NO
++
++# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
++# namespace members in file scope as well, matching the HTML output.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_XML is set to YES.
++
++XML_NS_MEMB_FILE_SCOPE = NO
++
++#---------------------------------------------------------------------------
++# Configuration options related to the DOCBOOK output
++#---------------------------------------------------------------------------
++
++# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
++# that can be used to generate PDF.
++# The default value is: NO.
++
++GENERATE_DOCBOOK = NO
++
++# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
++# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
++# front of it.
++# The default directory is: docbook.
++# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
++
++DOCBOOK_OUTPUT = docbook
++
++# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
++# program listings (including syntax highlighting and cross-referencing
++# information) in the DOCBOOK output. Note that enabling this will significantly
++# increase the size of the DOCBOOK output.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
++
++DOCBOOK_PROGRAMLISTING = NO
++
++#---------------------------------------------------------------------------
++# Configuration options for the AutoGen Definitions output
++#---------------------------------------------------------------------------
++
++# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
++# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
++# the structure of the code including all documentation. Note that this feature
++# is still experimental and incomplete at the moment.
++# The default value is: NO.
++
++GENERATE_AUTOGEN_DEF = NO
++
++#---------------------------------------------------------------------------
++# Configuration options related to the Perl module output
++#---------------------------------------------------------------------------
++
++# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
++# file that captures the structure of the code including all documentation.
++#
++# Note that this feature is still experimental and incomplete at the moment.
++# The default value is: NO.
++
++GENERATE_PERLMOD = NO
++
++# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
++# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
++# output from the Perl module output.
++# The default value is: NO.
++# This tag requires that the tag GENERATE_PERLMOD is set to YES.
++
++PERLMOD_LATEX = NO
++
++# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
++# formatted so it can be parsed by a human reader. This is useful if you want to
++# understand what is going on. On the other hand, if this tag is set to NO, the
++# size of the Perl module output will be much smaller and Perl will parse it
++# just the same.
++# The default value is: YES.
++# This tag requires that the tag GENERATE_PERLMOD is set to YES.
++
++PERLMOD_PRETTY = YES
++
++# The names of the make variables in the generated doxyrules.make file are
++# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
++# so different doxyrules.make files included by the same Makefile don't
++# overwrite each other's variables.
++# This tag requires that the tag GENERATE_PERLMOD is set to YES.
++
++PERLMOD_MAKEVAR_PREFIX =
++
++#---------------------------------------------------------------------------
++# Configuration options related to the preprocessor
++#---------------------------------------------------------------------------
++
++# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
++# C-preprocessor directives found in the sources and include files.
++# The default value is: YES.
++
++ENABLE_PREPROCESSING = YES
++
++# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
++# in the source code. If set to NO, only conditional compilation will be
++# performed. Macro expansion can be done in a controlled way by setting
++# EXPAND_ONLY_PREDEF to YES.
++# The default value is: NO.
++# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
++
++MACRO_EXPANSION = YES
++
++# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
++# the macro expansion is limited to the macros specified with the PREDEFINED and
++# EXPAND_AS_DEFINED tags.
++# The default value is: NO.
++# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
++
++EXPAND_ONLY_PREDEF = NO
++
++# If the SEARCH_INCLUDES tag is set to YES, the include files in the
++# INCLUDE_PATH will be searched if a #include is found.
++# The default value is: YES.
++# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
++
++SEARCH_INCLUDES = YES
++
++# The INCLUDE_PATH tag can be used to specify one or more directories that
++# contain include files that are not input files but should be processed by the
++# preprocessor.
++# This tag requires that the tag SEARCH_INCLUDES is set to YES.
++
++INCLUDE_PATH =
++
++# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
++# patterns (like *.h and *.hpp) to filter out the header-files in the
++# directories. If left blank, the patterns specified with FILE_PATTERNS will be
++# used.
++# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
++
++INCLUDE_FILE_PATTERNS =
++
++# The PREDEFINED tag can be used to specify one or more macro names that are
++# defined before the preprocessor is started (similar to the -D option of e.g.
++# gcc). The argument of the tag is a list of macros of the form: name or
++# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
++# is assumed. To prevent a macro definition from being undefined via #undef or
++# recursively expanded use the := operator instead of the = operator.
++# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
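++#
++# For illustration only (hypothetical values, not used by this configuration):
++#   PREDEFINED = HAVE_FOO DEBUG_LEVEL=3 KEA_VERSION:=1.0
++# would define HAVE_FOO as 1 and DEBUG_LEVEL as 3, while the := operator
++# protects KEA_VERSION from being undefined via #undef.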
++
++PREDEFINED =
++
++# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
++# tag can be used to specify a list of macro names that should be expanded. The
++# macro definition that is found in the sources will be used. Use the PREDEFINED
++# tag if you want to use a different macro definition that overrules the
++# definition found in the source code.
++# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
++
++EXPAND_AS_DEFINED =
++
++# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
++# remove all references to function-like macros that are alone on a line, have
++# an all uppercase name, and do not end with a semicolon. Such function macros
++# are typically used for boiler-plate code, and will confuse the parser if not
++# removed.
++# The default value is: YES.
++# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
++
++SKIP_FUNCTION_MACROS = YES
++
++#---------------------------------------------------------------------------
++# Configuration options related to external references
++#---------------------------------------------------------------------------
++
++# The TAGFILES tag can be used to specify one or more tag files. For each tag
++# file the location of the external documentation should be added. The format of
++# a tag file without this location is as follows:
++# TAGFILES = file1 file2 ...
++# Adding location for the tag files is done as follows:
++# TAGFILES = file1=loc1 "file2 = loc2" ...
++# where loc1 and loc2 can be relative or absolute paths or URLs. See the
++# section "Linking to external documentation" for more information about the use
++# of tag files.
++# Note: Each tag file must have a unique name (where the name does NOT include
++# the path). If a tag file is not located in the directory in which doxygen is
++# run, you must also specify the path to the tagfile here.
++
++TAGFILES =
++
++# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
++# tag file that is based on the input files it reads. See section "Linking to
++# external documentation" for more information about the usage of tag files.
++
++GENERATE_TAGFILE =
++
++# If the ALLEXTERNALS tag is set to YES, all external classes will be listed in
++# the class index. If set to NO, only the inherited external classes will be
++# listed.
++# The default value is: NO.
++
++ALLEXTERNALS = NO
++
++# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
++# in the modules index. If set to NO, only the current project's groups will be
++# listed.
++# The default value is: YES.
++
++EXTERNAL_GROUPS = YES
++
++# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
++# the related pages index. If set to NO, only the current project's pages will
++# be listed.
++# The default value is: YES.
++
++EXTERNAL_PAGES = YES
++
++#---------------------------------------------------------------------------
++# Configuration options related to the dot tool
++#---------------------------------------------------------------------------
++
++# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
++# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
++# NO turns the diagrams off. Note that this option also works with HAVE_DOT
++# disabled, but it is recommended to install and use dot, since it yields more
++# powerful graphs.
++# The default value is: YES.
++
++CLASS_DIAGRAMS = YES
++
++# You can include diagrams made with dia in doxygen documentation. Doxygen will
++# then run dia to produce the diagram and insert it in the documentation. The
++# DIA_PATH tag allows you to specify the directory where the dia binary resides.
++# If left empty dia is assumed to be found in the default search path.
++
++DIA_PATH =
++
++# If set to YES the inheritance and collaboration graphs will hide inheritance
++# and usage relations if the target is undocumented or is not a class.
++# The default value is: YES.
++
++HIDE_UNDOC_RELATIONS = YES
++
++# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
++# available from the path. This tool is part of Graphviz (see:
++# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
++# Bell Labs. The other options in this section have no effect if this option is
++# set to NO.
++# The default value is: NO.
++
++HAVE_DOT = YES
++
++# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
++# to run in parallel. When set to 0 doxygen will base this on the number of
++# processors available in the system. You can set it explicitly to a value
++# larger than 0 to get control over the balance between CPU load and processing
++# speed.
++# Minimum value: 0, maximum value: 32, default value: 0.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_NUM_THREADS = 0
++
++# When you want a differently looking font in the dot files that doxygen
++# generates you can specify the font name using DOT_FONTNAME. You need to make
++# sure dot is able to find the font, which can be done by putting it in a
++# standard location or by setting the DOTFONTPATH environment variable or by
++# setting DOT_FONTPATH to the directory containing the font.
++# The default value is: Helvetica.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_FONTNAME = Helvetica
++
++# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
++# dot graphs.
++# Minimum value: 4, maximum value: 24, default value: 10.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_FONTSIZE = 10
++
++# By default doxygen will tell dot to use the default font as specified with
++# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
++# the path where dot can find it using this tag.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_FONTPATH =
++
++# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
++# each documented class showing the direct and indirect inheritance relations.
++# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
++# The default value is: YES.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++CLASS_GRAPH = YES
++
++# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
++# graph for each documented class showing the direct and indirect implementation
++# dependencies (inheritance, containment, and class references variables) of the
++# class with other documented classes.
++# The default value is: YES.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++COLLABORATION_GRAPH = NO
++
++# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
++# groups, showing the direct groups dependencies.
++# The default value is: YES.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++GROUP_GRAPHS = YES
++
++# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
++# collaboration diagrams in a style similar to the OMG's Unified Modeling
++# Language.
++# The default value is: NO.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++UML_LOOK = NO
++
++# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
++# class node. If there are many fields or methods and many nodes the graph may
++# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
++# number of items for each type to make the size more manageable. Set this to 0
++# for no limit. Note that the threshold may be exceeded by 50% before the limit
++# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
++# but if the number exceeds 15, the total amount of fields shown is limited to
++# 10.
++# Minimum value: 0, maximum value: 100, default value: 10.
++# This tag requires that the tag UML_LOOK is set to YES.
++
++UML_LIMIT_NUM_FIELDS = 10
++
++# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and
++# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS
++# tag is set to YES, doxygen will add type and arguments for attributes and
++# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen
++# will not generate fields with class member information in the UML graphs. The
++# class diagrams will look similar to the default class diagrams but using UML
++# notation for the relationships.
++# Possible values are: NO, YES and NONE.
++# The default value is: NO.
++# This tag requires that the tag UML_LOOK is set to YES.
++
++DOT_UML_DETAILS = NO
++
++# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
++# to display on a single line. If the actual line length exceeds this threshold
++# significantly it will be wrapped across multiple lines. Some heuristics are
++# applied to avoid ugly line breaks.
++# Minimum value: 0, maximum value: 1000, default value: 17.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_WRAP_THRESHOLD = 17
++
++# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
++# collaboration graphs will show the relations between templates and their
++# instances.
++# The default value is: NO.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++TEMPLATE_RELATIONS = NO
++
++# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
++# YES then doxygen will generate a graph for each documented file showing the
++# direct and indirect include dependencies of the file with other documented
++# files.
++# The default value is: YES.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++INCLUDE_GRAPH = YES
++
++# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
++# set to YES then doxygen will generate a graph for each documented file showing
++# the direct and indirect include dependencies of the file with other documented
++# files.
++# The default value is: YES.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++INCLUDED_BY_GRAPH = YES
++
++# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
++# dependency graph for every global function or class method.
++#
++# Note that enabling this option will significantly increase the time of a run.
++# So in most cases it will be better to enable call graphs for selected
++# functions only using the \callgraph command. Disabling a call graph can be
++# accomplished by means of the command \hidecallgraph.
++# The default value is: NO.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++CALL_GRAPH = YES
++
++# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
++# dependency graph for every global function or class method.
++#
++# Note that enabling this option will significantly increase the time of a run.
++# So in most cases it will be better to enable caller graphs for selected
++# functions only using the \callergraph command. Disabling a caller graph can be
++# accomplished by means of the command \hidecallergraph.
++# The default value is: NO.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++CALLER_GRAPH = NO
++
++# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
++# graphical hierarchy of all classes instead of a textual one.
++# The default value is: YES.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++GRAPHICAL_HIERARCHY = YES
++
++# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
++# dependencies a directory has on other directories in a graphical way. The
++# dependency relations are determined by the #include relations between the
++# files in the directories.
++# The default value is: YES.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DIRECTORY_GRAPH = YES
++
++# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
++# generated by dot. For an explanation of the image formats see the section
++# output formats in the documentation of the dot tool (Graphviz (see:
++# http://www.graphviz.org/)).
++# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
++# to make the SVG files visible in IE 9+ (other browsers do not have this
++# requirement).
++# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
++# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
++# png:gdiplus:gdiplus.
++# The default value is: png.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_IMAGE_FORMAT = png
++
++# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
++# enable generation of interactive SVG images that allow zooming and panning.
++#
++# Note that this requires a modern browser other than Internet Explorer. Tested
++# and working are Firefox, Chrome, Safari, and Opera.
++# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
++# the SVG files visible. Older versions of IE do not have SVG support.
++# The default value is: NO.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++INTERACTIVE_SVG = NO
++
++# The DOT_PATH tag can be used to specify the path where the dot tool can be
++# found. If left blank, it is assumed the dot tool can be found in the path.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_PATH =
++
++# The DOTFILE_DIRS tag can be used to specify one or more directories that
++# contain dot files that are included in the documentation (see the \dotfile
++# command).
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOTFILE_DIRS =
++
++# The MSCFILE_DIRS tag can be used to specify one or more directories that
++# contain msc files that are included in the documentation (see the \mscfile
++# command).
++
++MSCFILE_DIRS =
++
++# The DIAFILE_DIRS tag can be used to specify one or more directories that
++# contain dia files that are included in the documentation (see the \diafile
++# command).
++
++DIAFILE_DIRS =
++
++# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
++# path where java can find the plantuml.jar file. If left blank, it is assumed
++# PlantUML is not used or called during a preprocessing step. Doxygen will
++# generate a warning when it encounters a \startuml command in this case and
++# will not generate output for the diagram.
++
++PLANTUML_JAR_PATH =
++
++# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
++# configuration file for plantuml.
++
++PLANTUML_CFG_FILE =
++
++# When using plantuml, the specified paths are searched for files specified by
++# the !include statement in a plantuml block.
++
++PLANTUML_INCLUDE_PATH =
++
++# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
++# that will be shown in the graph. If the number of nodes in a graph becomes
++# larger than this value, doxygen will truncate the graph, which is visualized
++# by representing a node as a red box. Note that if the number of direct
++# children of the root node in a graph is already larger than
++# DOT_GRAPH_MAX_NODES, the graph will not be shown at all. Also note that
++# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
++# Minimum value: 0, maximum value: 10000, default value: 50.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_GRAPH_MAX_NODES = 200
++
++# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
++# generated by dot. A depth value of 3 means that only nodes reachable from the
++# root by following a path via at most 3 edges will be shown. Nodes that lay
++# further from the root node will be omitted. Note that setting this option to 1
++# or 2 may greatly reduce the computation time needed for large code bases. Also
++# note that the size of a graph can be further restricted by
++# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
++# Minimum value: 0, maximum value: 1000, default value: 0.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++MAX_DOT_GRAPH_DEPTH = 0
++
++# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
++# background. This is disabled by default, because dot on Windows does not seem
++# to support this out of the box.
++#
++# Warning: Depending on the platform used, enabling this option may lead to
++# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
++# read).
++# The default value is: NO.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_TRANSPARENT = NO
++
++# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
++# files in one run (i.e. multiple -o and -T options on the command line). This
++# makes dot run faster, but since only newer versions of dot (>1.8.10) support
++# this, this feature is disabled by default.
++# The default value is: NO.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++DOT_MULTI_TARGETS = NO
++
++# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
++# explaining the meaning of the various boxes and arrows in the dot generated
++# graphs.
++# The default value is: YES.
++# This tag requires that the tag HAVE_DOT is set to YES.
++
++GENERATE_LEGEND = YES
++
++# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
++# files that are used to generate the various graphs.
++#
++# Note: This setting is not only used for dot files but also for msc and
++# plantuml temporary files.
++# The default value is: YES.
++
++DOT_CLEANUP = YES
+diff --git a/src/hooks/dhcp/ping_check/Makefile.am b/src/hooks/dhcp/ping_check/Makefile.am
+new file mode 100644
+index 0000000000..a7ea17f400
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/Makefile.am
+@@ -0,0 +1,104 @@
++SUBDIRS = . libloadtests tests
++
++AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
++AM_CPPFLAGS += $(BOOST_INCLUDES) $(CRYPTO_CFLAGS) $(CRYPTO_INCLUDES)
++AM_CXXFLAGS = $(KEA_CXXFLAGS)
++
++# Ensure that the message file and the doxygen file are included in the distribution
++EXTRA_DIST = ping_check_messages.mes
++
++CLEANFILES = *.gcno *.gcda
++
++# convenience archive
++
++noinst_LTLIBRARIES = libping_check.la
++
++libping_check_la_SOURCES = ping_check_callouts.cc
++libping_check_la_SOURCES += ping_check_log.cc ping_check_log.h
++libping_check_la_SOURCES += ping_check_messages.cc ping_check_messages.h
++libping_check_la_SOURCES += icmp_endpoint.h icmp_socket.h
++libping_check_la_SOURCES += ping_context.cc ping_context.h
++libping_check_la_SOURCES += ping_context_store.cc ping_context_store.h
++libping_check_la_SOURCES += icmp_msg.h icmp_msg.cc
++libping_check_la_SOURCES += ping_channel.cc ping_channel.h
++libping_check_la_SOURCES += ping_check_mgr.cc ping_check_mgr.h
++libping_check_la_SOURCES += ping_check_config.cc ping_check_config.h
++libping_check_la_SOURCES += config_cache.cc config_cache.h
++libping_check_la_SOURCES += version.cc
++
++libping_check_la_CXXFLAGS = $(AM_CXXFLAGS)
++libping_check_la_CPPFLAGS = $(AM_CPPFLAGS)
++
++# install the shared object into $(libdir)/kea/hooks
++lib_hooksdir = $(libdir)/kea/hooks
++lib_hooks_LTLIBRARIES = libdhcp_ping_check.la
++
++libdhcp_ping_check_la_SOURCES =
++libdhcp_ping_check_la_LDFLAGS = $(AM_LDFLAGS)
++libdhcp_ping_check_la_LDFLAGS += -avoid-version -export-dynamic -module
++libdhcp_ping_check_la_LIBADD = libping_check.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/dhcpsrv/libkea-dhcpsrv.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/process/libkea-process.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/eval/libkea-eval.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/dhcp_ddns/libkea-dhcp_ddns.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/stats/libkea-stats.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/config/libkea-cfgclient.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/http/libkea-http.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/dhcp/libkea-dhcp++.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/hooks/libkea-hooks.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/database/libkea-database.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/cc/libkea-cc.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/asiolink/libkea-asiolink.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/dns/libkea-dns++.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/cryptolink/libkea-cryptolink.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/log/libkea-log.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/util/libkea-util.la
++libdhcp_ping_check_la_LIBADD += $(top_builddir)/src/lib/exceptions/libkea-exceptions.la
++libdhcp_ping_check_la_LIBADD += $(LOG4CPLUS_LIBS)
++libdhcp_ping_check_la_LIBADD += $(CRYPTO_LIBS)
++libdhcp_ping_check_la_LIBADD += $(BOOST_LIBS)
++
++# Doxygen documentation
++EXTRA_DIST += ping_check.dox Doxyfile
++
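++# A convenience target: running "make devel" in this directory generates the
++# hook's doxygen documentation under html/ (this assumes doxygen is installed
++# and on the PATH).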
++devel:
++ mkdir -p html
++ (cat Doxyfile; echo PROJECT_NUMBER=$(PACKAGE_VERSION)) | doxygen - > html/doxygen.log 2> html/doxygen-error.log
++ echo `grep -i ": warning:" html/doxygen-error.log | wc -l` warnings/errors detected.
++
++clean-local:
++ rm -rf html
++
++# If we want to get rid of all generated messages files, we need to use
++# make maintainer-clean. The proper way to introduce custom commands for
++# that operation is to define a maintainer-clean-local target. However,
++# make maintainer-clean also removes Makefile, so running configure script
++# is required. To make it easy to rebuild messages without going through
++# reconfigure, a new target messages-clean has been added.
++maintainer-clean-local:
++ rm -f ping_check_messages.h ping_check_messages.cc
++
++# To regenerate messages files, one can do:
++#
++# make messages-clean
++# make messages
++#
++# This is needed only when a .mes file is modified.
++messages-clean: maintainer-clean-local
++
++if GENERATE_MESSAGES
++
++# Define rule to build logging source files from message file
++messages: ping_check_messages.h ping_check_messages.cc
++ @echo Message files regenerated
++
++ping_check_messages.h ping_check_messages.cc: ping_check_messages.mes
++ (cd $(top_srcdir); \
++ $(abs_top_builddir)/src/lib/log/compiler/kea-msg-compiler src/hooks/dhcp/ping_check/ping_check_messages.mes)
++
++else
++
++messages ping_check_messages.h ping_check_messages.cc:
++ @echo Messages generation disabled. Configure with --enable-generate-messages to enable it.
++
++endif
+diff --git a/src/hooks/dhcp/ping_check/config_cache.cc b/src/hooks/dhcp/ping_check/config_cache.cc
+new file mode 100644
+index 0000000000..9a8f9dd4bb
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/config_cache.cc
+@@ -0,0 +1,107 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++
++#include <config_cache.h>
++#include <util/multi_threading_mgr.h>
++
++using namespace isc;
++using namespace isc::data;
++using namespace isc::dhcp;
++using namespace isc::util;
++using namespace std;
++
++namespace isc {
++namespace ping_check {
++
++PingCheckConfigPtr&
++ConfigCache::getGlobalConfig() {
++ return (global_config_);
++}
++
++void
++ConfigCache::setGlobalConfig(PingCheckConfigPtr& config) {
++ if (!config) {
++ isc_throw(BadValue, "ConfigCache - global config cannot be empty");
++ }
++
++ global_config_ = config;
++}
++
++bool
++ConfigCache::findConfig(const SubnetID& subnet_id, PingCheckConfigPtr& config) {
++ MultiThreadingLock lock(*mutex_);
++ return (findConfigInternal(subnet_id, config));
++}
++
++bool
++ConfigCache::findConfigInternal(const SubnetID& subnet_id, PingCheckConfigPtr& config) const {
++ auto it = configs_.find(subnet_id);
++ if (it != configs_.end()) {
++ config = it->second;
++ return (true);
++ }
++
++ config = PingCheckConfigPtr();
++ return (false);
++}
++
++PingCheckConfigPtr
++ConfigCache::parseAndCacheConfig(const SubnetID& subnet_id, ConstElementPtr& user_context) {
++ PingCheckConfigPtr config;
++ if (user_context) {
++ ConstElementPtr ping_check_params = user_context->get("ping-check");
++ if (ping_check_params) {
++ // Copy construct from global to start with.
++ config.reset(new PingCheckConfig(*getGlobalConfig()));
++
++            // Now parse in subnet-specific values. This may throw a
++            // DhcpConfigError but that's OK, it is dealt with by the caller.
++            config->parse(ping_check_params);
++ }
++ }
++
++ // Cache the config. We allow empty configs so higher precedence scopes may
++ // override lower precedence scopes.
++ cacheConfig(subnet_id, config);
++ return (config);
++}
++
++void
++ConfigCache::cacheConfig(const SubnetID& subnet_id, PingCheckConfigPtr& config) {
++ MultiThreadingLock lock(*mutex_);
++ configs_[subnet_id] = config;
++}
++
++void
++ConfigCache::flush() {
++ MultiThreadingLock lock(*mutex_);
++ // Discard the contents.
++ configs_.clear();
++
++ // We use modification time to remember the last time we flushed.
++ updateModificationTime();
++}
++
++size_t
++ConfigCache::size() {
++ MultiThreadingLock lock(*mutex_);
++ return (configs_.size());
++}
++
++boost::posix_time::ptime
++ConfigCache::getLastFlushTime() {
++ MultiThreadingLock lock(*mutex_);
++ return (BaseStampedElement::getModificationTime());
++}
++
++} // end of namespace ping_check
++} // end of namespace isc
+diff --git a/src/hooks/dhcp/ping_check/config_cache.h b/src/hooks/dhcp/ping_check/config_cache.h
+new file mode 100644
+index 0000000000..b69cf6f124
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/config_cache.h
+@@ -0,0 +1,146 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef CONFIG_CACHE_H
++#define CONFIG_CACHE_H
++
++#include <ping_check_config.h>
++#include <cc/base_stamped_element.h>
++#include <cc/data.h>
++#include <dhcpsrv/subnet.h>
++
++#include <map>
++#include <mutex>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief ConfigCache stores ping check config per subnet
++///
++/// The intent is to parse a subnet's ping-check parameters from its
++/// user-context as few times as possible, rather than on every ping check
++/// request, while still allowing run-time updates via the config backend or
++/// subnet commands.
++///
++/// For every subnet we store:
++///
++/// -# subnet id
++/// -# PingCheckConfig pointer
++/// where:
++/// - an empty config pointer means the subnet does not specify a ping check
++///   config
++/// - a non-empty pointer means the subnet specifies at least some ping check
++///   parameters
++///
++/// Each time we clear the cache we update the modification time.
++///
++/// When presented with a subnet:
++///
++/// 1. no cache entry:
++///    parse the subnet's user-context and cache the result
++///
++/// 2. entry exists:
++///    - subnet mod time >= last flush:
++///      the cache is stale, so flush it, then parse and cache again
++///    - subnet mod time < last flush:
++///      use the cached entry
++///
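++/// A minimal usage sketch (illustrative only; the subnet variable and the
++/// surrounding hook logic are hypothetical):
++///
++/// @code
++/// ConfigCachePtr cache(new ConfigCache());
++///
++/// PingCheckConfigPtr config;
++/// if (!cache->findConfig(subnet->getID(), config)) {
++///     // Not cached yet: parse the subnet's user-context and cache it.
++///     auto user_context = subnet->getContext();
++///     config = cache->parseAndCacheConfig(subnet->getID(), user_context);
++/// }
++///
++/// if (!config) {
++///     // The subnet defines no ping-check scope: use the global config.
++///     config = cache->getGlobalConfig();
++/// }
++/// @endcode
++///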
++class ConfigCache : public data::BaseStampedElement {
++public:
++ /// @brief Constructor
++ ConfigCache() : configs_(), global_config_(new PingCheckConfig()), mutex_(new std::mutex) {
++ }
++
++ /// @brief Destructor
++ virtual ~ConfigCache() = default;
++
++ /// @brief Get the config for a given subnet.
++ ///
++ /// @param subnet_id ID of the subnet for which the config is desired.
++ /// @param[out] config a reference to a pointer in which to store the
++ /// config if found. If there is no entry for the subnet, it will be set
++ /// to an empty pointer.
++ ///
++ /// @return True if an entry for subnet was found, false otherwise. This
++ /// allows callers to distinguish between unknown subnets (entries that do
++ /// not exist) and subnets that are known but do not define a config.
++ bool findConfig(const dhcp::SubnetID& subnet_id,
++ PingCheckConfigPtr& config);
++
++    /// @brief Parses the ping-check config from the subnet's user-context
++    /// and caches it for the given subnet.
++ ///
++ /// @param subnet_id ID of the subnet for which the config is desired.
++ /// @param user_context user-context Element map of the subnet.
++ ///
++ /// @return pointer to the parsed config.
++    /// @throw DhcpConfigError if an error occurs during config parsing.
++ PingCheckConfigPtr parseAndCacheConfig(const dhcp::SubnetID& subnet_id,
++ data::ConstElementPtr& user_context);
++
++ /// @brief Adds (or replaces) the config for a given subnet to the cache.
++ ///
++ /// @param subnet_id ID of the subnet for which the config is desired.
++ /// @param config pointer to the config to store. This may be an
++ /// empty pointer.
++ void cacheConfig(const dhcp::SubnetID& subnet_id,
++ PingCheckConfigPtr& config);
++
++ /// @brief Discards the subnet entries in the cache.
++ void flush();
++
++ /// @brief Get the number of entries in the cache.
++ ///
++ /// @return number of entries in the cache.
++ size_t size();
++
++ /// @brief Get the last time the cache was flushed.
++ ///
++ /// @return the last time the cache was flushed (or the time it was
++ /// created if it has never been flushed).
++ boost::posix_time::ptime getLastFlushTime();
++
++ /// @brief Get the global level configuration.
++ ///
++ /// @return pointer to the global configuration.
++ PingCheckConfigPtr& getGlobalConfig();
++
++ /// @brief Set the global level configuration.
++ ///
++ /// @param config configuration to store as the global configuration.
++ void setGlobalConfig(PingCheckConfigPtr& config);
++
++private:
++ /// @brief Get the config for a given subnet.
++ ///
++    /// Must be called from within a thread-safe context.
++ ///
++ /// @param subnet_id ID of the subnet for which the config is desired.
++ /// @param[out] config a reference to a pointer in which to store the
++ /// config if found. If there is no entry for the subnet, it will be set
++ /// to an empty pointer.
++ ///
++ /// @return True if an entry for subnet was found, false otherwise. This
++ /// allows callers to distinguish between unknown subnets (entries that do
++ /// not exist) and subnets that are known but do not define a config.
++ bool findConfigInternal(const dhcp::SubnetID& subnet_id,
++ PingCheckConfigPtr& config) const;
++
++    /// @brief Per subnet config cache. Note that the global config is stored
++ /// using SUBNET_ID_GLOBAL.
++ std::map<dhcp::SubnetID, PingCheckConfigPtr> configs_;
++
++ /// @brief Stores the global configuration parameters.
++ PingCheckConfigPtr global_config_;
++
++ /// @brief The mutex used to protect internal state.
++ const boost::scoped_ptr<std::mutex> mutex_;
++};
++
++/// @brief Defines a shared pointer to a ConfigCache.
++typedef boost::shared_ptr<ConfigCache> ConfigCachePtr;
++
++} // end of namespace ping_check
++} // end of namespace isc
++#endif
+diff --git a/src/hooks/dhcp/ping_check/icmp_endpoint.h b/src/hooks/dhcp/ping_check/icmp_endpoint.h
+new file mode 100644
+index 0000000000..5d047d286f
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/icmp_endpoint.h
+@@ -0,0 +1,134 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef ICMP_ENDPOINT_H
++#define ICMP_ENDPOINT_H 1
++
++#include <asiolink/io_endpoint.h>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief The @c ICMPEndpoint class is a concrete derived class of
++/// @c IOEndpoint that represents an endpoint of an ICMP packet.
++///
++/// Other notes about @c TCPEndpoint apply to this class, too.
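++///
++/// A minimal usage sketch (illustrative only):
++///
++/// @code
++/// ICMPEndpoint endpoint(asiolink::IOAddress("192.0.2.1"));
++/// const struct sockaddr& sa = endpoint.getSockAddr();
++/// @endcode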
++class ICMPEndpoint : public asiolink::IOEndpoint {
++public:
++ ///
++ /// @name Constructors and Destructor.
++ ///
++ //@{
++
++ /// @brief Default Constructor
++ ///
++ /// Creates an internal endpoint. This is expected to be set by some
++ /// external call.
++ ICMPEndpoint() :
++ asio_endpoint_placeholder_(new boost::asio::ip::icmp::endpoint()),
++ asio_endpoint_(*asio_endpoint_placeholder_)
++ {}
++
++ /// @brief Constructor from an address.
++ ///
++ /// @param address The IP address of the endpoint.
++ explicit ICMPEndpoint(const asiolink::IOAddress& address) :
++ asio_endpoint_placeholder_(
++ new boost::asio::ip::icmp::endpoint(boost::asio::ip::make_address(address.toText()), 0)),
++ asio_endpoint_(*asio_endpoint_placeholder_)
++ {}
++
++ /// @brief Copy Constructor from an ASIO ICMP endpoint.
++ ///
++ /// This constructor is designed to be an efficient wrapper for the
++ /// corresponding ASIO class, @c icmp::endpoint.
++ ///
++ /// @param asio_endpoint The ASIO representation of the ICMP endpoint.
++ explicit ICMPEndpoint(boost::asio::ip::icmp::endpoint& asio_endpoint) :
++ asio_endpoint_placeholder_(0), asio_endpoint_(asio_endpoint)
++ {}
++
++ /// @brief Constructor from a const ASIO ICMP endpoint.
++ ///
++ /// This constructor is designed to be an efficient wrapper for the
++ /// corresponding ASIO class, @c icmp::endpoint.
++ ///
++    /// @param asio_endpoint The ASIO representation of the ICMP endpoint.
++ explicit ICMPEndpoint(const boost::asio::ip::icmp::endpoint& asio_endpoint) :
++ asio_endpoint_placeholder_(new boost::asio::ip::icmp::endpoint(asio_endpoint)),
++ asio_endpoint_(*asio_endpoint_placeholder_)
++ {}
++
++ /// @brief The destructor.
++ virtual ~ICMPEndpoint() { delete asio_endpoint_placeholder_; }
++ //@}
++
++ /// @brief Fetches the IP address of the endpoint.
++ ///
++ /// @return the endpoint's IP address as an IOAddress.
++ virtual asiolink::IOAddress getAddress() const {
++ return (asio_endpoint_.address());
++ }
++
++ /// @brief Fetches the IP address of the endpoint in native form.
++ ///
++ /// @return the endpoint's IP address as a struct sockaddr.
++ virtual const struct sockaddr& getSockAddr() const {
++ return (*asio_endpoint_.data());
++ }
++
++ /// @brief Fetches the IP port number of the endpoint.
++ ///
++    /// @return the endpoint's port number as a uint16_t.
++ virtual uint16_t getPort() const {
++ return (asio_endpoint_.port());
++ }
++
++ /// @brief Fetches the network protocol of the endpoint.
++ ///
++ /// @return the endpoint's protocol as a short
++ virtual short getProtocol() const {
++ return (asio_endpoint_.protocol().protocol());
++ }
++
++ /// @brief Fetches the network protocol family of the endpoint.
++ ///
++    /// @return the endpoint's protocol family as a short
++ virtual short getFamily() const {
++ return (asio_endpoint_.protocol().family());
++ }
++
++ /// @brief Fetches the underlying ASIO endpoint implementation
++ ///
++ /// This is not part of the exposed IOEndpoint API but allows
++ /// direct access to the ASIO implementation of the endpoint
++ ///
++    /// @return the wrapped ASIO endpoint instance as a const reference
++ inline const boost::asio::ip::icmp::endpoint& getASIOEndpoint() const {
++ return (asio_endpoint_);
++ }
++
++ /// @brief Fetches the underlying ASIO endpoint implementation
++ ///
++ /// This is not part of the exposed IOEndpoint API but allows
++ /// direct access to the ASIO implementation of the endpoint
++ ///
++    /// @return the wrapped ASIO endpoint instance as a non-const reference
++ inline boost::asio::ip::icmp::endpoint& getASIOEndpoint() {
++ return (asio_endpoint_);
++ }
++
++private:
++ /// @brief Pointer to the ASIO endpoint placeholder.
++ boost::asio::ip::icmp::endpoint* asio_endpoint_placeholder_;
++
++ /// @brief Reference to the underlying ASIO endpoint instance.
++ boost::asio::ip::icmp::endpoint& asio_endpoint_;
++};
++
++} // namespace ping_check
++} // namespace isc
++#endif // ICMP_ENDPOINT_H
+diff --git a/src/hooks/dhcp/ping_check/icmp_msg.cc b/src/hooks/dhcp/ping_check/icmp_msg.cc
+new file mode 100644
+index 0000000000..3d236820da
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/icmp_msg.cc
+@@ -0,0 +1,112 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++#include <icmp_msg.h>
++#include <util/io.h>
++#include <exceptions/exceptions.h>
++
++#include <netinet/ip_icmp.h>
++#include <iostream>
++
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::util;
++
++namespace isc {
++namespace ping_check {
++
++ICMPMsg::ICMPMsg()
++ : source_(IOAddress::IPV4_ZERO_ADDRESS()),
++ destination_(IOAddress::IPV4_ZERO_ADDRESS()),
++ msg_type_(0), code_(0), check_sum_(0), id_(0), sequence_(0),
++ payload_(0) {
++}
++
++ICMPMsgPtr
++ICMPMsg::unpack(const uint8_t* wire_data, size_t length) {
++ ICMPMsgPtr msg(new ICMPMsg());
++ if (length < sizeof(struct ip)) {
++ isc_throw(BadValue,
++ "ICMPMsg::unpack - truncated ip header, length: "
++ << length);
++ }
++
++ // Find the IP header length...
++    const struct ip* ip_header = (const struct ip*)(wire_data);
++ auto hlen = (ip_header->ip_hl << 2);
++
++ // Make sure we received enough data.
++ if (length < (hlen + sizeof(struct icmp))) {
++ isc_throw(BadValue, "ICMPMsg::truncated packet? length: "
++ << length << ", hlen: " << hlen);
++ }
++
++ // Grab the source and destination addresses.
++ msg->setSource(IOAddress(ntohl(ip_header->ip_src.s_addr)));
++ msg->setDestination(IOAddress(ntohl(ip_header->ip_dst.s_addr)));
++
++ // Get the message type.
++    const struct icmp* reply = (const struct icmp*)(wire_data + hlen);
++ msg->setType(reply->icmp_type);
++ msg->setCode(reply->icmp_code);
++
++ msg->setChecksum(ntohs(reply->icmp_cksum));
++ msg->setId(ntohs(reply->icmp_hun.ih_idseq.icd_id));
++ msg->setSequence(ntohs(reply->icmp_hun.ih_idseq.icd_seq));
++
++ auto payload_len = length - hlen - ICMP_HEADER_SIZE;
++ msg->setPayload((const uint8_t*)(&reply->icmp_dun), payload_len);
++
++ return (msg);
++}
++
++ICMPPtr
++ICMPMsg::pack() const {
++ ICMPPtr outbound(new struct icmp());
++ memset(outbound.get(), 0x00, sizeof(struct icmp));
++ outbound->icmp_type = msg_type_;
++ outbound->icmp_id = htons(id_);
++ outbound->icmp_seq = htons(sequence_);
++ /// @todo copy in payload - not needed for ECHO REQUEST
++ outbound->icmp_cksum = htons(~calcChecksum((const uint8_t*)(outbound.get()), sizeof(struct icmp)));
++ return (outbound);
++}
++
++void
++ICMPMsg::setPayload(const uint8_t* data, size_t length) {
++ payload_.insert(payload_.end(), data, data + length);
++}
++
++uint32_t
++ICMPMsg::calcChecksum(const uint8_t* buf, size_t length) {
++ uint32_t sum = 0;
++
++ /* Checksum all the pairs of bytes first... */
++ size_t i;
++ for (i = 0; i < (length & ~1U); i += 2) {
++ sum += static_cast<uint32_t>(readUint16(buf + i, sizeof(uint16_t)));
++ /* Add carry. */
++ if (sum > 0xFFFF) {
++ sum -= 0xFFFF;
++ }
++ }
++
++ /* If there's a single byte left over, checksum it, too. Network
++ byte order is big-endian, so the remaining byte is the high byte. */
++ if (i < length) {
++ sum += buf[i] << 8;
++ /* Add carry. */
++ if (sum > 0xFFFF) {
++ sum -= 0xFFFF;
++ }
++ }
++
++ return (sum);
++}
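++
++// Note: calcChecksum() returns the folded 16-bit ones'-complement sum of the
++// Internet checksum (RFC 1071); the final bit inversion and conversion to
++// network byte order are left to the caller, as pack() does with htons(~sum).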
++
++} // end of namespace ping_check
++} // end of namespace isc
+diff --git a/src/hooks/dhcp/ping_check/icmp_msg.h b/src/hooks/dhcp/ping_check/icmp_msg.h
+new file mode 100644
+index 0000000000..ace322d1ca
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/icmp_msg.h
+@@ -0,0 +1,223 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef ICMP_MSG_H
++#define ICMP_MSG_H
++
++#include <asiolink/io_address.h>
++
++#include <arpa/inet.h>
++#include <netinet/in.h>
++#include <netinet/ip.h>
++#include <unistd.h>
++#include <netinet/ip_icmp.h>
++#include <boost/shared_ptr.hpp>
++
++namespace isc {
++namespace ping_check {
++
++// Forward class definition.
++class ICMPMsg;
++
++/// @brief Shared pointer type for ICMPMsg.
++typedef boost::shared_ptr<ICMPMsg> ICMPMsgPtr;
++
++/// @brief Shared pointer type for struct icmp.
++typedef boost::shared_ptr<struct icmp> ICMPPtr;
++
++/// @brief Embodies an ICMP message
++///
++/// Provides functions for marshalling of ICMP protocol
++/// messages to and from wire form.
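++///
++/// A minimal round-trip sketch (illustrative only; wire_data and length
++/// would come from a raw ICMP socket read):
++///
++/// @code
++/// // Parse a received message (IP header followed by the ICMP message).
++/// ICMPMsgPtr reply = ICMPMsg::unpack(wire_data, length);
++/// if (reply->getType() == ICMPMsg::ECHO_REPLY) {
++///     // Match reply->getId() / reply->getSequence() to a pending ping.
++/// }
++///
++/// // Build an ECHO_REQUEST ready for the wire.
++/// ICMPMsg msg;
++/// msg.setType(ICMPMsg::ECHO_REQUEST);
++/// msg.setId(0x1234);
++/// msg.setSequence(1);
++/// ICMPPtr wire = msg.pack();
++/// @endcode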
++class ICMPMsg {
++public:
++ /// @brief ICMP message types. We only define the ones
++ /// we care about.
++ enum ICMPMsgType {
++ ECHO_REPLY = 0,
++ TARGET_UNREACHABLE = 3,
++ ECHO_REQUEST = 8
++ };
++
++ /// @brief Size in octets of ICMP message header.
++ /// 1 (msg type) + 1 (code) + 2 (checksum) + 4 (either unused
++    /// or used differently depending on the ICMP type and code, e.g.
++ /// Identifier and Sequence Number for Echo or Echo Reply Message)
++ constexpr static size_t ICMP_HEADER_SIZE = 8;
++
++ /// @brief Constructor.
++ ICMPMsg();
++
++ /// @brief Destructor.
++ virtual ~ICMPMsg() = default;
++
++ /// @brief Unpacks an ICMP message from the given wire_data
++ ///
++ /// The wire data is expected to include the IP header followed
++ /// by an ICMP message.
++ ///
++ /// @param wire_data raw data received from the socket
++ /// @param length number of bytes in the wire_data contents
++ ///
++ /// @return Pointer to the newly constructed message
++ /// @throw BadValue if the wire data is invalid
++ static ICMPMsgPtr unpack(const uint8_t* wire_data, size_t length);
++
++ /// @brief Packs the message into an ICMP structure.
++ ///
++ /// @return Pointer to the newly constructed ICMP structure.
++ ICMPPtr pack() const;
++
++ /// @brief Fetches the ICMP message type (e.g. ECHO_REQUEST, ECHO_REPLY)
++ ///
++ /// @return message type as a uint8_t
++ uint8_t getType() const {
++ return (msg_type_);
++ }
++
++ /// @brief Sets the ICMP message type
++ ///
++ /// @param msg_type new value for the message type
++ void setType(uint8_t msg_type) {
++ msg_type_ = msg_type;
++ }
++
++ /// @brief Fetches the ICMP message code
++ ///
++ /// @return uint8_t containing the message code
++ uint8_t getCode() const {
++ return (code_);
++ }
++
++ /// @brief Sets the ICMP code
++ ///
++    /// @param code new value for the message code
++ void setCode(uint8_t code) {
++ code_ = code;
++ }
++
++ /// @brief Fetches the checksum
++ ///
++ /// @return uint16_t containing the message checksum
++ uint16_t getChecksum() const {
++ return (check_sum_);
++ }
++
++    /// @brief Sets the checksum
++    ///
++    /// @param check_sum new value for the checksum
++ void setChecksum(uint16_t check_sum) {
++ check_sum_ = check_sum;
++ }
++
++ /// @brief Fetches the message id
++ ///
++ /// @return uint16_t containing the id
++ uint16_t getId() const {
++ return (id_);
++ }
++
++ /// @brief Sets the message id
++ ///
++ /// @param id new value for the message id
++ void setId(const uint16_t id) {
++ id_ = id;
++ }
++
++ /// @brief Fetches the message sequence number
++ ///
++ /// @return uint16_t containing the sequence number
++ uint16_t getSequence() const {
++ return (sequence_);
++ }
++
++ /// @brief Sets the message sequence number
++ ///
++ /// @param sequence new value for the message sequence number
++ void setSequence(uint16_t sequence) {
++ sequence_ = sequence;
++ }
++
++ /// @brief Fetches the source IP address
++ ///
++ /// @return IOAddress containing the IP address of the message source
++ const isc::asiolink::IOAddress& getSource() const {
++ return (source_);
++ }
++
++ /// @brief Sets the source IP address
++ ///
++ /// @param source new value for the source IP address
++ void setSource(const isc::asiolink::IOAddress& source) {
++ source_ = source;
++ }
++
++ /// @brief Fetches the destination IP address
++ ///
++ /// @return IOAddress containing the IP address of the message destination
++ const isc::asiolink::IOAddress& getDestination() const {
++ return (destination_);
++ }
++
++ /// @brief Sets the destination IP address
++ ///
++ /// @param destination new value for the destination IP address
++ void setDestination(const isc::asiolink::IOAddress& destination) {
++ destination_ = destination;
++ }
++
++ /// @brief Fetches the message payload
++ ///
++ /// @return vector containing the message payload
++ const std::vector<uint8_t>& getPayload() const {
++ return (payload_);
++ }
++
++ /// @brief Sets the message payload to the given data
++ ///
++ /// @param data pointer to data buffer from which to copy
++ /// @param length number of bytes in data buffer
++ void setPayload(const uint8_t* data, size_t length);
++
++ /// @brief Calculates the checksum of the given data buffer
++ ///
++ /// @param data pointer to data buffer from which to copy
++ /// @param length number of bytes in data buffer
++ ///
++ /// @return uint32_t containing the calculated checksum
++ static uint32_t calcChecksum(const uint8_t* data, size_t length);
++
++private:
++    /// @brief IP address from which the message originated
++ isc::asiolink::IOAddress source_;
++
++ /// @brief IP address of the message destination
++ isc::asiolink::IOAddress destination_;
++
++ /// @brief ICMP message type
++ uint8_t msg_type_;
++
++ /// @brief ICMP message code
++ uint8_t code_;
++
++ /// @brief Checksum of the message
++ uint16_t check_sum_;
++
++ /// @brief Message ID
++ uint16_t id_;
++
++ /// @brief Message sequence number
++ uint16_t sequence_;
++
++    /// @brief Message payload: the data beyond the ICMP header
++ std::vector<uint8_t> payload_;
++};
++
++
++} // end of namespace ping_check
++} // end of namespace isc
++
++#endif
+diff --git a/src/hooks/dhcp/ping_check/icmp_socket.h b/src/hooks/dhcp/ping_check/icmp_socket.h
+new file mode 100644
+index 0000000000..091057d749
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/icmp_socket.h
+@@ -0,0 +1,359 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef ICMP_SOCKET_H
++#define ICMP_SOCKET_H 1
++
++#include <netinet/in.h>
++#include <sys/socket.h>
++#include <unistd.h>
++
++#include <cstddef>
++
++#include <asiolink/io_asio_socket.h>
++#include <asiolink/io_service.h>
++#include <icmp_endpoint.h>
++
++#include <exceptions/isc_assert.h>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief The @c ICMPSocket class is a concrete derived class of @c IOAsioSocket
++/// that represents an ICMP socket.
++///
++/// @tparam C Callback type
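++///
++/// A minimal sketch of opening and sending (illustrative only: MyCallback,
++/// data and length are hypothetical, and error handling is omitted):
++///
++/// @code
++/// asiolink::IOServicePtr io_service(new asiolink::IOService());
++/// ICMPSocket<MyCallback> socket(io_service);
++///
++/// ICMPEndpoint endpoint(asiolink::IOAddress("192.0.2.1"));
++/// MyCallback callback;
++/// socket.open(&endpoint, callback);
++/// socket.asyncSend(data, length, &endpoint, callback);
++/// @endcode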
++template <typename C>
++class ICMPSocket : public asiolink::IOAsioSocket<C> {
++private:
++ /// @brief Class is non-copyable
++ explicit ICMPSocket(const ICMPSocket&);
++ ICMPSocket& operator=(const ICMPSocket&);
++
++public:
++ enum {
++ MIN_SIZE = 4096 // Minimum send and receive size
++ };
++
++ /// @brief Constructor from an ASIO ICMP socket.
++ ///
++ /// @param socket The ASIO representation of the ICMP socket. It is assumed
++ /// that the caller will open and close the socket, so these
++ /// operations are a no-op for that socket.
++ explicit ICMPSocket(boost::asio::ip::icmp::socket& socket);
++
++ /// @brief Constructor
++ ///
++ /// Used when the ICMPSocket is being asked to manage its own internal
++ /// socket. In this case, the open() and close() methods are used.
++ ///
++ /// @param service I/O Service object used to manage the socket.
++ explicit ICMPSocket(const asiolink::IOServicePtr& service);
++
++ /// @brief Destructor
++ virtual ~ICMPSocket();
++
++ /// @brief Return file descriptor of underlying socket
++ ///
++ /// @return socket's native file descriptor as an int.
++ virtual int getNative() const {
++#if BOOST_VERSION < 106600
++ return (socket_.native());
++#else
++ return (socket_.native_handle());
++#endif
++ }
++
++ /// @brief Return protocol of socket
++ ///
++ /// @return Always IPPROTO_ICMP.
++ virtual int getProtocol() const {
++ return (IPPROTO_ICMP);
++ }
++
++ /// @brief Is "open()" synchronous?
++ ///
++    /// Indicates that the opening of an ICMP socket is synchronous.
++ /// @return Always true.
++ virtual bool isOpenSynchronous() const {
++ return true;
++ }
++
++ /// @brief Indicates if the socket is currently open.
++ ///
++ /// @return true if socket is open.
++ virtual bool isOpen() const {
++ return isopen_;
++ }
++
++ /// @brief Open Socket
++ ///
++ /// Opens the ICMP socket. This is a synchronous operation.
++ ///
++ /// @param endpoint Endpoint to which the socket will send data. This is
++ /// used to determine the address family that should be used for the
++ /// underlying socket.
++ /// @param callback Unused as the operation is synchronous.
++ virtual void open(const asiolink::IOEndpoint* endpoint, C& callback);
++
++ /// @brief Send Asynchronously
++ ///
++ /// Calls the underlying socket's async_send_to() method to send a packet of
++ /// data asynchronously to the remote endpoint. The callback will be called
++ /// on completion.
++ ///
++ /// @param data Data to send
++ /// @param length Length of data to send
++ /// @param endpoint Target of the send
++ /// @param callback Callback object.
++ virtual void asyncSend(const void* data, size_t length,
++ const asiolink::IOEndpoint* endpoint, C& callback);
++
++ /// @brief Receive Asynchronously
++ ///
++ /// Calls the underlying socket's async_receive_from() method to read a
++ /// packet of data from a remote endpoint. Arrival of the data is signalled
++ /// via a call to the callback function.
++ ///
++ /// @param data Buffer to receive incoming message
++ /// @param length Length of the data buffer
++ /// @param offset Offset into buffer where data is to be put
++ /// @param endpoint Source of the communication
++ /// @param callback Callback object
++ virtual void asyncReceive(void* data, size_t length, size_t offset,
++ asiolink::IOEndpoint* endpoint, C& callback);
++
++ /// @brief Process received data
++ ///
++ /// See the description of IOAsioSocket::receiveComplete for a complete
++ /// description of this method.
++ ///
++ /// @param staging Pointer to the start of the staging buffer.
++ /// @param length Amount of data in the staging buffer.
++ /// @param cumulative Amount of data received before the staging buffer is
++ /// processed.
++ /// @param offset Unused.
++ /// @param expected Unused.
++ /// @param outbuff Output buffer. Data in the staging buffer is copied
++ /// to this output buffer by the call.
++ ///
++ /// @return Always true
++ virtual bool processReceivedData(const void* staging, size_t length,
++ size_t& cumulative, size_t& offset,
++ size_t& expected,
++ isc::util::OutputBufferPtr& outbuff);
++
++ /// @brief Cancel I/O On Socket
++ virtual void cancel();
++
++ /// @brief Close socket
++ virtual void close();
++
++ /// @brief Calculates the checksum for the given buffer of data.
++ ///
++ /// @param buf pointer to the data buffer.
++ /// @param buf_size number of bytes in the data buffer.
++ ///
++ /// @return calculated checksum of the data as a uint16_t.
++ static uint16_t calcChecksum(const uint8_t* buf, const uint32_t buf_size);
++
++private:
++ /// @brief The IO service used to handle events.
++ isc::asiolink::IOServicePtr io_service_;
++
++ // Two variables to hold the socket - a socket and a pointer to it. This
++ // handles the case where a socket is passed to the ICMPSocket on
++ // construction, or where it is asked to manage its own socket.
++
++ /// @brief Pointer to own socket
++ std::unique_ptr<boost::asio::ip::icmp::socket> socket_ptr_;
++
++ /// @brief The socket in use (own or passed in)
++ boost::asio::ip::icmp::socket& socket_;
++
++ /// @brief True when the socket is open
++ bool isopen_;
++};
++
++// Constructor - caller manages socket
++
++template <typename C>
++ICMPSocket<C>::ICMPSocket(boost::asio::ip::icmp::socket& socket) :
++ socket_ptr_(), socket_(socket), isopen_(true) {
++}
++
++// Constructor - create socket on the fly
++
++template <typename C>
++ICMPSocket<C>::ICMPSocket(const asiolink::IOServicePtr& io_service) :
++ io_service_(io_service),
++ socket_ptr_(new boost::asio::ip::icmp::socket(io_service_->getInternalIOService())),
++ socket_(*socket_ptr_), isopen_(false) {
++}
++
++// Destructor.
++
++template <typename C>
++ICMPSocket<C>::~ICMPSocket() {
++}
++
++// Open the socket.
++
++template <typename C> void
++ICMPSocket<C>::open(const asiolink::IOEndpoint* endpoint, C&) {
++
++ // Ignore opens on an already-open socket. (Don't throw a failure because
++ // of uncertainties as to what precedes when using asynchronous I/O.)
++ // It also allows us to treat a passed-in socket in exactly the same way as
++ // a self-managed socket (in that we can call the open() and close() methods
++ // of this class).
++ if (!isopen_) {
++ if (endpoint->getFamily() == AF_INET) {
++ socket_.open(boost::asio::ip::icmp::v4());
++ } else {
++ socket_.open(boost::asio::ip::icmp::v6());
++ }
++ isopen_ = true;
++
++ // Ensure it can send and receive at least 4K buffers.
++ boost::asio::ip::icmp::socket::send_buffer_size snd_size;
++ socket_.get_option(snd_size);
++ if (snd_size.value() < MIN_SIZE) {
++ snd_size = MIN_SIZE;
++ socket_.set_option(snd_size);
++ }
++
++ boost::asio::ip::icmp::socket::receive_buffer_size rcv_size;
++ socket_.get_option(rcv_size);
++ if (rcv_size.value() < MIN_SIZE) {
++ rcv_size = MIN_SIZE;
++ socket_.set_option(rcv_size);
++ }
++
++ boost::asio::socket_base::do_not_route option(true);
++ socket_.set_option(option);
++ }
++}
++
++// Send a message. Should never do this if the socket is not open, so throw
++// an exception if this is the case.
++
++template <typename C> void
++ICMPSocket<C>::asyncSend(const void* data, size_t length,
++ const asiolink::IOEndpoint* endpoint, C& callback) {
++ if (isopen_) {
++
++ // Upconvert to an ICMPEndpoint. We need to do this because although
++ // IOEndpoint is the base class of ICMPEndpoint and TCPEndpoint, it
++ // does not contain a method for getting at the underlying endpoint
++ // type - that is in the derived class and the two classes differ on
++ // return type.
++ isc_throw_assert(endpoint->getProtocol() == IPPROTO_ICMP);
++ const ICMPEndpoint* icmp_endpoint =
++ static_cast<const ICMPEndpoint*>(endpoint);
++
++ // ... and send the message.
++ socket_.async_send_to(boost::asio::buffer(data, length),
++ icmp_endpoint->getASIOEndpoint(), callback);
++ } else {
++ isc_throw(asiolink::SocketNotOpen,
++ "attempt to send on a ICMP socket that is not open");
++ }
++}
++
++// Receive a message. Should never do this if the socket is not open, so throw
++// an exception if this is the case.
++
++template <typename C> void
++ICMPSocket<C>::asyncReceive(void* data, size_t length, size_t offset,
++ asiolink::IOEndpoint* endpoint, C& callback) {
++ if (isopen_) {
++
++ // Upconvert the endpoint again.
++ isc_throw_assert(endpoint->getProtocol() == IPPROTO_ICMP);
++ ICMPEndpoint* icmp_endpoint = static_cast<ICMPEndpoint*>(endpoint);
++
++ // Ensure we can write into the buffer
++ if (offset >= length) {
++ isc_throw(asiolink::BufferOverflow, "attempt to read into area beyond end of "
++ "ICMP receive buffer");
++ }
++ void* buffer_start = static_cast<void*>(static_cast<uint8_t*>(data) + offset);
++
++ // Issue the read
++ socket_.async_receive_from(boost::asio::buffer(buffer_start, length - offset),
++ icmp_endpoint->getASIOEndpoint(), callback);
++ } else {
++ isc_throw(asiolink::SocketNotOpen,
++ "attempt to receive from a ICMP socket that is not open");
++ }
++}
++
++// Receive complete. Just copy the data across to the output buffer and
++// update arguments as appropriate.
++
++template <typename C> bool
++ICMPSocket<C>::processReceivedData(const void* staging, size_t length,
++ size_t& cumulative, size_t& offset,
++ size_t& expected,
++ isc::util::OutputBufferPtr& outbuff) {
++ // Set return values to what we should expect.
++ cumulative = length;
++ expected = length;
++ offset = 0;
++
++ // Copy data across
++ outbuff->writeData(staging, length);
++
++ // ... and mark that we have everything.
++ return (true);
++}
++
++// Cancel I/O on the socket. No-op if the socket is not open.
++
++template <typename C> void
++ICMPSocket<C>::cancel() {
++ if (isopen_) {
++ socket_.cancel();
++ }
++}
++
++// Close the socket down. Can only do this if the socket is open and we are
++// managing it ourself.
++
++template <typename C> void
++ICMPSocket<C>::close() {
++ if (isopen_ && socket_ptr_) {
++ socket_.close();
++ isopen_ = false;
++ }
++}
++
++template <typename C> uint16_t
++ICMPSocket<C>::calcChecksum(const uint8_t* buf, const uint32_t buf_size) {
++ uint32_t sum = 0;
++ uint32_t i;
++ for (i = 0; i < (buf_size & ~1U); i += 2) {
++ uint16_t chunk = buf[i] << 8 | buf[i + 1];
++ sum += chunk;
++ if (sum > 0xFFFF) {
++ sum -= 0xFFFF;
++ }
++ }
++ // If one odd byte is left over, we also need to add it to the checksum.
++ if (i < buf_size) {
++ sum += buf[i] << 8;
++ if (sum > 0xFFFF) {
++ sum -= 0xFFFF;
++ }
++ }
++
++ return (sum);
++}
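++
++// Illustrative sketch (exposition only, not part of the patch; the helper
++// buildEchoRequest() and the callback type MyCallback are placeholders):
++// calcChecksum() returns the folded one's complement sum, so a caller is
++// assumed to perform the final RFC 792 inversion when packing a message.
++//
++// @code
++// std::vector<uint8_t> wire = buildEchoRequest();  // checksum field zeroed
++// uint16_t sum = ICMPSocket<MyCallback>::calcChecksum(wire.data(), wire.size());
++// uint16_t checksum = ~sum;        // one's complement of the sum
++// wire[2] = checksum >> 8;         // store in network byte order
++// wire[3] = checksum & 0xFF;
++// @endcode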
++
++} // namespace ping_check
++} // namespace isc
++#endif // ICMP_SOCKET_H
+diff --git a/src/hooks/dhcp/ping_check/libloadtests/.gitignore b/src/hooks/dhcp/ping_check/libloadtests/.gitignore
+new file mode 100644
+index 0000000000..ada6ed5036
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/libloadtests/.gitignore
+@@ -0,0 +1 @@
++hook_load_unittests
+diff --git a/src/hooks/dhcp/ping_check/libloadtests/Makefile.am b/src/hooks/dhcp/ping_check/libloadtests/Makefile.am
+new file mode 100644
+index 0000000000..139a068b3c
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/libloadtests/Makefile.am
+@@ -0,0 +1,60 @@
++SUBDIRS = .
++
++AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
++AM_CPPFLAGS += -I$(top_builddir)/src/hooks/dhcp/ping_check -I$(top_srcdir)/src/hooks/dhcp/ping_check
++AM_CPPFLAGS += $(BOOST_INCLUDES) $(CRYPTO_CFLAGS) $(CRYPTO_INCLUDES)
++AM_CPPFLAGS += -DPING_CHECK_LIB_SO=\"$(abs_top_builddir)/src/hooks/dhcp/ping_check/.libs/libdhcp_ping_check.so\"
++AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
++
++AM_CXXFLAGS = $(KEA_CXXFLAGS)
++
++if USE_STATIC_LINK
++AM_LDFLAGS = -static
++endif
++
++# Unit test data files need to get installed.
++EXTRA_DIST =
++
++CLEANFILES = *.gcno *.gcda
++
++TESTS_ENVIRONMENT = $(LIBTOOL) --mode=execute $(VALGRIND_COMMAND)
++
++LOG_COMPILER = $(LIBTOOL)
++AM_LOG_FLAGS = --mode=execute
++
++TESTS =
++if HAVE_GTEST
++TESTS += hook_load_unittests
++
++hook_load_unittests_SOURCES = run_unittests.cc
++hook_load_unittests_SOURCES += load_unload_unittests.cc
++
++hook_load_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) $(LOG4CPLUS_INCLUDES)
++
++hook_load_unittests_LDFLAGS = $(AM_LDFLAGS) $(CRYPTO_LDFLAGS) $(GTEST_LDFLAGS)
++
++hook_load_unittests_CXXFLAGS = $(AM_CXXFLAGS)
++
++hook_load_unittests_LDADD = $(top_builddir)/src/lib/dhcpsrv/libkea-dhcpsrv.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/process/libkea-process.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/eval/libkea-eval.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/dhcp_ddns/libkea-dhcp_ddns.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/stats/libkea-stats.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/config/libkea-cfgclient.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/http/libkea-http.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libkea-dhcp++.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/hooks/libkea-hooks.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/database/libkea-database.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/cc/libkea-cc.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libkea-asiolink.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/dns/libkea-dns++.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libkea-cryptolink.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/log/libkea-log.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/util/libkea-util.la
++hook_load_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libkea-exceptions.la
++hook_load_unittests_LDADD += $(LOG4CPLUS_LIBS)
++hook_load_unittests_LDADD += $(CRYPTO_LIBS)
++hook_load_unittests_LDADD += $(BOOST_LIBS)
++hook_load_unittests_LDADD += $(GTEST_LDADD)
++endif
++noinst_PROGRAMS = $(TESTS)
+diff --git a/src/hooks/dhcp/ping_check/libloadtests/load_unload_unittests.cc b/src/hooks/dhcp/ping_check/libloadtests/load_unload_unittests.cc
+new file mode 100644
+index 0000000000..67275db617
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/libloadtests/load_unload_unittests.cc
+@@ -0,0 +1,107 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/// @file This file contains tests which exercise the load and unload
++/// functions in the ping check hook library. In order to test the load
++/// function, one must be able to pass it hook library parameters. The
++/// only way to populate these parameters is by actually loading the
++/// library via HooksManager::loadLibraries().
++
++#include <config.h>
++
++#include <dhcpsrv/testutils/lib_load_test_fixture.h>
++#include <testutils/gtest_utils.h>
++
++#include <gtest/gtest.h>
++#include <errno.h>
++
++using namespace std;
++using namespace isc;
++using namespace isc::hooks;
++using namespace isc::data;
++using namespace isc::dhcp;
++using namespace isc::process;
++
++namespace {
++
++/// @brief Test fixture for testing loading and unloading the ping check library
++class PingCheckLibLoadTest : public isc::test::LibLoadTest {
++public:
++ /// @brief Constructor
++ PingCheckLibLoadTest() : LibLoadTest(PING_CHECK_LIB_SO) {
++ }
++
++ /// @brief Destructor
++ virtual ~PingCheckLibLoadTest() {
++ }
++
++ /// @brief Registers hooks in the hook manager.
++ /// Normally this is done by the server core code (@c Dhcpv4Srv).
++ void registerHooks() {
++ hook_index_dhcp4_srv_configured_ = HooksManager::registerHook("dhcp4_srv_configured");
++ hook_index_lease4_offer_ = HooksManager::registerHook("lease4_offer");
++ }
++
++ /// @brief Checks that expected callouts are present.
++ void calloutsPresent() {
++ bool result;
++ ASSERT_NO_THROW_LOG(result = HooksManager::calloutsPresent(hook_index_dhcp4_srv_configured_));
++ EXPECT_TRUE(result);
++ ASSERT_NO_THROW_LOG(result = HooksManager::calloutsPresent(hook_index_lease4_offer_));
++ EXPECT_TRUE(result);
++ }
++
++ /// @brief Creates a valid set of ping-check hook parameters.
++ virtual ElementPtr validConfigParams() {
++ ElementPtr params = Element::createMap();
++ params->set("min-ping-requests", Element::create(3));
++ params->set("reply-timeout", Element::create(100));
++ params->set("enable-ping-check", Element::create(true));
++ params->set("ping-cltt-secs", Element::create(60));
++ params->set("ping-channel-threads", Element::create(1));
++ return (params);
++ }
++
++ /// @brief Hook index values.
++ int hook_index_dhcp4_srv_configured_;
++ int hook_index_lease4_offer_;
++};
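++
++// For reference, validConfigParams() mirrors a kea-dhcp4 hook entry such as
++// the following (illustrative; the library path is an assumption):
++//
++// "hooks-libraries": [{
++//     "library": "/usr/lib/kea/hooks/libdhcp_ping_check.so",
++//     "parameters": {
++//         "enable-ping-check": true,
++//         "min-ping-requests": 3,
++//         "reply-timeout": 100,
++//         "ping-cltt-secs": 60,
++//         "ping-channel-threads": 1
++//     }
++// }]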
++
++// Simple V4 test that checks the library can be loaded and unloaded several times.
++TEST_F(PingCheckLibLoadTest, validLoad4) {
++ validDaemonTest("kea-dhcp4", AF_INET, valid_params_);
++}
++
++// Simple test that checks the library cannot be loaded by invalid daemons.
++TEST_F(PingCheckLibLoadTest, invalidDaemonLoad) {
++ // V6 is invalid regardless of family.
++ invalidDaemonTest("kea-dhcp6", AF_INET, valid_params_);
++ invalidDaemonTest("kea-dhcp6", AF_INET6, valid_params_);
++
++ invalidDaemonTest("kea-ctrl-agent", AF_INET, valid_params_);
++ invalidDaemonTest("kea-dhcp-ddns", AF_INET, valid_params_);
++ invalidDaemonTest("bogus", AF_INET, valid_params_);
++}
++
++// Verifies that callout functions exist after loading the library.
++TEST_F(PingCheckLibLoadTest, verifyCallouts) {
++ // Set family and daemon's proc name and register hook points.
++ isc::dhcp::CfgMgr::instance().setFamily(AF_INET);
++ isc::process::Daemon::setProcName("kea-dhcp4");
++ registerHooks();
++
++ // Add library to config and load it.
++ ASSERT_NO_THROW_LOG(addLibrary(lib_so_name_, valid_params_));
++ ASSERT_NO_THROW_LOG(loadLibraries());
++
++ // Verify that expected callouts are present.
++ calloutsPresent();
++
++ // Unload the library.
++ ASSERT_NO_THROW_LOG(unloadLibraries());
++}
++
++} // end of anonymous namespace
+diff --git a/src/hooks/dhcp/ping_check/libloadtests/meson.build b/src/hooks/dhcp/ping_check/libloadtests/meson.build
+new file mode 100644
+index 0000000000..da8bf439c0
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/libloadtests/meson.build
+@@ -0,0 +1,21 @@
++if not TESTS_OPT.enabled()
++ subdir_done()
++endif
++
++dhcp_ping_check_libloadtests = executable(
++ 'dhcp-ping-check-libload-tests',
++ 'load_unload_unittests.cc',
++ 'run_unittests.cc',
++ cpp_args: [
++ f'-DPING_CHECK_LIB_SO="@TOP_BUILD_DIR@/src/hooks/dhcp/ping_check/libdhcp_ping_check.so"',
++ ],
++ dependencies: [GTEST_DEP, CRYPTO_DEP],
++ include_directories: [include_directories('.')] + INCLUDES,
++ link_with: LIBS_BUILT_SO_FAR,
++)
++test(
++ 'dhcp-ping-check-libloadtests',
++ dhcp_ping_check_libloadtests,
++ depends: [dhcp_ping_check_lib],
++ protocol: 'gtest',
++)
+diff --git a/src/hooks/dhcp/ping_check/libloadtests/run_unittests.cc b/src/hooks/dhcp/ping_check/libloadtests/run_unittests.cc
+new file mode 100644
+index 0000000000..d249e2362e
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/libloadtests/run_unittests.cc
+@@ -0,0 +1,19 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++
++#include <log/logger_support.h>
++#include <gtest/gtest.h>
++
++int
++main(int argc, char* argv[]) {
++ ::testing::InitGoogleTest(&argc, argv);
++ isc::log::initLogger();
++ int result = RUN_ALL_TESTS();
++
++ return (result);
++}
+diff --git a/src/hooks/dhcp/ping_check/meson.build b/src/hooks/dhcp/ping_check/meson.build
+new file mode 100644
+index 0000000000..d3a1e70b49
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/meson.build
+@@ -0,0 +1,41 @@
++dhcp_ping_check_lib = shared_library(
++ 'dhcp_ping_check',
++ 'config_cache.cc',
++ 'icmp_msg.cc',
++ 'ping_channel.cc',
++ 'ping_check_callouts.cc',
++ 'ping_check_config.cc',
++ 'ping_check_log.cc',
++ 'ping_check_messages.cc',
++ 'ping_check_mgr.cc',
++ 'ping_context.cc',
++ 'ping_context_store.cc',
++ 'version.cc',
++ dependencies: [CRYPTO_DEP],
++ include_directories: [include_directories('.')] + INCLUDES,
++ install: true,
++ install_dir: HOOKS_PATH,
++ install_rpath: INSTALL_RPATH,
++ build_rpath: BUILD_RPATH,
++ link_with: LIBS_BUILT_SO_FAR,
++ name_suffix: 'so',
++)
++dhcp_ping_check_archive = static_library(
++ 'dhcp_ping_check',
++ objects: dhcp_ping_check_lib.extract_all_objects(recursive: false),
++)
++subdir('libloadtests')
++subdir('tests')
++
++if KEA_MSG_COMPILER.found()
++ target_gen_messages = run_target(
++ 'src-hooks-dhcp-ping_check-ping_check_messages',
++ command: [
++ CD_AND_RUN,
++ TOP_SOURCE_DIR,
++ KEA_MSG_COMPILER,
++ 'src/hooks/dhcp/ping_check/ping_check_messages.mes',
++ ],
++ )
++ TARGETS_GEN_MESSAGES += [target_gen_messages]
++endif
+diff --git a/src/hooks/dhcp/ping_check/ping_channel.cc b/src/hooks/dhcp/ping_check/ping_channel.cc
+new file mode 100644
+index 0000000000..6a6a88c038
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_channel.cc
+@@ -0,0 +1,466 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++#include <ping_channel.h>
++#include <ping_check_log.h>
++#include <dhcp/iface_mgr.h>
++#include <exceptions/exceptions.h>
++#include <util/multi_threading_mgr.h>
++#include <iostream>
++
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::dhcp;
++using namespace isc::util;
++
++namespace ph = std::placeholders;
++
++namespace isc {
++namespace ping_check {
++
++uint32_t
++PingChannel::nextEchoInstanceNum() {
++ static uint32_t echo_instance_num = 0x00010000;
++ if (echo_instance_num == UINT32_MAX) {
++ echo_instance_num = 0x00010001;
++ } else {
++ ++echo_instance_num;
++ }
++
++ return (echo_instance_num);
++}
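++
++// For illustration: sendNext() below splits each instance number into the
++// id/sequence pair carried in the ECHO REQUEST header:
++//
++// @code
++// uint32_t instance_num = nextEchoInstanceNum();               // e.g. 0x0001002A
++// uint16_t id = static_cast<uint16_t>(instance_num >> 16);     // 0x0001
++// uint16_t seq = static_cast<uint16_t>(instance_num & 0xFFFF); // 0x002A
++// @endcode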
++
++PingChannel::PingChannel(IOServicePtr& io_service,
++ NextToSendCallback next_to_send_cb,
++ EchoSentCallback echo_sent_cb,
++ ReplyReceivedCallback reply_received_cb,
++ ShutdownCallback shutdown_cb)
++ : io_service_(io_service),
++ next_to_send_cb_(next_to_send_cb),
++ echo_sent_cb_(echo_sent_cb),
++ reply_received_cb_(reply_received_cb),
++ shutdown_cb_(shutdown_cb),
++ socket_(0), input_buf_(256),
++ reading_(false), sending_(false), stopping_(false), mutex_(new std::mutex),
++ single_threaded_(!MultiThreadingMgr::instance().getMode()),
++ watch_socket_(0), registered_write_fd_(-1), registered_read_fd_(-1) {
++ if (!io_service_) {
++ isc_throw(BadValue,
++ "PingChannel ctor - io_service cannot be empty");
++ }
++}
++
++PingChannel::~PingChannel() {
++ close();
++}
++
++void
++PingChannel::open() {
++ try {
++ MultiThreadingLock lock(*mutex_);
++ if (socket_ && socket_->isOpen()) {
++ return;
++ }
++
++ // For open(), the endpoint is only used to determine the protocol;
++ // the address is irrelevant.
++ ICMPEndpoint ping_to_endpoint(IOAddress::IPV4_ZERO_ADDRESS());
++ SocketCallback socket_cb(
++ [](boost::system::error_code ec, size_t /*length */) {
++ isc_throw(Unexpected, "ICMPSocket open is synchronous, should not invoke cb: "
++ << ec.message());
++ }
++ );
++
++ socket_.reset(new PingSocket(io_service_));
++ socket_->open(&ping_to_endpoint, socket_cb);
++ reading_ = false;
++ sending_ = false;
++ stopping_ = false;
++
++ if (single_threaded_) {
++ // Open new watch socket.
++ watch_socket_.reset(new util::WatchSocket());
++
++ // Register the WatchSocket with IfaceMgr to signal data ready to write.
++ registered_write_fd_ = watch_socket_->getSelectFd();
++ IfaceMgr::instance().addExternalSocket(registered_write_fd_, IfaceMgr::SocketCallback());
++
++ // Register ICMPSocket with IfaceMgr to signal data ready to read.
++ registered_read_fd_ = socket_->getNative();
++ IfaceMgr::instance().addExternalSocket(registered_read_fd_, IfaceMgr::SocketCallback());
++ }
++
++ } catch (const std::exception& ex) {
++ isc_throw(Unexpected, "PingChannel::open failed:" << ex.what());
++ }
++
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_BASIC, PING_CHECK_CHANNEL_SOCKET_OPENED);
++}
++
++bool
++PingChannel::isOpen() const {
++ MultiThreadingLock lock(*mutex_);
++ return (socket_ && socket_->isOpen());
++}
++
++void
++PingChannel::close() {
++ try {
++ MultiThreadingLock lock(*mutex_);
++
++ if (single_threaded_) {
++ // Unregister from IfaceMgr.
++ if (registered_write_fd_ != -1) {
++ IfaceMgr::instance().deleteExternalSocket(registered_write_fd_);
++ registered_write_fd_ = -1;
++ }
++
++ if (registered_read_fd_ != -1) {
++ IfaceMgr::instance().deleteExternalSocket(registered_read_fd_);
++ registered_read_fd_ = -1;
++ }
++
++ // Close watch socket.
++ if (watch_socket_) {
++ std::string error_string;
++ watch_socket_->closeSocket(error_string);
++ if (!error_string.empty()) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_CHANNEL_WATCH_SOCKET_CLOSE_ERROR)
++ .arg(error_string);
++ }
++
++ watch_socket_.reset();
++ }
++ }
++
++ if (!socket_ || !socket_->isOpen()) {
++ return;
++ }
++
++ socket_->close();
++ } catch (const std::exception& ex) {
++ // On close error, log but do not throw.
++ LOG_ERROR(ping_check_logger, PING_CHECK_CHANNEL_SOCKET_CLOSE_ERROR)
++ .arg(ex.what());
++ }
++
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_BASIC, PING_CHECK_CHANNEL_SOCKET_CLOSED);
++}
++
++void
++PingChannel::stopChannel() {
++ {
++ MultiThreadingLock lock(*mutex_);
++ if (stopping_) {
++ return;
++ }
++
++ stopping_ = true;
++ }
++
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_BASIC, PING_CHECK_CHANNEL_STOP);
++ close();
++
++ if (shutdown_cb_) {
++ (shutdown_cb_)();
++ }
++}
++
++void
++PingChannel::asyncReceive(void* data, size_t length, size_t offset,
++ asiolink::IOEndpoint* endpoint, SocketCallback& callback) {
++ socket_->asyncReceive(data, length, offset, endpoint, callback);
++}
++
++void
++PingChannel::asyncSend(void* data, size_t length, asiolink::IOEndpoint* endpoint,
++ SocketCallback& callback) {
++ socket_->asyncSend(data, length, endpoint, callback);
++
++ if (single_threaded_) {
++ // Set IO ready marker so sender activity is visible to select() or poll().
++ watch_socket_->markReady();
++ }
++}
++
++void
++PingChannel::doRead() {
++ try {
++ MultiThreadingLock lock(*mutex_);
++ if (!canRead()) {
++ return;
++ }
++
++ reading_ = true;
++
++ // Create instance of the callback. It is safe to pass the
++ // local instance of the callback, because the underlying
++ // std functions make copies as needed.
++ SocketCallback cb(std::bind(&PingChannel::socketReadCallback,
++ shared_from_this(),
++ ph::_1, // error
++ ph::_2)); // bytes_transferred
++ asyncReceive(static_cast<void*>(getInputBufData()), getInputBufSize(),
++ 0, &reply_endpoint_, cb);
++ } catch (const std::exception& ex) {
++ // Normal IO failures should be passed to the callback. A failure here
++ // indicates the call to asyncReceive() itself failed.
++ LOG_ERROR(ping_check_logger, PING_CHECK_UNEXPECTED_READ_ERROR)
++ .arg(ex.what());
++ stopChannel();
++ }
++}
++
++void
++PingChannel::socketReadCallback(boost::system::error_code ec, size_t length) {
++ {
++ MultiThreadingLock lock(*mutex_);
++ if (stopping_) {
++ return;
++ }
++ }
++
++ if (ec) {
++ if (ec.value() == boost::asio::error::operation_aborted) {
++ // IO service has been stopped and the connection is probably
++ // going to be shutting down.
++ return;
++ } else if ((ec.value() == boost::asio::error::try_again) ||
++ (ec.value() == boost::asio::error::would_block)) {
++ // We got EWOULDBLOCK or EAGAIN which indicates that we may be able to
++ // read something from the socket on the next attempt. Just make sure
++ // we don't try to process anything now in case the length value
++ // passed in is garbage.
++ length = 0;
++ } else {
++ // Anything else is fatal for the socket.
++ LOG_ERROR(ping_check_logger, PING_CHECK_CHANNEL_SOCKET_READ_FAILED)
++ .arg(ec.message());
++ stopChannel();
++ return;
++ }
++ }
++
++ // Unpack the reply and pass it to the reply callback.
++ ICMPMsgPtr reply;
++ if (length > 0) {
++ {
++ try {
++ MultiThreadingLock lock(*mutex_);
++ reply = ICMPMsg::unpack(getInputBufData(), getInputBufSize());
++ if (reply->getType() == ICMPMsg::ECHO_REPLY) {
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_DETAIL,
++ PING_CHECK_CHANNEL_ECHO_REPLY_RECEIVED)
++ .arg(reply->getSource())
++ .arg(reply->getId())
++ .arg(reply->getSequence());
++ }
++ } catch (const std::exception& ex) {
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_BASIC,
++ PING_CHECK_CHANNEL_MALFORMED_PACKET_RECEIVED)
++ .arg(ex.what());
++ }
++ }
++ }
++
++ {
++ MultiThreadingLock lock(*mutex_);
++ reading_ = false;
++ }
++
++ if (reply) {
++ (reply_received_cb_)(reply);
++ }
++
++ // Start the next read.
++ doRead();
++}
++
++void
++PingChannel::startSend() {
++ MultiThreadingLock lock(*mutex_);
++ if (canSend()) {
++ // Post the call to sendNext() to the IOService.
++ // This ensures it is carried out on a thread
++ // associated with the channel's IOService,
++ // not the thread invoking this function.
++ auto f = [](PingChannelPtr ptr) { ptr->sendNext(); };
++ io_service_->post(std::bind(f, shared_from_this()));
++ }
++}
++
++void
++PingChannel::startRead() {
++ MultiThreadingLock lock(*mutex_);
++ if (canRead()) {
++ // Post the call to doRead() to the IOService.
++ // This ensures it is carried out on a thread
++ // associated with the channel's IOService,
++ // not the thread invoking this function.
++ auto f = [](PingChannelPtr ptr) { ptr->doRead(); };
++ io_service_->post(std::bind(f, shared_from_this()));
++ }
++}
++
++void
++PingChannel::sendNext() {
++ try {
++ MultiThreadingLock lock(*mutex_);
++ if (!canSend()) {
++ // Can't send right now, get out.
++ return;
++ }
++
++ // Fetch the next one to send.
++ IOAddress target("0.0.0.0");
++ if (!((next_to_send_cb_)(target))) {
++ // Nothing to send.
++ return;
++ }
++
++ // Have a target IP, build an ECHO REQUEST for it.
++ sending_ = true;
++ ICMPMsgPtr next_echo(new ICMPMsg());
++ next_echo->setType(ICMPMsg::ECHO_REQUEST);
++ next_echo->setDestination(target);
++
++ uint32_t instance_num = nextEchoInstanceNum();
++ next_echo->setId(static_cast<uint16_t>(instance_num >> 16));
++ next_echo->setSequence(static_cast<uint16_t>(instance_num & 0x0000FFFF));
++
++ // Get packed wire-form.
++ ICMPPtr echo_icmp = next_echo->pack();
++
++ // Create instance of the callback. It is safe to pass the
++ // local instance of the callback, because the underlying
++ // std functions make copies as needed.
++ SocketCallback cb(std::bind(&PingChannel::socketWriteCallback,
++ shared_from_this(),
++ next_echo,
++ ph::_1, // error
++ ph::_2)); // bytes_transferred
++
++ ICMPEndpoint target_endpoint(target);
++ asyncSend(echo_icmp.get(), sizeof(struct icmp), &target_endpoint, cb);
++ } catch (const std::exception& ex) {
++ // Normal IO failures should be passed to the callback. A failure here
++ // indicates the call to asyncSend() itself failed.
++ LOG_ERROR(ping_check_logger, PING_CHECK_UNEXPECTED_WRITE_ERROR)
++ .arg(ex.what());
++ stopChannel();
++ return;
++ }
++}
++
++void
++PingChannel::socketWriteCallback(ICMPMsgPtr echo, boost::system::error_code ec,
++ size_t length) {
++ {
++ MultiThreadingLock lock(*mutex_);
++ if (stopping_) {
++ return;
++ }
++ }
++
++ if (single_threaded_) {
++ try {
++ // Clear the IO ready marker.
++ watch_socket_->clearReady();
++ } catch (const std::exception& ex) {
++ // This can only happen if the WatchSocket's select_fd has been
++ // compromised, which is a programmatic error. We'll log the error
++ // here, then continue on and process the IO result we were given.
++ // The WatchSocket issue will resurface on the next send as a closed
++ // fd in markReady() rather than failing out of this callback.
++ LOG_ERROR(ping_check_logger, PING_CHECK_CHANNEL_WATCH_SOCKET_CLEAR_ERROR)
++ .arg(ex.what());
++ }
++ }
++
++ // Handle an error. Note we can't use a switch statement as some values
++ // on some OSes are the same (e.g. try_again and would_block) which causes
++ // duplicate-case compilation errors.
++ bool send_failed = false;
++ if (ec) {
++ auto error_value = ec.value();
++ if (error_value == boost::asio::error::operation_aborted) {
++ // IO service has been stopped and the connection is probably
++ // going to be shutting down.
++ return;
++ } else if ((error_value == boost::asio::error::try_again) ||
++ (error_value == boost::asio::error::would_block)) {
++ // We got EWOULDBLOCK or EAGAIN which indicates that we may be able to
++ // write something to the socket on the next attempt. Set the length
++ // to zero so we skip the completion callback.
++ length = 0;
++ } else if ((error_value == boost::asio::error::network_unreachable) ||
++ (error_value == boost::asio::error::host_unreachable) ||
++ (error_value == boost::asio::error::network_down)) {
++ // One of these implies an interface might be down, or there's no
++ // way to ping this network. Other networks might be working OK.
++ send_failed = true;
++ } else if (error_value == boost::asio::error::no_buffer_space) {
++ // Writing faster than the kernel will write them out.
++ send_failed = true;
++ } else if (error_value == boost::asio::error::access_denied) {
++ // Means the address we tried to ping is not allowed. Most likely a
++ // broadcast address.
++ send_failed = true;
++ } else {
++ // Anything else is fatal for the socket.
++ LOG_ERROR(ping_check_logger, PING_CHECK_CHANNEL_SOCKET_WRITE_FAILED)
++ .arg(ec.message());
++ stopChannel();
++ return;
++ }
++ }
++
++ {
++ MultiThreadingLock lock(*mutex_);
++ sending_ = false;
++ }
++
++ if (send_failed) {
++ // Invoke the callback with send failed. This instructs the manager
++ // to treat the address as free to use.
++ LOG_ERROR(ping_check_logger, PING_CHECK_CHANNEL_NETWORK_WRITE_ERROR)
++ .arg(echo->getDestination())
++ .arg(ec.message());
++ // Invoke the send completed callback.
++ (echo_sent_cb_)(echo, true);
++ } else if (length > 0) {
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_DETAIL,
++ PING_CHECK_CHANNEL_ECHO_REQUEST_SENT)
++ .arg(echo->getDestination())
++ .arg(echo->getId())
++ .arg(echo->getSequence());
++ // Invoke the send completed callback.
++ (echo_sent_cb_)(echo, false);
++ }
++
++ // Schedule the next send.
++ sendNext();
++}
++
++size_t
++PingChannel::getInputBufSize() const {
++ return (input_buf_.size());
++}
++
++unsigned char*
++PingChannel::getInputBufData() {
++ if (input_buf_.empty()) {
++ isc_throw(InvalidOperation,
++ "PingChannel::getInputBufData() - cannot access empty buffer");
++ }
++
++ return (input_buf_.data());
++}
++
++} // end of namespace ping_check
++} // end of namespace isc
+diff --git a/src/hooks/dhcp/ping_check/ping_channel.h b/src/hooks/dhcp/ping_check/ping_channel.h
+new file mode 100644
+index 0000000000..ad798188e3
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_channel.h
+@@ -0,0 +1,371 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef PING_CHANNEL_H
++#define PING_CHANNEL_H
++
++#include <asiolink/asio_wrapper.h>
++#include <asiolink/io_address.h>
++#include <asiolink/io_service.h>
++#include <util/watch_socket.h>
++#include <icmp_msg.h>
++#include <icmp_socket.h>
++
++#include <boost/scoped_ptr.hpp>
++#include <boost/enable_shared_from_this.hpp>
++
++#include <iostream>
++#include <mutex>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief Type of the function implementing a callback invoked by the
++/// @c SocketCallback functor.
++typedef std::function<void(boost::system::error_code ec, size_t length)> SocketCallbackFunction;
++
++/// @brief Functor associated with the socket object.
++///
++/// This functor calls a callback function specified in the constructor.
++class SocketCallback {
++public:
++ /// @brief Constructor.
++ ///
++ /// @param socket_callback Callback to be invoked by the functor upon
++ /// an event associated with the socket.
++ explicit inline SocketCallback(SocketCallbackFunction socket_callback)
++ : callback_(socket_callback) {
++ };
++
++ /// @brief Operator called when event associated with a socket occurs.
++ ///
++/// This operator returns immediately when the received @c boost::system::error_code
++/// is equal to @c boost::asio::error::operation_aborted.
++ ///
++ /// @param ec Error code.
++ /// @param length Data length.
++ inline void operator()(boost::system::error_code ec, size_t length = 0) {
++ if (ec.value() == boost::asio::error::operation_aborted) {
++ return;
++ }
++
++ callback_(ec, length);
++ };
++
++private:
++ /// @brief Supplied callback.
++ SocketCallbackFunction callback_;
++};
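++
++// Usage sketch (illustrative only, not upstream code): wrapping a lambda so
++// that aborted operations are silently ignored.
++//
++// @code
++// SocketCallback cb([](boost::system::error_code ec, size_t length) {
++//     if (ec) {
++//         std::cerr << "IO error: " << ec.message() << std::endl;
++//     } else {
++//         std::cout << "transferred " << length << " bytes" << std::endl;
++//     }
++// });
++// cb(boost::asio::error::operation_aborted); // returns without invoking callback_
++// @endcode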
++
++/// @brief Socket type for performing ICMP socket IO.
++typedef ICMPSocket<SocketCallback> PingSocket;
++
++/// @brief Defines a pointer to PingSocket.
++typedef boost::shared_ptr<PingSocket> PingSocketPtr;
++
++/// @brief Function type for callback that fetches next IOAddress to ping.
++typedef std::function<bool(asiolink::IOAddress& target)> NextToSendCallback;
++
++/// @brief Function type for callback to invoke upon ECHO send completion.
++typedef std::function<void(ICMPMsgPtr& echo, bool send_failed)> EchoSentCallback;
++
++/// @brief Function type for callback to invoke when an ICMP reply has been
++/// received.
++typedef std::function<void(ICMPMsgPtr& reply)> ReplyReceivedCallback;
++
++/// @brief Function type for callback to invoke when the channel has shutdown.
++typedef std::function<void()> ShutdownCallback;
++
++/// @brief Provides thread-safe ICMP ECHO REQUEST/ECHO REPLY service
++///
++/// PingChannel uses a @ref PingSocket to send out ECHO REQUESTs and
++/// receive ICMP replies. It is thread-safe and can be driven either
++/// with a single-threaded IOService or a multi-threaded
++/// IOServiceThreadPool. It uses a series of callbacks to perpetually
++/// send requests to target addresses and feed back replies received:
++///
++/// -# next_to_send_cb_ - callback to invoke to fetch the next address to ping
++/// -# echo_sent_cb_ - callback to invoke when an ECHO REQUEST has been sent out
++/// -# reply_received_cb_ - callback to invoke when an ICMP reply has been received.
++/// -# shutdown_cb_ - callback to invoke when the channel has shutdown
++///
++/// Callback handlers are supplied via the PingChannel constructor. Public
++/// methods are provided that calling layers can use to control the channel
++/// (e.g. open the channel, initiate reading, initiate writing, and close
++/// the channel).
++///
++/// @note Callback handlers must be thread-safe if the channel is
++/// driven by an IOServiceThreadPool.
++///
++class PingChannel : public boost::enable_shared_from_this<PingChannel> {
++public:
++ /// @brief Constructor
++ ///
++ /// Instantiates the channel with its socket closed.
++ ///
++ /// @param io_service pointer to the IOService instance that will manage
++ /// the channel's IO. Must not be empty
++ /// @param next_to_send_cb callback to invoke to fetch the next IOAddress
++ /// to ping
++ /// @param echo_sent_cb callback to invoke when an ECHO send has completed
++ /// @param reply_received_cb callback to invoke when an ICMP reply has been
++ /// received. This callback is passed all inbound ICMP messages (e.g. ECHO
++ /// REPLY, UNREACHABLE, etc...)
++ /// @param shutdown_cb callback to invoke when the channel has shutdown due
++ /// to an error
++ ///
++ /// @throw BadValue if io_service is empty.
++ PingChannel(asiolink::IOServicePtr& io_service,
++ NextToSendCallback next_to_send_cb,
++ EchoSentCallback echo_sent_cb,
++ ReplyReceivedCallback reply_received_cb,
++ ShutdownCallback shutdown_cb = ShutdownCallback());
++
++ /// @brief Destructor
++ ///
++ /// Closes the socket if it is open.
++ virtual ~PingChannel();
++
++ /// @brief Opens the socket for communications
++ ///
++ /// (Re)Creates the @ref PingSocket instance and opens it.
++ ///
++ /// @throw Unexpected if the open fails.
++ void open();
++
++ /// @brief Indicates whether or not the channel socket is open.
++ ///
++ /// @return true if the socket is open.
++ bool isOpen() const;
++
++ /// @brief Schedules the next send.
++ ///
++ /// If the socket is not currently sending it posts a call to @c sendNext()
++ /// to the channel's IOService.
++ virtual void startSend();
++
++ /// @brief Schedules the next read.
++ ///
++ /// If the socket is not currently reading it posts a call to @c doRead()
++ /// to the channel's IOService.
++ void startRead();
++
++ /// @brief Closes the channel's socket.
++ void close();
++
++ /// @brief Fetches the channel's IOService
++ ///
++ /// @return pointer to the IOService.
++ asiolink::IOServicePtr getIOService() {
++ return (io_service_);
++ }
++
++protected:
++ /// @brief Receive data on the socket asynchronously
++ ///
++ /// Calls the underlying socket's asyncReceive() method to read a
++ /// packet of data from a remote endpoint. Arrival of the data is signalled
++ /// via a call to the callback function.
++ ///
++ /// This virtual function is provided as means to inject errors during
++ /// read operations to facilitate testing.
++ ///
++ /// @param data buffer to receive incoming message
++ /// @param length length of the data buffer
++ /// @param offset offset into buffer where data is to be put
++ /// @param endpoint source of the communication
++ /// @param callback callback object
++ virtual void asyncReceive(void* data, size_t length, size_t offset,
++ asiolink::IOEndpoint* endpoint, SocketCallback& callback);
++
++ /// @brief Send data on the socket asynchronously
++ ///
++ /// Calls the underlying socket's asyncSend() method to send a
++ /// packet of data to a remote endpoint. Completion of the send is
++ /// signalled via a call to the callback function.
++ ///
++ /// This virtual function is provided as means to inject errors during
++ /// write operations to facilitate testing.
++ ///
++ /// @param data buffer containing the data to send
++ /// @param length length of the data buffer
++ /// @param endpoint destination of the communication
++ /// @param callback callback object
++ virtual void asyncSend(void* data, size_t length, asiolink::IOEndpoint* endpoint,
++ SocketCallback& callback);
++
++protected:
++ /// @brief Initiates an asynchronous socket read.
++ ///
++ /// If the channel is able to read (is open, not stopping and not
++ /// currently reading) it invokes @ref PingSocket::asyncReceive()
++ /// otherwise it simply returns. If the call to asyncReceive() fails
++ /// it calls @c stopChannel() otherwise, when it completes it will
++ /// invoke @c socketReadCallback().
++ void doRead();
++
++ /// @brief Socket read completion callback
++ ///
++ /// Invoked when PingSocket::asyncRead() completes.
++ /// Upon read success and data received:
++ ///
++ /// -# Unpacks the wire data
++ /// -# Pass the resultant ICMPMsg to reply received callback
++ /// -# start next read
++ ///
++ /// On error conditions:
++ ///
++ /// -# Operation aborted: socket is shutting down, simply return
++ /// -# Operation would block/try again: start a new read
++ /// -# Any other error, shut down the channel
++ ///
++ /// @param ec error code indicating either success or the error encountered
++ /// @param length number of bytes read
++ void socketReadCallback(boost::system::error_code ec, size_t length);
++
++ /// @brief Initiates sending the next ECHO REQUEST
++ ///
++ /// If the channel is able to send (i.e. is open, not stopping and not
++ /// currently writing):
++ /// -# Invoke next to send callback to fetch the next target IP address
++ /// -# If there is no next target, return
++ /// -# Construct the ECHO REQUEST for the target and pack it into wire form
++ /// -# Begin sending the request by passing to @c PingSocket::asyncSend()
++ /// -# If the asyncSend() call fails shutdown the channel, otherwise when
++ /// it completes it invokes @c socketWriteCallback().
++ virtual void sendNext();
++
++ /// @brief Socket write completion callback
++ ///
++ /// Invoked when PingSocket::asyncWrite() completes.
++ /// Upon write success:
++ ///
++ /// -# Pass the ECHO REQUEST (i.e. echo_sent) to echo sent callback
++ /// -# start next write
++ ///
++ /// On error conditions:
++ ///
++ /// -# Operation aborted: socket is shutting down, simply return
++ /// -# Operation would block/try again: start a new write
++ /// -# Any other error, shut down the channel
++ ///
++ /// @param echo_sent ECHO REQUEST that was written (or attempted to be
++ /// written)
++ /// @param ec error code indicating either success or the error encountered
++ /// @param length number of bytes written
++ void socketWriteCallback(ICMPMsgPtr echo_sent, boost::system::error_code ec,
++ size_t length);
++
++ /// @brief Closes the socket channel and invokes the shutdown callback.
++ ///
++ /// This function is invoked to notify the calling layer that the socket
++ /// has encountered an unrecoverable error and is stopping operations.
++ void stopChannel();
++
++ /// @brief returns the next unique ECHO instance number.
++ ///
++ /// This method generates and returns the next ECHO instance
++ /// number by incrementing the current value. It is a strictly
++ /// monotonically increasing value beginning at 0x00010001.
++ /// On rollover it resets to 0x00010001.
++ ///
++ /// Must be called in a thread-safe context
++ ///
++ /// @return the next unique instance number.
++ static uint32_t nextEchoInstanceNum();
++
++ /// @brief Indicates whether or not a send can be initiated.
++ ///
++ /// Must be called in a thread-safe context
++ ///
++ /// @return True if the socket is open, is not attempting to stop, and is
++ /// not currently sending.
++ bool canSend() {
++ return (socket_ && socket_->isOpen() && !stopping_ && !sending_);
++ }
++
++ /// @brief Indicates whether or not a read can be initiated.
++ ///
++ /// Must be called in a thread-safe context
++ ///
++ /// @return True if the socket is open, is not attempting to stop, and is
++ /// not currently reading.
++ bool canRead() {
++ return (socket_ && socket_->isOpen() && !stopping_ && !reading_);
++ }
++
++ /// @brief Returns input buffer size.
++ ///
++ /// Must be called in a thread-safe context
++ ///
++ /// @return size of the input buf
++ size_t getInputBufSize() const;
++
++ /// @brief Returns pointer to the first byte of the input buffer.
++ ///
++ /// Must be called in a thread-safe context
++ ///
++ /// @return pointer to the data buffer
++ /// @throw InvalidOperation if called when the buffer is empty.
++ unsigned char* getInputBufData();
++
++ /// @brief IOService instance that drives socket IO
++ asiolink::IOServicePtr io_service_;
++
++ /// @brief Callback to invoke to fetch the next address to ping.
++ NextToSendCallback next_to_send_cb_;
++
++ /// @brief Callback to invoke when an ECHO write has completed.
++ EchoSentCallback echo_sent_cb_;
++
++ /// @brief Callback to invoke when an ICMP reply has been received.
++ ReplyReceivedCallback reply_received_cb_;
++
++ /// @brief Callback to invoke when the channel has shutdown.
++ ShutdownCallback shutdown_cb_;
++
++ /// @brief Socket through which to ping.
++ PingSocketPtr socket_;
++
++ /// @brief Buffer to hold the contents of the most recent socket read.
++ std::vector<uint8_t> input_buf_;
++
++ /// @brief Retains the endpoint from which the most recent reply was received.
++ ICMPEndpoint reply_endpoint_;
++
++ /// @brief Indicates whether or not the socket has a read in progress.
++ bool reading_;
++
++ /// @brief Indicates whether or not the socket has a write in progress.
++ bool sending_;
++
++ /// @brief Indicates whether or not the channel has been told to stop.
++ bool stopping_;
++
++ /// @brief The mutex used to protect internal state.
++ const boost::scoped_ptr<std::mutex> mutex_;
++
++ /// @brief True if channel was opened in single-threaded mode, false
++ /// otherwise.
++ bool single_threaded_;
++
++ /// @brief Pointer to WatchSocket instance supplying the "select-fd".
++ util::WatchSocketPtr watch_socket_;
++
++ /// @brief WatchSocket fd registered with IfaceMgr.
++ int registered_write_fd_;
++
++ /// @brief ICMPSocket fd registered with IfaceMgr.
++ int registered_read_fd_;
++};
++
++/// @brief Defines a smart pointer to PingChannel
++typedef boost::shared_ptr<PingChannel> PingChannelPtr;
++
++} // end of namespace ping_check
++} // end of namespace isc
++
++#endif
+diff --git a/src/hooks/dhcp/ping_check/ping_check.dox b/src/hooks/dhcp/ping_check/ping_check.dox
+new file mode 100644
+index 0000000000..a7fbe839c0
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check.dox
+@@ -0,0 +1,44 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/**
++
++@mainpage Kea Ping Check Hooks Library
++
++Welcome to Kea Ping Check Hooks Library. This documentation is
++addressed at developers who are interested in internal operation of the
++library. This file provides information needed to understand and perhaps
++extend this library.
++
++This documentation is stand-alone: you should have read and
++understood <a href="https://reports.kea.isc.org/dev_guide/">Kea
++Developer's Guide</a> and in particular its section about hooks: <a
++href="https://reports.kea.isc.org/dev_guide/df/d46/hooksdgDevelopersGuide.html">
++Hooks Developer's Guide</a>.
++
++@section cbPingCheckOverview Overview
++The @c ping_check hooks library provides the ability for kea-dhcp4 to carry
++out an ICMP ECHO test of a candidate IP address prior to sending that address to
++a DHCPv4 client in a DHCPOFFER message.
++
++@section cbPingCheckInternals Library Internals
++
++In addition to the requisite @ref load() and @ref unload() functions, the library
++implements the following callouts:
++
++- @ref dhcp4_srv_configured() - schedules a (re)start of the ICMP IO layer
++- @ref lease4_offer() - handles requests from kea-dhcp4 core to initiate a ping check
++for a candidate lease
++
++The load() function creates an instance of @ref isc::ping_check::PingCheckMgr.
++This class is the top-level object that provides configuration processing and supervises
++the execution of ping checks.
++
++@section cbPingCheckMTCompatibility Multi-Threading Compatibility
++
++The @c ping_check hooks library requires multi-threading.
++
++*/
+diff --git a/src/hooks/dhcp/ping_check/ping_check_callouts.cc b/src/hooks/dhcp/ping_check/ping_check_callouts.cc
+new file mode 100644
+index 0000000000..ae006359be
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_callouts.cc
+@@ -0,0 +1,240 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++
++#include <asiolink/io_service_mgr.h>
++#include <database/audit_entry.h>
++#include <dhcpsrv/cfgmgr.h>
++#include <ping_check_log.h>
++#include <ping_check_mgr.h>
++#include <hooks/hooks.h>
++#include <process/daemon.h>
++#include <string>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief PingCheckMgr singleton
++PingCheckMgrPtr mgr;
++
++} // end of namespace ping_check
++} // end of namespace isc
++
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::log;
++using namespace isc::data;
++using namespace isc::db;
++using namespace isc::dhcp;
++using namespace isc::ping_check;
++using namespace isc::hooks;
++using namespace isc::process;
++using namespace std;
++
++// Functions accessed by the hooks framework use C linkage to avoid the name
++// mangling that accompanies use of the C++ compiler as well as to avoid
++// issues related to namespaces.
++extern "C" {
++
++/// @brief dhcp4_srv_configured implementation.
++///
++/// @param handle callout handle.
++int dhcp4_srv_configured(CalloutHandle& handle) {
++ try {
++ SrvConfigPtr server_config;
++ handle.getArgument("server_config", server_config);
++ mgr->updateSubnetConfig(server_config);
++
++ NetworkStatePtr network_state;
++ handle.getArgument("network_state", network_state);
++
++ // Schedule a start of the services. This ensures we begin after
++ // the dust has settled and Kea MT mode has been firmly established.
++ mgr->startService(network_state);
++ IOServiceMgr::instance().registerIOService(mgr->getIOService());
++ } catch (const std::exception& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_DHCP4_SRV_CONFIGURED_FAILED)
++ .arg(ex.what());
++
++ handle.setStatus(isc::hooks::CalloutHandle::NEXT_STEP_DROP);
++ ostringstream os;
++ os << "Error: " << ex.what();
++ string error(os.str());
++ handle.setArgument("error", error);
++ return (1);
++ }
++
++ return (0);
++}
++
++/// @brief cb4_updated callout implementation.
++///
++/// If it detects that any subnets were altered by the update it
++/// replaces the subnet cache contents. If any of the subnets
++/// fail to parse, the error is logged and the function returns
++/// a non-zero value.
++///
++/// @param handle CalloutHandle.
++///
++/// @return 0 upon success, 1 otherwise
++int cb4_updated(CalloutHandle& handle) {
++ AuditEntryCollectionPtr audit_entries;
++ handle.getArgument("audit_entries", audit_entries);
++
++ auto const& object_type_idx = audit_entries->get<AuditEntryObjectTypeTag>();
++ auto range = object_type_idx.equal_range("dhcp4_subnet");
++ if (std::distance(range.first, range.second)) {
++ try {
++ // Server config has been committed, so use the current configuration.
++ mgr->updateSubnetConfig(CfgMgr::instance().getCurrentCfg());
++ } catch (const std::exception& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_CB4_UPDATE_FAILED)
++ .arg(ex.what());
++ return (1);
++ }
++ }
++
++ return (0);
++}
++
++/// @brief lease4_offer callout implementation.
++///
++/// @param handle callout handle.
++int lease4_offer(CalloutHandle& handle) {
++ CalloutHandle::CalloutNextStep status = handle.getStatus();
++ if (status == CalloutHandle::NEXT_STEP_DROP ||
++ status == CalloutHandle::NEXT_STEP_SKIP) {
++ return (0);
++ }
++
++ Pkt4Ptr query4;
++ Lease4Ptr lease4;
++ ParkingLotHandlePtr parking_lot;
++ try {
++ // Get all arguments available for the lease4_offer hook point.
++ // If any of these arguments is not available this is a programmatic
++ // error. An exception will be thrown which will be caught by the
++ // caller and logged.
++ handle.getArgument("query4", query4);
++
++ Lease4CollectionPtr leases4;
++ handle.getArgument("leases4", leases4);
++
++ uint32_t offer_lifetime;
++ handle.getArgument("offer_lifetime", offer_lifetime);
++
++ Lease4Ptr old_lease;
++ handle.getArgument("old_lease", old_lease);
++
++ if (query4->getType() != DHCPDISCOVER) {
++ isc_throw(InvalidOperation, "query4 is not a DHCPDISCOVER");
++ }
++
++ if (!leases4) {
++ isc_throw(InvalidOperation, "leases4 is null");
++ }
++
++ if (!leases4->empty()) {
++ lease4 = (*leases4)[0];
++ }
++
++ if (!lease4) {
++ isc_throw(InvalidOperation, "leases4 is empty, no lease to check");
++ }
++
++ // Fetch the parking lot. If it's empty the server is not employing
++ // parking, which is fine.
++ // Create a reference to the parked packet. This signals that we have a
++ // stake in unparking it.
++ parking_lot = handle.getParkingLotHandlePtr();
++ if (parking_lot) {
++ parking_lot->reference(query4);
++ }
++
++ // Get configuration based on the lease's subnet.
++ auto const& config = mgr->getScopedConfig(lease4);
++
++ // Call shouldPing() to determine if we should ping check or not.
++ // - status == PARK - ping check it
++ // - status == CONTINUE - check not needed, release DHCPOFFER to client
++ // - status == DROP - duplicate check, drop the duplicate DHCPOFFER
++ status = mgr->shouldPing(lease4, query4, old_lease, config);
++ handle.setStatus(status);
++ if (status == CalloutHandle::NEXT_STEP_PARK) {
++ mgr->startPing(lease4, query4, parking_lot, config);
++ } else {
++ // Dereference the parked packet. This releases our stake in it.
++ if (parking_lot) {
++ parking_lot->dereference(query4);
++ }
++ }
++
++ } catch (const std::exception& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_LEASE4_OFFER_FAILED)
++ .arg(query4 ? query4->getLabel() : "<no query>")
++ .arg(lease4 ? lease4->addr_.toText() : "<no lease>")
++ .arg(ex.what());
++ // Make sure we dereference.
++ if (parking_lot) {
++ parking_lot->dereference(query4);
++ }
++
++ return (1);
++ }
++
++ return (0);
++}
++
++/// @brief This function is called when the library is loaded.
++///
++/// @param handle library handle
++/// @return 0 when initialization is successful, 1 otherwise
++int load(LibraryHandle& handle) {
++ try {
++ // Make the hook library only loadable by kea-dhcp4.
++ const string& proc_name = Daemon::getProcName();
++ if (proc_name != "kea-dhcp4") {
++ isc_throw(isc::Unexpected, "Bad process name: " << proc_name
++ << ", expected kea-dhcp4");
++ }
++
++ // Instantiate the manager singleton.
++ mgr.reset(new PingCheckMgr());
++
++ // Configure the manager using the hook library's parameters.
++ ConstElementPtr json = handle.getParameters();
++ mgr->configure(json);
++ } catch (const exception& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_LOAD_ERROR)
++ .arg(ex.what());
++ return (1);
++ }
++
++ LOG_INFO(ping_check_logger, PING_CHECK_LOAD_OK);
++ return (0);
++}
++
++/// @brief This function is called when the library is unloaded.
++///
++/// @return always 0.
++int unload() {
++ if (mgr) {
++ IOServiceMgr::instance().unregisterIOService(mgr->getIOService());
++ mgr.reset();
++ }
++ LOG_INFO(ping_check_logger, PING_CHECK_UNLOAD);
++ return (0);
++}
++
++/// @brief This function is called to retrieve the multi-threading compatibility.
++///
++/// @return 1 which means compatible with multi-threading.
++int multi_threading_compatible() {
++ return (1);
++}
++
++} // end extern "C"
+diff --git a/src/hooks/dhcp/ping_check/ping_check_config.cc b/src/hooks/dhcp/ping_check/ping_check_config.cc
+new file mode 100644
+index 0000000000..a1c69da61e
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_config.cc
+@@ -0,0 +1,98 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++
++#include <ping_check_config.h>
++
++using namespace isc;
++using namespace isc::data;
++using namespace isc::dhcp;
++
++namespace isc {
++namespace ping_check {
++
++const data::SimpleKeywords
++PingCheckConfig::CONFIG_KEYWORDS =
++{
++ { "enable-ping-check", Element::boolean },
++ { "min-ping-requests", Element::integer },
++ { "reply-timeout", Element::integer },
++ { "ping-cltt-secs", Element::integer},
++ { "ping-channel-threads", Element::integer}
++};
++
++PingCheckConfig::PingCheckConfig() :
++ enable_ping_check_(true),
++ min_ping_requests_(1),
++ reply_timeout_(100),
++ ping_cltt_secs_(60),
++ ping_channel_threads_(0) {
++}
++
++void
++PingCheckConfig::parse(data::ConstElementPtr config) {
++ // Use a local instance to collect values. This way we
++ // avoid corrupting current values if there are any errors.
++ PingCheckConfig local;
++
++ // Note checkKeywords() will throw DhcpConfigError if there is a problem.
++ SimpleParser::checkKeywords(CONFIG_KEYWORDS, config);
++ ConstElementPtr value = config->get("enable-ping-check");
++ if (value) {
++ local.setEnablePingCheck(value->boolValue());
++ }
++
++ value = config->get("min-ping-requests");
++ if (value) {
++ int64_t val = value->intValue();
++ if (val <= 0) {
++ isc_throw(DhcpConfigError, "invalid min-ping-requests: '"
++ << val << "', must be greater than 0");
++ }
++
++        local.setMinPingRequests(static_cast<uint32_t>(val));
++ }
++
++ value = config->get("reply-timeout");
++ if (value) {
++ int64_t val = value->intValue();
++ if (val <= 0) {
++ isc_throw(DhcpConfigError, "invalid reply-timeout: '"
++ << val << "', must be greater than 0");
++ }
++
++        local.setReplyTimeout(static_cast<uint32_t>(val));
++ }
++
++ value = config->get("ping-cltt-secs");
++ if (value) {
++ int64_t val = value->intValue();
++ if (val < 0) {
++ isc_throw(DhcpConfigError, "invalid ping-cltt-secs: '"
++ << val << "', cannot be less than 0");
++ }
++
++        local.setPingClttSecs(static_cast<uint32_t>(val));
++ }
++
++ value = config->get("ping-channel-threads");
++ if (value) {
++ int64_t val = value->intValue();
++ if (val < 0) {
++ isc_throw(DhcpConfigError, "invalid ping-channel-threads: '"
++ << val << "', cannot be less than 0");
++ }
++
++        local.setPingChannelThreads(static_cast<uint32_t>(val));
++ }
++
++ // All values good, copy from local instance.
++ *this = local;
++}
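++
++// Usage sketch (illustrative only): parse() consumes an Element map built
++// from JSON. An out-of-range value, e.g. "min-ping-requests": 0, makes
++// parse() throw DhcpConfigError and leaves the current values untouched:
++//
++//   ConstElementPtr cfg = Element::fromJSON(
++//       R"({ "enable-ping-check": true, "reply-timeout": 250 })");
++//   PingCheckConfig config;
++//   config.parse(cfg);  // reply-timeout now 250 ms, the rest keep defaults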
++
++} // end of namespace ping_check
++} // end of namespace isc
+diff --git a/src/hooks/dhcp/ping_check/ping_check_config.h b/src/hooks/dhcp/ping_check/ping_check_config.h
+new file mode 100644
+index 0000000000..9fd23eba59
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_config.h
+@@ -0,0 +1,134 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef PING_CHECK_CONFIG_H
++#define PING_CHECK_CONFIG_H
++
++#include <cc/data.h>
++#include <cc/simple_parser.h>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief Houses the Ping check configuration parameters for a single scope
++/// (e.g. global, subnet...).
++class PingCheckConfig {
++public:
++ /// @brief List of valid parameters and expected types.
++ static const data::SimpleKeywords CONFIG_KEYWORDS;
++
++ /// @brief Constructor
++ PingCheckConfig();
++
++ /// @brief Destructor
++ ~PingCheckConfig() = default;
++
++ /// @brief Extracts member values from an Element::map
++ ///
++ /// @param config map of configuration parameters
++ ///
++    /// @throw DhcpConfigError if invalid values are detected.
++ void parse(data::ConstElementPtr config);
++
++ /// @brief Fetches the value of enable-ping-check
++ ///
++ /// @return boolean value of enable-ping-check
++ bool getEnablePingCheck() const {
++ return (enable_ping_check_);
++    }
++
++ /// @brief Sets the value of enable-ping-check
++ ///
++ /// @param value new value for enable-ping-check
++ void setEnablePingCheck(bool value) {
++ enable_ping_check_ = value;
++ }
++
++ /// @brief Fetches the value of min-ping-requests
++ ///
++ /// @return integer value of min-ping-requests
++ uint32_t getMinPingRequests() const {
++ return (min_ping_requests_);
++    }
++
++ /// @brief Sets the value of min-ping-requests
++ ///
++ /// @param value new value for min-ping-requests
++ void setMinPingRequests(uint32_t value) {
++ min_ping_requests_ = value;
++ }
++
++ /// @brief Fetches the value of reply-timeout
++ ///
++ /// @return integer value of reply-timeout
++ uint32_t getReplyTimeout() const {
++ return (reply_timeout_);
++ }
++
++ /// @brief Sets the value of reply-timeout
++ ///
++ /// @param value new value for reply-timeout
++ void setReplyTimeout(uint32_t value) {
++ reply_timeout_ = value;
++ }
++
++ /// @brief Fetches the value of ping-cltt-secs
++ ///
++ /// @return integer value of ping-cltt-secs
++ uint32_t getPingClttSecs() const {
++ return (ping_cltt_secs_);
++ }
++
++ /// @brief Sets the value of ping-cltt-secs
++ ///
++ /// @param value new value for ping-cltt-secs
++ void setPingClttSecs(uint32_t value) {
++ ping_cltt_secs_ = value;
++ }
++
++ /// @brief Fetches the value of ping-channel-threads
++ ///
++ /// @return integer value of ping-channel-threads
++ uint32_t getPingChannelThreads() const {
++ return (ping_channel_threads_);
++ }
++
++ /// @brief Sets the value of ping-channel-threads
++ ///
++ /// @param value new value for ping-channel-threads
++ void setPingChannelThreads(uint32_t value) {
++ ping_channel_threads_ = value;
++ }
++
++private:
++    /// @brief True if checking is enabled.
++ bool enable_ping_check_;
++
++ /// @brief minimum number of ECHO REQUESTs sent, without replies received,
++ /// required to declare an address free to offer.
++ uint32_t min_ping_requests_;
++
++ /// @brief maximum number of milliseconds to wait for an ECHO REPLY after
++ /// an ECHO REQUEST has been sent.
++ uint32_t reply_timeout_;
++
++ /// @brief minimum number of seconds that must elapse after the lease's CLTT
++ /// before a ping check will be conducted, when the client is the lease's
++ /// previous owner.
++ uint32_t ping_cltt_secs_;
++
++    /// @brief Number of threads to use if Kea core is multi-threaded.
++    /// Defaults to 0, which means follow the core number of threads.
++    uint32_t ping_channel_threads_;
++};
++
++/// @brief Defines a shared pointer to a PingCheckConfig.
++typedef boost::shared_ptr<PingCheckConfig> PingCheckConfigPtr;
++
++} // end of namespace ping_check
++} // end of namespace isc
++
++#endif
+diff --git a/src/hooks/dhcp/ping_check/ping_check_log.cc b/src/hooks/dhcp/ping_check/ping_check_log.cc
+new file mode 100644
+index 0000000000..9e877ff9b5
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_log.cc
+@@ -0,0 +1,17 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++
++#include <ping_check_log.h>
++
++namespace isc {
++namespace ping_check {
++
++isc::log::Logger ping_check_logger("ping-check-hooks");
++
++} // namespace ping_check
++} // namespace isc
+diff --git a/src/hooks/dhcp/ping_check/ping_check_log.h b/src/hooks/dhcp/ping_check/ping_check_log.h
+new file mode 100644
+index 0000000000..22e0fca953
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_log.h
+@@ -0,0 +1,23 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef PING_CHECK_LOG_H
++#define PING_CHECK_LOG_H
++
++#include <log/logger_support.h>
++#include <log/macros.h>
++#include <log/log_dbglevels.h>
++#include <ping_check_messages.h>
++#include <iostream>
++
++namespace isc {
++namespace ping_check {
++
++extern isc::log::Logger ping_check_logger;
++
++} // end of namespace ping_check
++} // end of namespace isc
++#endif
+diff --git a/src/hooks/dhcp/ping_check/ping_check_messages.cc b/src/hooks/dhcp/ping_check/ping_check_messages.cc
+new file mode 100644
+index 0000000000..7dea2c2397
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_messages.cc
+@@ -0,0 +1,99 @@
++// File created from src/hooks/dhcp/ping_check/ping_check_messages.mes
++
++#include <cstddef>
++#include <log/message_types.h>
++#include <log/message_initializer.h>
++
++extern const isc::log::MessageID PING_CHECK_CB4_UPDATE_FAILED = "PING_CHECK_CB4_UPDATE_FAILED";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_ECHO_REPLY_RECEIVED = "PING_CHECK_CHANNEL_ECHO_REPLY_RECEIVED";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_ECHO_REQUEST_SENT = "PING_CHECK_CHANNEL_ECHO_REQUEST_SENT";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_MALFORMED_PACKET_RECEIVED = "PING_CHECK_CHANNEL_MALFORMED_PACKET_RECEIVED";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_NETWORK_WRITE_ERROR = "PING_CHECK_CHANNEL_NETWORK_WRITE_ERROR";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_CLOSED = "PING_CHECK_CHANNEL_SOCKET_CLOSED";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_CLOSE_ERROR = "PING_CHECK_CHANNEL_SOCKET_CLOSE_ERROR";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_OPENED = "PING_CHECK_CHANNEL_SOCKET_OPENED";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_READ_FAILED = "PING_CHECK_CHANNEL_SOCKET_READ_FAILED";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_WRITE_FAILED = "PING_CHECK_CHANNEL_SOCKET_WRITE_FAILED";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_STOP = "PING_CHECK_CHANNEL_STOP";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_WATCH_SOCKET_CLEAR_ERROR = "PING_CHECK_CHANNEL_WATCH_SOCKET_CLEAR_ERROR";
++extern const isc::log::MessageID PING_CHECK_CHANNEL_WATCH_SOCKET_CLOSE_ERROR = "PING_CHECK_CHANNEL_WATCH_SOCKET_CLOSE_ERROR";
++extern const isc::log::MessageID PING_CHECK_DHCP4_SRV_CONFIGURED_FAILED = "PING_CHECK_DHCP4_SRV_CONFIGURED_FAILED";
++extern const isc::log::MessageID PING_CHECK_DUPLICATE_CHECK = "PING_CHECK_DUPLICATE_CHECK";
++extern const isc::log::MessageID PING_CHECK_LEASE4_OFFER_FAILED = "PING_CHECK_LEASE4_OFFER_FAILED";
++extern const isc::log::MessageID PING_CHECK_LOAD_ERROR = "PING_CHECK_LOAD_ERROR";
++extern const isc::log::MessageID PING_CHECK_LOAD_OK = "PING_CHECK_LOAD_OK";
++extern const isc::log::MessageID PING_CHECK_MGR_CHANNEL_DOWN = "PING_CHECK_MGR_CHANNEL_DOWN";
++extern const isc::log::MessageID PING_CHECK_MGR_LEASE_FREE_TO_USE = "PING_CHECK_MGR_LEASE_FREE_TO_USE";
++extern const isc::log::MessageID PING_CHECK_MGR_NEXT_ECHO_SCHEDULED = "PING_CHECK_MGR_NEXT_ECHO_SCHEDULED";
++extern const isc::log::MessageID PING_CHECK_MGR_RECEIVED_ECHO_REPLY = "PING_CHECK_MGR_RECEIVED_ECHO_REPLY";
++extern const isc::log::MessageID PING_CHECK_MGR_RECEIVED_UNEXPECTED_ECHO_REPLY = "PING_CHECK_MGR_RECEIVED_UNEXPECTED_ECHO_REPLY";
++extern const isc::log::MessageID PING_CHECK_MGR_RECEIVED_UNEXPECTED_UNREACHABLE_MSG = "PING_CHECK_MGR_RECEIVED_UNEXPECTED_UNREACHABLE_MSG";
++extern const isc::log::MessageID PING_CHECK_MGR_RECEIVED_UNREACHABLE_MSG = "PING_CHECK_MGR_RECEIVED_UNREACHABLE_MSG";
++extern const isc::log::MessageID PING_CHECK_MGR_REPLY_RECEIVED_ERROR = "PING_CHECK_MGR_REPLY_RECEIVED_ERROR";
++extern const isc::log::MessageID PING_CHECK_MGR_REPLY_TIMEOUT_EXPIRED = "PING_CHECK_MGR_REPLY_TIMEOUT_EXPIRED";
++extern const isc::log::MessageID PING_CHECK_MGR_SEND_COMPLETED_ERROR = "PING_CHECK_MGR_SEND_COMPLETED_ERROR";
++extern const isc::log::MessageID PING_CHECK_MGR_STARTED = "PING_CHECK_MGR_STARTED";
++extern const isc::log::MessageID PING_CHECK_MGR_STARTED_SINGLE_THREADED = "PING_CHECK_MGR_STARTED_SINGLE_THREADED";
++extern const isc::log::MessageID PING_CHECK_MGR_START_PING_CHECK = "PING_CHECK_MGR_START_PING_CHECK";
++extern const isc::log::MessageID PING_CHECK_MGR_STOPPED = "PING_CHECK_MGR_STOPPED";
++extern const isc::log::MessageID PING_CHECK_MGR_STOPPING = "PING_CHECK_MGR_STOPPING";
++extern const isc::log::MessageID PING_CHECK_MGR_SUBNET_CONFIG_FAILED = "PING_CHECK_MGR_SUBNET_CONFIG_FAILED";
++extern const isc::log::MessageID PING_CHECK_PAUSE_FAILED = "PING_CHECK_PAUSE_FAILED";
++extern const isc::log::MessageID PING_CHECK_PAUSE_ILLEGAL = "PING_CHECK_PAUSE_ILLEGAL";
++extern const isc::log::MessageID PING_CHECK_PAUSE_PERMISSIONS_FAILED = "PING_CHECK_PAUSE_PERMISSIONS_FAILED";
++extern const isc::log::MessageID PING_CHECK_RESUME_FAILED = "PING_CHECK_RESUME_FAILED";
++extern const isc::log::MessageID PING_CHECK_UNEXPECTED_READ_ERROR = "PING_CHECK_UNEXPECTED_READ_ERROR";
++extern const isc::log::MessageID PING_CHECK_UNEXPECTED_WRITE_ERROR = "PING_CHECK_UNEXPECTED_WRITE_ERROR";
++extern const isc::log::MessageID PING_CHECK_UNLOAD = "PING_CHECK_UNLOAD";
++
++namespace {
++
++const char* values[] = {
++    "PING_CHECK_CB4_UPDATE_FAILED", "A subnet's ping-check parameters failed to parse after being updated %1",
++ "PING_CHECK_CHANNEL_ECHO_REPLY_RECEIVED", "from address %1, id %2, sequence %3",
++ "PING_CHECK_CHANNEL_ECHO_REQUEST_SENT", "to address %1, id %2, sequence %3",
++ "PING_CHECK_CHANNEL_MALFORMED_PACKET_RECEIVED", "error occurred unpacking message %1, discarding it",
++ "PING_CHECK_CHANNEL_NETWORK_WRITE_ERROR", "occurred trying to ping %1, error %2",
++ "PING_CHECK_CHANNEL_SOCKET_CLOSED", "ICMP socket has been closed.",
++ "PING_CHECK_CHANNEL_SOCKET_CLOSE_ERROR", "an attempt to close the ICMP socket failed %1",
++    "PING_CHECK_CHANNEL_SOCKET_OPENED", "ICMP socket has been opened successfully.",
++ "PING_CHECK_CHANNEL_SOCKET_READ_FAILED", "socket read completed with an error %1",
++ "PING_CHECK_CHANNEL_SOCKET_WRITE_FAILED", "socket write completed with an error %1",
++ "PING_CHECK_CHANNEL_STOP", "channel is stopping operations.",
++    "PING_CHECK_CHANNEL_WATCH_SOCKET_CLEAR_ERROR", "an attempt to clear the WatchSocket associated with the single-threaded ping-channel failed %1",
++    "PING_CHECK_CHANNEL_WATCH_SOCKET_CLOSE_ERROR", "an attempt to close the WatchSocket associated with the single-threaded ping-channel failed %1",
++ "PING_CHECK_DHCP4_SRV_CONFIGURED_FAILED", "dhcp4_srv_configured callout failed %1",
++ "PING_CHECK_DUPLICATE_CHECK", "Ping check already in progress for %1, initiated by %2",
++ "PING_CHECK_LEASE4_OFFER_FAILED", "lease4_offer callout failed for query %1, lease address %2, reason %3",
++ "PING_CHECK_LOAD_ERROR", "loading Ping Check hooks library failed %1",
++ "PING_CHECK_LOAD_OK", "Ping Check hooks library loaded successfully.",
++    "PING_CHECK_MGR_CHANNEL_DOWN", "Ping Channel has shut down, ping checking will be skipped",
++ "PING_CHECK_MGR_LEASE_FREE_TO_USE", "address %1 is free to use for %2",
++ "PING_CHECK_MGR_NEXT_ECHO_SCHEDULED", "for %1, scheduling ECHO_REQUEST %2 of %3",
++ "PING_CHECK_MGR_RECEIVED_ECHO_REPLY", "from %1, id %2, sequence %3",
++ "PING_CHECK_MGR_RECEIVED_UNEXPECTED_ECHO_REPLY", "from %1, id %2, sequence %3 received after reply-timeout expired",
++ "PING_CHECK_MGR_RECEIVED_UNEXPECTED_UNREACHABLE_MSG", "for %1, id %2, sequence %3 received after reply-timeout expired",
++ "PING_CHECK_MGR_RECEIVED_UNREACHABLE_MSG", "for %1, id %2, sequence %3",
++ "PING_CHECK_MGR_REPLY_RECEIVED_ERROR", "an error occurred processing an ICMP reply message %1",
++ "PING_CHECK_MGR_REPLY_TIMEOUT_EXPIRED", "for %1, ECHO REQUEST %2 of %3, reply-timeout %4",
++ "PING_CHECK_MGR_SEND_COMPLETED_ERROR", "an error occurred in the send completion callback %1",
++ "PING_CHECK_MGR_STARTED", "ping channel operations are running, number of threads %1",
++ "PING_CHECK_MGR_STARTED_SINGLE_THREADED", "single-threaded ping channel operations are running",
++ "PING_CHECK_MGR_START_PING_CHECK", "for %1, initiated by %2",
++ "PING_CHECK_MGR_STOPPED", "channel operations have stopped",
++ "PING_CHECK_MGR_STOPPING", "ping channel operations are stopping",
++    "PING_CHECK_MGR_SUBNET_CONFIG_FAILED", "user-context for subnet id %1 contains invalid ping-check %2",
++ "PING_CHECK_PAUSE_FAILED", "Pausing ping channel operations failed %1",
++ "PING_CHECK_PAUSE_ILLEGAL", "Pausing ping channel operations not allowed %1",
++ "PING_CHECK_PAUSE_PERMISSIONS_FAILED", "Permissions check for ping-channel pause failed %1",
++ "PING_CHECK_RESUME_FAILED", "Resuming ping channel operations failed %1",
++ "PING_CHECK_UNEXPECTED_READ_ERROR", "could not start next socket read %1",
++ "PING_CHECK_UNEXPECTED_WRITE_ERROR", "could not start next socket write %1",
++ "PING_CHECK_UNLOAD", "Ping Check hooks library has been unloaded",
++ NULL
++};
++
++const isc::log::MessageInitializer initializer(values);
++
++} // Anonymous namespace
++
+diff --git a/src/hooks/dhcp/ping_check/ping_check_messages.h b/src/hooks/dhcp/ping_check/ping_check_messages.h
+new file mode 100644
+index 0000000000..9326c699e8
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_messages.h
+@@ -0,0 +1,50 @@
++// File created from src/hooks/dhcp/ping_check/ping_check_messages.mes
++
++#ifndef PING_CHECK_MESSAGES_H
++#define PING_CHECK_MESSAGES_H
++
++#include <log/message_types.h>
++
++extern const isc::log::MessageID PING_CHECK_CB4_UPDATE_FAILED;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_ECHO_REPLY_RECEIVED;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_ECHO_REQUEST_SENT;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_MALFORMED_PACKET_RECEIVED;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_NETWORK_WRITE_ERROR;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_CLOSED;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_CLOSE_ERROR;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_OPENED;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_READ_FAILED;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_SOCKET_WRITE_FAILED;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_STOP;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_WATCH_SOCKET_CLEAR_ERROR;
++extern const isc::log::MessageID PING_CHECK_CHANNEL_WATCH_SOCKET_CLOSE_ERROR;
++extern const isc::log::MessageID PING_CHECK_DHCP4_SRV_CONFIGURED_FAILED;
++extern const isc::log::MessageID PING_CHECK_DUPLICATE_CHECK;
++extern const isc::log::MessageID PING_CHECK_LEASE4_OFFER_FAILED;
++extern const isc::log::MessageID PING_CHECK_LOAD_ERROR;
++extern const isc::log::MessageID PING_CHECK_LOAD_OK;
++extern const isc::log::MessageID PING_CHECK_MGR_CHANNEL_DOWN;
++extern const isc::log::MessageID PING_CHECK_MGR_LEASE_FREE_TO_USE;
++extern const isc::log::MessageID PING_CHECK_MGR_NEXT_ECHO_SCHEDULED;
++extern const isc::log::MessageID PING_CHECK_MGR_RECEIVED_ECHO_REPLY;
++extern const isc::log::MessageID PING_CHECK_MGR_RECEIVED_UNEXPECTED_ECHO_REPLY;
++extern const isc::log::MessageID PING_CHECK_MGR_RECEIVED_UNEXPECTED_UNREACHABLE_MSG;
++extern const isc::log::MessageID PING_CHECK_MGR_RECEIVED_UNREACHABLE_MSG;
++extern const isc::log::MessageID PING_CHECK_MGR_REPLY_RECEIVED_ERROR;
++extern const isc::log::MessageID PING_CHECK_MGR_REPLY_TIMEOUT_EXPIRED;
++extern const isc::log::MessageID PING_CHECK_MGR_SEND_COMPLETED_ERROR;
++extern const isc::log::MessageID PING_CHECK_MGR_STARTED;
++extern const isc::log::MessageID PING_CHECK_MGR_STARTED_SINGLE_THREADED;
++extern const isc::log::MessageID PING_CHECK_MGR_START_PING_CHECK;
++extern const isc::log::MessageID PING_CHECK_MGR_STOPPED;
++extern const isc::log::MessageID PING_CHECK_MGR_STOPPING;
++extern const isc::log::MessageID PING_CHECK_MGR_SUBNET_CONFIG_FAILED;
++extern const isc::log::MessageID PING_CHECK_PAUSE_FAILED;
++extern const isc::log::MessageID PING_CHECK_PAUSE_ILLEGAL;
++extern const isc::log::MessageID PING_CHECK_PAUSE_PERMISSIONS_FAILED;
++extern const isc::log::MessageID PING_CHECK_RESUME_FAILED;
++extern const isc::log::MessageID PING_CHECK_UNEXPECTED_READ_ERROR;
++extern const isc::log::MessageID PING_CHECK_UNEXPECTED_WRITE_ERROR;
++extern const isc::log::MessageID PING_CHECK_UNLOAD;
++
++#endif // PING_CHECK_MESSAGES_H
+diff --git a/src/hooks/dhcp/ping_check/ping_check_messages.mes b/src/hooks/dhcp/ping_check/ping_check_messages.mes
+new file mode 100644
+index 0000000000..21d407bedf
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_messages.mes
+@@ -0,0 +1,229 @@
++# Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++#
++# This Source Code Form is subject to the terms of the Mozilla Public
++# License, v. 2.0. If a copy of the MPL was not distributed with this
++# file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++% PING_CHECK_CB4_UPDATE_FAILED A subnet's ping-check parameters failed to parse after being updated %1
++This error message is emitted when an error occurs trying to parse a
++subnet's ping-check parameters after the subnet was updated via the
++configuration backend. This implies one or more of the parameters is
++invalid and must be corrected.
++
++% PING_CHECK_CHANNEL_ECHO_REPLY_RECEIVED from address %1, id %2, sequence %3
++Logged at debug log level 50.
++This debug message is issued when an ECHO REPLY has been received on
++the ping channel's ICMP socket.
++
++% PING_CHECK_CHANNEL_ECHO_REQUEST_SENT to address %1, id %2, sequence %3
++Logged at debug log level 50.
++This debug message is issued when an ECHO REQUEST has been written to the
++ping channel's ICMP socket.
++
++% PING_CHECK_CHANNEL_MALFORMED_PACKET_RECEIVED error occurred unpacking message %1, discarding it
++Logged at debug log level 40.
++This debug message is emitted when an ICMP packet has been received
++that could not be unpacked.
++
++% PING_CHECK_CHANNEL_NETWORK_WRITE_ERROR occurred trying to ping %1, error %2
++This error message occurs when an asynchronous write on the ICMP socket
++failed trying to send on the ping target's network. This may mean an interface
++is down or there is a configuration error. The lease address to ping and the
++type of the error are provided in the arguments.
++
++% PING_CHECK_CHANNEL_SOCKET_CLOSED ICMP socket has been closed.
++Logged at debug log level 40.
++This debug message is emitted when the ICMP socket for carrying out
++ping checks has been closed.
++
++% PING_CHECK_CHANNEL_SOCKET_CLOSE_ERROR an attempt to close the ICMP socket failed %1
++This error message is emitted when an unexpected error occurred
++while closing the ping check ICMP socket. The error detail is
++provided as an argument of the log message.
++
++% PING_CHECK_CHANNEL_SOCKET_OPENED ICMP socket has been opened successfully.
++Logged at debug log level 40.
++This debug message is emitted when the ICMP socket for carrying out
++ping checks has been successfully opened.
++
++% PING_CHECK_CHANNEL_SOCKET_READ_FAILED socket read completed with an error %1
++This error message occurs when an asynchronous read on the ICMP socket
++failed. The details of the error are provided as an argument of the log
++message.
++
++% PING_CHECK_CHANNEL_SOCKET_WRITE_FAILED socket write completed with an error %1
++This error message occurs when an asynchronous write on the ICMP socket
++failed. The details of the error are provided as an argument of the log
++message.
++
++% PING_CHECK_CHANNEL_STOP channel is stopping operations.
++Logged at debug log level 40.
++This debug message indicates that the channel is stopping operations and
++closing the ICMP socket. The reason for stopping should be apparent in
++preceding log messages.
++
++% PING_CHECK_CHANNEL_WATCH_SOCKET_CLEAR_ERROR an attempt to clear the WatchSocket associated with the single-threaded ping-channel failed %1
++This error message is emitted when an unexpected error occurred
++while clearing the ready marker of the WatchSocket associated with
++the ping check channel. This can only occur when running in
++single-threaded mode. The error detail is provided as an argument
++of the log message.
++
++% PING_CHECK_CHANNEL_WATCH_SOCKET_CLOSE_ERROR an attempt to close the WatchSocket associated with the single-threaded ping-channel failed %1
++This error message is emitted when an unexpected error occurred
++while closing the WatchSocket associated with the ping check channel.
++This can only occur when running in single-threaded mode.
++The error detail is provided as an argument of the log message.
++
++% PING_CHECK_DHCP4_SRV_CONFIGURED_FAILED dhcp4_srv_configured callout failed %1
++This error message indicates an error during the Ping Check hook
++library dhcp4_srv_configured callout. The details of the error are
++provided as an argument of the log message.
++
++% PING_CHECK_DUPLICATE_CHECK Ping check already in progress for %1, initiated by %2
++Logged at debug log level 40.
++This debug message is emitted when a duplicate request to test an address
++is received. When this occurs the duplicate test will be skipped and
++the associated DHCPOFFER will be dropped.
++
++% PING_CHECK_LEASE4_OFFER_FAILED lease4_offer callout failed for query %1, lease address %2, reason %3
++This error message indicates an error during the Ping Check hook
++library lease4_offer callout. The details of the error are
++provided as an argument of the log message.
++
++% PING_CHECK_LOAD_ERROR loading Ping Check hooks library failed %1
++This error message indicates an error during loading the Ping Check
++hooks library. The details of the error are provided as an argument of
++the log message.
++
++% PING_CHECK_LOAD_OK Ping Check hooks library loaded successfully.
++This info message indicates that the Ping Check hooks library has
++been loaded successfully.
++
++% PING_CHECK_MGR_CHANNEL_DOWN Ping Channel has shut down, ping checking will be skipped
++This error message is emitted when the underlying ICMP channel
++has stopped due to an unrecoverable error. DHCP service may continue
++to function but without performing ping checks. Prior log messages should
++provide details.
++
++% PING_CHECK_MGR_LEASE_FREE_TO_USE address %1 is free to use for %2
++Logged at debug log level 40.
++This debug message is emitted when a ping check has deemed an
++address free to use. The log arguments detail the lease address
++checked and the query which initiated the check.
++
++% PING_CHECK_MGR_NEXT_ECHO_SCHEDULED for %1, scheduling ECHO_REQUEST %2 of %3
++Logged at debug log level 50.
++This debug message is emitted when the minimum number of ECHO REQUESTs
++is greater than 1 and the next ECHO REQUEST for a given lease address has
++been scheduled.
++
++% PING_CHECK_MGR_RECEIVED_ECHO_REPLY from %1, id %2, sequence %3
++Logged at debug log level 40.
++This debug message is emitted when an ECHO REPLY message has been received.
++The log argument details the source IP address, id, and sequence number of
++the ECHO REPLY.
++
++% PING_CHECK_MGR_RECEIVED_UNEXPECTED_ECHO_REPLY from %1, id %2, sequence %3 received after reply-timeout expired
++Logged at debug log level 50.
++This debug message is emitted when an ECHO REPLY has been received after the
++reply-timeout has expired and is no longer of interest. This may be an errant
++ECHO REPLY or it may indicate that the reply-timeout value is too short. The
++log argument details the source IP address, id, and sequence number of the reply.
++
++% PING_CHECK_MGR_RECEIVED_UNEXPECTED_UNREACHABLE_MSG for %1, id %2, sequence %3 received after reply-timeout expired
++Logged at debug log level 50.
++This debug message is emitted when an UNREACHABLE message has been received
++after the reply-timeout has expired and is no longer of interest. This may
++be an errant message or it may indicate that the reply-timeout value is
++too short.
++
++% PING_CHECK_MGR_RECEIVED_UNREACHABLE_MSG for %1, id %2, sequence %3
++Logged at debug log level 50.
++This debug message is emitted when an UNREACHABLE message has been received.
++The log argument details the target IP address, id, and sequence number from
++the embedded ECHO REQUEST.
++
++% PING_CHECK_MGR_REPLY_RECEIVED_ERROR an error occurred processing an ICMP reply message %1
++This error message is emitted when an error occurred while processing an inbound
++ICMP message. The log argument describes the specific error.
++
++% PING_CHECK_MGR_REPLY_TIMEOUT_EXPIRED for %1, ECHO REQUEST %2 of %3, reply-timeout %4
++Logged at debug log level 50.
++This debug message is emitted when no reply to an ECHO REQUEST is
++received before the configured timeout value, `reply-timeout`, expires.
++The log arguments provide the details.
++
++% PING_CHECK_MGR_SEND_COMPLETED_ERROR an error occurred in the send completion callback %1
++This error message is emitted when an unexpected error occurred after the completion of
++a successful write to the PingChannel socket. The log argument describes the
++specific error.
++
++% PING_CHECK_MGR_STARTED ping channel operations are running, number of threads %1
++This message is emitted when the ping check channel has been opened
++and is ready to process requests. The log argument includes the number of
++threads in the channel's thread pool.
++
++% PING_CHECK_MGR_STARTED_SINGLE_THREADED single-threaded ping channel operations are running
++This message is emitted when the ping check channel has been opened
++and is ready to process requests in single-threaded mode.
++
++% PING_CHECK_MGR_START_PING_CHECK for %1, initiated by %2
++Logged at debug log level 40.
++This debug message is emitted when a ping check for an address
++has been initiated. The log arguments detail the lease address to
++ping and the query which initiated the check.
++
++% PING_CHECK_MGR_STOPPED channel operations have stopped
++This message is emitted when the ping check channel operations
++have been stopped.
++
++% PING_CHECK_MGR_STOPPING ping channel operations are stopping
++Logged at debug log level 40.
++This debug message is emitted when the ping check channel is stopping
++operations, typically due to a configuration event or server shutdown.
++
++% PING_CHECK_MGR_SUBNET_CONFIG_FAILED user-context for subnet id %1 contains invalid ping-check %2
++This error message indicates that a subnet was updated via subnet commands
++and its 'user-context' contains invalid 'ping-check' configuration. The
++server will log the error once and then use global ping-check parameters
++for the subnet until the configuration is corrected.
++
++% PING_CHECK_PAUSE_FAILED Pausing ping channel operations failed %1
++This error message is emitted when an unexpected error occurred while
++attempting to pause the ping channel's thread pool. This error is highly
++unlikely and indicates a programmatic issue that should be reported as
++a defect.
++
++% PING_CHECK_PAUSE_ILLEGAL Pausing ping channel operations not allowed %1
++This error message is emitted when an illegal attempt is made to pause the
++ping channel's thread pool. It indicates that a channel thread attempted to
++use a critical section, which would result in a deadlock. This error is
++highly unlikely and indicates a programmatic issue that should be reported
++as a defect.
++
++% PING_CHECK_PAUSE_PERMISSIONS_FAILED Permissions check for ping-channel pause failed %1
++This error message is emitted when an unexpected error occurred while
++validating an attempt to pause the ping channel's thread pool. This error
++is highly unlikely and indicates a programmatic issue that should be
++reported as a defect.
++
++% PING_CHECK_RESUME_FAILED Resuming ping channel operations failed %1
++This error message is emitted when an unexpected error occurred while
++attempting to resume operation of the ping channel's thread pool. This
++error is highly unlikely and indicates a programmatic issue that should
++be reported as a defect.
++
++% PING_CHECK_UNEXPECTED_READ_ERROR could not start next socket read %1
++This error message occurs when initiating an asynchronous read on the ICMP
++socket failed in an unexpected fashion. The details of the error are provided
++as an argument of the log message.
++
++% PING_CHECK_UNEXPECTED_WRITE_ERROR could not start next socket write %1
++This error message occurs when initiating an asynchronous write on the ICMP
++socket failed in an unexpected fashion. The details of the error are provided
++as an argument of the log message.
++
++% PING_CHECK_UNLOAD Ping Check hooks library has been unloaded
++This info message indicates that the Ping Check hooks library has been
++unloaded.
+diff --git a/src/hooks/dhcp/ping_check/ping_check_mgr.cc b/src/hooks/dhcp/ping_check/ping_check_mgr.cc
+new file mode 100644
+index 0000000000..cb4f2ee1dc
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_mgr.cc
+@@ -0,0 +1,798 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++
++#include <ping_check_mgr.h>
++#include <ping_check_log.h>
++#include <dhcpsrv/cfgmgr.h>
++#include <hooks/hooks_manager.h>
++#include <util/multi_threading_mgr.h>
++#include <util/chrono_time_utils.h>
++
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::dhcp;
++using namespace isc::data;
++using namespace isc::hooks;
++using namespace isc::util;
++using namespace std;
++using namespace std::chrono;
++
++namespace ph = std::placeholders;
++
++namespace isc {
++namespace ping_check {
++
++PingCheckMgr::PingCheckMgr()
++ : io_service_(new IOService()), thread_pool_(),
++ store_(new PingContextStore()),
++ channel_(),
++ config_cache_(new ConfigCache()),
++ mutex_(new mutex()),
++ suspended_(false) {
++}
++
++PingCheckMgr::PingCheckMgr(uint32_t num_threads,
++ uint32_t min_echos,
++ uint32_t reply_timeout)
++ : io_service_(new IOService()), thread_pool_(),
++ store_(new PingContextStore()),
++ channel_(),
++ config_cache_(new ConfigCache()),
++ mutex_(new mutex()),
++ suspended_(false) {
++ PingCheckConfigPtr config(new PingCheckConfig());
++ config->setMinPingRequests(min_echos);
++ config->setReplyTimeout(reply_timeout);
++ config->setPingChannelThreads(num_threads);
++ config_cache_->setGlobalConfig(config);
++}
++
++PingCheckMgr::~PingCheckMgr() {
++ stop();
++}
++
++void
++PingCheckMgr::configure(ConstElementPtr params) {
++ if (!params) {
++ isc_throw(dhcp::DhcpConfigError, "params must not be null");
++ }
++
++ if (params->getType() != Element::map) {
++ isc_throw(dhcp::DhcpConfigError, "params must be an Element::map");
++ }
++
++ PingCheckConfigPtr config(new PingCheckConfig());
++ config->parse(params);
++ config_cache_->setGlobalConfig(config);
++}
++
++void
++PingCheckMgr::updateSubnetConfig(SrvConfigPtr server_config) {
++ // Iterate over subnets and cache configurations for each.
++ ConfigCachePtr local_cache(new ConfigCache());
++ local_cache->setGlobalConfig(config_cache_->getGlobalConfig());
++ auto const& subnets = server_config->getCfgSubnets4()->getAll();
++ for (auto const& subnet : (*subnets)) {
++ auto user_context = subnet->getContext();
++ local_cache->parseAndCacheConfig(subnet->getID(), user_context);
++ }
++
++ // No errors above, replace the existing cache.
++ config_cache_ = local_cache;
++}
++
++const PingCheckConfigPtr
++PingCheckMgr::getGlobalConfig() const {
++ return (config_cache_->getGlobalConfig());
++}
++
++const PingCheckConfigPtr
++PingCheckMgr::getScopedConfig(Lease4Ptr& lease) {
++ if (!lease) {
++ // This really shouldn't happen.
++        isc_throw(InvalidOperation, "PingCheckMgr::getScopedConfig() - lease cannot be null");
++ }
++
++ auto subnet_id = lease->subnet_id_;
++
++ // If the cache is stale, update it. We do this to catch subnets that have been updated
++ // via subnet_cmds.
++ auto server_config = CfgMgr::instance().getCurrentCfg();
++ auto const& subnet = server_config->getCfgSubnets4()->getBySubnetId(subnet_id);
++ if (!subnet) {
++ // This really shouldn't happen.
++ isc_throw(InvalidOperation, "PingCheckMgr::getScopedConfig() - "
++ "no subnet for id: " << subnet_id
++ << ", for lease address: " << lease->addr_);
++ }
++
++    // If the cache is stale, flush it; we'll lazily re-init subnets as we see them.
++ if (subnet->getModificationTime() > config_cache_->getLastFlushTime()) {
++ config_cache_->flush();
++ }
++
++ // If we don't find an entry for this subnet then we haven't seen it
++ // before so parse and cache it. If the subnet doesn't specify ping-check
++ // we cache an empty entry.
++ PingCheckConfigPtr config;
++ if (!config_cache_->findConfig(subnet_id, config)) {
++ auto user_context = subnet->getContext();
++ try {
++ config = config_cache_->parseAndCacheConfig(subnet_id, user_context);
++ } catch (const std::exception& ex) {
++            // We emit an error and then cache an empty entry. This causes us
++            // to log the error once and then default to global settings
++            // afterward, avoiding relentless logging and failures. Remember,
++            // this happens because a subnet was updated with an invalid
++            // user-context via subnet_cmds.
++ LOG_ERROR(ping_check_logger, PING_CHECK_MGR_SUBNET_CONFIG_FAILED)
++ .arg(subnet_id)
++ .arg(ex.what());
++ config_cache_->cacheConfig(subnet_id, config);
++ }
++ }
++
++ // Return subnet's ping-check config if it specified one, otherwise
++ // return the global config.
++ return (config ? config : config_cache_->getGlobalConfig());
++}
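++
++// For illustration (shape inferred from the parsing above): a subnet scopes
++// its own parameters by embedding a "ping-check" map in its user-context,
++// e.g. to disable checking for a single subnet:
++//
++//   "subnet4": [{
++//       "subnet": "192.0.2.0/24",
++//       "user-context": {
++//           "ping-check": { "enable-ping-check": false }
++//       }
++//   }]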
++
++void
++PingCheckMgr::startPing(dhcp::Lease4Ptr& lease, dhcp::Pkt4Ptr& query, hooks::ParkingLotHandlePtr& parking_lot,
++ const PingCheckConfigPtr& config) {
++ if (checkSuspended()) {
++ // Server should not be submitting requests.
++ isc_throw(InvalidOperation, "PingCheckMgr::startPing() - DHCP service is suspended!");
++ }
++
++ if (!channel_ || !channel_->isOpen()) {
++ isc_throw(InvalidOperation, "PingCheckMgr::startPing() - channel isn't open");
++ }
++
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_BASIC,
++ PING_CHECK_MGR_START_PING_CHECK)
++ .arg(lease->addr_)
++ .arg(query->getLabel());
++
++ // Adds a context to the store
++ store_->addContext(lease, query, config->getMinPingRequests(),
++ config->getReplyTimeout(), parking_lot);
++
++ // Posts a call to channel's startSend() and startRead(). This will kick-start perpetual
++ // write and read cycles if they are not already running.
++ if (channel_) {
++ channel_->startSend();
++ channel_->startRead();
++ }
++}
++
++void
++PingCheckMgr::startPing(dhcp::Lease4Ptr& lease, dhcp::Pkt4Ptr& query, hooks::ParkingLotHandlePtr& parking_lot) {
++ startPing(lease, query, parking_lot, getGlobalConfig());
++}
++
++bool
++PingCheckMgr::nextToSend(IOAddress& next) {
++ if (checkSuspended()) {
++ return (false);
++ }
++
++ PingContextPtr context = store_->getNextToSend();
++ if (!context) {
++ return (false);
++ }
++
++ next = context->getTarget();
++ // Transition to sending.
++ context->setState(PingContext::SENDING);
++ store_->updateContext(context);
++
++ return (true);
++}
++
++void
++PingCheckMgr::sendCompleted(const ICMPMsgPtr& echo, bool send_failed) {
++ if (checkSuspended()) {
++ return;
++ }
++
++ try {
++ if (!echo) {
++ isc_throw(BadValue, "PingCheckMgr::sendCompleted() - echo is empty");
++ }
++
++ if (echo->getType() != ICMPMsg::ECHO_REQUEST) {
++ isc_throw(BadValue, "PingCheckMgr::sendCompleted() - message type: "
++ << echo->getType() << " is not an ECHO_REQUEST");
++ }
++
++ // Update the context associated with this ECHO_REQUEST.
++ PingContextPtr context = store_->getContextByAddress(echo->getDestination());
++ if (!context) {
++            isc_throw(Unexpected, "PingCheckMgr::sendCompleted() - "
++                      "no context found for: " << echo->getDestination());
++ }
++
++ if (send_failed) {
++ // Recoverable error occurred which means we can't get to the target's
++ // network (interface down?). Treat this the same as TARGET UNREACHABLE.
++ finishFree(context);
++ } else {
++ // Transition the context to WAITING_FOR_REPLY.
++ context->beginWaitingForReply();
++ store_->updateContext(context);
++ }
++
++ // Update the expiration timer if necessary.
++ setNextExpiration();
++ } catch (const std::exception& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_MGR_SEND_COMPLETED_ERROR)
++ .arg(ex.what());
++ }
++}
++
++void
++PingCheckMgr::replyReceived(const ICMPMsgPtr& reply) {
++ if (checkSuspended()) {
++ return;
++ }
++
++ try {
++ if (!reply) {
++            isc_throw(BadValue, "PingCheckMgr::replyReceived() - reply is empty");
++ }
++
++ switch (reply->getType()) {
++ case ICMPMsg::ECHO_REPLY:
++ handleEchoReply(reply);
++ break;
++ case ICMPMsg::TARGET_UNREACHABLE:
++ // Extract embedded ECHO REQUEST
++ handleTargetUnreachable(reply);
++ break;
++ default:
++ // Ignore anything else.
++ return;
++ }
++
++ setNextExpiration();
++ } catch (const std::exception& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_MGR_REPLY_RECEIVED_ERROR)
++ .arg(ex.what());
++ }
++}
++
++void
++PingCheckMgr::handleEchoReply(const ICMPMsgPtr& echo_reply) {
++ // Update the context associated with this ECHO_REQUEST.
++ PingContextPtr context = store_->getContextByAddress(echo_reply->getSource());
++ if (!context) {
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_DETAIL,
++ PING_CHECK_MGR_RECEIVED_UNEXPECTED_ECHO_REPLY)
++ .arg(echo_reply->getSource())
++ .arg(echo_reply->getId())
++ .arg(echo_reply->getSequence());
++ return;
++ }
++
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_BASIC,
++ PING_CHECK_MGR_RECEIVED_ECHO_REPLY)
++ .arg(echo_reply->getSource())
++ .arg(echo_reply->getId())
++ .arg(echo_reply->getSequence());
++
++ context->setState(PingContext::TARGET_IN_USE);
++ store_->updateContext(context);
++
++ // If parking is employed, unpark the query from the parking lot,
++ // and set the offer_address_in_use argument in the callout handle
++ // to true, indicating to the server that the lease should be declined
++ // and the DHCPOFFER discarded.
++ auto parking_lot = context->getParkingLot();
++ if (parking_lot) {
++ auto query = context->getQuery();
++ auto callout_handle = query->getCalloutHandle();
++ callout_handle->setArgument("offer_address_in_use", true);
++ parking_lot->unpark(query);
++ }
++
++ // Remove the context from the store.
++ store_->deleteContext(context);
++}
++
++void
++PingCheckMgr::handleTargetUnreachable(const ICMPMsgPtr& unreachable) {
++ // Unpack the embedded ECHO REQUEST.
++ ICMPMsgPtr embedded_echo;
++ auto payload = unreachable->getPayload();
++ embedded_echo = ICMPMsg::unpack(payload.data(), payload.size());
++
++ // Fetch the context associated with the ECHO_REQUEST.
++ PingContextPtr context = store_->getContextByAddress(embedded_echo->getDestination());
++ if (!context) {
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_DETAIL,
++ PING_CHECK_MGR_RECEIVED_UNEXPECTED_UNREACHABLE_MSG)
++ .arg(embedded_echo->getDestination())
++ .arg(embedded_echo->getId())
++ .arg(embedded_echo->getSequence());
++ return;
++ }
++
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_DETAIL,
++ PING_CHECK_MGR_RECEIVED_UNREACHABLE_MSG)
++ .arg(embedded_echo->getDestination())
++ .arg(embedded_echo->getId())
++ .arg(embedded_echo->getSequence());
++
++ // Render the address usable.
++ finishFree(context);
++}
++
++void
++PingCheckMgr::finishFree(const PingContextPtr& context) {
++ context->setState(PingContext::TARGET_FREE);
++ store_->updateContext(context);
++
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_BASIC,
++ PING_CHECK_MGR_LEASE_FREE_TO_USE)
++ .arg(context->getTarget())
++ .arg(context->getQuery()->getLabel());
++
++ // If parking is employed, unpark the query from the parking lot,
++ // and set the offer_address_in_use argument in the callout handle
++ // to false, indicating to the server that the lease is available
++ // and the DHCPOFFER should be sent to the client.
++ auto parking_lot = context->getParkingLot();
++ if (parking_lot) {
++ auto query = context->getQuery();
++ auto callout_handle = query->getCalloutHandle();
++ callout_handle->setArgument("offer_address_in_use", false);
++ parking_lot->unpark(context->getQuery());
++ }
++
++ // Remove the context from the store.
++ store_->deleteContext(context);
++}
++
++void
++PingCheckMgr::channelShutdown() {
++ LOG_ERROR(ping_check_logger, PING_CHECK_MGR_CHANNEL_DOWN);
++ if (io_service_) {
++ // As this is a callback that may be invoked by a channel
++ // thread we post a call to stopService() rather than call
++ // it directly.
++ io_service_->post([&]() { stopService(true); });
++ }
++}
++
++size_t
++PingCheckMgr::processExpiredSince(const TimeStamp& since /* = PingContext::now() */) {
++ auto expired_pings = store_->getExpiredSince(since);
++ size_t more_pings = 0;
++ for (auto const& context : *(expired_pings)) {
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_DETAIL,
++ PING_CHECK_MGR_REPLY_TIMEOUT_EXPIRED)
++ .arg(context->getTarget())
++ .arg(context->getEchosSent())
++ .arg(context->getMinEchos())
++ .arg(context->getReplyTimeout());
++
++ if (context->getEchosSent() < context->getMinEchos()) {
++ doNextEcho(context);
++ ++more_pings;
++ } else {
++ finishFree(context);
++ }
++ }
++
++ return (more_pings);
++}
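++
++// Worked example (timing approximate): with min-ping-requests 3 and
++// reply-timeout 100, an address that never answers is re-queued twice via
++// doNextEcho() and finally handed to finishFree() on the third expiry:
++// three ECHO REQUESTs and roughly 300 ms end to end.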
++
++void
++PingCheckMgr::doNextEcho(const PingContextPtr& context) {
++ // Position to do another ping by re-entering WAITING_TO_SEND
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_DETAIL,
++ PING_CHECK_MGR_NEXT_ECHO_SCHEDULED)
++ .arg(context->getTarget())
++ .arg(context->getEchosSent() + 1)
++ .arg(context->getMinEchos());
++
++ context->beginWaitingToSend();
++ store_->updateContext(context);
++}
++
++TimeStamp
++PingCheckMgr::getNextExpiry() {
++ MultiThreadingLock lock(*mutex_);
++ return (next_expiry_);
++}
++
++void
++PingCheckMgr::setNextExpiration() {
++ MultiThreadingLock lock(*mutex_);
++ if (checkSuspendedInternal()) {
++ return;
++ }
++
++ setNextExpirationInternal();
++}
++
++void
++PingCheckMgr::setNextExpirationInternal() {
++ // Find the context that expires soonest.
++ PingContextPtr context = store_->getExpiresNext();
++ if (context) {
++ // if the context's expiry is sooner than current expiry
++ // reschedule expiration timer
++ if ((next_expiry_ == PingContext::EMPTY_TIME()) ||
++ (context->getNextExpiry() < next_expiry_)) {
++ auto now = PingContext::now();
++ auto timeout = duration_cast<milliseconds>(context->getNextExpiry() - now);
++ /// @todo For now we'll impose a 2 ms minimum to avoid thrashing the timer.
++ timeout = (timeout > milliseconds(2) ? timeout : milliseconds(2));
++ next_expiry_ = now + timeout;
++ expiration_timer_->setup(std::bind(&PingCheckMgr::expirationTimedOut,
++ shared_from_this()),
++ timeout.count(), IntervalTimer::ONE_SHOT);
++ }
++ } else {
++ // Nothing waiting to expire. Cancel the timer.
++ cancelExpirationTimerInternal();
++ }
++}
++
++void
++PingCheckMgr::cancelExpirationTimer() {
++ MultiThreadingLock lock(*mutex_);
++ cancelExpirationTimerInternal();
++}
++
++void
++PingCheckMgr::cancelExpirationTimerInternal() {
++ if (expiration_timer_) {
++ expiration_timer_->cancel();
++ next_expiry_ = PingContext::EMPTY_TIME();
++ }
++}
++
++void
++PingCheckMgr::expirationTimedOut() {
++ MultiThreadingLock lock(*mutex_);
++ if (checkSuspendedInternal()) {
++ return;
++ }
++
++ // Process everything that has expired since current time.
++ auto more_pings = processExpiredSince();
++
++ // Update the expiration timer.
++ next_expiry_ = PingContext::EMPTY_TIME();
++ setNextExpirationInternal();
++
++    // If any contexts were re-queued for another echo, poke the channel
++    // to make sure the write and read cycles keep moving.
++ if (more_pings && channel_) {
++ channel_->startSend();
++ channel_->startRead();
++ }
++}
++
++CalloutHandle::CalloutNextStep
++PingCheckMgr::shouldPing(Lease4Ptr& lease, Pkt4Ptr& query,
++ Lease4Ptr& old_lease,
++ const PingCheckConfigPtr& config) {
++
++ // If ping-check is disabled or the channel isn't open,
++ // drop the query from parking and release the offer to the client.
++ if (!config->getEnablePingCheck() || !channel_ || !channel_->isOpen()) {
++ return (CalloutHandle::CalloutNextStep::NEXT_STEP_CONTINUE);
++ }
++
++    // If we're already running a check on this address then drop the
++    // query from parking and discard the offer.
++ if (store_->getContextByAddress(lease->addr_)) {
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_BASIC,
++ PING_CHECK_DUPLICATE_CHECK)
++ .arg(lease->addr_)
++ .arg(query->getLabel());
++ return (CalloutHandle::CalloutNextStep::NEXT_STEP_DROP);
++ }
++
++ // If there's a previous lease that belongs to this client and
++ // it was touched by the client less than ping-cltt-secs ago then
++ // no check is needed. Drop the query from parking and release the
++    // offer to the client.
++ if (old_lease && (old_lease->addr_ == lease->addr_)) {
++ if (old_lease->belongsToClient(lease->hwaddr_, lease->client_id_)) {
++ auto now = time(0);
++ if ((now - old_lease->cltt_) < config->getPingClttSecs()) {
++ return (CalloutHandle::CalloutNextStep::NEXT_STEP_CONTINUE);
++ }
++ }
++ }
++
++ // Leave it parked and do the ping check.
++ return (CalloutHandle::CalloutNextStep::NEXT_STEP_PARK);
++}
++
++void
++PingCheckMgr::startService(NetworkStatePtr network_state) {
++ network_state_ = network_state;
++ io_service_->post([&]() { start(); });
++}
++
++bool
++PingCheckMgr::checkSuspended() {
++ MultiThreadingLock lock(*mutex_);
++ return (checkSuspendedInternal());
++}
++
++bool
++PingCheckMgr::checkSuspendedInternal() {
++ if (!network_state_ || network_state_->isServiceEnabled()) {
++ suspended_ = false;
++ } else {
++ if (!suspended_) {
++ suspended_ = true;
++
++ // Flush the context store, dropping parked queries.
++ flush(false);
++ }
++ }
++
++ return (suspended_);
++}
++
++void
++PingCheckMgr::stopService(bool finish_free) {
++ // Pause the thread pool while we flush the store.
++ pause();
++
++ // Flush the context store. If finish_free is true
++ // the flush will treat the remaining context lease
++ // addresses as free to use and unpark them. This
++ // will cause the server to send out the associated
++ // OFFERs. If it's false we just drop them from
++ // the parking lot.
++ flush(finish_free);
++
++ // Stop the thread pool, destroy the channel and the like.
++ stop();
++}
++
++void
++PingCheckMgr::start() {
++ if (MultiThreadingMgr::instance().isTestMode()) {
++ return;
++ }
++ if (!MultiThreadingMgr::instance().getMode()) {
++ startSingleThreaded();
++ return;
++ }
++
++ // We must be in multi-threading mode.
++ // Add critical section callbacks.
++ MultiThreadingMgr::instance().addCriticalSectionCallbacks("PING_CHECK",
++ std::bind(&PingCheckMgr::checkPermissions, this),
++ std::bind(&PingCheckMgr::pause, this),
++ std::bind(&PingCheckMgr::resume, this));
++
++    // Punt if we're already started.
++    if (thread_pool_ && !thread_pool_->isStopped()) {
++        isc_throw(InvalidOperation, "PingCheckMgr already started!");
++    }
++
++ try {
++ auto config = config_cache_->getGlobalConfig();
++ auto use_threads = (config->getPingChannelThreads() ? config->getPingChannelThreads()
++ : MultiThreadingMgr::instance().getThreadPoolSize());
++ thread_pool_.reset(new IoServiceThreadPool(IOServicePtr(), use_threads, true));
++ IOServicePtr pool_ios = thread_pool_->getIOService();
++ channel_ = createChannel(pool_ios);
++ channel_->open();
++ expiration_timer_.reset(new IntervalTimer(pool_ios));
++ thread_pool_->run();
++ LOG_INFO(ping_check_logger, PING_CHECK_MGR_STARTED)
++ .arg(use_threads);
++ } catch (const std::exception& ex) {
++ channel_.reset();
++ thread_pool_.reset();
++        isc_throw(Unexpected, "PingCheckMgr::start() failed: " << ex.what());
++ }
++}
++
++void
++PingCheckMgr::startSingleThreaded() {
++ try {
++ auto config = config_cache_->getGlobalConfig();
++ channel_ = createChannel(io_service_);
++ channel_->open();
++ expiration_timer_.reset(new IntervalTimer(io_service_));
++ LOG_INFO(ping_check_logger, PING_CHECK_MGR_STARTED_SINGLE_THREADED);
++ } catch (const std::exception& ex) {
++ channel_.reset();
++        isc_throw(Unexpected, "PingCheckMgr::startSingleThreaded() failed: " << ex.what());
++ }
++}
++
++PingChannelPtr
++PingCheckMgr::createChannel(IOServicePtr io_service) {
++ return (PingChannelPtr(new PingChannel(io_service,
++ std::bind(&PingCheckMgr::nextToSend,
++ this, ph::_1),
++ std::bind(&PingCheckMgr::sendCompleted,
++ this, ph::_1, ph::_2),
++ std::bind(&PingCheckMgr::replyReceived,
++ this, ph::_1),
++ std::bind(&PingCheckMgr::channelShutdown,
++ this))));
++}
++
++void
++PingCheckMgr::checkPermissions() {
++    // Since this function is used as a CS callback, all exceptions must be
++    // suppressed, unlikely though they may be.
++ try {
++ if (thread_pool_) {
++ thread_pool_->checkPausePermissions();
++ }
++ } catch (const isc::MultiThreadingInvalidOperation& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_PAUSE_ILLEGAL)
++ .arg(ex.what());
++ // The exception needs to be propagated to the caller of the
++ // @ref MultiThreadingCriticalSection constructor.
++ throw;
++ } catch (const std::exception& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_PAUSE_PERMISSIONS_FAILED)
++ .arg(ex.what());
++ }
++}
++
++void
++PingCheckMgr::pause() {
++ if (!MultiThreadingMgr::instance().getMode()) {
++ return;
++ }
++
++    // Since this function is used as a CS callback, all exceptions must be
++    // suppressed, unlikely though they may be.
++ try {
++ // Cancel the expiration timer.
++ cancelExpirationTimer();
++
++ // Pause the thread pool.
++ if (thread_pool_) {
++ thread_pool_->pause();
++ }
++ } catch (const std::exception& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_PAUSE_FAILED)
++ .arg(ex.what());
++ }
++}
++
++void
++PingCheckMgr::resume() {
++ if (!MultiThreadingMgr::instance().getMode()) {
++ return;
++ }
++
++    // Since this function is used as a CS callback, all exceptions must be
++    // suppressed, unlikely though they may be.
++ try {
++ if (thread_pool_) {
++ thread_pool_->run();
++ }
++
++ // Restore the expiration timer.
++ setNextExpiration();
++ } catch (const std::exception& ex) {
++ LOG_ERROR(ping_check_logger, PING_CHECK_RESUME_FAILED)
++ .arg(ex.what());
++ }
++}
++
++void
++PingCheckMgr::stop() {
++ LOG_DEBUG(ping_check_logger, isc::log::DBGLVL_TRACE_BASIC, PING_CHECK_MGR_STOPPING);
++
++ // Cancel the expiration timer.
++ cancelExpirationTimer();
++
++ if (channel_) {
++ channel_->close();
++ }
++
++ if (thread_pool_) {
++ // Remove critical section callbacks.
++ MultiThreadingMgr::instance().removeCriticalSectionCallbacks("PING_CHECK");
++
++ // Stop the thread pool.
++ thread_pool_->stop();
++
++ thread_pool_->getIOService()->stopAndPoll();
++
++ // Ditch the thread_pool
++ thread_pool_.reset();
++ }
++    // Ditch the timer. In MT mode it holds a reference to the thread pool's
++    // IOService, so it must be released as part of shutdown.
++ expiration_timer_.reset();
++
++ // Get rid of the channel.
++ channel_.reset();
++
++ if (io_service_) {
++ io_service_->stopAndPoll();
++ }
++
++ LOG_INFO(ping_check_logger, PING_CHECK_MGR_STOPPED);
++}
++
++bool
++PingCheckMgr::isRunning() {
++    // In ST mode, running equates to an open channel.
++ if (!MultiThreadingMgr::instance().getMode()) {
++ return (channel_ && channel_->isOpen());
++ }
++
++ if (thread_pool_) {
++ return (thread_pool_->isRunning());
++ }
++
++ return (false);
++}
++
++bool
++PingCheckMgr::isStopped() {
++ // In ST mode, stopped equates to no channel.
++ if (!MultiThreadingMgr::instance().getMode()) {
++ return (!channel_);
++ }
++
++ if (thread_pool_) {
++ return (thread_pool_->isStopped());
++ }
++
++ return (true);
++}
++
++bool
++PingCheckMgr::isPaused() {
++ if (thread_pool_) {
++ return (thread_pool_->isPaused());
++ }
++
++ return (false);
++}
++
++void
++PingCheckMgr::flush(bool finish_free /* = false */) {
++ if (!store_) {
++ return;
++ }
++
++ // Fetch them all.
++ auto contexts = store_->getAll();
++ for (auto const& context : *contexts) {
++ if (finish_free) {
++ finishFree(context);
++ } else {
++ auto parking_lot = context->getParkingLot();
++ if (parking_lot) {
++ parking_lot->drop(context->getQuery());
++ }
++ }
++ }
++
++ store_->clear();
++}
++
++} // end of namespace ping_check
++} // end of namespace isc
+diff --git a/src/hooks/dhcp/ping_check/ping_check_mgr.h b/src/hooks/dhcp/ping_check/ping_check_mgr.h
+new file mode 100644
+index 0000000000..42d11c1b48
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_check_mgr.h
+@@ -0,0 +1,436 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef PING_CHECK_MGR_H
++#define PING_CHECK_MGR_H
++
++#include <asiolink/interval_timer.h>
++#include <asiolink/io_address.h>
++#include <asiolink/io_service.h>
++#include <asiolink/io_service_thread_pool.h>
++#include <cc/data.h>
++#include <cc/simple_parser.h>
++#include <dhcpsrv/srv_config.h>
++#include <hooks/callout_handle.h>
++#include <dhcp/pkt4.h>
++#include <dhcpsrv/lease.h>
++#include <dhcpsrv/network_state.h>
++#include <ping_context_store.h>
++#include <ping_channel.h>
++#include <config_cache.h>
++
++#include <boost/enable_shared_from_this.hpp>
++
++#include <mutex>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief Defines a pointer to a PingContextStore.
++typedef boost::shared_ptr<PingContextStore> PingContextStorePtr;
++
++/// @brief Ping Check Manager.
++///
++/// PingCheckMgr carries out the higher-order management of requests for ping
++/// checks from the server. It is a singleton, instantiated when the library
++/// is loaded. It is responsible for:
++/// 1. Parsing and applying configuration.
++/// 2. Maintaining an in-memory store of current ping requests (PingContextStore).
++/// 3. Creating and managing the PingChannel through which individual ICMP ECHO/REPLY
++/// cycles are conducted.
++/// 4. When in multi-threaded mode, it creates an IOService-driven thread pool
++///    and synchronizes its state with Kea core MT.
++class PingCheckMgr : public boost::enable_shared_from_this<PingCheckMgr> {
++public:
++ /// @brief Constructor.
++ explicit PingCheckMgr();
++
++ /// @brief Constructor.
++ ///
++ /// This constructor is used in testing. It permits setting some basic behavior
++ /// parameters directly, rather than requiring calls to @c configure().
++ ///
++ /// @param num_threads number of threads to use in the thread pool (0 means follow
++ /// core thread pool size).
++ /// @param min_echos minimum number of ECHO REQUESTs sent without replies
++ /// received required to declare an address free to offer. Defaults to 1,
++ /// must be greater than zero.
++ /// @param reply_timeout maximum number of milliseconds to wait for an
++ /// ECHO REPLY after an ECHO REQUEST has been sent. Defaults to 100.
++ PingCheckMgr(uint32_t num_threads,
++ uint32_t min_echos = 1,
++ uint32_t reply_timeout = 100);
++
++ /// @brief Destructor.
++ virtual ~PingCheckMgr();
++
++ /// @brief Configure the PingCheckMgr.
++ ///
++ /// @param params map containing the hook library parameters.
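++    ///
++    /// The expected parameter keys, taken from this patch's unit tests
++    /// (values below are purely illustrative):
++    /// @code
++    /// {
++    ///     "enable-ping-check": true,
++    ///     "min-ping-requests": 1,
++    ///     "reply-timeout": 100,
++    ///     "ping-cltt-secs": 60,
++    ///     "ping-channel-threads": 0
++    /// }
++    /// @endcode
++    ///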
++ /// @throw BadValue and similar exceptions on error.
++ void configure(data::ConstElementPtr params);
++
++ /// @brief Update the cache of subnet ping check configurations.
++ ///
++ /// Iterates over the subnets in the given server configuration,
++ /// and caches their ping-check configuration.
++ ///
++ /// @param server_config Server configuration containing the
++ /// configured subnets to process.
++ void updateSubnetConfig(dhcp::SrvConfigPtr server_config);
++
++ /// @brief Creates a ping channel instance.
++ ///
++ /// @param io_service IOService that will drive the channel.
++ ///
++ /// @return pointer to the newly created channel.
++ virtual PingChannelPtr createChannel(asiolink::IOServicePtr io_service);
++
++ /// @brief Initiates a ping check for a given lease and its associated
++ /// DHCPDISCOVER packet.
++ ///
++ /// Adds a context to the store and posts a call to @c PingChannel::startSend().
++ ///
++ /// @param lease lease whose address needs to be ping checked.
++ /// @param query parked DHCPDISCOVER associated with the lease.
++ /// @param parking_lot parking lot in which query is parked. If empty,
++ /// parking is assumed to not be employed.
++ /// @param config configuration parameters to employ.
++ void startPing(dhcp::Lease4Ptr& lease, dhcp::Pkt4Ptr& query,
++ hooks::ParkingLotHandlePtr& parking_lot,
++ const PingCheckConfigPtr& config);
++
++ /// @brief Initiates a ping check for a given lease and its associated
++ /// DHCPDISCOVER packet.
++ ///
++ /// Convenience method used in unit tests which uses global
++ /// configuration parameters only.
++ ///
++ /// @param lease lease whose address needs to be ping checked.
++ /// @param query parked DHCPDISCOVER associated with the lease.
++ /// @param parking_lot parking lot in which query is parked. If empty,
++ /// parking is assumed to not be employed.
++ void startPing(dhcp::Lease4Ptr& lease, dhcp::Pkt4Ptr& query,
++ hooks::ParkingLotHandlePtr& parking_lot);
++
++    /// @brief Callback passed to PingChannel to retrieve the next
++ /// address to check.
++ ///
++ /// Fetches the context which has been in the WAITING_TO_SEND state the
++ /// longest and returns its lease address.
++ ///
++ /// @param[out] next upon return it will contain the next target address.
++ /// Contents are only meaningful if the function returns true.
++ ///
++    /// @return True if another target address exists, false otherwise.
++ virtual bool nextToSend(asiolink::IOAddress& next);
++
++ /// @brief Callback passed to PingChannel to invoke when an ECHO REQUEST
++ /// send has completed.
++ ///
++    /// If the send completed successfully we'll transition the context to
++    /// WAITING_FOR_REPLY, update the context in the store, and then update
++    /// the next expiration.
++ ///
++    /// If the send failed, this implies that a recoverable error occurred, such
++    /// as an interface being down, and thus there is currently no way to send
++    /// the ping to the target network. We'll treat this the same as an ICMP
++ /// TARGET_UNREACHABLE and release the OFFER by calling @c finishFree().
++ ///
++    /// @param echo ICMP echo message that was sent.
++ /// @param send_failed True if the send completed with a non-fatal error,
++ /// false otherwise.
++ virtual void sendCompleted(const ICMPMsgPtr& echo, bool send_failed);
++
++ /// @brief Callback passed to PingChannel to invoke when an ICMP
++ /// reply has been received.
++ ///
++    /// If the reply is an ECHO REPLY, it is passed to
++    /// handleEchoReply(); if it is an UNREACHABLE message, it
++    /// is passed to handleTargetUnreachable(); any other message
++    /// type is dropped and the function returns.
++ /// Upon handler completion, it calls setNextExpiration() to
++ /// update the expiration timer.
++ ///
++ /// @param reply ICMP message that was received.
++ virtual void replyReceived(const ICMPMsgPtr& reply);
++
++ /// @brief Process an ECHO REPLY message.
++ ///
++ /// @param echo_reply ICMP ECHO REPLY message to process.
++ void handleEchoReply(const ICMPMsgPtr& echo_reply);
++
++ /// @brief Process an UNREACHABLE message.
++ ///
++ /// @param unreachable ICMP UNREACHABLE message to process.
++ void handleTargetUnreachable(const ICMPMsgPtr& unreachable);
++
++ /// @brief Processes a context whose address has been deemed free to use.
++ ///
++ /// -# Moves the context to TARGET_FREE state
++ /// -# Updates the context in the store
++ /// -# Unparks the query which will release the DHCPOFFER to the client
++ /// -# Invokes the target free callback (do we still need this?)
++    /// -# Deletes the context from the store
++ ///
++ /// @param context context to process.
++ void finishFree(const PingContextPtr& context);
++
++ /// @brief Position a context to do another ping test.
++ ///
++    /// -# Moves the context to the WAITING_TO_SEND state
++ /// -# Updates the context in the store
++ ///
++ /// @param context context to process.
++ void doNextEcho(const PingContextPtr& context);
++
++ /// @brief Callback passed to PingChannel to invoke when it shuts down.
++ ///
++ /// Logs the shutdown and then posts a call to @c stopService() to the
++ /// main IOService.
++ virtual void channelShutdown();
++
++ /// @brief Performs expiration processing for contexts whose WAITING_FOR_REPLY
++ /// states expired prior to a given point in time.
++ ///
++ /// expired_pings = store_->getExpiredSince(since)
++ /// for context : expired_pings {
++ /// unpark context->getQuery()
++ /// store_->deleteContext(context)
++ /// }
++ ///
++ /// @param since point in time to select against. Defaults to current time.
++ /// @return number of contexts scheduled for another ping, zero if none.
++ virtual size_t processExpiredSince(const TimeStamp& since = PingContext::now());
++
++ /// @brief Fetches the time at which expiration timer will next expire.
++ ///
++ /// @return TimeStamp containing the next expiration time.
++ TimeStamp getNextExpiry();
++
++ /// @brief Updates the expiration timer (thread safe).
++ ///
++ /// PingContextPtr next = pings->getExpiresNext()
++ /// if next
++ /// reschedule expiration timer for next->getNextExpiry();
++ /// else
++ /// cancel expiration timer
++ virtual void setNextExpiration();
++
++ /// @brief Updates the expiration timer.
++ ///
++ /// PingContextPtr next = pings->getExpiresNext()
++ /// if next
++ /// reschedule expiration timer for next->getNextExpiry();
++ /// else
++ /// cancel expiration timer
++ virtual void setNextExpirationInternal();
++
++ /// @brief Cancels the expiration timer (thread safe).
++ void cancelExpirationTimer();
++
++ /// @brief Cancels the expiration timer.
++ void cancelExpirationTimerInternal();
++
++ /// @brief Callback passed to expiration timer to invoke on timeout.
++ virtual void expirationTimedOut();
++
++ /// @brief Determines whether or not a lease should be ping checked.
++ ///
++ /// Employs the following logic to determine if a ping-check should
++ /// be conducted:
++ ///
++ /// If there's a previous lease that belongs to this client and
++ /// it was touched by the client less than ping-cltt-secs ago,
++ /// then send the offer to the client without ping checking.
++ ///
++ /// Otherwise a ping-check is called for, leave the query parked.
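++    ///
++    /// A rough sketch of that decision (illustrative only; the
++    /// @c getPingClttSecs() accessor name is an assumption):
++    /// @code
++    /// if (old_lease && old_lease->belongsToClient(...)
++    ///     && (now - old_lease->cltt_) < config->getPingClttSecs()) {
++    ///     return (CalloutHandle::NEXT_STEP_CONTINUE);
++    /// }
++    /// return (CalloutHandle::NEXT_STEP_PARK);
++    /// @endcode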
++ ///
++ /// @param lease prospective lease to check.
++ /// @param query DHCPDISCOVER associated with the lease.
++ /// @param old_lease pre-existing lease for this client (if one).
++ /// @param config configuration parameters to employ.
++ ///
++ /// @return CalloutNextStep indicating what should happen next:
++ /// - status == PARK - ping check it
++ /// - status == CONTINUE - check not needed, release DHCPOFFER to client
++ /// - status == DROP - duplicate check, drop the duplicate DHCPOFFER
++ virtual hooks::CalloutHandle::CalloutNextStep shouldPing(dhcp::Lease4Ptr& lease,
++ dhcp::Pkt4Ptr& query,
++ dhcp::Lease4Ptr& old_lease,
++ const PingCheckConfigPtr& config);
++
++ /// @brief Check if the current thread can perform thread pool state
++ /// transition.
++ ///
++ /// @throw MultiThreadingInvalidOperation if the state transition is done on
++ /// any of the worker threads.
++ void checkPermissions();
++
++ /// @brief Performs a deferred start by posting an invocation of @c start()
++ /// to the given IOService.
++ ///
++ /// @param network_state pointer to server's networks state object.
++ void startService(dhcp::NetworkStatePtr network_state);
++
++ /// @brief Shuts down the manager's channel, flushes the store.
++ ///
++ /// This function gracefully winds down operation:
++ ///
++ /// 1. Pauses the thread pool.
++ /// 2. Flushes the context store, either finishing all contexts as free
++    ///    or just dropping them from parking, depending on the finish_free parameter.
++    /// 3. Stops the thread pool and shuts down the channel.
++ ///
++    /// @param finish_free if true, finishFree() will be invoked on all remaining
++ /// contexts in the store, otherwise their queries are simply dropped from
++ /// the parking lot.
++ void stopService(bool finish_free = false);
++
++ /// @brief Start PingChannel operations.
++ ///
++ /// Will start multi-threaded if core MT is enabled, or calls
++ /// @c startSingleThreaded() if core MT is disabled. Creates
++ /// a thread pool with its own IOService, uses that IOService
++ /// when creating the channel.
++ void start();
++
++ /// @brief Start single-threaded PingChannel operations.
++ ///
++ /// Does not create a thread pool. Uses main thread's IOService
++ /// when creating the channel.
++ void startSingleThreaded();
++
++ /// @brief Pause PingChannel operations.
++ ///
++ /// In multi-threaded mode this pauses the thread pool threads, in
++ /// single-threaded mode it does nothing.
++ void pause();
++
++ /// @brief Resume PingChannel operations.
++ ///
++ /// In multi-threaded mode this resumes the thread pool threads, in
++ /// single-threaded mode it does nothing.
++ void resume();
++
++ /// @brief Flushes the ping context store.
++ ///
++ /// This function iterates over the contexts in the store and then
++ /// either invokes finishFree() or drops their queries from parking
++ /// depending upon finish_free parameter. It assumes the operations
++ /// have ceased (i.e. thread pool is not running).
++ ///
++    /// @param finish_free if true, finishFree() will be invoked on all remaining
++ /// contexts in the store, otherwise their queries are simply dropped from
++ /// the parking lot.
++ void flush(bool finish_free = false);
++
++ /// @brief Stop PingChannel operations.
++ void stop();
++
++ /// @brief Indicates if the thread pool is running.
++ ///
++ /// @return True if the thread pool exists and it is in the RUNNING state in
++ /// multi-threaded mode, true if the channel exists and is open in single-threaded
++ /// mode, false otherwise.
++ bool isRunning();
++
++ /// @brief Indicates if the thread pool is stopped.
++ ///
++ /// @return True if the thread pool does not exist or it is in the STOPPED
++ /// state in multi-threaded mode, true if the channel does not exist in
++ /// single-threaded mode, false otherwise.
++ bool isStopped();
++
++ /// @brief Indicates if the thread pool is paused.
++ ///
++ /// @return True if the thread pool exists and it is in the PAUSED state,
++ /// false otherwise. Always returns false in single-threaded mode.
++ bool isPaused();
++
++ /// @brief Checks if operations are currently suspended due to NetworkState.
++ ///
++ /// Thread-safe wrapper around checkSuspendedInternal().
++ ///
++ /// @return True if operations are suspended, false otherwise.
++ bool checkSuspended();
++
++ /// @brief Checks if operations are currently suspended due to NetworkState.
++ ///
++ /// If DHCP service is enabled, operations are not suspended and the function
++ /// returns false. Otherwise operations, if not already suspended, are suspended
++ /// by flushing the PingContext store and the function returns true. The queries
++ /// for flushed contexts are dropped from parking and thus their offers discarded.
++ ///
++ /// @return True if operations are suspended, false otherwise.
++ bool checkSuspendedInternal();
++
++ /// @brief Fetches the current, global configuration parameters.
++ ///
++    /// @return PingCheckConfigPtr containing the current configuration.
++ const PingCheckConfigPtr getGlobalConfig() const;
++
++ /// @brief Fetches the current, scoped configuration parameters.
++ ///
++ /// @param lease lease for which the parameters are desired.
++ ///
++    /// @return PingCheckConfigPtr containing the current configuration.
++ const PingCheckConfigPtr getScopedConfig(dhcp::Lease4Ptr& lease);
++
++ /// @brief Get the hook I/O service.
++ ///
++ /// @return the hook I/O service.
++ isc::asiolink::IOServicePtr getIOService() {
++ return (io_service_);
++ }
++
++ /// @brief Set the hook I/O service.
++ ///
++ /// @param io_service the hook I/O service.
++ void setIOService(isc::asiolink::IOServicePtr io_service) {
++ io_service_ = io_service;
++ }
++
++protected:
++
++ /// @brief The hook I/O service.
++ isc::asiolink::IOServicePtr io_service_;
++
++ /// @brief Thread pool used when running multi-threaded.
++ asiolink::IoServiceThreadPoolPtr thread_pool_;
++
++ /// @brief In-memory store of PingContexts.
++ PingContextStorePtr store_;
++
++ /// @brief Channel that conducts ICMP messaging.
++ PingChannelPtr channel_;
++
++ /// @brief Warehouses parsed global and subnet configuration.
++ ConfigCachePtr config_cache_;
++
++ /// @brief Tracks whether or not the server is processing DHCP packets.
++ dhcp::NetworkStatePtr network_state_;
++
++ /// @brief TimeStamp of the next expiration event.
++ TimeStamp next_expiry_;
++
++ /// @brief Timer which tracks the next expiration event.
++ asiolink::IntervalTimerPtr expiration_timer_;
++
++ /// @brief The mutex used to protect internal state.
++ const boost::scoped_ptr<std::mutex> mutex_;
++
++ /// @brief Indicates whether or not operations have been suspended.
++ bool suspended_;
++};
++
++/// @brief Defines a shared pointer to a PingCheckMgr.
++typedef boost::shared_ptr<PingCheckMgr> PingCheckMgrPtr;
++
++} // end of namespace ping_check
++} // end of namespace isc
++
++#endif
+diff --git a/src/hooks/dhcp/ping_check/ping_context.cc b/src/hooks/dhcp/ping_check/ping_context.cc
+new file mode 100644
+index 0000000000..45e896f948
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_context.cc
+@@ -0,0 +1,237 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++
++#include <ping_context.h>
++#include <ping_check_log.h>
++#include <exceptions/exceptions.h>
++#include <util/chrono_time_utils.h>
++#include <iostream>
++
++using namespace std;
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::dhcp;
++using namespace isc::hooks;
++using namespace std::chrono;
++
++namespace isc {
++namespace ping_check {
++
++PingContext::PingContext(Lease4Ptr& lease, Pkt4Ptr& query,
++ uint32_t min_echos /* = 1 */,
++ uint32_t reply_timeout /* = 100 */,
++ ParkingLotHandlePtr& parking_lot /* = EMPTY_LOT() */)
++ : min_echos_(min_echos),
++ reply_timeout_(reply_timeout),
++ echos_sent_(0),
++ last_echo_sent_time_(EMPTY_TIME()),
++ send_wait_start_(EMPTY_TIME()),
++ next_expiry_(EMPTY_TIME()),
++ created_time_(PingContext::now()),
++ lease_(lease),
++ query_(query),
++ state_(NEW),
++ parking_lot_(parking_lot) {
++ if (!lease_) {
++ isc_throw(BadValue, "PingContext ctor - lease cannot be empty");
++ }
++
++ if (!query_) {
++ isc_throw(BadValue, "PingContext ctor - query cannot be empty");
++ }
++
++ if (getTarget() == IOAddress::IPV4_ZERO_ADDRESS()) {
++ isc_throw(BadValue, "PingContext ctor - target address cannot be 0.0.0.0");
++ }
++
++ if (min_echos_ == 0) {
++ isc_throw(BadValue, "PingContext ctor - min_echos must be greater than 0");
++ }
++
++ if (reply_timeout_ == 0) {
++ isc_throw(BadValue, "PingContext ctor - reply_timeout must be greater than 0");
++ }
++}
++
++PingContext::State
++PingContext::stringToState(const std::string& state_str) {
++ if (state_str == "NEW") {
++ return (NEW);
++ }
++
++ if (state_str == "WAITING_TO_SEND") {
++ return (WAITING_TO_SEND);
++ }
++
++ if (state_str == "SENDING") {
++ return (SENDING);
++ }
++
++ if (state_str == "WAITING_FOR_REPLY") {
++ return (WAITING_FOR_REPLY);
++ }
++
++ if (state_str == "TARGET_FREE") {
++ return (TARGET_FREE);
++ }
++
++ if (state_str == "TARGET_IN_USE") {
++ return (TARGET_IN_USE);
++ }
++
++ isc_throw(BadValue, "Invalid PingContext::State: '" << state_str << "'");
++}
++
++TimeStamp
++PingContext::now() {
++ return (time_point_cast<milliseconds>(std::chrono::system_clock::now()));
++}
++
++std::string
++PingContext::stateToString(const PingContext::State& state) {
++ std::string label = "";
++ switch (state) {
++ case NEW:
++ label = "NEW";
++ break;
++ case WAITING_TO_SEND:
++ label = "WAITING_TO_SEND";
++ break;
++ case SENDING:
++ label = "SENDING";
++ break;
++ case WAITING_FOR_REPLY:
++ label = "WAITING_FOR_REPLY";
++ break;
++ case TARGET_FREE:
++ label = "TARGET_FREE";
++ break;
++ case TARGET_IN_USE:
++ label = "TARGET_IN_USE";
++ break;
++ }
++
++ return (label);
++}
++
++const IOAddress& PingContext::getTarget() const {
++ return (lease_->addr_);
++}
++
++uint32_t
++PingContext::getMinEchos() const {
++ return (min_echos_);
++}
++
++void
++PingContext::setMinEchos(uint32_t value) {
++ min_echos_ = value;
++}
++
++uint32_t
++PingContext::getReplyTimeout() const {
++ return (reply_timeout_);
++}
++
++void
++PingContext::setReplyTimeout(uint32_t value) {
++ reply_timeout_ = value;
++}
++
++uint32_t
++PingContext::getEchosSent() const {
++ return (echos_sent_);
++}
++
++void
++PingContext::setEchosSent(uint32_t value) {
++ echos_sent_ = value;
++}
++
++const TimeStamp&
++PingContext::getLastEchoSentTime() const {
++ return (last_echo_sent_time_);
++}
++
++void
++PingContext::setLastEchoSentTime(const TimeStamp& value) {
++ last_echo_sent_time_ = value;
++}
++
++const TimeStamp&
++PingContext::getSendWaitStart() const {
++ return (send_wait_start_);
++}
++
++bool
++PingContext::isWaitingToSend() const {
++ return (state_ == WAITING_TO_SEND);
++}
++
++void
++PingContext::setSendWaitStart(const TimeStamp& value) {
++ send_wait_start_ = value;
++}
++
++const TimeStamp&
++PingContext::getNextExpiry() const {
++ return (next_expiry_);
++}
++
++bool
++PingContext::isWaitingForReply() const {
++ return (state_ == WAITING_FOR_REPLY);
++}
++
++void
++PingContext::setNextExpiry(const TimeStamp& value) {
++ next_expiry_ = value;
++}
++
++const TimeStamp&
++PingContext::getCreatedTime() const {
++ return (created_time_);
++}
++
++PingContext::State
++PingContext::getState() const {
++ return (state_);
++}
++
++void
++PingContext::setState(const PingContext::State& value) {
++ state_ = value;
++}
++
++Pkt4Ptr
++PingContext::getQuery() const {
++ return (query_);
++}
++
++Lease4Ptr
++PingContext::getLease() const {
++ return (lease_);
++}
++
++void
++PingContext::beginWaitingToSend(const TimeStamp& begin_time /* = now() */) {
++ state_ = WAITING_TO_SEND;
++ send_wait_start_ = begin_time;
++}
++
++void
++PingContext::beginWaitingForReply(const TimeStamp& begin_time /* = now() */) {
++ ++echos_sent_;
++ last_echo_sent_time_ = begin_time;
++ next_expiry_ = begin_time + milliseconds(reply_timeout_);
++ state_ = WAITING_FOR_REPLY;
++}
++
++} // end of namespace ping_check
++} // end of namespace isc
++
+diff --git a/src/hooks/dhcp/ping_check/ping_context.h b/src/hooks/dhcp/ping_check/ping_context.h
+new file mode 100644
+index 0000000000..2c5b704a04
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_context.h
+@@ -0,0 +1,280 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef PING_CONTEXT_H
++#define PING_CONTEXT_H
++
++#include <dhcp/pkt4.h>
++#include <dhcpsrv/lease.h>
++#include <hooks/parking_lots.h>
++
++#include <chrono>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief Specifies the type for time stamps.
++using TimeStamp = std::chrono::time_point<std::chrono::system_clock>;
++
++/// @brief Embodies the life cycle of a ping check test for a single address
++/// for a single DHCPDISCOVER.
++///
++/// The class uses a state-model to direct the tasks needed to execute one
++/// or more ECHO REQUEST SEND/WAIT FOR REPLY cycles until the address is
++/// either deemed free to offer or in-use and should not be offered. The
++/// number of cycles conducted is dictated by the minimum number of echos
++/// (@c min_echos_) and whether or not either an ECHO REPLY or DESTINATION
++/// UNREACHABLE are received.
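++///
++/// A minimal sketch of the intended state progression (illustrative):
++/// @code
++/// PingContext ctx(lease, query);        // state == NEW
++/// ctx.beginWaitingToSend();             // -> WAITING_TO_SEND
++/// ctx.setState(PingContext::SENDING);   // echo handed off for sending
++/// ctx.beginWaitingForReply();           // -> WAITING_FOR_REPLY, expiry set
++/// @endcode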
++class PingContext {
++public:
++
++ /// @brief Defines PingContext life cycle states
++ enum State {
++ NEW, // Newly created
++ WAITING_TO_SEND, // Waiting to send next ECHO REQUEST
++ SENDING, // Next ECHO REQUEST is being sent
++ WAITING_FOR_REPLY, // ECHO REQUEST sent, Waiting for reply or timeout
++ TARGET_FREE, // Target has been deemed free to offer.
++ TARGET_IN_USE // Target has been deemed in-use, do not offer
++ };
++
++ /// @brief Converts a string to State
++ ///
++ /// @param state_str Upper case string label to convert
++ /// @return State value corresponding to the given string
++ ///
++ /// @throw BadValue if the string is not a valid state label
++ static State stringToState(const std::string& state_str);
++
++ /// @brief Converts a State to a string
++ ///
++ /// @param state State to convert
++ /// @return string label corresponding to the given state
++ static std::string stateToString(const State& state);
++
++ /// @brief Constructor
++ ///
++ /// @param lease pointer to the lease whose address needs to be checked
++ /// @param query DHCPDISCOVER that instigated the check
++ /// @param min_echos minimum number of ECHO REQUESTs sent without replies
++ /// received required to declare an address free to offer. Defaults to 1,
++ /// must be greater than zero.
++ /// @param reply_timeout maximum number of milliseconds to wait for an
++ /// ECHO REPLY after an ECHO REQUEST has been sent. Defaults to 100,
++ /// must be greater than 0.
++ /// @param parking_lot parking lot in which the query is parked. Defaults
++ /// to an empty pointer.
++ ///
++ /// @throw BadValue if either lease or query are empty, or if the lease
++ /// address is 0.0.0.0
++ PingContext(isc::dhcp::Lease4Ptr& lease, isc::dhcp::Pkt4Ptr& query,
++ uint32_t min_echos = 1, uint32_t reply_timeout = 100,
++ isc::hooks::ParkingLotHandlePtr& parking_lot = EMPTY_LOT());
++
++ /// @brief Destructor
++ virtual ~PingContext() = default;
++
++ /// @brief Fetches the current timestamp (UTC/milliseconds precision)
++ ///
++ /// @return current time as a TimeStamp
++ static TimeStamp now();
++
++ /// @brief Fetches an empty timestamp
++ ///
++ /// @return an empty TimeStamp
++ static const TimeStamp& EMPTY_TIME() {
++ static TimeStamp empty_time;
++ return (empty_time);
++ }
++
++ /// @brief Fetches the minimum timestamp
++ ///
++ /// @return the minimum timestamp
++ static const TimeStamp& MIN_TIME() {
++ static TimeStamp min_time = std::chrono::system_clock::time_point::min();
++ return (min_time);
++ }
++
++ /// @brief Fetches an empty parking lot handle
++ ///
++ /// @return an empty ParkingLotHandlePtr
++ static hooks::ParkingLotHandlePtr& EMPTY_LOT() {
++ static hooks::ParkingLotHandlePtr empty_lot(0);
++ return (empty_lot);
++ }
++
++ /// @brief Fetches the IP address that is under test.
++ ///
++ /// @return IP address as an IOAddress
++ const isc::asiolink::IOAddress& getTarget() const;
++
++ /// @brief Fetches the minimum number of ECHO REQUESTs
++ ///
++ /// @return minimum number of echos as a uint32_t
++ uint32_t getMinEchos() const;
++
++ /// @brief Sets the minimum number of ECHO REQUESTs
++ ///
++ /// @param value new value, must be greater than 0
++ ///
++ /// @throw BadValue if the given value is 0
++ void setMinEchos(uint32_t value);
++
++ /// @brief Fetches the reply timeout (milliseconds)
++ ///
++    /// @return reply timeout as a uint32_t
++ uint32_t getReplyTimeout() const;
++
++ /// @brief Sets the reply timeout
++ ///
++ /// @param value new value in milliseconds, must be greater than 0
++ ///
++ /// @throw BadValue if the given value is 0.
++ void setReplyTimeout(uint32_t value);
++
++ /// @brief Fetches the number of ECHO REQUESTs sent.
++ ///
++    /// @return number of echos sent as a uint32_t
++ uint32_t getEchosSent() const;
++
++ /// @brief Sets the number of ECHO REQUESTs sent.
++ ///
++ /// @param value new value
++ void setEchosSent(uint32_t value);
++
++ /// @brief Fetches the timestamp of when the most recent ECHO REQUEST
++ /// was sent
++ ///
++ /// @return time the last echo was sent as a TimeStamp
++ const TimeStamp& getLastEchoSentTime() const;
++
++ /// @brief Sets the timestamp the most recent ECHO REQUEST was sent
++ ///
++ /// @param value new value
++ void setLastEchoSentTime(const TimeStamp& value);
++
++ /// @brief Fetches the time the context went into WAITING_TO_SEND state
++ ///
++ /// The value returned is only meaningful when the context state is WAITING_TO_SEND.
++ ///
++ /// @return send waits start time as a TimeStamp
++ const TimeStamp& getSendWaitStart() const;
++
++ /// @brief Sets the send wait start timestamp
++ ///
++ /// @param value new value
++ void setSendWaitStart(const TimeStamp& value);
++
++ /// @brief Returns true if state is WAITING_TO_SEND
++ ///
++ /// @return True if the context is in WAITING_TO_SEND state
++ bool isWaitingToSend() const;
++
++    /// @brief Fetches the time at which the WAITING_FOR_REPLY state expires (or expired)
++ ///
++ /// The value returned is only meaningful when the context state is WAITING_FOR_REPLY.
++ ///
++ /// @return expiration
++ const TimeStamp& getNextExpiry() const;
++
++ /// @brief Sets the timestamp which specifies the time at which the WAITING_FOR_REPLY state expires
++ /// @param value new value
++ void setNextExpiry(const TimeStamp& value);
++
++ /// @brief Returns true if state is WAITING_FOR_REPLY
++ ///
++    /// @return True if the context is in WAITING_FOR_REPLY state
++ bool isWaitingForReply() const;
++
++ /// @brief Fetches the time at which the context was created
++ ///
++ /// @return creation time as a TimeStamp
++ const TimeStamp& getCreatedTime() const;
++
++ /// @brief Fetches the current state.
++ ///
++ /// @return current state as PingContext::State
++ State getState() const;
++
++ /// @brief Sets the state.
++ ///
++ /// @param value new state value
++ void setState(const State& value);
++
++ /// @brief Returns the query that instigated this check
++ ///
++ /// @return query as a Pkt4Ptr
++ isc::dhcp::Pkt4Ptr getQuery() const;
++
++ /// @brief Returns the candidate lease whose address is the target to check
++ ///
++ /// @return lease under test as a Lease4Ptr
++ isc::dhcp::Lease4Ptr getLease() const;
++
++ /// @brief Enters WAITING_TO_SEND state
++ ///
++ /// @param begin_time timestamp of when the state began. Defaults to
++ /// time now. Provided for testing purposes.
++ void beginWaitingToSend(const TimeStamp& begin_time = PingContext::now());
++
++    /// @brief Enters WAITING_FOR_REPLY state
++ ///
++ /// @param begin_time timestamp of when the state began. Defaults to
++ /// time now. Provided for testing purposes.
++ void beginWaitingForReply(const TimeStamp& begin_time = PingContext::now());
++
++ /// @brief Fetches the parking lot used for this context.
++ ///
++ /// @return Pointer to the parking lot handle or empty if parking is not
++ /// employed.
++ isc::hooks::ParkingLotHandlePtr getParkingLot() {
++ return (parking_lot_);
++ };
++
++private:
++ /// @brief Minimum number of echos to send without receiving a reply
++ /// before giving up
++ uint32_t min_echos_ = 0;
++
++    /// @brief Amount of time in milliseconds to wait for an echo reply
++ uint32_t reply_timeout_ = 0;
++
++ /// @brief Number of echos sent since instantiation
++ uint32_t echos_sent_ = 0;
++
++ /// @brief Timestamp the most recent echo send completed
++ TimeStamp last_echo_sent_time_;
++
++ /// @brief Timestamp of entry into waiting_to_send
++ TimeStamp send_wait_start_;
++
++ /// @brief Timestamp the most recent echo times out
++ TimeStamp next_expiry_;
++
++ /// @brief Time context was created
++ TimeStamp created_time_;
++
++ /// @brief Candidate lease to check
++ isc::dhcp::Lease4Ptr lease_;
++
++ /// @brief DHCPDISCOVER packet that instigated this check.
++ isc::dhcp::Pkt4Ptr query_;
++
++ /// @brief Current state of this context
++ State state_;
++
++ /// @brief Parking lot where the associated query is parked.
++ /// If empty parking is not being employed.
++ isc::hooks::ParkingLotHandlePtr parking_lot_;
++};
++
++/// @brief Defines a shared pointer to a PingContext.
++typedef boost::shared_ptr<PingContext> PingContextPtr;
++
++} // end of namespace ping_check
++} // end of namespace isc
++
++#endif
+diff --git a/src/hooks/dhcp/ping_check/ping_context_store.cc b/src/hooks/dhcp/ping_check/ping_context_store.cc
+new file mode 100644
+index 0000000000..35712d5afe
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_context_store.cc
+@@ -0,0 +1,144 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++
++#include <ping_context_store.h>
++#include <util/multi_threading_mgr.h>
++
++using namespace std;
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::dhcp;
++using namespace isc::hooks;
++using namespace isc::util;
++using namespace std::chrono;
++
++namespace isc {
++namespace ping_check {
++
++PingContextPtr
++PingContextStore::addContext(Lease4Ptr& lease, Pkt4Ptr& query,
++ uint32_t min_echos, uint32_t reply_timeout,
++ ParkingLotHandlePtr& parking_lot) {
++
++ MultiThreadingLock lock(*mutex_);
++ PingContextPtr context;
++ try {
++ context.reset(new PingContext(lease, query, min_echos, reply_timeout, parking_lot));
++ } catch (const std::exception& ex) {
++ isc_throw(BadValue, "PingContextStore::addContext failed: " << ex.what());
++ }
++
++ context->beginWaitingToSend();
++ auto ret = pings_.insert(context);
++ if (ret.second == false) {
++ isc_throw(DuplicateContext, "PingContextStore::addContex: context already exists for: "
++ << lease->addr_);
++ }
++
++ return (context);
++}
++
++void
++PingContextStore::updateContext(const PingContextPtr& context) {
++ MultiThreadingLock lock(*mutex_);
++ auto& index = pings_.get<AddressIndexTag>();
++ auto context_iter = index.find(context->getTarget());
++ if (context_iter == index.end()) {
++ isc_throw(InvalidOperation, "PingContextStore::updateContext failed for address: "
++ << context->getTarget() << ", not in store");
++ }
++
++ // Use replace() to re-index contexts.
++ index.replace(context_iter, PingContextPtr(new PingContext(*context)));
++}
++
++void
++PingContextStore::deleteContext(const PingContextPtr& context) {
++ MultiThreadingLock lock(*mutex_);
++ auto& index = pings_.get<AddressIndexTag>();
++ auto context_iter = index.find(context->getTarget());
++ if (context_iter == index.end()) {
++ // Not there, just return.
++ return;
++ }
++
++ // Remove the context from the store.
++ pings_.erase(context_iter);
++}
++
++PingContextPtr
++PingContextStore::getContextByAddress(const IOAddress& address) {
++ MultiThreadingLock lock(*mutex_);
++ auto const& index = pings_.get<AddressIndexTag>();
++ auto context_iter = index.find(address);
++ return (context_iter == index.end() ? PingContextPtr()
++ : PingContextPtr(new PingContext(**context_iter)));
++}
++
++PingContextPtr
++PingContextStore::getContextByQuery(Pkt4Ptr& query) {
++ MultiThreadingLock lock(*mutex_);
++ auto const& index = pings_.get<QueryIndexTag>();
++ auto context_iter = index.find(query);
++ return (context_iter == index.end() ? PingContextPtr()
++ : PingContextPtr(new PingContext(**context_iter)));
++}
++
++PingContextPtr
++PingContextStore::getNextToSend() {
++ MultiThreadingLock lock(*mutex_);
++ auto const& index = pings_.get<NextToSendIndexTag>();
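++    // Composite-key search: find the first entry flagged waiting-to-send,
++    // with the oldest send_wait_start at or above MIN_TIME().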
++ auto context_iter = index.lower_bound(boost::make_tuple(true, PingContext::MIN_TIME()));
++ return (context_iter == index.end() ? PingContextPtr()
++ : PingContextPtr(new PingContext(**context_iter)));
++}
++
++PingContextPtr
++PingContextStore::getExpiresNext() {
++ MultiThreadingLock lock(*mutex_);
++ auto const& index = pings_.get<ExpirationIndexTag>();
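++    // Find the first waiting-for-reply entry whose expiry is at least one
++    // millisecond in the future, skipping already-expired contexts.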
++ auto context_iter = index.lower_bound(boost::make_tuple(true, PingContext::now() + milliseconds(1)));
++ return (context_iter == index.end() ? PingContextPtr()
++ : PingContextPtr(new PingContext(**context_iter)));
++}
++
++PingContextCollectionPtr
++PingContextStore::getExpiredSince(const TimeStamp& since) {
++ MultiThreadingLock lock(*mutex_);
++ auto const& index = pings_.get<ExpirationIndexTag>();
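++    // Collect every waiting-for-reply entry whose expiry falls within
++    // [MIN_TIME(), since], i.e. those that have already timed out.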
++ auto lower_limit = index.lower_bound(boost::make_tuple(true, PingContext::MIN_TIME()));
++ auto upper_limit = index.upper_bound(boost::make_tuple(true, since));
++
++ PingContextCollectionPtr collection(new PingContextCollection());
++ for (auto context_iter = lower_limit; context_iter != upper_limit; ++context_iter) {
++ PingContextPtr context(new PingContext(**context_iter));
++ collection->push_back(context);
++ }
++
++ return (collection);
++}
++
++PingContextCollectionPtr
++PingContextStore::getAll() {
++ MultiThreadingLock lock(*mutex_);
++ auto const& index = pings_.get<AddressIndexTag>();
++ PingContextCollectionPtr collection(new PingContextCollection());
++ for (auto const& context_iter : index) {
++ collection->push_back(PingContextPtr(new PingContext(*context_iter)));
++ }
++
++ return (collection);
++}
++
++void PingContextStore::clear() {
++ MultiThreadingLock lock(*mutex_);
++ pings_.clear();
++}
++
++} // end of namespace ping_check
++} // end of namespace isc
+diff --git a/src/hooks/dhcp/ping_check/ping_context_store.h b/src/hooks/dhcp/ping_check/ping_context_store.h
+new file mode 100644
+index 0000000000..3a7664bfca
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/ping_context_store.h
+@@ -0,0 +1,240 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef PING_CONTEXT_STORE_H
++#define PING_CONTEXT_STORE_H
++
++#include <asiolink/io_address.h>
++#include <ping_context.h>
++
++#include <boost/multi_index/indexed_by.hpp>
++#include <boost/multi_index/member.hpp>
++#include <boost/multi_index/mem_fun.hpp>
++#include <boost/multi_index/ordered_index.hpp>
++#include <boost/multi_index_container.hpp>
++#include <boost/multi_index/composite_key.hpp>
++#include <boost/scoped_ptr.hpp>
++
++#include <mutex>
++#include <vector>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief Exception thrown when an attempt was made to add a duplicate context
++class DuplicateContext : public Exception {
++public:
++ DuplicateContext(const char* file, size_t line, const char* what) :
++ isc::Exception(file, line, what) {}
++};
++
++/// @brief Tag for index by target address.
++struct AddressIndexTag { };
++
++/// @brief Tag for index by the query packet.
++struct QueryIndexTag { };
++
++/// @brief Tag for index by send wait start time.
++struct NextToSendIndexTag { };
++
++/// @brief Tag for index by expiration time.
++struct ExpirationIndexTag { };
++
++/// @brief Tag for index by state.
++struct StateIndexTag { };
++
++/// @brief A multi index container holding pointers to PingContexts.
++///
++/// The contexts in the container may be accessed using different indexes:
++/// - using an IPv4 address,
++/// - using a query packet
++/// - using a send wait start time
++/// - using an expiration time
++/// - using a context state
++///
++/// Indexes can be accessed using the index number (from 0 to 4) or a
++/// name tag. It is recommended to use the tags to access indexes as
++/// they do not depend on the order of indexes in the container.
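++///
++/// For example, a lookup by target address via its tag (sketch; the
++/// store's finders below show actual use):
++/// @code
++/// auto const& index = pings.get<AddressIndexTag>();
++/// auto it = index.find(IOAddress("192.0.2.1"));
++/// @endcode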
++typedef boost::multi_index_container<
++    // It holds pointers to PingContext objects.
++ PingContextPtr,
++ boost::multi_index::indexed_by<
++ // Specification of the first index starts here.
++ // This index sorts PingContexts by IPv4 addresses represented as
++ // IOAddress objects.
++ /// @todo Does it need to be ordered or only unique?
++ boost::multi_index::ordered_unique<
++ boost::multi_index::tag<AddressIndexTag>,
++ boost::multi_index::const_mem_fun<PingContext, const isc::asiolink::IOAddress&,
++ &PingContext::getTarget>
++ >,
++
++ // Specification of the second index starts here.
++ // This index sorts contexts by query.
++ boost::multi_index::ordered_unique<
++ boost::multi_index::tag<QueryIndexTag>,
++ boost::multi_index::const_mem_fun<PingContext, isc::dhcp::Pkt4Ptr,
++ &PingContext::getQuery>
++ >,
++
++ // Specification of the third index starts here.
++ // This index sorts contexts by send_wait_start.
++ boost::multi_index::ordered_non_unique<
++ boost::multi_index::tag<NextToSendIndexTag>,
++ boost::multi_index::composite_key<
++ PingContext,
++ // The boolean value specifying if context is waiting to send
++ boost::multi_index::const_mem_fun<PingContext, bool,
++ &PingContext::isWaitingToSend>,
++ // Context expiration time.
++ boost::multi_index::const_mem_fun<PingContext, const TimeStamp&,
++ &PingContext::getSendWaitStart>
++ >
++ >,
++
++ // Specification of the fourth index starts here.
++ // This index sorts contexts by next_expiry.
++ boost::multi_index::ordered_non_unique<
++ boost::multi_index::tag<ExpirationIndexTag>,
++ boost::multi_index::composite_key<
++ PingContext,
++ // The boolean value specifying if context is waiting for a reply
++ boost::multi_index::const_mem_fun<PingContext, bool,
++ &PingContext::isWaitingForReply>,
++ // Context expiration time.
++ boost::multi_index::const_mem_fun<PingContext, const TimeStamp&,
++ &PingContext::getNextExpiry>
++ >
++ >,
++
++ // Specification of the fifth index starts here.
++ // This index sorts contexts by State.
++ boost::multi_index::ordered_non_unique<
++ boost::multi_index::tag<StateIndexTag>,
++ boost::multi_index::const_mem_fun<PingContext, PingContext::State,
++ &PingContext::getState>
++ >
++ >
++> PingContextContainer;
++
++/// @brief Type for a collection of PingContextPtrs.
++typedef std::vector<PingContextPtr> PingContextCollection;
++/// @brief Type for a pointer to a collection of PingContextPtrs.
++typedef boost::shared_ptr<PingContextCollection> PingContextCollectionPtr;
++
++/// @brief Maintains an in-memory store of PingContexts
++///
++/// Provides essential CRUD functions for managing a collection of
++/// PingContexts. Additionally there are finders that can return
++/// contexts by target IP address, instigating query, WAITING_TO_SEND
++/// start time, WAITING_FOR_REPLY expiration time, and context state.
++/// All finders return copies of the contexts found, rather than the
++/// stored context itself.
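++///
++/// A minimal usage sketch (lease and query construction omitted):
++/// @code
++/// PingContextStore store;
++/// store.addContext(lease, query, 1, 100);      // enters WAITING_TO_SEND
++/// PingContextPtr next = store.getNextToSend();
++/// auto expired = store.getExpiredSince();      // timed-out contexts
++/// @endcode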
++class PingContextStore {
++public:
++
++ /// @brief Constructor
++ PingContextStore() : pings_(), mutex_(new std::mutex) {
++ }
++
++ /// @brief Destructor
++ ~PingContextStore() = default;
++
++ /// @brief Creates a new PingContext and adds it to the store
++ ///
++ /// @param lease lease whose address is to be ping checked
++    /// @param query query that instigated the ping check
++ /// @param min_echos minimum number of ECHO REQUESTs sent without replies
++ /// received required to declare an address free to offer. Must be
++ /// greater than zero.
++ /// @param reply_timeout maximum number of milliseconds to wait for an
++ /// ECHO REPLY after an ECHO REQUEST has been sent. Must be greater than 0.
++ /// @param parking_lot parking lot in which query is parked. If empty,
++ /// parking is assumed to not be employed.
++ ///
++ /// @return pointer to the newly created context
++    /// @throw DuplicateContext if a context for the lease address already
++ /// exists in the store.
++ PingContextPtr addContext(isc::dhcp::Lease4Ptr& lease,
++ isc::dhcp::Pkt4Ptr& query,
++ uint32_t min_echos,
++ uint32_t reply_timeout,
++ isc::hooks::ParkingLotHandlePtr& parking_lot
++ = PingContext::EMPTY_LOT());
++
++ /// @brief Updates a context in the store.
++ ///
++ /// The context is assumed to already exist in the store.
++ ///
++ /// @param context context to update.
++ ///
++ /// @throw InvalidOperation if PingContext does not exist in the store.
++ void updateContext(const PingContextPtr& context);
++
++ /// @brief Removes the context from the store.
++ ///
++ /// If the context does not exist in the store, it simply returns.
++ ///
++ /// @param context context to delete.
++ void deleteContext(const PingContextPtr& context);
++
++ /// @brief Fetches the context with a given target address
++ ///
++ /// @param address target IP address for which to search
++ ///
++ /// @return pointer to the matching PingContext or an empty pointer if
++ /// not found.
++ PingContextPtr getContextByAddress(const isc::asiolink::IOAddress& address);
++
++ /// @brief Fetches the context with a given query packet
++ ///
++ /// @param query query for which to search
++ ///
++ /// @return pointer to the matching PingContext or an empty pointer if
++ /// not found.
++ PingContextPtr getContextByQuery(isc::dhcp::Pkt4Ptr& query);
++
++ /// @brief Fetches the context in WAITING_TO_SEND with the oldest send wait
++ /// start time.
++ ///
++ /// @return pointer to the matching PingContext or an empty pointer if
++ /// not found.
++ PingContextPtr getNextToSend();
++
++ /// @brief Fetches the context in WAITING_FOR_REPLY with the oldest expiration
++ /// time that has not already passed (i.e. is still in the future)
++ ///
++ /// @return pointer to the matching PingContext or an empty pointer if
++ /// not found.
++ PingContextPtr getExpiresNext();
++
++ /// @brief Fetches the contexts in WAITING_FOR_REPLY that expired since a given time
++ ///
++ /// @param since timestamp to search by. Defaults to current time.
++ ///
++ /// @return a collection of the matching contexts, ordered by expiration time.
++ PingContextCollectionPtr getExpiredSince(const TimeStamp& since = PingContext::now());
++
++ /// @brief Fetches all of the contexts (in order by target)
++ ///
++ /// @return a collection of all contexts in the store.
++ PingContextCollectionPtr getAll();
++
++ /// @brief Removes all contexts from the store.
++ void clear();
++
++private:
++ /// @brief Container instance.
++ PingContextContainer pings_;
++
++ /// @brief The mutex used to protect internal state.
++ const boost::scoped_ptr<std::mutex> mutex_;
++};
++
++} // end of namespace ping_check
++} // end of namespace isc
++
++#endif
+diff --git a/src/hooks/dhcp/ping_check/tests/.gitignore b/src/hooks/dhcp/ping_check/tests/.gitignore
+new file mode 100644
+index 0000000000..7e12f9e5be
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/.gitignore
+@@ -0,0 +1 @@
++ping_check_unittests
+diff --git a/src/hooks/dhcp/ping_check/tests/Makefile.am b/src/hooks/dhcp/ping_check/tests/Makefile.am
+new file mode 100644
+index 0000000000..a8c2ea4d92
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/Makefile.am
+@@ -0,0 +1,70 @@
++SUBDIRS = .
++
++AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
++AM_CPPFLAGS += -I$(top_builddir)/src/hooks/dhcp/ping_check -I$(top_srcdir)/src/hooks/dhcp/ping_check
++AM_CPPFLAGS += $(BOOST_INCLUDES) $(CRYPTO_CFLAGS) $(CRYPTO_INCLUDES)
++AM_CPPFLAGS += -DPING_CHECK_LIB_SO=\"$(abs_top_builddir)/src/hooks/dhcp/ping_check/.libs/libdhcp_ping_check.so\"
++AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
++
++AM_CXXFLAGS = $(KEA_CXXFLAGS)
++
++if USE_STATIC_LINK
++AM_LDFLAGS = -static
++endif
++
++# Unit test data files need to get installed.
++EXTRA_DIST =
++
++CLEANFILES = *.gcno *.gcda
++
++TESTS_ENVIRONMENT = $(LIBTOOL) --mode=execute $(VALGRIND_COMMAND)
++
++LOG_COMPILER = $(LIBTOOL)
++AM_LOG_FLAGS = --mode=execute
++
++TESTS =
++if HAVE_GTEST
++TESTS += ping_check_unittests
++
++ping_check_unittests_SOURCES = run_unittests.cc
++ping_check_unittests_SOURCES += icmp_endpoint_unittests.cc
++ping_check_unittests_SOURCES += icmp_socket_unittests.cc
++ping_check_unittests_SOURCES += ping_context_unittests.cc
++ping_check_unittests_SOURCES += ping_context_store_unittests.cc
++ping_check_unittests_SOURCES += icmp_msg_unittests.cc
++ping_check_unittests_SOURCES += ping_test_utils.h
++ping_check_unittests_SOURCES += ping_channel_unittests.cc
++ping_check_unittests_SOURCES += ping_check_mgr_unittests.cc
++ping_check_unittests_SOURCES += ping_check_config_unittests.cc
++ping_check_unittests_SOURCES += config_cache_unittests.cc
++
++ping_check_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) $(LOG4CPLUS_INCLUDES)
++
++ping_check_unittests_LDFLAGS = $(AM_LDFLAGS) $(CRYPTO_LDFLAGS) $(GTEST_LDFLAGS)
++
++ping_check_unittests_CXXFLAGS = $(AM_CXXFLAGS)
++
++ping_check_unittests_LDADD = $(top_builddir)/src/hooks/dhcp/ping_check/libping_check.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/dhcpsrv/libkea-dhcpsrv.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/process/libkea-process.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/eval/libkea-eval.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/dhcp_ddns/libkea-dhcp_ddns.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/stats/libkea-stats.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/config/libkea-cfgclient.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/http/libkea-http.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libkea-dhcp++.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/hooks/libkea-hooks.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/database/libkea-database.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/cc/libkea-cc.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libkea-asiolink.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/dns/libkea-dns++.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libkea-cryptolink.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/log/libkea-log.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/util/libkea-util.la
++ping_check_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libkea-exceptions.la
++ping_check_unittests_LDADD += $(LOG4CPLUS_LIBS)
++ping_check_unittests_LDADD += $(CRYPTO_LIBS)
++ping_check_unittests_LDADD += $(BOOST_LIBS)
++ping_check_unittests_LDADD += $(GTEST_LDADD)
++endif
++noinst_PROGRAMS = $(TESTS)
+diff --git a/src/hooks/dhcp/ping_check/tests/config_cache_unittests.cc b/src/hooks/dhcp/ping_check/tests/config_cache_unittests.cc
+new file mode 100644
+index 0000000000..f4e48d6591
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/config_cache_unittests.cc
+@@ -0,0 +1,245 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/// @file This file contains tests which verify the subnet ping-check
++/// configuration cache.
++
++#include <config.h>
++#include <config_cache.h>
++#include <dhcpsrv/cfgmgr.h>
++#include <hooks/callout_manager.h>
++#include <hooks/hooks.h>
++#include <testutils/gtest_utils.h>
++#include <testutils/multi_threading_utils.h>
++
++#include <boost/date_time/posix_time/posix_time.hpp>
++#include <gtest/gtest.h>
++#include <sstream>
++
++using namespace std;
++using namespace isc;
++using namespace isc::data;
++using namespace isc::dhcp;
++using namespace isc::hooks;
++using namespace isc::ping_check;
++using namespace isc::test;
++using namespace boost::posix_time;
++
++namespace {
++
++/// @brief ConfigCache derivation that allows flush time to be modified.
++class TestConfigCache : public ConfigCache {
++public:
++ /// @brief Constructor
++ TestConfigCache() {
++ }
++
++ /// @brief Destructor
++ virtual ~TestConfigCache() {
++ }
++
++ /// @brief Adjusts the last flush time by the given amount.
++ ///
++ /// @param offset signed value in seconds to add to cache's last
++ /// flush time value
++ void tweakLastFlushTime(int offset) {
++ setModificationTime(getLastFlushTime() + seconds(offset));
++ }
++};
++
++/// @brief Test fixture for testing ConfigCache.
++class ConfigCacheTest : public ::testing::Test {
++public:
++ /// @brief Constructor
++ ConfigCacheTest() {
++ isc::util::MultiThreadingMgr::instance().setMode(false);
++ }
++
++ /// @brief Destructor
++ virtual ~ConfigCacheTest() {
++ }
++
++ /// @brief Verifies construction of a ConfigCache.
++ void testConstruction() {
++ // We use a BaseStampedElement to get the current time to ensure we
++ // are using the same time perspective (currently local) as StampedElements do.
++ BaseStampedElement now;
++ ptime start_time = now.getModificationTime();
++
++ // Create a new cache.
++ TestConfigCache configs;
++ EXPECT_EQ(configs.size(), 0);
++
++ // Verify that last_flush_time_ has been set and that the
++ // cache has no entries.
++ ptime last_flush_time = configs.getLastFlushTime();
++ EXPECT_GE(last_flush_time, start_time);
++
++ // Verify that looking for an entry in an empty cache
++ // gracefully finds nothing.
++ PingCheckConfigPtr fetched_config;
++ EXPECT_FALSE(configs.findConfig(999, fetched_config));
++ EXPECT_FALSE(fetched_config);
++ }
++
++ /// @brief Verifies that invalid user-context config is rejected gracefully.
++ void testInvalidConfig() {
++ // Create a new cache.
++ TestConfigCache configs;
++ EXPECT_EQ(configs.size(), 0);
++
++ // An invalid keyword should fail.
++ std::string json =
++ R"({
++ "ping-check" : {
++ "bogus" : 777
++ }
++ })";
++
++ ConstElementPtr user_context;
++ ASSERT_NO_THROW_LOG(user_context = Element::fromJSON(json));
++
++ ASSERT_THROW_MSG(configs.parseAndCacheConfig(1, user_context), DhcpConfigError,
++ "spurious 'bogus' parameter");
++
++ EXPECT_EQ(configs.size(), 0);
++ }
++
++ /// @brief Verifies that valid user-context supplied config are cached correctly.
++ void testValidConfig() {
++ // Create a new cache.
++ TestConfigCache configs;
++ EXPECT_EQ(configs.size(), 0);
++
++ // A valid config should get cached.
++ std::string json =
++ R"({
++ "ping-check" : {
++ "enable-ping-check" : false,
++ "min-ping-requests" : 2,
++ "reply-timeout" : 375,
++ "ping-cltt-secs" : 120,
++ "ping-channel-threads" : 6
++ }
++ })";
++
++ ConstElementPtr user_context;
++ ASSERT_NO_THROW_LOG(user_context = Element::fromJSON(json));
++
++ // Verify that we cache a valid config.
++ PingCheckConfigPtr config;
++ ASSERT_NO_THROW_LOG(config = configs.parseAndCacheConfig(1, user_context));
++ ASSERT_TRUE(config);
++ EXPECT_EQ(configs.size(), 1);
++
++ // Verify we can retrieve the cached config.
++ PingCheckConfigPtr fetched_config;
++ ASSERT_TRUE(configs.findConfig(1, fetched_config));
++ EXPECT_EQ(fetched_config, config);
++ }
++
++ /// @brief Verifies that an empty config pointer can be cached.
++ void testConfigCacheEmptyConfig() {
++ // Create a new cache.
++ TestConfigCache configs;
++ EXPECT_EQ(configs.size(), 0);
++
++ // Verify that we can cache an empty config pointer.
++ PingCheckConfigPtr no_config;
++ ASSERT_NO_THROW_LOG(configs.cacheConfig(1, no_config));
++ EXPECT_EQ(configs.size(), 1);
++
++ // Verify we can retrieve the cached empty config pointer.
++ PingCheckConfigPtr fetched_config;
++ ASSERT_TRUE(configs.findConfig(1, fetched_config));
++ ASSERT_FALSE(fetched_config);
++ }
++
++ /// @brief Verifies that the cache can be cleared correctly.
++ void testFlushCache() {
++ // Create a new cache.
++ TestConfigCache configs;
++ EXPECT_EQ(configs.size(), 0);
++
++ ptime last_flush_time = configs.getLastFlushTime();
++
++ // Now let's wind the clock back on last_flush_time.
++ configs.tweakLastFlushTime(-1000);
++ EXPECT_LT(configs.getLastFlushTime(), last_flush_time);
++ last_flush_time = configs.getLastFlushTime();
++
++ // Make a simple valid config.
++ std::string json =
++ R"({
++ "ping-check": {
++ "enable-ping-check" : true
++ }
++ })";
++
++ ConstElementPtr user_context;
++ ASSERT_NO_THROW_LOG(user_context = Element::fromJSON(json));
++
++ for (int id = 1; id < 5; ++id) {
++ PingCheckConfigPtr config;
++ ASSERT_NO_THROW_LOG(config = configs.parseAndCacheConfig(id, user_context));
++ ASSERT_TRUE(config);
++ EXPECT_EQ(configs.size(), id);
++ }
++
++ // Verify we can explicitly clear the cache. Should be no entries
++ // and last_flush_time should be updated.
++ configs.flush();
++ EXPECT_GT(configs.getLastFlushTime(), last_flush_time);
++ EXPECT_EQ(configs.size(), 0);
++ }
++};
++
++TEST_F(ConfigCacheTest, construction) {
++ testConstruction();
++}
++
++TEST_F(ConfigCacheTest, constructionMultiThreading) {
++ MultiThreadingTest mt;
++ testConstruction();
++}
++
++TEST_F(ConfigCacheTest, invalidConfig) {
++ testInvalidConfig();
++}
++
++TEST_F(ConfigCacheTest, invalidConfigMultiThreading) {
++ MultiThreadingTest mt;
++ testInvalidConfig();
++}
++
++TEST_F(ConfigCacheTest, validConfig) {
++ testValidConfig();
++}
++
++TEST_F(ConfigCacheTest, validConfigMultiThreading) {
++ MultiThreadingTest mt;
++ testValidConfig();
++}
++
++TEST_F(ConfigCacheTest, configCacheEmptyConfig) {
++ testConfigCacheEmptyConfig();
++}
++
++TEST_F(ConfigCacheTest, configCacheEmptyConfigMultiThreading) {
++ MultiThreadingTest mt;
++ testConfigCacheEmptyConfig();
++}
++
++TEST_F(ConfigCacheTest, flushCache) {
++ testFlushCache();
++}
++
++TEST_F(ConfigCacheTest, flushCacheMultiThreading) {
++ MultiThreadingTest mt;
++ testFlushCache();
++}
++
++} // end of anonymous namespace
+diff --git a/src/hooks/dhcp/ping_check/tests/icmp_endpoint_unittests.cc b/src/hooks/dhcp/ping_check/tests/icmp_endpoint_unittests.cc
+new file mode 100644
+index 0000000000..e9ed8dcb9b
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/icmp_endpoint_unittests.cc
+@@ -0,0 +1,44 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++#include <asiolink/asio_wrapper.h>
++#include <asiolink/io_address.h>
++#include <icmp_endpoint.h>
++
++#include <gtest/gtest.h>
++
++#include <string>
++
++using namespace isc::asiolink;
++using namespace isc::ping_check;
++using namespace std;
++
++// This test checks that the endpoint can manage its own internal
++// boost::asio::ip::icmp::endpoint object for IPv4.
++TEST(ICMPEndpointTest, v4Address) {
++ const string test_address("192.0.2.1");
++
++ IOAddress address(test_address);
++ ICMPEndpoint endpoint(address);
++
++ EXPECT_TRUE(address == endpoint.getAddress());
++ EXPECT_EQ(static_cast<short>(IPPROTO_ICMP), endpoint.getProtocol());
++ EXPECT_EQ(AF_INET, endpoint.getFamily());
++}
++
++// This test checks that the endpoint can manage its own internal
++// boost::asio::ip::icmp::endpoint object for IPv6.
++TEST(ICMPEndpointTest, v6Address) {
++ const string test_address("2001:db8::1235");
++
++ IOAddress address(test_address);
++ ICMPEndpoint endpoint(address);
++
++ EXPECT_TRUE(address == endpoint.getAddress());
++ EXPECT_EQ(static_cast<short>(IPPROTO_ICMPV6), endpoint.getProtocol());
++ EXPECT_EQ(AF_INET6, endpoint.getFamily());
++}
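The two tests above pin down the wrapper's protocol/family mapping. For context, a hedged sketch of the underlying Boost.Asio objects an ICMPEndpoint-style wrapper manages, written against plain Boost.Asio only (the `ICMPEndpoint` class itself is not used here):

```cpp
#include <boost/asio.hpp>
#include <iostream>

// Plain Boost.Asio, showing the endpoint objects an ICMPEndpoint-style
// wrapper manages; the port in an ICMP endpoint is unused.
int main() {
    namespace ip = boost::asio::ip;

    ip::icmp::endpoint v4_ep(ip::make_address("192.0.2.1"), 0);
    ip::icmp::endpoint v6_ep(ip::make_address("2001:db8::1235"), 0);

    // The protocol family is derived from the address type.
    std::cout << "v4 family: " << v4_ep.protocol().family()    // AF_INET
              << ", v6 family: " << v6_ep.protocol().family()  // AF_INET6
              << std::endl;
    return (0);
}
```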
+diff --git a/src/hooks/dhcp/ping_check/tests/icmp_msg_unittests.cc b/src/hooks/dhcp/ping_check/tests/icmp_msg_unittests.cc
+new file mode 100644
+index 0000000000..36c7056840
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/icmp_msg_unittests.cc
+@@ -0,0 +1,172 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/// @file This file contains tests which exercise the ICMPMsg class.
++
++#include <config.h>
++#include <icmp_msg.h>
++#include <asiolink/io_address.h>
++#include <testutils/gtest_utils.h>
++#include <util/str.h>
++
++#include <gtest/gtest.h>
++#include <list>
++
++using namespace std;
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::ping_check;
++
++namespace {
++
++// Verifies accessors.
++TEST(ICMPMsgTest, basics) {
++ ICMPMsgPtr msg(new ICMPMsg());
++
++ msg->setType(ICMPMsg::ECHO_REPLY);
++ EXPECT_EQ(ICMPMsg::ECHO_REPLY, msg->getType());
++
++ msg->setCode(77);
++ EXPECT_EQ(77, msg->getCode());
++
++ msg->setChecksum(0x8899);
++ EXPECT_EQ(0x8899, msg->getChecksum());
++
++ msg->setId(0x1122);
++ EXPECT_EQ(0x1122, msg->getId());
++
++ msg->setSequence(0x3344);
++ EXPECT_EQ(0x3344, msg->getSequence());
++
++ msg->setSource(IOAddress("192.0.2.1"));
++ EXPECT_EQ(IOAddress("192.0.2.1"), msg->getSource());
++
++ msg->setDestination(IOAddress("192.0.2.2"));
++ EXPECT_EQ(IOAddress("192.0.2.2"), msg->getDestination());
++
++ std::vector<uint8_t> payload{ 0x55, 0x66, 0x77, 0x88, 0x99 };
++ msg->setPayload(payload.data(), payload.size());
++ EXPECT_EQ(payload, msg->getPayload());
++}
++
++// Verifies that a valid ECHO REPLY message can be unpacked.
++TEST(ICMPMsgTest, unpackValidEchoReply) {
++ // Create wire data for a valid ECHO REPLY.
++ std::string echo_reply =
++ "45:00:00:30:73:8a:00:00:40:01:a0:ff:b2:10:01:19:b2:10:01:0a:"
++ "00:00:33:11:55:66:77:88:"
++ "00:00:00:00:00:00:00:00:"
++ "00:00:00:00:00:00:00:00:"
++ "00:00:00:00";
++
++ std::vector<uint8_t> wire_data;
++ ASSERT_NO_THROW_LOG(util::str::decodeSeparatedHexString(echo_reply, ":", wire_data));
++
++ // Unpack the wire data.
++ ICMPMsgPtr msg;
++ ASSERT_NO_THROW_LOG(msg = ICMPMsg::unpack(wire_data.data(), wire_data.size()));
++ ASSERT_TRUE(msg);
++
++ // Verify the reply contents.
++ EXPECT_EQ(ICMPMsg::ECHO_REPLY, msg->getType());
++ EXPECT_EQ(0, msg->getCode());
++ EXPECT_EQ(0x3311, msg->getChecksum());
++ EXPECT_EQ(0x5566, msg->getId());
++ EXPECT_EQ(0x7788, msg->getSequence());
++ EXPECT_EQ(IOAddress("178.16.1.25"), msg->getSource());
++ EXPECT_EQ(IOAddress("178.16.1.10"), msg->getDestination());
++
++ std::vector<uint8_t> payload{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++ EXPECT_EQ(payload, msg->getPayload());
++}
++
++// Verifies that a valid DESTINATION UNREACHABLE message can be unpacked.
++TEST(ICMPMsgTest, unpackValidUnreachable) {
++ // Valid destination unreachable message. Payload is the original
++ // ECHO request.
++ std::string unreachable =
++ "45:c0:00:4c:31:b3:00:00:40:01:e2:09:b2:10:01:0a:b2:10:01:0a:"
++ "03:01:fc:fe:00:00:00:00:"
++ "45:00:00:30:e3:e2:40:00:40:01:f0:5c:"
++ "b2:10:01:0a:b2:10:01:63:08:00:2b:11:"
++ "55:66:77:88:00:00:00:00:00:00:00:00:"
++ "00:00:00:00:00:00:00:00:00:00:00:00";
++
++ // Create the wire data.
++ std::vector<uint8_t> wire_data;
++ ASSERT_NO_THROW_LOG(util::str::decodeSeparatedHexString(unreachable, ":", wire_data));
++
++ // Unpack the outer message.
++ ICMPMsgPtr msg;
++ ASSERT_NO_THROW_LOG(msg = ICMPMsg::unpack(wire_data.data(), wire_data.size()));
++ ASSERT_TRUE(msg);
++
++ // Verify its contents.
++ EXPECT_EQ(ICMPMsg::TARGET_UNREACHABLE, msg->getType());
++ EXPECT_EQ(1, msg->getCode());
++ EXPECT_EQ(0xfcfe, msg->getChecksum());
++ EXPECT_EQ(0, msg->getId());
++ EXPECT_EQ(0, msg->getSequence());
++ EXPECT_EQ(IOAddress("178.16.1.10"), msg->getSource());
++ EXPECT_EQ(IOAddress("178.16.1.10"), msg->getDestination());
++
++ // Now unpack the original ECHO from the outer message payload.
++ std::vector<uint8_t> payload(wire_data.begin() + 28, wire_data.end());
++ EXPECT_EQ(payload, msg->getPayload());
++
++ ICMPMsgPtr payload_msg;
++ ASSERT_NO_THROW_LOG(payload_msg = ICMPMsg::unpack(payload.data(), payload.size()));
++ ASSERT_TRUE(payload_msg);
++
++ // Verify the original ECHO contents.
++ EXPECT_EQ(ICMPMsg::ECHO_REQUEST, payload_msg->getType());
++ EXPECT_EQ(0, payload_msg->getCode());
++ EXPECT_EQ(0x2b11, payload_msg->getChecksum());
++ EXPECT_EQ(0x5566, payload_msg->getId());
++ EXPECT_EQ(0x7788, payload_msg->getSequence());
++ EXPECT_EQ(IOAddress("178.16.1.10"), payload_msg->getSource());
++ EXPECT_EQ(IOAddress("178.16.1.99"), payload_msg->getDestination());
++}
++
++// Verifies that malformed packets are detected.
++TEST(ICMPMsgTest, unpackInValidPackets) {
++ // Contains a test scenario.
++ struct Scenario {
++ // Wire data to submit to unpack.
++ std::string wire_data_;
++ // Expected exception message.
++ std::string error_msg_;
++ };
++
++ // List of scenarios to test.
++ std::list<Scenario> scenarios = {
++ {
++ // Truncated IP header
++ "45:c0:00:4c:31:b3:00:00:40:01:e2:09:b2",
++ "ICMPMsg::unpack - truncated ip header, length: 13"
++ },
++ {
++ // Truncated packet
++ "45:c0:00:4c:31:b3:00:00:40:01:e2:09:b2:10:01:0a:b2:10:01:0a:"
++ "03:01:fc:fe:00:00:00:00:"
++ "45:00:00:30:e3:e2:40:00:40:01:f0:5c",
++ "ICMPMsg::truncated packet? length: 40, hlen: 20"
++ }
++
++ };
++
++ // Iterate over scenarios.
++ for (auto const& scenario : scenarios) {
++ // Create the wire data.
++ std::vector<uint8_t> wire_data;
++ ASSERT_NO_THROW_LOG(util::str::decodeSeparatedHexString(scenario.wire_data_, ":", wire_data));
++ ASSERT_THROW_MSG(ICMPMsg::unpack(wire_data.data(), wire_data.size()), BadValue, scenario.error_msg_);
++ }
++}
++
++/// @todo Add round-trip tests that exercise packing.
++
++} // end of anonymous namespace
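The checksum values asserted above (0x3311, 0xfcfe, 0x2b11) are standard RFC 1071 Internet checksums over the ICMP portion of the packet. A generic sketch of that computation follows, assuming input in network byte order; this is illustrative and not necessarily ICMPMsg's exact implementation.

```cpp
#include <cstddef>
#include <cstdint>

// RFC 1071 Internet checksum over a buffer in network byte order, the
// algorithm behind the icmp_cksum values asserted above.
uint16_t internetChecksum(const uint8_t* data, size_t len) {
    uint32_t sum = 0;

    // Sum the buffer as 16-bit big-endian words.
    while (len > 1) {
        sum += (static_cast<uint32_t>(data[0]) << 8) | data[1];
        data += 2;
        len -= 2;
    }

    // A trailing odd byte is padded with zero.
    if (len == 1) {
        sum += static_cast<uint32_t>(data[0]) << 8;
    }

    // Fold the carries back in and take the one's complement.
    while (sum >> 16) {
        sum = (sum & 0xFFFF) + (sum >> 16);
    }

    return (static_cast<uint16_t>(~sum));
}
```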
+diff --git a/src/hooks/dhcp/ping_check/tests/icmp_socket_unittests.cc b/src/hooks/dhcp/ping_check/tests/icmp_socket_unittests.cc
+new file mode 100644
+index 0000000000..2394b360ca
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/icmp_socket_unittests.cc
+@@ -0,0 +1,380 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/// @brief Test of ICMPSocket
++///
++/// Tests the functionality of an ICMPSocket by working through an open-send-
++/// receive-close sequence and checking that the asynchronous notifications
++/// work.
++
++#include <config.h>
++#include <asiolink/asio_wrapper.h>
++#include <asiolink/interval_timer.h>
++#include <asiolink/io_address.h>
++#include <asiolink/io_service.h>
++#include <icmp_socket.h>
++#include <icmp_msg.h>
++#include <exceptions/exceptions.h>
++#include <util/buffer.h>
++#include <testutils/gtest_utils.h>
++
++#include <boost/shared_ptr.hpp>
++#include <boost/enable_shared_from_this.hpp>
++#include <boost/date_time/posix_time/posix_time.hpp>
++#include <gtest/gtest.h>
++
++#include <string>
++#include <arpa/inet.h>
++#include <netinet/in.h>
++#include <sys/types.h>
++#include <sys/socket.h>
++#include <algorithm>
++#include <cstdlib>
++#include <cstddef>
++#include <list>
++#include <vector>
++#include <unistd.h>
++
++#include <netinet/ip.h>
++#include <netinet/ip_icmp.h>
++
++using namespace isc;
++using namespace boost::asio;
++using namespace boost::posix_time;
++using namespace isc::asiolink;
++using namespace isc::ping_check;
++using namespace isc::util;
++using namespace std;
++
++namespace ph = std::placeholders;
++
++namespace {
++
++/// @brief Test timeout (ms).
++const long TEST_TIMEOUT = 10000;
++
++/// @brief Type of the function implementing a callback invoked by the
++/// @c SocketCallback functor.
++typedef std::function<void(boost::system::error_code ec, size_t length)>
++ SocketCallbackFunction;
++
++/// @brief Callback class for socket IO operations
++///
++/// An instance of this object is passed to the asynchronous I/O functions
++/// and the operator() method is called when an asynchronous I/O
++/// completes. The arguments to the completion callback are stored for later
++/// retrieval.
++class SocketCallback {
++public:
++
++ /// @brief Structure that houses callback invocation data.
++ struct PrivateData {
++ PrivateData() :
++ error_code_(), length_(0), called_(false), name_("")
++ {}
++
++ boost::system::error_code error_code_; ///< Completion error code
++ size_t length_; ///< Number of bytes transferred
++ bool called_; ///< Set true when callback called
++ std::string name_; ///< Which of the objects this is
++ };
++
++ /// @brief Constructor
++ ///
++ /// Constructs the object. It also creates the data member pointed to by
++ /// a shared pointer. When used as a callback object, this is copied as it
++ /// is passed into the asynchronous function. This means that there are two
++ /// objects and inspecting the one we passed in does not tell us anything.
++ ///
++ /// Therefore we use a boost::shared_ptr. When the object is copied, the
++ /// shared pointer is copied, which leaves both objects pointing to the same
++ /// data.
++ ///
++ /// @param which Which of the two callback objects this is
++ explicit SocketCallback(const std::string& which) : data_(new PrivateData())
++ {
++ setName(which);
++ }
++
++ /// @brief Destructor
++ ///
++ /// No code needed, destroying the shared pointer destroys the private data.
++ virtual ~SocketCallback()
++ {}
++
++ /// @brief Clears the current values of invocation data members.
++ void clear() {
++ setCode(0);
++ setLength(0);
++ setCalled(false);
++ }
++
++ /// @brief Callback Function
++ ///
++ /// Called when an asynchronous I/O completes, this stores the
++ /// completion error code and the number of bytes transferred.
++ ///
++ /// @param ec I/O completion error code passed to callback function.
++ /// @param length Number of bytes transferred
++ virtual void operator()(boost::system::error_code ec, size_t length = 0) {
++ data_->error_code_ = ec;
++ setLength(length);
++ setCalled(true);
++ }
++
++ /// @brief Get I/O completion error code
++ int getCode() {
++ return (data_->error_code_.value());
++ }
++
++ /// @brief Set I/O completion code
++ ///
++ /// @param code New value of completion code
++ void setCode(int code) {
++ data_->error_code_ = boost::system::error_code(code, boost::system::error_code().category());
++ }
++
++ /// @brief Get number of bytes transferred in I/O
++ size_t getLength() const {
++ return (data_->length_);
++ }
++
++ /// @brief Set number of bytes transferred in I/O
++ ///
++ /// @param length New value of length parameter
++ void setLength(size_t length) {
++ data_->length_ = length;
++ }
++
++ /// @brief Get flag to say when callback was called
++ bool getCalled() const {
++ return (data_->called_);
++ }
++
++ /// @brief Set flag to say when callback was called
++ ///
++ /// @param called New value of called parameter
++ void setCalled(bool called) {
++ data_->called_ = called;
++ }
++
++ /// @brief Return instance of callback name
++ std::string getName() const {
++ return (data_->name_);
++ }
++
++ /// @brief Set callback name
++ ///
++ /// @param name New value of the callback name
++ void setName(const std::string& name) {
++ data_->name_ = name;
++ }
++
++private:
++ boost::shared_ptr<PrivateData> data_; ///< Pointer to private data
++};
++
++/// @brief Socket and pointer types for sending and receiving ICMP echos.
++typedef ICMPSocket<SocketCallback> PingSocket;
++typedef boost::shared_ptr<PingSocket> PingSocketPtr;
++
++/// @brief Simple test fixture for testing ICMPSocket.
++class ICMPSocketTest : public ::testing::Test {
++public:
++ /// @brief Constructor.
++ ICMPSocketTest()
++ : io_service_(new IOService()), test_timer_(io_service_) {
++ test_timer_.setup(std::bind(&ICMPSocketTest::timeoutHandler, this, true),
++ TEST_TIMEOUT, IntervalTimer::ONE_SHOT);
++ }
++
++ /// @brief Destructor.
++ virtual ~ICMPSocketTest() {
++ test_timer_.cancel();
++ io_service_->stopAndPoll();
++ }
++
++ /// @brief Indicates if current user is not root
++ ///
++    /// @return True if neither the uid nor the effective
++ /// uid is root.
++ static bool notRoot() {
++ return (getuid() != 0 && geteuid() != 0);
++ }
++
++    /// @brief Callback function invoked upon test timeout.
++ ///
++ /// It stops the IO service and reports test timeout.
++ ///
++ /// @param fail_on_timeout Specifies if test failure should be reported.
++ void timeoutHandler(const bool fail_on_timeout) {
++ if (fail_on_timeout) {
++ ADD_FAILURE() << "Timeout occurred while running the test!";
++ }
++ io_service_->stop();
++ }
++
++    /// @brief IOService instance used to drive the tests.
++ IOServicePtr io_service_;
++
++ /// @brief Asynchronous timer service to detect timeouts.
++ IntervalTimer test_timer_;
++
++ /// @brief Returns pointer to the first byte of the input buffer.
++ ///
++ /// @throw InvalidOperation if called when the buffer is empty.
++ uint8_t* getInputBufData() {
++ if (input_buf_.empty()) {
++ isc_throw(InvalidOperation, "TcpConnection::getInputBufData() - cannot access empty buffer");
++ }
++
++ return (input_buf_.data());
++ }
++
++ /// @brief Returns input buffer size.
++ size_t getInputBufSize() const {
++ return (input_buf_.size());
++ }
++
++ /// @brief Set the capacity of the input buffer
++ ///
++ /// @param buf_size maximum number of bytes allowed in the buffer
++ void resizeInputBuf(size_t buf_size) {
++ input_buf_.resize(buf_size);
++ }
++
++ /// @brief Buffer for a single socket read.
++ std::vector<uint8_t> input_buf_;
++};
++
++
++// Verifies that an ICMP socket can be opened and closed.
++TEST_F(ICMPSocketTest, openClose) {
++ SKIP_IF(notRoot());
++
++ // For open the endpoint is only used to determine protocol, the address is irrelevant.
++ ICMPEndpoint ping_to_endpoint(IOAddress::IPV4_ZERO_ADDRESS());
++
++ PingSocket socket(io_service_);
++ SocketCallback socket_cb("open");
++
++ // Verify the socket is closed.
++ ASSERT_FALSE(socket.isOpen());
++
++ // Open the socket.
++ ASSERT_NO_THROW_LOG(socket.open(&ping_to_endpoint, socket_cb));
++
++ // Verify the socket is open.
++ ASSERT_TRUE(socket.isOpen());
++ // Since open() is synchronous the callback should not have been invoked.
++ ASSERT_FALSE(socket_cb.getCalled());
++
++    // Opening an already open socket should be harmless.
++ ASSERT_NO_THROW_LOG(socket.open(&ping_to_endpoint, socket_cb));
++ ASSERT_TRUE(socket.isOpen());
++
++ // Close the socket.
++ ASSERT_NO_THROW_LOG(socket.close());
++ ASSERT_FALSE(socket.isOpen());
++
++ // Closing a closed socket should be harmless.
++ ASSERT_NO_THROW_LOG(socket.close());
++ ASSERT_FALSE(socket.isOpen());
++}
++
++// Verifies that an ICMP socket can send and receive ICMP messages.
++TEST_F(ICMPSocketTest, sendReceive) {
++ SKIP_IF(notRoot());
++
++ PingSocket socket(io_service_);
++
++    // For open(), the endpoint is only used to determine the protocol; the address is irrelevant.
++ ICMPEndpoint endpoint(IOAddress::IPV4_ZERO_ADDRESS());
++
++ // Open the socket.
++ SocketCallback open_cb("open");
++ ASSERT_NO_THROW_LOG(socket.open(&endpoint, open_cb));
++
++ // Build a ping.
++ struct icmp echo;
++ memset(&echo, 0, sizeof(echo));
++ echo.icmp_type = ICMPMsg::ECHO_REQUEST;
++ echo.icmp_id = htons(0x1122);
++ echo.icmp_seq = htons(0x3344);
++ echo.icmp_cksum = htons(~(socket.calcChecksum((const uint8_t*)&echo, sizeof(echo))));
++
++ // Send it to the loopback.
++ IOAddress ping_to_addr("127.0.0.1");
++ SocketCallback send_cb("send");
++ ICMPEndpoint ping_to_endpoint(ping_to_addr);
++ ASSERT_NO_THROW_LOG(socket.asyncSend(&echo, sizeof(echo), &ping_to_endpoint, send_cb));
++
++ // Run the send handler.
++ io_service_->runOne();
++
++ // Callback should have been invoked without an error code.
++ ASSERT_TRUE(send_cb.getCalled());
++ ASSERT_EQ(0, send_cb.getCode());
++ // Verify we sent the whole message.
++ ASSERT_EQ(send_cb.getLength(), sizeof(echo));
++
++ // Call asyncReceive until we get our reply.
++ resizeInputBuf(1500);
++ ICMPEndpoint reply_endpoint;
++ SocketCallback receive_cb("receive");
++
++    // We need two receives when pinging loopback, but only one with a real address.
++ size_t pass = 0;
++ do {
++ receive_cb.clear();
++ memset(getInputBufData(), 0x00, getInputBufSize());
++ ASSERT_NO_THROW(socket.asyncReceive(static_cast<void*>(getInputBufData()),
++ getInputBufSize(), 0, &reply_endpoint, receive_cb));
++
++ // Run the read handler.
++ io_service_->runOne();
++ } while (++pass < 2 && (!receive_cb.getCalled()));
++
++ // Callback should have been invoked without an error code.
++ ASSERT_TRUE(receive_cb.getCalled());
++ ASSERT_EQ(0, receive_cb.getCode());
++
++ // Verify the reply came from the target address.
++ EXPECT_EQ(ping_to_addr.toText(), reply_endpoint.getAddress().toText());
++
++ // Verify we got at least enough data for an IP header.
++ size_t bytes_received = receive_cb.getLength();
++ ASSERT_GE(bytes_received, sizeof(struct ip));
++
++    // Build the reply from the received data.
++ uint8_t* icbuf = getInputBufData();
++
++ // Find the IP header length...
++ struct ip* ip_header = (struct ip*)(icbuf);
++ auto hlen = (ip_header->ip_hl << 2);
++
++ // Make sure we received enough data.
++ ASSERT_TRUE(bytes_received >= (hlen + sizeof(struct icmp)))
++ << "received packet too short to be ICMP";
++
++ // Verify the message type.
++ struct icmp* reply = (struct icmp*)(icbuf + hlen);
++ auto msg_type = reply->icmp_type;
++ ASSERT_EQ(ICMPMsg::ECHO_REPLY, msg_type);
++
++ // Verify the id and sequence values.
++ auto id = ntohs(reply->icmp_hun.ih_idseq.icd_id);
++ EXPECT_EQ(0x1122, id);
++
++ auto sequence = ntohs(reply->icmp_hun.ih_idseq.icd_seq);
++ EXPECT_EQ(0x3344, sequence);
++
++ // Close the socket.
++ ASSERT_NO_THROW_LOG(socket.close());
++ ASSERT_FALSE(socket.isOpen());
++}
++
++} // end of anonymous namespace
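The open/send/receive sequence tested above maps onto a raw ICMP socket. For orientation, a minimal plain-POSIX sketch of the same sequence; like the tests, it needs root (or CAP_NET_RAW), and the checksum fill and the receive path are omitted for brevity.

```cpp
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cstdio>
#include <cstring>

// Plain-POSIX equivalent of the open/send sequence exercised above.
int main() {
    int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
    if (fd < 0) {
        perror("socket");
        return (1);
    }

    // Build a minimal ECHO REQUEST header.
    struct icmp echo;
    memset(&echo, 0, sizeof(echo));
    echo.icmp_type = ICMP_ECHO;
    echo.icmp_id = htons(0x1122);
    echo.icmp_seq = htons(0x3344);
    // A real sender must set echo.icmp_cksum to the RFC 1071 checksum.

    struct sockaddr_in dest;
    memset(&dest, 0, sizeof(dest));
    dest.sin_family = AF_INET;
    inet_pton(AF_INET, "127.0.0.1", &dest.sin_addr);

    if (sendto(fd, &echo, sizeof(echo), 0,
               reinterpret_cast<struct sockaddr*>(&dest), sizeof(dest)) < 0) {
        perror("sendto");
    }

    close(fd);
    return (0);
}
```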
+diff --git a/src/hooks/dhcp/ping_check/tests/meson.build b/src/hooks/dhcp/ping_check/tests/meson.build
+new file mode 100644
+index 0000000000..8beca7813e
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/meson.build
+@@ -0,0 +1,21 @@
++if not TESTS_OPT.enabled()
++ subdir_done()
++endif
++
++dhcp_ping_check_tests = executable(
++ 'dhcp-ping-check-tests',
++ 'config_cache_unittests.cc',
++ 'icmp_endpoint_unittests.cc',
++ 'icmp_msg_unittests.cc',
++ 'icmp_socket_unittests.cc',
++ 'ping_channel_unittests.cc',
++ 'ping_check_config_unittests.cc',
++ 'ping_check_mgr_unittests.cc',
++ 'ping_context_store_unittests.cc',
++ 'ping_context_unittests.cc',
++ 'run_unittests.cc',
++ dependencies: [CRYPTO_DEP, GTEST_DEP],
++ include_directories: [include_directories('.'), include_directories('..')] + INCLUDES,
++ link_with: [dhcp_ping_check_archive] + LIBS_BUILT_SO_FAR,
++)
++test('dhcp-ping-check-tests', dhcp_ping_check_tests, protocol: 'gtest')
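Assuming a conventional Meson build tree with the tests option enabled (whatever option backs `TESTS_OPT` in this tree), the suite registered above can then be run in isolation with something like `meson test -C <builddir> dhcp-ping-check-tests`.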
+diff --git a/src/hooks/dhcp/ping_check/tests/ping_channel_unittests.cc b/src/hooks/dhcp/ping_check/tests/ping_channel_unittests.cc
+new file mode 100644
+index 0000000000..4c57a2e500
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/ping_channel_unittests.cc
+@@ -0,0 +1,821 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/// @file This file contains tests which exercise the PingChannel class.
++
++#include <config.h>
++
++#include <ping_channel.h>
++#include <ping_test_utils.h>
++#include <asiolink/interval_timer.h>
++#include <asiolink/io_service_thread_pool.h>
++#include <dhcp/iface_mgr.h>
++#include <util/multi_threading_mgr.h>
++#include <testutils/multi_threading_utils.h>
++#include <testutils/gtest_utils.h>
++#include <gtest/gtest.h>
++
++#include <boost/multi_index/indexed_by.hpp>
++#include <boost/multi_index/member.hpp>
++#include <boost/multi_index/mem_fun.hpp>
++#include <boost/multi_index/hashed_index.hpp>
++#include <boost/multi_index/ordered_index.hpp>
++#include <boost/multi_index_container.hpp>
++#include <boost/multi_index/composite_key.hpp>
++
++#include <queue>
++#include <list>
++#include <thread>
++#include <mutex>
++
++using namespace std;
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::dhcp;
++using namespace isc::ping_check;
++using namespace isc::util;
++using namespace isc::test;
++using namespace boost::asio::error;
++
++namespace ph = std::placeholders;
++
++namespace {
++
++/// @brief Tag for index by address.
++struct AddressIdSequenceIndexTag { };
++
++/// @brief A multi-index container holding ICMPMsg pointers (ICMPMsgPtr).
++///
++/// The message may be accessed using the following index(es):
++/// - using an IPv4 address, id, and sequence number
++typedef boost::multi_index_container<
++ // It holds pointers to ICMPMsg objects.
++ ICMPMsgPtr,
++ boost::multi_index::indexed_by<
++        // Specification of the first index starts here.
++        // This index sorts replies by source IPv4 address (represented as
++        // an IOAddress object), id, and sequence number.
++ boost::multi_index::ordered_unique<
++ boost::multi_index::tag<AddressIdSequenceIndexTag>,
++ boost::multi_index::composite_key<
++ ICMPMsg,
++                // The reply's source address.
++ boost::multi_index::const_mem_fun<ICMPMsg, const IOAddress&,
++ &ICMPMsg::getSource>,
++ boost::multi_index::const_mem_fun<ICMPMsg, uint16_t,
++ &ICMPMsg::getId>,
++ boost::multi_index::const_mem_fun<ICMPMsg, uint16_t,
++ &ICMPMsg::getSequence>
++ >
++ >
++ >
++> ReplyContainer;
++
++/// @brief Single-threaded test fixture for exercising a PingChannel.
++class PingChannelTest : public IOServiceTest {
++public:
++ /// @brief Constructor
++ PingChannelTest() : mutex_(new mutex()), stopped_(false) {
++ MultiThreadingMgr::instance().setMode(false);
++    }
++
++ /// @brief Destructor
++ virtual ~PingChannelTest() {
++ stopped_ = true;
++ if (channel_) {
++ channel_->close();
++ }
++ if (ios_pool_) {
++ ios_pool_->getIOService()->stopAndPoll();
++ ios_pool_->stop();
++ }
++ ios_pool_.reset();
++ test_timer_.cancel();
++ test_io_service_->stopAndPoll();
++ MultiThreadingMgr::instance().setMode(false);
++ }
++
++ /// @brief Called prior to test destruction.
++    /// Ensure we stop the pool in the event a test failed in an unexpected
++ /// manner that left it running. Otherwise we can get false TSAN complaints.
++ virtual void TearDown() {
++ // Stop the thread pool (if one).
++ if (ios_pool_) {
++ ios_pool_->stop();
++ }
++ }
++
++ /// @brief Initializes the IOServiceThreadPool
++ ///
++ /// @param num_threads number of threads in the pool
++ /// @param defer_start enables deferred start of the pool's IOService
++ void initThreadPool(size_t num_threads = 1, bool defer_start = false) {
++ ios_pool_.reset(new IoServiceThreadPool(IOServicePtr(), num_threads, defer_start));
++    }
++
++ /// @brief Callback to invoke to fetch the next ping target.
++ ///
++ /// Fetches the next entry from the front of the send queue (if one). Checks for
++ /// test completion before returning.
++ ///
++ /// @param[out] next upon return it will contain the next target address. Contents are
++ /// only meaningful if the function returns true.
++ ///
++    /// @return True if another target address exists, false otherwise.
++ virtual bool nextToSend(IOAddress& next) {
++ if (stopped_) {
++ return (false);
++ }
++ MultiThreadingLock lock(*mutex_);
++ bool use_next = true;
++ if (send_queue_.empty()) {
++ use_next = false;
++ } else {
++ next = send_queue_.front();
++ }
++
++ stopIfDone();
++ return (use_next);
++ }
++
++ /// @brief Callback to invoke when an ECHO write has completed.
++ ///
++ /// Ensures the completed echo matches the front of the send queue and then
++ /// pops it from the front of the queue. Checks for test completion before
++ /// returning.
++ ///
++    /// @param echo ICMP echo message that was sent
++    /// @param send_failed True if the send operation failed, false otherwise.
++ virtual void echoSent(ICMPMsgPtr& echo, bool send_failed) {
++ if (stopped_) {
++ return;
++ }
++ MultiThreadingLock lock(*mutex_);
++ ASSERT_EQ(echo->getDestination(), send_queue_.front()) << "send queue mismatch";
++ send_queue_.pop();
++ if (!send_failed) {
++ echos_sent_.push_back(echo);
++ }
++ stopIfDone();
++ }
++
++ /// @brief Callback to invoke when an ICMP reply has been received.
++ ///
++    /// Stores the reply if it is an ECHO REPLY message. We check the type
++    /// to avoid storing our outbound ECHO REQUESTs when testing with the
++    /// loopback address. Checks for test completion before returning.
++ ///
++ /// @param reply ICMP message that was received
++ virtual void replyReceived(ICMPMsgPtr& reply) {
++ if (stopped_) {
++ return;
++ }
++ MultiThreadingLock lock(*mutex_);
++ if (reply->getType() == ICMPMsg::ECHO_REPLY) {
++            // If loopback routing is enabled, insert the original destination address
++ // as the reply's source address.
++ if (channel_->route_loopback_) {
++ IOAddress address = channel_->loopback_map_.find(reply->getSequence());
++ if (address != IOAddress::IPV4_ZERO_ADDRESS()) {
++ reply->setSource(address);
++ }
++ }
++
++ replies_received_.push_back(reply);
++ storeReply(reply);
++ }
++
++ stopIfDone();
++ }
++
++ /// @brief Tests that a channel can send and receive, reliably
++ /// in either single or multi-threaded mode.
++ ///
++ /// The test queues the given number of requests, beginning with
++ /// address 127.0.0.1 and incrementing the address through the number
++ /// of targets. It then opens the channel and initiates reading and
++    /// writing, running until the test completes or times out.
++ /// It expects to receive a reply for every request.
++ ///
++ /// @param num_threads number of threads in the thread pool. If 0,
++ /// the channel will be single-threaded, sharing the test's IOService,
++ /// otherwise the channel will be driven by an IOServiceThreadPool with
++ /// the given number of threads.
++    /// @param num_targets number of target IP addresses to ping. Must
++    /// be less than 253.
++ /// @param set_error_trigger optional function that sets the error trigger
++ /// condition.
++ void sendReceiveTest(size_t num_threads, size_t num_targets = 25,
++ const std::function<void()>& set_error_trigger = [](){});
++
++ /// @brief Tests for graceful behavior when a channel encounters a read
++ /// or write error, in either single or multi-threaded mode.
++ ///
++ /// The test runs in two passes. The first pass sends and receives until
++ /// the error trigger occurs. The error should induce a graceful cessation
++    /// of operations. After verifying the expected state of affairs, the second pass
++ /// is begun by re-opening the channel and resuming operations until the test
++ /// completes or times out.
++ ///
++ /// @param set_error_trigger function that sets the error trigger condition
++ /// @param num_threads number of threads in the thread pool. If 0,
++ /// the channel will be single-threaded, sharing the test's IOService,
++ /// otherwise the channel will be driven by an IOServiceThreadPool with
++ /// the given number of threads.
++    /// @param num_targets number of target IP addresses to ping. Must
++    /// be less than 253.
++ void ioErrorTest(const std::function<void()>& set_error_trigger,
++ size_t num_threads, size_t num_targets = 10);
++
++ /// @brief Adds a reply to reply store.
++ ///
++ /// Fails if a reply for the same address, id, and sequence number is already
++ /// in the store. Must be used in a thread-safe context.
++ ///
++ /// @param reply reply to store
++ void storeReply(ICMPMsgPtr& reply) {
++ auto retpair = replies_map_.insert(reply);
++ ASSERT_TRUE(retpair.second)
++ << "failed to insert reply for: " << reply->getSource()
++ << ", id: " << reply->getId() << ", sequence: " << reply->getSequence();
++ }
++
++ /// @brief Fetches a reply from the store that matches a given ECHO
++ ///
++ /// Must be used in a thread-safe context.
++ ///
++ /// @param echo echo for whom a reply is sought
++ ///
++ /// @return The matching reply if found, otherwise an empty ICMPMsgPtr.
++ ICMPMsgPtr findReply(const ICMPMsgPtr& echo) {
++ auto const& index = replies_map_.get<AddressIdSequenceIndexTag>();
++ auto key = boost::make_tuple(echo->getDestination(), echo->getId(), echo->getSequence());
++ auto iter = index.find(key);
++ return (iter == index.end() ? ICMPMsgPtr() : *iter);
++ }
++
++ /// @brief Channel instance.
++ TestablePingChannelPtr channel_;
++
++ /// @brief IoServiceThreadPool instance
++ IoServiceThreadPoolPtr ios_pool_;
++
++ /// @brief The mutex used to protect internal state.
++ const boost::scoped_ptr<std::mutex> mutex_;
++
++ /// @brief Queue of IOAddresses for which to send ECHO REQUESTs.
++ std::queue<IOAddress> send_queue_;
++
++ /// @brief List of ECHO REQUESTs that have been successfully sent in the order
++ /// they were sent.
++ std::list<ICMPMsgPtr> echos_sent_;
++
++ /// @brief List of ECHO REPLYs that have been successfully received in the
++ /// order they were received.
++ std::list<ICMPMsgPtr> replies_received_;
++
++ /// @brief Map of ECHO REPLYs received, indexed by source IP, id, and sequence number.
++ ReplyContainer replies_map_;
++
++ /// @brief Flag which indicates that the manager has been stopped.
++ bool stopped_;
++};
++
++void
++PingChannelTest::sendReceiveTest(size_t num_threads, size_t num_targets /* = 25 */,
++ const std::function<void()>& set_error_trigger) {
++ stopped_ = false;
++
++ // Clear state.
++ send_queue_ = {};
++ echos_sent_.clear();
++ replies_received_.clear();
++ replies_map_.clear();
++
++ SKIP_IF(notRoot());
++
++ ASSERT_TRUE(num_targets < 253);
++ auto channel_ios = test_io_service_;
++ if (num_threads) {
++ // Enable MT mode.
++ util::MultiThreadingMgr::instance().setMode(true);
++
++ // Initialize the thread pool to num_threads, defer start.
++ ASSERT_NO_THROW_LOG(initThreadPool(num_threads, true));
++ ASSERT_TRUE(ios_pool_->isStopped());
++ channel_ios = ios_pool_->getIOService();
++ }
++
++ // Create the channel instance with the appropriate io_service.
++ ASSERT_NO_THROW_LOG(channel_.reset(new TestablePingChannel(
++ channel_ios,
++ std::bind(&PingChannelTest::nextToSend, this, ph::_1),
++ std::bind(&PingChannelTest::echoSent, this, ph::_1, ph::_2),
++ std::bind(&PingChannelTest::replyReceived, this, ph::_1)
++ )));
++
++ // Create the callback to check test completion criteria.
++ // It returns true if we have sent out all the echos and received
++ // all the replies.
++ test_done_cb_ = [this]() {
++ return (send_queue_.empty() && (echos_sent_.size() == replies_received_.size()));
++ };
++
++ // Fill the send queue with num_target addresses to ping.
++ IOAddress target("127.0.0.1");
++ for (auto i = 0; i < num_targets; ++i) {
++ send_queue_.push(target);
++ target = IOAddress::increase(target);
++ }
++
++ (set_error_trigger)();
++
++ // Open the channel.
++ ASSERT_NO_THROW_LOG(channel_->open());
++ ASSERT_TRUE(channel_->isOpen());
++
++ if (num_threads) {
++ ios_pool_->run();
++ }
++
++ // Initiate reading and writing.
++ ASSERT_NO_THROW_LOG(channel_->startRead());
++ ASSERT_NO_THROW_LOG(channel_->startSend());
++
++ // Run the main thread's IOService until we complete or timeout.
++ ASSERT_NO_THROW_LOG(runIOService(1000));
++
++ if (ios_pool_) {
++ // Stop the thread pool.
++ ASSERT_NO_THROW_LOG(ios_pool_->stop());
++ ASSERT_TRUE(ios_pool_->isStopped());
++ }
++
++ // Send queue should be empty.
++ EXPECT_TRUE(send_queue_.empty());
++
++ // Should have as many replies as echos.
++ EXPECT_EQ(echos_sent_.size(), replies_received_.size());
++
++ // Should have a reply for every echo.
++ for (auto const& echo : echos_sent_) {
++ ICMPMsgPtr reply = findReply(echo);
++ EXPECT_TRUE(reply) << "no reply found for:" << echo->getDestination()
++ << ", id:" << echo->getId() << ", sequence: " << echo->getSequence();
++ }
++
++ stopped_ = true;
++ if (channel_) {
++ channel_->close();
++ }
++ if (ios_pool_) {
++ ios_pool_->getIOService()->stopAndPoll();
++ ios_pool_->stop();
++ }
++ ios_pool_.reset();
++ test_timer_.cancel();
++ test_io_service_->stopAndPoll();
++ MultiThreadingMgr::instance().setMode(false);
++}
++
++void
++PingChannelTest::ioErrorTest(const std::function<void()>& set_error_trigger,
++ size_t num_threads, size_t num_targets) {
++ ASSERT_TRUE(num_targets < 253);
++ SKIP_IF(notRoot());
++
++ ASSERT_TRUE(replies_received_.empty());
++
++    // If it's an MT test, create the thread pool.
++ auto channel_ios = test_io_service_;
++ if (num_threads) {
++ // Enable MT mode.
++ util::MultiThreadingMgr::instance().setMode(true);
++
++ // Initialize the thread pool to num_threads, defer start.
++ ASSERT_NO_THROW_LOG(initThreadPool(num_threads, true));
++ ASSERT_TRUE(ios_pool_->isStopped());
++ channel_ios = ios_pool_->getIOService();
++ }
++
++ // Set local shutdown called flag to false.
++ bool shutdown_cb_called = false;
++
++ // Create the channel instance with the appropriate io_service.
++ ASSERT_NO_THROW_LOG(channel_.reset(new TestablePingChannel(
++ channel_ios,
++ std::bind(&PingChannelTest::nextToSend, this, ph::_1),
++ std::bind(&PingChannelTest::echoSent, this, ph::_1, ph::_2),
++ std::bind(&PingChannelTest::replyReceived, this, ph::_1),
++ ([this, &shutdown_cb_called]() {
++ shutdown_cb_called = true;
++ test_io_service_->stop();
++ })
++ )));
++
++ // Set the test_done_cb_ to always return false (i.e. test is not
++ // done).
++ test_done_cb_ = []() {
++ return (false);
++ };
++
++ // Fill the send queue with target addresses to ping.
++ IOAddress target("127.0.0.1");
++ for (auto i = 0; i < (num_targets / 2); ++i) {
++ send_queue_.push(target);
++ target = IOAddress::increase(target);
++ }
++
++ // Set the error trigger.
++ (set_error_trigger)();
++
++ // FIRST PASS
++
++ // Open the channel.
++ ASSERT_NO_THROW_LOG(channel_->open());
++ ASSERT_TRUE(channel_->isOpen());
++
++ if (num_threads) {
++ ios_pool_->run();
++ }
++
++ // Initiate reading and writing.
++ ASSERT_NO_THROW_LOG(channel_->startRead());
++ ASSERT_NO_THROW_LOG(channel_->startSend());
++
++ // Run the main thread's IOService until we stop or timeout.
++ ASSERT_NO_THROW_LOG(runIOService(1000));
++
++ // Shutdown callback should have been invoked, the channel should be closed,
++ // but the pool should still be running.
++ ASSERT_TRUE(shutdown_cb_called);
++ ASSERT_FALSE(channel_->isOpen());
++
++ if (ios_pool_) {
++ ASSERT_TRUE(ios_pool_->isRunning());
++
++ // Pause the thread pool.
++ ASSERT_NO_THROW_LOG(ios_pool_->pause());
++ ASSERT_TRUE(ios_pool_->isPaused());
++ }
++
++    // Save how many echos were sent and replies received during the first pass.
++ auto first_pass_echo_count = echos_sent_.size();
++ auto first_pass_reply_count = replies_received_.size();
++
++ // Should have sent some but not all.
++ EXPECT_LE(first_pass_echo_count, num_targets);
++
++ // SECOND PASS
++
++ // Modify the test done callback to check test completion criteria.
++    // It returns true once the send queue is empty and more replies have
++    // arrived than during the first pass.
++ test_done_cb_ = [this, &first_pass_reply_count]() {
++ return (send_queue_.empty() && (replies_received_.size() > first_pass_reply_count));
++ };
++
++ // Fill the send queue with target addresses to ping.
++ for (auto i = 0; i < (num_targets / 2); ++i) {
++ send_queue_.push(target);
++ target = IOAddress::increase(target);
++ }
++
++ // Resume running the thread pool (if one).
++ if (ios_pool_) {
++ ASSERT_NO_THROW_LOG(ios_pool_->run());
++ ASSERT_TRUE(ios_pool_->isRunning());
++ }
++
++    // Resume by reopening the channel and restarting IO operations.
++ ASSERT_NO_THROW_LOG(channel_->open());
++ ASSERT_TRUE(channel_->isOpen());
++ ASSERT_NO_THROW_LOG(channel_->startRead());
++ ASSERT_NO_THROW_LOG(channel_->startSend());
++
++ // Run the main thread's IOService until we complete or timeout.
++ ASSERT_NO_THROW_LOG(runIOService(1000));
++
++ // Stop the thread pool (if one).
++ if (ios_pool_) {
++ ASSERT_NO_THROW_LOG(ios_pool_->stop());
++ ASSERT_TRUE(ios_pool_->isStopped());
++ }
++
++ // Send queue should be empty.
++ EXPECT_TRUE(send_queue_.empty());
++
++ // Should have sent as many echos as we queued.
++ EXPECT_EQ(echos_sent_.size(), num_targets);
++
++    // Should have at least as many replies as the first pass, though likely not all.
++ EXPECT_GE(replies_received_.size(), first_pass_reply_count);
++}
++
++// Verifies PingChannel open and close operations.
++TEST_F(PingChannelTest, openCloseST) {
++ SKIP_IF(notRoot());
++
++ // Create the channel instance.
++ ASSERT_NO_THROW_LOG(channel_.reset(new TestablePingChannel(
++ test_io_service_,
++ std::bind(&PingChannelTest::nextToSend, this, ph::_1),
++ std::bind(&PingChannelTest::echoSent, this, ph::_1, ph::_2),
++ std::bind(&PingChannelTest::replyReceived, this, ph::_1)
++ )));
++
++ ASSERT_TRUE(channel_);
++
++ ASSERT_TRUE(channel_->getSingleThreaded());
++
++ // Verify it is not open.
++ ASSERT_FALSE(channel_->isOpen());
++
++ EXPECT_FALSE(channel_->getWatchSocket());
++ EXPECT_EQ(channel_->getRegisteredWriteFd(), -1);
++ EXPECT_EQ(channel_->getRegisteredReadFd(), -1);
++
++ // Verify that invoking close is harmless.
++ ASSERT_NO_THROW_LOG(channel_->close());
++
++ // Attempt to open the channel.
++ ASSERT_NO_THROW_LOG(channel_->open());
++
++ // PingChannel::open() is synchronous and while it has a callback
++ // it should never be invoked. Run the service to make sure.
++ ASSERT_NO_THROW_LOG(runIOService(1000));
++
++ // Verify the channel is open.
++ ASSERT_TRUE(channel_->isOpen());
++
++ // Verify the WatchSocket was created and that its fd and that of the
++ // PingSocket are both registered with IfaceMgr.
++ ASSERT_TRUE(channel_->getWatchSocket());
++ int registered_write_fd = channel_->getRegisteredWriteFd();
++ EXPECT_EQ(registered_write_fd, channel_->getWatchSocket()->getSelectFd());
++ EXPECT_TRUE(IfaceMgr::instance().isExternalSocket(registered_write_fd));
++ int registered_read_fd = channel_->getRegisteredReadFd();
++ EXPECT_EQ(registered_read_fd, channel_->getPingSocket()->getNative());
++ EXPECT_TRUE(IfaceMgr::instance().isExternalSocket(registered_read_fd));
++
++ // A subsequent open should be harmless.
++ ASSERT_NO_THROW_LOG(channel_->open());
++
++ // Closing the socket should work.
++ ASSERT_NO_THROW_LOG(channel_->close());
++
++ // Verify watch socket is gone, registered fds are reset, and prior
++ // registered fds are no longer registered.
++ EXPECT_FALSE(channel_->getWatchSocket());
++ EXPECT_EQ(channel_->getRegisteredWriteFd(), -1);
++ EXPECT_FALSE(IfaceMgr::instance().isExternalSocket(registered_write_fd));
++ EXPECT_EQ(channel_->getRegisteredReadFd(), -1);
++ EXPECT_FALSE(IfaceMgr::instance().isExternalSocket(registered_read_fd));
++
++ // Verify it is not open.
++ ASSERT_FALSE(channel_->isOpen());
++}
++
++// Verifies PingChannel open and close operations.
++TEST_F(PingChannelTest, openCloseMT) {
++ SKIP_IF(notRoot());
++ MultiThreadingTest mt;
++
++ // Create the channel instance.
++ ASSERT_NO_THROW_LOG(channel_.reset(new TestablePingChannel(
++ test_io_service_,
++ std::bind(&PingChannelTest::nextToSend, this, ph::_1),
++ std::bind(&PingChannelTest::echoSent, this, ph::_1, ph::_2),
++ std::bind(&PingChannelTest::replyReceived, this, ph::_1)
++ )));
++
++ ASSERT_TRUE(channel_);
++
++ ASSERT_FALSE(channel_->getSingleThreaded());
++
++ // Verify it is not open.
++ ASSERT_FALSE(channel_->isOpen());
++
++ // Verify that invoking close is harmless.
++ ASSERT_NO_THROW_LOG(channel_->close());
++
++ // Attempt to open the channel.
++ ASSERT_NO_THROW_LOG(channel_->open());
++
++ // PingChannel::open() is synchronous and while it has a callback
++ // it should never be invoked. Run the service to make sure.
++ ASSERT_NO_THROW_LOG(runIOService(1000));
++
++ // Verify the channel is open.
++ ASSERT_TRUE(channel_->isOpen());
++
++ // Verify that single-threaded members are not set.
++ EXPECT_FALSE(channel_->getWatchSocket());
++ EXPECT_EQ(channel_->getRegisteredWriteFd(), -1);
++ EXPECT_EQ(channel_->getRegisteredReadFd(), -1);
++
++ // A subsequent open should be harmless.
++ ASSERT_NO_THROW_LOG(channel_->open());
++
++ // Closing the socket should work.
++ ASSERT_NO_THROW_LOG(channel_->close());
++
++ // Verify it is not open.
++ ASSERT_FALSE(channel_->isOpen());
++}
++
++// Verifies that a PingChannel can sustain sending requests and receiving
++// replies when driven by a single-threaded IOService.
++TEST_F(PingChannelTest, sendReceiveST) {
++ sendReceiveTest(0);
++}
++
++// Verifies that a PingChannel can sustain sending requests and receiving
++// replies when driven by a multi-threaded IOServiceThreadPool with 3 threads.
++TEST_F(PingChannelTest, sendReceiveMT) {
++ // Use a thread pool with 3 threads.
++ sendReceiveTest(3);
++}
++
++// Verifies that an exception thrown from asyncRead triggers graceful channel
++// shutdown and that operations can be resumed with a single-threaded channel.
++TEST_F(PingChannelTest, readExceptionErrorST) {
++ ioErrorTest(
++ [this]() {
++ channel_->throw_on_read_number_ = 5;
++ }, 0);
++}
++
++// Verifies that an exception thrown from asyncRead triggers graceful channel
++// shutdown and that operations can be resumed with a multi-threaded channel.
++TEST_F(PingChannelTest, readExceptionErrorMT) {
++ // Use a thread pool with 3 threads.
++ ioErrorTest(
++ [this]() {
++ channel_->throw_on_read_number_ = 5;
++ }, 3, 20);
++}
++
++// Verifies that a fatal error code passed into socketReadCallback triggers graceful channel
++// shutdown and that operations can be resumed with a single-threaded channel.
++TEST_F(PingChannelTest, readFatalErrorST) {
++ ioErrorTest(
++ [this]() {
++ channel_->ec_on_read_number_ = 3;
++ // See boost/asio/error.hpp for error codes
++ channel_->read_error_ec_ = make_error_code(fault);
++ }, 0);
++}
++
++// Verifies that a fatal error code passed into socketReadCallback triggers graceful channel
++// shutdown and that operations can be resumed with a multi-threaded channel.
++TEST_F(PingChannelTest, readFatalErrorMT) {
++ ioErrorTest(
++ [this]() {
++ channel_->ec_on_read_number_ = 3;
++ // See boost/asio/error.hpp for error codes
++ channel_->read_error_ec_ = make_error_code(fault);
++ }, 4);
++}
++
++// Verifies that a non-fatal, EWOULDBLOCK error passed into socketReadCallback does
++// not disrupt reading for a single-threaded channel.
++TEST_F(PingChannelTest, readAgainErrorST) {
++ sendReceiveTest(0, 10,
++ [this]() {
++ channel_->ec_on_read_number_ = 4;
++ // See boost/asio/error.hpp for error codes
++ channel_->read_error_ec_ = make_error_code(would_block);
++ });
++}
++
++// Verifies that a non-fatal, EWOULDBLOCK error passed into socketReadCallback does
++// not disrupt reading for a multi-threaded channel.
++TEST_F(PingChannelTest, readAgainErrorMT) {
++ sendReceiveTest(3, 10,
++ [this]() {
++ channel_->ec_on_read_number_ = 4;
++ // See boost/asio/error.hpp for error codes
++ channel_->read_error_ec_ = make_error_code(would_block);
++ });
++}
++
++// Verifies that an exception thrown during an asynchronous write triggers graceful channel
++// shutdown and that operations can be resumed with a single-threaded channel.
++TEST_F(PingChannelTest, writeExceptionErrorST) {
++ ioErrorTest(
++ [this]() {
++ channel_->throw_on_write_number_ = 5;
++ }, 0);
++}
++
++// Verifies that an exception thrown during an asynchronous write triggers graceful channel
++// shutdown and that operations can be resumed with a multi-threaded channel.
++TEST_F(PingChannelTest, writeExceptionErrorMT) {
++ // Use a thread pool with 3 threads.
++ ioErrorTest(
++ [this]() {
++ channel_->throw_on_write_number_ = 5;
++ }, 3);
++}
++
++// Verifies that a fatal error code passed into socketWriteCallback triggers graceful channel
++// shutdown and that operations can be resumed with a single-threaded channel.
++TEST_F(PingChannelTest, writeFatalErrorST) {
++ ioErrorTest(
++ [this]() {
++ channel_->ec_on_write_number_ = 3;
++ // See boost/asio/error.hpp for error codes
++ channel_->write_error_ec_ = make_error_code(fault);
++ }, 0);
++}
++
++// Verifies that a fatal error code passed into socketWriteCallback triggers graceful channel
++// shutdown and that operations can be resumed with a multi-threaded channel.
++TEST_F(PingChannelTest, writeFatalErrorMT) {
++ ioErrorTest(
++ [this]() {
++ channel_->ec_on_write_number_ = 3;
++ // See boost/asio/error.hpp for error codes
++ channel_->write_error_ec_ = make_error_code(fault);
++ }, 4);
++}
++
++// Verifies that a non-fatal, EWOULDBLOCK error passed into socketWriteCallback does
++// not disrupt writing for a single-threaded channel.
++TEST_F(PingChannelTest, writeAgainErrorST) {
++ sendReceiveTest(0, 10,
++ [this]() {
++ channel_->ec_on_write_number_ = 6;
++ // See boost/asio/error.hpp for error codes
++ channel_->write_error_ec_ = make_error_code(would_block);
++ });
++}
++
++// Verifies that a non-fatal, EWOULDBLOCK error passed into socketWriteCallback
++// does not disrupt writing for a multi-threaded channel.
++TEST_F(PingChannelTest, writeAgainErrorMT) {
++ sendReceiveTest(3, 10,
++ [this]() {
++ channel_->ec_on_write_number_ = 6;
++ // See boost/asio/error.hpp for error codes
++ channel_->write_error_ec_ = make_error_code(would_block);
++ });
++}
++
++// Verifies that recoverable write errors do not disrupt writing for a
++// single-threaded channel.
++TEST_F(PingChannelTest, writeSendFailedErrorST) {
++ SKIP_IF(notRoot());
++
++ std::list<boost::asio::error::basic_errors> errors = {
++ boost::asio::error::network_unreachable,
++ boost::asio::error::host_unreachable,
++ boost::asio::error::network_down,
++ boost::asio::error::no_buffer_space,
++ boost::asio::error::access_denied
++ };
++
++ for (auto const& error : errors) {
++ sendReceiveTest(0, 10,
++ [this, error]() {
++ channel_->ec_on_write_number_ = 6;
++ // See boost/asio/error.hpp for error codes
++ channel_->write_error_ec_ = make_error_code(error);
++ });
++
++        // Sanity check: we should have sent one less than we targeted.
++ EXPECT_EQ(echos_sent_.size(), 9);
++ }
++}
++
++// Verifies that recoverable write errors do not disrupt writing for a
++// multi-threaded channel.
++TEST_F(PingChannelTest, writeSendFailedErrorMT) {
++ SKIP_IF(notRoot());
++
++ std::list<boost::asio::error::basic_errors> errors = {
++ boost::asio::error::network_unreachable,
++ boost::asio::error::host_unreachable,
++ boost::asio::error::network_down,
++ boost::asio::error::no_buffer_space,
++ boost::asio::error::access_denied
++ };
++
++ for (auto const& error : errors) {
++ sendReceiveTest(3, 10,
++ [this, error]() {
++ channel_->ec_on_write_number_ = 6;
++ // See boost/asio/error.hpp for error codes
++ channel_->write_error_ec_ = make_error_code(error);
++ });
++
++        // Sanity check: we should have sent one less than we targeted.
++ EXPECT_EQ(echos_sent_.size(), 9);
++ }
++}
++
++} // end of anonymous namespace
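The `ReplyContainer` used by the fixture above keys replies by a (source, id, sequence) composite. A stripped-down sketch of that lookup pattern follows, using plain members instead of `const_mem_fun` accessors; `Reply` here is a hypothetical stand-in for ICMPMsg.

```cpp
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/tuple/tuple.hpp>

#include <cstdint>
#include <iostream>
#include <string>

namespace bmi = boost::multi_index;

// Hypothetical stand-in for ICMPMsg carrying only the key fields.
struct Reply {
    std::string source_;
    uint16_t id_;
    uint16_t sequence_;
};

// Container with a unique ordered index over the (source, id, sequence)
// composite key, the same lookup shape as ReplyContainer above.
using ReplySet = bmi::multi_index_container<
    Reply,
    bmi::indexed_by<
        bmi::ordered_unique<
            bmi::composite_key<
                Reply,
                bmi::member<Reply, std::string, &Reply::source_>,
                bmi::member<Reply, uint16_t, &Reply::id_>,
                bmi::member<Reply, uint16_t, &Reply::sequence_>
            >
        >
    >
>;

int main() {
    ReplySet replies;
    replies.insert(Reply{"127.0.0.1", 0x1122, 0x3344});

    // Composite-key lookup, mirroring the fixture's findReply().
    auto it = replies.find(boost::make_tuple(std::string("127.0.0.1"),
                                             uint16_t(0x1122),
                                             uint16_t(0x3344)));
    std::cout << (it != replies.end() ? "found" : "missing") << std::endl;
    return (0);
}
```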
+diff --git a/src/hooks/dhcp/ping_check/tests/ping_check_config_unittests.cc b/src/hooks/dhcp/ping_check/tests/ping_check_config_unittests.cc
+new file mode 100644
+index 0000000000..a831a0efab
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/ping_check_config_unittests.cc
+@@ -0,0 +1,287 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/// @file This file contains tests which exercise the PingCheckConfig class.
++
++#include <config.h>
++#include <ping_check_config.h>
++#include <testutils/gtest_utils.h>
++
++#include <gtest/gtest.h>
++#include <list>
++
++using namespace std;
++using namespace isc;
++using namespace isc::data;
++using namespace isc::ping_check;
++
++namespace {
++
++// Verifies PingCheckConfig constructors and accessors.
++TEST(PingCheckConfigTest, basics) {
++ PingCheckConfig config;
++
++ // Verify initial values.
++ EXPECT_TRUE(config.getEnablePingCheck());
++ EXPECT_EQ(1, config.getMinPingRequests());
++ EXPECT_EQ(100, config.getReplyTimeout());
++ EXPECT_EQ(60, config.getPingClttSecs());
++ EXPECT_EQ(0, config.getPingChannelThreads());
++
++ // Verify accessors.
++ EXPECT_NO_THROW_LOG(config.setEnablePingCheck(false));
++ EXPECT_FALSE(config.getEnablePingCheck());
++
++ EXPECT_NO_THROW_LOG(config.setMinPingRequests(4));
++ EXPECT_EQ(4, config.getMinPingRequests());
++
++ EXPECT_NO_THROW_LOG(config.setReplyTimeout(250));
++ EXPECT_EQ(250, config.getReplyTimeout());
++
++ EXPECT_NO_THROW_LOG(config.setPingClttSecs(120));
++ EXPECT_EQ(120, config.getPingClttSecs());
++
++ EXPECT_NO_THROW_LOG(config.setPingChannelThreads(6));
++ EXPECT_EQ(6, config.getPingChannelThreads());
++
++ // Verify copy construction.
++ PingCheckConfig config2(config);
++ EXPECT_FALSE(config2.getEnablePingCheck());
++ EXPECT_EQ(4, config2.getMinPingRequests());
++ EXPECT_EQ(250, config2.getReplyTimeout());
++ EXPECT_EQ(120, config2.getPingClttSecs());
++ EXPECT_EQ(6, config2.getPingChannelThreads());
++}
++
++// Exercises PingCheckConfig parameter parsing with valid configuration
++// permutations.
++TEST(PingCheckConfigTest, parseValidScenarios) {
++ // Describes a test scenario.
++ struct Scenario {
++ int line_; // Scenario line number
++ std::string json_; // JSON configuration to parse
++ bool exp_enable_ping_check_; // Expected value for enable-ping-check
++ uint32_t exp_min_ping_requests_; // Expected value for min-ping-requests
++ uint32_t exp_reply_timeout_; // Expected value for reply-timeout
++ uint32_t exp_ping_cltt_secs_; // Expected value for ping-cltt-secs
++ size_t exp_num_threads_; // Expected value for ping-channel-threads
++ };
++
++ // List of test scenarios to run.
++ list<Scenario> scenarios = {
++ {
++ // Empty map
++ __LINE__,
++ R"({ })",
++ true, 1, 100, 60, 0
++ },
++ {
++ // Only enable-ping-check",
++ __LINE__,
++ R"({ "enable-ping-check" : false })",
++ false, 1, 100, 60, 0
++ },
++ {
++ // Only min-ping-requests",
++ __LINE__,
++ R"({ "min-ping-requests" : 3 })",
++ true, 3, 100, 60, 0
++ },
++ {
++ // Only reply-timeout",
++ __LINE__,
++ R"({ "reply-timeout" : 250 })",
++ true, 1, 250, 60, 0
++ },
++ {
++ // Only ping-cltt-secs",
++ __LINE__,
++ R"({ "ping-cltt-secs" : 77 })",
++ true, 1, 100, 77, 0
++ },
++ {
++ // Only ping-channel-threads",
++ __LINE__,
++ R"({ "ping-channel-threads" : 5 })",
++ true, 1, 100, 60, 5
++ },
++ {
++ // All parameters",
++ __LINE__,
++ R"(
++ {
++ "enable-ping-check" : false,
++ "min-ping-requests" : 2,
++ "reply-timeout" : 375,
++ "ping-cltt-secs" : 120,
++ "ping-channel-threads" : 6
++ })",
++ false, 2, 375, 120, 6
++ },
++ };
++
++ // Iterate over the scenarios.
++ for (auto const& scenario : scenarios) {
++ stringstream oss;
++ oss << "scenario at line: " << scenario.line_;
++ SCOPED_TRACE(oss.str());
++
++ // Convert JSON texts to Element map.
++ ConstElementPtr json_elements;
++ ASSERT_NO_THROW_LOG(json_elements = Element::fromJSON(scenario.json_));
++
++ // Parsing elements should succeed.
++ PingCheckConfig config;
++ ASSERT_NO_THROW_LOG(config.parse(json_elements));
++
++ // Verify expected values.
++ EXPECT_EQ(scenario.exp_enable_ping_check_, config.getEnablePingCheck());
++ EXPECT_EQ(scenario.exp_min_ping_requests_, config.getMinPingRequests());
++ EXPECT_EQ(scenario.exp_reply_timeout_, config.getReplyTimeout());
++ EXPECT_EQ(scenario.exp_ping_cltt_secs_, config.getPingClttSecs());
++ EXPECT_EQ(scenario.exp_num_threads_, config.getPingChannelThreads());
++ }
++}
++
++// Exercises PingCheckConfig parameter parsing with invalid configuration
++// permutations.
++TEST(PingCheckConfigTest, parseInvalidScenarios) {
++ // Describes a test scenario.
++ struct Scenario {
++ int line_; // Scenario line number
++ string json_; // JSON configuration to parse
++ string exp_message_; // Expected exception message
++ };
++
++    // List of test scenarios to run. Most scenarios supply
++    // all valid parameters except one in error. This allows
++    // us to verify that no values are changed when any one is in error.
++ list<Scenario> scenarios = {
++ {
++ __LINE__,
++ R"(
++ {
++ "enable-ping-check" : false,
++ "min-ping-requests" : 3,
++ "reply-timeout" : 250,
++ "ping-cltt-secs" : 90,
++ "ping-channel-threads" : 4,
++ "bogus" : false
++ })",
++ "spurious 'bogus' parameter"
++ },
++ {
++ __LINE__,
++ R"(
++ {
++ "enable-ping-check" : "not bool",
++ "min-ping-requests" : 3,
++ "reply-timeout" : 250,
++ "ping-cltt-secs" : 90,
++ "ping-channel-threads" : 4
++ })",
++ "'enable-ping-check' parameter is not a boolean"
++ },
++ {
++ __LINE__,
++ R"(
++ {
++ "enable-ping-check" : false,
++ "min-ping-requests" : 0,
++ "reply-timeout" : 250,
++ "ping-cltt-secs" : 90,
++ "ping-channel-threads" : 4
++ })",
++ "invalid min-ping-requests: '0', must be greater than 0"
++ },
++ {
++ __LINE__,
++ R"(
++ {
++ "enable-ping-check" : false,
++ "min-ping-requests" : -2,
++ "reply-timeout" : 250,
++ "ping-cltt-secs" : 90,
++ "ping-channel-threads" : 4
++ })",
++ "invalid min-ping-requests: '-2', must be greater than 0"
++ },
++ {
++ __LINE__,
++ R"(
++ {
++ "enable-ping-check" : false,
++ "min-ping-requests" : 1,
++ "reply-timeout" : 0,
++ "ping-cltt-secs" : 90,
++ "ping-channel-threads" : 4
++ })",
++ "invalid reply-timeout: '0', must be greater than 0"
++ },
++ {
++ __LINE__,
++ R"(
++ {
++ "enable-ping-check" : false,
++ "min-ping-requests" : 1,
++ "reply-timeout" : -77,
++ "ping-cltt-secs" : 90,
++ "ping-channel-threads" : 4
++ })",
++ "invalid reply-timeout: '-77', must be greater than 0"
++ },
++ {
++ __LINE__,
++ R"(
++ {
++ "enable-ping-check" : false,
++ "min-ping-requests" : 1,
++ "reply-timeout" : 250,
++ "ping-cltt-secs" : -3,
++ "ping-channel-threads" : 4
++ })",
++ "invalid ping-cltt-secs: '-3', cannot be less than 0"
++ },
++ {
++ __LINE__,
++ R"(
++ {
++ "enable-ping-check" : false,
++ "min-ping-requests" : 1,
++ "reply-timeout" : 250,
++ "ping-cltt-secs" : 90,
++ "ping-channel-threads" : -1
++ })",
++ "invalid ping-channel-threads: '-1', cannot be less than 0"
++ }
++ };
++
++ // Iterate over the scenarios.
++ PingCheckConfig default_config;
++ for (auto const& scenario : scenarios) {
++ stringstream oss;
++ oss << "scenario at line: " << scenario.line_;
++ SCOPED_TRACE(oss.str());
++
++ // Convert JSON text to a map of parameters.
++ ConstElementPtr json_elements;
++ ASSERT_NO_THROW_LOG(json_elements = Element::fromJSON(scenario.json_));
++
++ // Parsing parameters should throw.
++ PingCheckConfig config;
++ ASSERT_THROW_MSG(config.parse(json_elements), dhcp::DhcpConfigError,
++ scenario.exp_message_);
++
++ // Original values should be intact.
++ EXPECT_EQ(default_config.getEnablePingCheck(), config.getEnablePingCheck());
++ EXPECT_EQ(default_config.getMinPingRequests(), config.getMinPingRequests());
++ EXPECT_EQ(default_config.getReplyTimeout(), config.getReplyTimeout());
++ EXPECT_EQ(default_config.getPingClttSecs(), config.getPingClttSecs());
++ EXPECT_EQ(default_config.getPingChannelThreads(), config.getPingChannelThreads());
++ }
++}
++
++} // end of anonymous namespace
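The invalid scenarios above pin down exact error messages for each bound. A minimal sketch of equivalent bounds checks follows; the real `PingCheckConfig::parse()` operates on isc::data Elements rather than raw integers, so this free function is illustrative only.

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>

// Sketch of the bounds checks whose error messages the scenarios above expect.
void checkPingCheckBounds(int64_t min_ping_requests,
                          int64_t reply_timeout,
                          int64_t ping_cltt_secs,
                          int64_t ping_channel_threads) {
    if (min_ping_requests <= 0) {
        throw std::invalid_argument(
            "invalid min-ping-requests: '" + std::to_string(min_ping_requests) +
            "', must be greater than 0");
    }
    if (reply_timeout <= 0) {
        throw std::invalid_argument(
            "invalid reply-timeout: '" + std::to_string(reply_timeout) +
            "', must be greater than 0");
    }
    if (ping_cltt_secs < 0) {
        throw std::invalid_argument(
            "invalid ping-cltt-secs: '" + std::to_string(ping_cltt_secs) +
            "', cannot be less than 0");
    }
    if (ping_channel_threads < 0) {
        throw std::invalid_argument(
            "invalid ping-channel-threads: '" + std::to_string(ping_channel_threads) +
            "', cannot be less than 0");
    }
}
```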
+diff --git a/src/hooks/dhcp/ping_check/tests/ping_check_mgr_unittests.cc b/src/hooks/dhcp/ping_check/tests/ping_check_mgr_unittests.cc
+new file mode 100644
+index 0000000000..ded13b085c
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/ping_check_mgr_unittests.cc
+@@ -0,0 +1,1878 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/// @file This file contains tests which exercise the PingCheckMgr class.
++#include <config.h>
++
++#include <ping_check_mgr.h>
++#include <ping_test_utils.h>
++#include <cc/data.h>
++#include <dhcp/pkt4.h>
++#include <dhcpsrv/cfgmgr.h>
++#include <dhcpsrv/lease.h>
++#include <hooks/hooks_manager.h>
++#include <util/chrono_time_utils.h>
++#include <testutils/gtest_utils.h>
++#include <testutils/multi_threading_utils.h>
++
++#include <gtest/gtest.h>
++#include <mutex>
++#include <chrono>
++
++using namespace std;
++using namespace isc;
++using namespace isc::data;
++using namespace isc::dhcp;
++using namespace isc::util;
++using namespace isc::asiolink;
++using namespace isc::ping_check;
++using namespace isc::hooks;
++using namespace isc::test;
++using namespace std::chrono;
++using namespace boost::asio::error;
++
++namespace ph = std::placeholders;
++
++namespace {
++
++// Sanity check the basics of the production class, PingCheckMgr, in single-threaded mode.
++TEST(PingCheckMgr, basicsST) {
++ SKIP_IF(IOServiceTest::notRoot());
++ MultiThreadingMgr::instance().setMode(false);
++
++ // Create a single-threaded manager.
++ IOServicePtr main_ios(new IOService());
++ PingCheckMgrPtr mgr;
++ ASSERT_NO_THROW_LOG(mgr.reset(new PingCheckMgr(0)));
++ ASSERT_TRUE(mgr);
++ mgr->setIOService(main_ios);
++
++ // Sanity check the global configuration. More robust tests are done
++ // elsewhere.
++ auto& config = mgr->getGlobalConfig();
++ EXPECT_TRUE(config->getEnablePingCheck());
++ EXPECT_EQ(1, config->getMinPingRequests());
++ EXPECT_EQ(100, config->getReplyTimeout());
++ EXPECT_EQ(60, config->getPingClttSecs());
++ EXPECT_EQ(0, config->getPingChannelThreads());
++
++ // Verify we report as stopped.
++ EXPECT_FALSE(mgr->isRunning());
++ EXPECT_TRUE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Starting it should be OK.
++ ASSERT_NO_THROW_LOG(mgr->start());
++
++ // Verify we report as running.
++ EXPECT_TRUE(mgr->isRunning());
++ EXPECT_FALSE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Pausing it should be harmless.
++ ASSERT_NO_THROW_LOG(mgr->pause());
++
++ // Verify we report as running.
++ EXPECT_TRUE(mgr->isRunning());
++ EXPECT_FALSE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Resuming it should be harmless.
++ ASSERT_NO_THROW_LOG(mgr->resume());
++
++ // Verify we report as running.
++ EXPECT_TRUE(mgr->isRunning());
++ EXPECT_FALSE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Stopping it should be fine.
++ ASSERT_NO_THROW_LOG(mgr->stop());
++
++ // Verify we report as stopped.
++ EXPECT_FALSE(mgr->isRunning());
++ EXPECT_TRUE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Re-starting it should be OK.
++ ASSERT_NO_THROW_LOG(mgr->start());
++
++ // Verify we report as running.
++ EXPECT_TRUE(mgr->isRunning());
++ EXPECT_FALSE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Calling the destructor while it's running should be OK.
++ ASSERT_NO_THROW_LOG(mgr.reset());
++
++ main_ios->stopAndPoll();
++}
++
++// Sanity check the basics of the production class, PingCheckMgr. The bulk of
++// the testing is done with the test derivation, TestablePingCheckMgr.
++TEST(PingCheckMgr, basicsMT) {
++ SKIP_IF(IOServiceTest::notRoot());
++ MultiThreadingTest mt;
++
++ // Create a multi-threaded manager.
++ IOServicePtr main_ios(new IOService());
++ PingCheckMgrPtr mgr;
++ ASSERT_NO_THROW_LOG(mgr.reset(new PingCheckMgr(3)));
++ ASSERT_TRUE(mgr);
++ mgr->setIOService(main_ios);
++
++ // Sanity check the global configuration. More robust tests are done
++ // elsewhere.
++ auto& config = mgr->getGlobalConfig();
++ EXPECT_TRUE(config->getEnablePingCheck());
++ EXPECT_EQ(1, config->getMinPingRequests());
++ EXPECT_EQ(100, config->getReplyTimeout());
++ EXPECT_EQ(60, config->getPingClttSecs());
++ EXPECT_EQ(3, config->getPingChannelThreads());
++
++ // It should not be running yet.
++ EXPECT_FALSE(mgr->isRunning());
++ EXPECT_TRUE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Starting it should be OK.
++ ASSERT_NO_THROW_LOG(mgr->start());
++
++ // Verify it's running.
++ EXPECT_TRUE(mgr->isRunning());
++ EXPECT_FALSE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Pausing it should be fine.
++ ASSERT_NO_THROW_LOG(mgr->pause());
++
++ // Verify it's paused.
++ EXPECT_FALSE(mgr->isRunning());
++ EXPECT_FALSE(mgr->isStopped());
++ EXPECT_TRUE(mgr->isPaused());
++
++ // Resuming it should be fine.
++ ASSERT_NO_THROW_LOG(mgr->resume());
++
++ // Verify it's running.
++ EXPECT_TRUE(mgr->isRunning());
++ EXPECT_FALSE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Stopping it should be fine.
++ ASSERT_NO_THROW_LOG(mgr->stop());
++
++ // It should not be running.
++ EXPECT_FALSE(mgr->isRunning());
++ EXPECT_TRUE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Re-starting it should be OK.
++ ASSERT_NO_THROW_LOG(mgr->start());
++
++ // Verify it's running.
++ EXPECT_TRUE(mgr->isRunning());
++ EXPECT_FALSE(mgr->isStopped());
++ EXPECT_FALSE(mgr->isPaused());
++
++ // Calling the destructor while it's running should be OK.
++ ASSERT_NO_THROW_LOG(mgr.reset());
++}
++
++// Verify basic behavior of PingCheckMgr::configure().
++TEST(PingCheckMgr, configure) {
++ // Create a manager.
++ IOServicePtr main_ios(new IOService());
++ PingCheckMgrPtr mgr;
++ ASSERT_NO_THROW_LOG(mgr.reset(new PingCheckMgr()));
++ ASSERT_TRUE(mgr);
++
++ // Verify initial global configuration.
++ auto& default_config = mgr->getGlobalConfig();
++ EXPECT_TRUE(default_config->getEnablePingCheck());
++ EXPECT_EQ(1, default_config->getMinPingRequests());
++ EXPECT_EQ(100, default_config->getReplyTimeout());
++ EXPECT_EQ(60, default_config->getPingClttSecs());
++ EXPECT_EQ(0, default_config->getPingChannelThreads());
++
++ // Create a valid configuration.
++ std::string valid_json_cfg =
++ R"({
++ "enable-ping-check" : false,
++ "min-ping-requests" : 2,
++ "reply-timeout" : 250,
++ "ping-cltt-secs" : 90,
++ "ping-channel-threads" : 3
++ })";
++
++ auto parameters = Element::fromJSON(valid_json_cfg);
++
++ // Parse it.
++ ASSERT_NO_THROW_LOG(mgr->configure(parameters));
++
++ // Verify updated global configuration.
++ auto& config = mgr->getGlobalConfig();
++ ASSERT_TRUE(config);
++ EXPECT_FALSE(config->getEnablePingCheck());
++ EXPECT_EQ(2, config->getMinPingRequests());
++ EXPECT_EQ(250, config->getReplyTimeout());
++ EXPECT_EQ(90, config->getPingClttSecs());
++ EXPECT_EQ(3, config->getPingChannelThreads());
++
++ // Create an invalid configuration.
++ std::string invalid_json_cfg =
++ R"({
++ "enable-ping-check" : true,
++ "min-ping-requests" : 4,
++ "reply-timeout" : 500,
++ "ping-cltt-secs" : 45,
++ "ping-channel-threads" : 6,
++ "bogus" : 0
++ })";
++
++ parameters = Element::fromJSON(invalid_json_cfg);
++
++ // Parsing it should throw.
++ ASSERT_THROW_MSG(mgr->configure(parameters), DhcpConfigError, "spurious 'bogus' parameter");
++
++ // Verify configuration values were left unchanged.
++ auto& final_config = mgr->getGlobalConfig();
++ ASSERT_TRUE(final_config);
++ EXPECT_EQ(final_config->getEnablePingCheck(), config->getEnablePingCheck());
++ EXPECT_EQ(final_config->getMinPingRequests(), config->getMinPingRequests());
++ EXPECT_EQ(final_config->getReplyTimeout(), config->getReplyTimeout());
++ EXPECT_EQ(final_config->getPingClttSecs(), config->getPingClttSecs());
++ EXPECT_EQ(final_config->getPingChannelThreads(), config->getPingChannelThreads());
++}
++
++/// @brief Defines a callback to invoke at the bottom of sendCompleted()
++typedef std::function<void(const ICMPMsgPtr& echo, bool send_failed)> SendCompletedCallback;
++
++/// @brief Defines a callback to invoke at the bottom of replyReceived()
++typedef std::function<void(const ICMPMsgPtr& reply)> ReplyReceivedCallback;
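++
++// A minimal sketch of installing one of these callbacks on the testable
++// manager defined below; the lambda body is illustrative only:
++//
++//     mgr->post_reply_received_cb_ = [](const ICMPMsgPtr& reply) {
++//         if (!reply) {
++//             ADD_FAILURE() << "null reply";
++//         }
++//     };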
++
++/// @brief Testable derivation of PingCheckMgr
++///
++/// Uses a TestablePingChannel to facilitate more robust testing.
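++///
++/// A construction sketch (assumes an IOServicePtr named io_service is at
++/// hand; the argument values are illustrative):
++/// @code
++///     TestablePingCheckMgrPtr mgr(new TestablePingCheckMgr(3, 2, 250));
++///     mgr->setIOService(io_service);
++///     mgr->start();
++///     // ... exercise the manager ...
++///     mgr->stop();
++/// @endcode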
++class TestablePingCheckMgr : public PingCheckMgr {
++public:
++ /// @brief Constructor.
++ ///
++ /// @param num_threads number of threads to use in the thread pool (0 means follow
++ /// core thread pool size)
++ /// @param min_echos minimum number of unanswered ECHO REQUESTs required
++ /// to declare an address free to offer. Defaults to 1; must be greater
++ /// than zero.
++ /// @param reply_timeout maximum number of milliseconds to wait for an
++ /// ECHO REPLY after an ECHO REQUEST has been sent. Defaults to 100.
++ TestablePingCheckMgr(uint32_t num_threads, uint32_t min_echos = 1,
++ uint32_t reply_timeout = 100)
++ : PingCheckMgr(num_threads, min_echos, reply_timeout),
++ post_send_completed_cb_(SendCompletedCallback()),
++ post_reply_received_cb_(ReplyReceivedCallback()) {
++ }
++
++ /// @brief Destructor.
++ virtual ~TestablePingCheckMgr() {
++ post_send_completed_cb_ = SendCompletedCallback();
++ post_reply_received_cb_ = ReplyReceivedCallback();
++ if (getIOService()) {
++ getIOService()->stopAndPoll();
++ }
++ }
++
++ /// @brief Fetch the current channel instance.
++ ///
++ /// @return pointer to the TestablePingChannel instance (or an empty pointer).
++ TestablePingChannelPtr getChannel() {
++ return (boost::dynamic_pointer_cast<TestablePingChannel>(channel_));
++ }
++
++ /// @brief Fetches the manager's context store.
++ ///
++ /// @return Pointer to the PingContextStore.
++ PingContextStorePtr getStore() {
++ return (store_);
++ }
++
++ /// @brief Fetches the expiration timer's current interval (in milliseconds).
++ ///
++ /// @return the current interval as a long, or 0L if the timer is not currently
++ /// running or does not exist.
++ long getExpirationTimerInterval() {
++ if (expiration_timer_) {
++ return (expiration_timer_->getInterval());
++ }
++
++ return (0);
++ }
++
++protected:
++ /// @brief Creates a TestablePingChannel instance.
++ ///
++ /// This overrides the base class creator.
++ ///
++ /// @param io_service IOService that will drive the channel.
++ /// @return pointer to the newly created channel.
++ virtual PingChannelPtr createChannel(asiolink::IOServicePtr io_service) {
++ return (TestablePingChannelPtr(
++ new TestablePingChannel(io_service,
++ std::bind(&PingCheckMgr::nextToSend, this, ph::_1),
++ std::bind(&TestablePingCheckMgr::sendCompleted,
++ this, ph::_1, ph::_2),
++ std::bind(&TestablePingCheckMgr::replyReceived, this, ph::_1),
++ std::bind(&PingCheckMgr::channelShutdown, this))));
++ }
++
++public:
++ /// @brief Fetches the current size of the parking lot.
++ ///
++ /// @return size_t containing the number of entries parked.
++ size_t parkingLotSize() const {
++ auto const& parking_lot = ServerHooks::getServerHooks().getParkingLotPtr("lease4_offer");
++ return (parking_lot->size());
++ }
++
++ /// @brief Callback passed to PingChannel to invoke when an ECHO REQUEST
++ /// send has completed.
++ ///
++ /// -# Invokes the base class implementation
++ /// -# Invokes an optional callback
++ ///
++ /// @param echo ICMP echo message that was sent.
++ /// @param send_failed True if the send completed with a non-fatal error,
++ /// false otherwise.
++ virtual void sendCompleted(const ICMPMsgPtr& echo, bool send_failed) {
++ // Call the production callback.
++ PingCheckMgr::sendCompleted(echo, send_failed);
++
++ // Invoke the post check, if one.
++ if (post_send_completed_cb_) {
++ (post_send_completed_cb_)(echo, send_failed);
++ }
++ }
++
++ /// @brief Callback invoked by the channel to process received ICMP messages.
++ ///
++ /// -# Returns without further processing if the message is an ECHO REQUEST
++ /// -# When loopback routing is enabled, remaps the reply's source address
++ ///    back to the original target address
++ /// -# Invokes the base class implementation
++ /// -# Invokes an optional callback, passing in the reply received
++ ///
++ /// @param reply pointer to the ICMP message received.
++ virtual void replyReceived(const ICMPMsgPtr& reply) {
++ if (reply->getType() == ICMPMsg::ECHO_REQUEST) {
++ return;
++ }
++
++ // If we're routing loopback messages, look up the original address based
++ // on the sequence number and use it as the reply's source address.
++ if (getChannel()->route_loopback_) {
++ IOAddress address = getChannel()->loopback_map_.find(reply->getSequence());
++ if (address != IOAddress::IPV4_ZERO_ADDRESS()) {
++ reply->setSource(address);
++ }
++ }
++
++ // Call the production callback.
++ PingCheckMgr::replyReceived(reply);
++
++ // Invoke the post check, if one.
++ if (post_reply_received_cb_) {
++ (post_reply_received_cb_)(reply);
++ }
++ }
++
++ /// @brief Fetches the thread pool (if it exists).
++ ///
++ /// @return pointer to the IoServiceThreadPool. Will be empty
++ /// in ST mode or if the manager has not been started.
++ asiolink::IoServiceThreadPoolPtr getThreadPool() {
++ return (thread_pool_);
++ }
++
++ /// @brief Sets the network_state object.
++ ///
++ /// @param network_state pointer to a NetworkState instance.
++ void setNetworkState(NetworkStatePtr network_state) {
++ network_state_ = network_state;
++ }
++
++ /// @brief Callback to invoke at the bottom of sendCompleted().
++ SendCompletedCallback post_send_completed_cb_;
++
++ /// @brief Callback to invoke at the bottom of replyReceived().
++ ReplyReceivedCallback post_reply_received_cb_;
++};
++
++/// @brief Defines a shared pointer to a TestablePingCheckMgr.
++typedef boost::shared_ptr<TestablePingCheckMgr> TestablePingCheckMgrPtr;
++
++/// @brief Holds a lease and its associated query.
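++///
++/// A minimal construction sketch (values are illustrative):
++/// @code
++///     Lease4Ptr lease(new Lease4());
++///     lease->addr_ = IOAddress("127.0.0.9");
++///     LeaseQueryPair lqp(lease, Pkt4Ptr(new Pkt4(DHCPDISCOVER, 9)));
++/// @endcode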
++struct LeaseQueryPair {
++public:
++ /// @brief Constructor.
++ ///
++ /// @param lease pointer to the lease.
++ /// @param query pointer to the query.
++ LeaseQueryPair(Lease4Ptr lease, Pkt4Ptr query) : lease_(lease), query_(query) {
++ };
++
++ /// @brief Pointer to the lease.
++ Lease4Ptr lease_;
++
++ /// @brief Pointer to the query.
++ Pkt4Ptr query_;
++};
++
++/// @brief Container of leases and their associated queries.
++typedef std::vector<LeaseQueryPair> LeaseQueryPairs;
++
++/// @brief Test fixture for exercising PingCheckMgr.
++///
++/// Uses a TestablePingCheckMgr instance for all tests and
++/// provides numerous helper functions.
++class PingCheckMgrTest : public IOServiceTest {
++public:
++ /// @brief Constructor.
++ PingCheckMgrTest() : mgr_(), lease_query_pairs_(), mutex_(new mutex()),
++ test_start_time_(PingContext::now()), unparked_(0) {
++ MultiThreadingMgr::instance().setMode(false);
++ };
++
++ /// @brief Destructor.
++ virtual ~PingCheckMgrTest() {
++ test_timer_.cancel();
++ test_io_service_->stopAndPoll();
++ MultiThreadingMgr::instance().setMode(false);
++ }
++
++ /// @brief Pretest setup.
++ ///
++ /// Registers the hook point and creates its parking lot.
++ virtual void SetUp() {
++ HooksManager::registerHook("lease4_offer");
++ parking_lot_ = boost::make_shared<ParkingLotHandle>(
++ ServerHooks::getServerHooks().getParkingLotPtr("lease4_offer"));
++ }
++
++ /// @brief Ensure we stop cleanly.
++ virtual void TearDown() {
++ if (mgr_) {
++ mgr_->stop();
++ }
++
++ HooksManager::clearParkingLots();
++ }
++
++ /// @brief Creates the test's manager instance.
++ ///
++ /// @param num_threads number of threads in the thread pool.
++ /// @param min_echos minimum number of echos per ping check.
++ /// @param reply_timeout reply timeout per ping.
++ /// @param start_and_pause when false, the manager is only created,
++ /// when true it is created, started and then paused. This allows
++ /// manipulation of context store contents while the threads are doing
++ /// no work.
++ void createMgr(uint32_t num_threads,
++ uint32_t min_echos = 1,
++ uint32_t reply_timeout = 100,
++ bool start_and_pause = false) {
++ ASSERT_NO_THROW_LOG(
++ mgr_.reset(new TestablePingCheckMgr(num_threads, min_echos, reply_timeout)));
++ ASSERT_TRUE(mgr_);
++ mgr_->setIOService(test_io_service_);
++
++ if (start_and_pause) {
++ ASSERT_NO_THROW_LOG(mgr_->start());
++
++ if (!MultiThreadingMgr::instance().getMode()) {
++ ASSERT_FALSE(mgr_->getThreadPool());
++ } else {
++ ASSERT_TRUE(mgr_->getThreadPool());
++ ASSERT_NO_THROW_LOG(mgr_->pause());
++ ASSERT_TRUE(mgr_->isPaused());
++ }
++ }
++ }
++
++ /// @brief Add a new lease and query pair to the test's list of lease query pairs.
++ ///
++ /// Creates a bare-bones DHCPv4 lease and DHCPDISCOVER, wraps them in a
++ /// LeaseQueryPair and adds the pair to the end of the test's internal
++ /// list of pairs, lease_query_pairs_.
++ ///
++ /// @param target IOAddress of the lease.
++ /// @param transid transaction id of the query.
++ ///
++ /// @return A copy of the newly created pair.
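++ ///
++ /// A usage sketch (the address and transaction id are illustrative):
++ /// @code
++ ///     auto lqp = makeLeaseQueryPair(IOAddress("127.0.0.7"), 7);
++ ///     mgr_->startPing(lqp.lease_, lqp.query_, parking_lot_);
++ /// @endcode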
++ LeaseQueryPair makeLeaseQueryPair(IOAddress target, uint16_t transid) {
++ // Make a lease and query pair
++ Lease4Ptr lease(new Lease4());
++ lease->addr_ = IOAddress(target);
++ Pkt4Ptr query(new Pkt4(DHCPDISCOVER, transid));
++ LeaseQueryPair lqp(lease, query);
++ lease_query_pairs_.push_back(lqp);
++ return (lqp);
++ }
++
++ /// @brief Start ping checks for a given number of targets.
++ ///
++ /// The function first creates and parks the given number of targets, and
++ /// then starts a ping check for each of them. Parking them all first
++ /// establishes the number of ping checks expected to be conducted during
++ /// the test prior to actually starting any of them. This keeps the
++ /// parking lot from becoming empty partway through the test.
++ ///
++ /// The unpark callback lambda increments the unparked_ counter and then
++ /// pushes the unparked lease/query pair onto either the list of frees
++ /// or the list of declines.
++ ///
++ /// @param num_targets number of target ip addresses to ping check.
++ /// @param start_address starting target address. Defaults to 127.0.0.1.
++ ///
++ /// @return the address immediately following the last target started.
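++ ///
++ /// A usage sketch (counts and addresses are illustrative):
++ /// @code
++ ///     createMgr(3, 1, 250, true);
++ ///     IOAddress next = startTargets(4);  // pings 127.0.0.1 through 127.0.0.4
++ ///     // next is 127.0.0.5, the address following the last target
++ /// @endcode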
++ IOAddress startTargets(size_t num_targets, IOAddress start_address = IOAddress("127.0.0.1")) {
++ IOAddress target = start_address;
++ for (size_t i = 0; i < num_targets; ++i) {
++ auto lqp = makeLeaseQueryPair(IOAddress(target), i+1);
++ HooksManager::park("lease4_offer", lqp.query_,
++ [this, lqp]() {
++ MultiThreadingLock lock(*mutex_);
++ ++unparked_;
++ auto handle = lqp.query_->getCalloutHandle();
++ bool offer_address_in_use;
++ handle->getArgument("offer_address_in_use", offer_address_in_use);
++ offer_address_in_use ? declines_.push_back(lqp) : frees_.push_back(lqp);
++ });
++
++ try {
++ mgr_->startPing(lqp.lease_, lqp.query_, parking_lot_);
++ } catch (const std::exception& ex) {
++ ADD_FAILURE() << "startPing threw: " << ex.what();
++ }
++
++ target = IOAddress::increase(target);
++ }
++
++ return (target);
++ }
++
++ /// @brief Fetches the context, by lease address, from the store for a
++ /// given lease query pair.
++ ///
++ /// @param lqp LeaseQueryPair for which the context is desired.
++ /// @return pointer to the found context or an empty pointer.
++ PingContextPtr getContext(const LeaseQueryPair& lqp) {
++ return (getContext(lqp.lease_->addr_));
++ }
++
++ /// @brief Fetches the context, by lease address, from the store for address.
++ ///
++ /// @param address lease ip address for which the context is desired.
++ /// @return pointer to the found context or an empty pointer.
++ PingContextPtr getContext(const IOAddress& address) {
++ return (mgr_->getStore()->getContextByAddress(address));
++ }
++
++ /// @brief Updates a context in the store.
++ ///
++ /// @param context context to update.
++ void updateContext(PingContextPtr& context) {
++ ASSERT_NO_THROW_LOG(mgr_->getStore()->updateContext(context));
++ }
++
++ /// @brief Tests equality of two timestamps within a given tolerance.
++ ///
++ /// The two time stamps are considered equal if the absolute value of their
++ /// difference is between 0 and the specified tolerance (inclusive).
++ ///
++ /// @param lhs first TimeStamp to compare.
++ /// @param rhs second TimeStamp to compare.
++ /// @param tolerance margin of difference allowed for equality in milliseconds.
++ /// Defaults to 10.
++ ///
++ /// @return True if the time stamps are "equal", false otherwise.
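++ ///
++ /// For example (a sketch; the timings are illustrative):
++ /// @code
++ ///     auto t1 = PingContext::now();
++ ///     auto t2 = t1 + milliseconds(7);
++ ///     EXPECT_TRUE(fuzzyEqual(t1, t2));       // within the default 10 ms
++ ///     EXPECT_FALSE(fuzzyEqual(t1, t2, 5));   // outside a 5 ms tolerance
++ /// @endcode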
++ bool fuzzyEqual(const TimeStamp& lhs, const TimeStamp& rhs, long tolerance = 10) {
++ auto diff = abs(duration_cast<milliseconds>(lhs - rhs).count());
++ return (diff >= 0 && diff <= tolerance);
++ }
++
++ /// @brief Tests equality of two longs within a given tolerance.
++ ///
++ /// The two values are considered equal if the absolute value of their
++ /// difference is between 0 and the specified tolerance (inclusive).
++ ///
++ /// @param lhs first value to compare.
++ /// @param rhs second value to compare.
++ /// @param tolerance margin of difference allowed for equality.
++ /// Defaults to 10.
++ ///
++ /// @return True if the time values are "equal", false otherwise.
++ bool fuzzyEqual(const long& lhs, const long& rhs, long tolerance = 10) {
++ auto diff = abs(lhs - rhs);
++ return (diff >= 0 && diff <= tolerance);
++ }
++
++ /// @brief Creates an ECHO REQUEST message from a given address.
++ ///
++ /// @param target ip address to use as the echo's destination address.
++ /// @return Pointer to the new message.
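++ ///
++ /// A usage sketch, mirroring how the tests simulate a completed send:
++ /// @code
++ ///     ICMPMsgPtr echo = makeEchoRequest(IOAddress("127.0.0.3"));
++ ///     mgr_->sendCompleted(echo, false);
++ /// @endcode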
++ ICMPMsgPtr makeEchoRequest(const IOAddress& target) {
++ ICMPMsgPtr msg(new ICMPMsg());
++ msg->setType(ICMPMsg::ECHO_REQUEST);
++ msg->setDestination(IOAddress(target));
++ msg->setSource(IOAddress("127.0.0.1"));
++ return (msg);
++ }
++
++ /// @brief Creates an ECHO_REPLY message from a given address.
++ ///
++ /// @param from ip address to use as the reply's source address.
++ /// @return Pointer to the new message.
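++ ///
++ /// A usage sketch, mirroring how the tests simulate a received reply:
++ /// @code
++ ///     ICMPMsgPtr reply = makeEchoReply(IOAddress("127.0.0.3"));
++ ///     mgr_->replyReceived(reply);
++ /// @endcode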
++ ICMPMsgPtr makeEchoReply(const IOAddress& from) {
++ ICMPMsgPtr msg(new ICMPMsg());
++ msg->setType(ICMPMsg::ECHO_REPLY);
++ msg->setSource(IOAddress(from));
++ msg->setDestination(IOAddress("127.0.0.1"));
++ return (msg);
++ }
++
++ /// @brief Creates a TARGET_UNREACHABLE message from a given address.
++ ///
++ /// @param target ip address of the original ping target to embed in the message.
++ /// @return Pointer to the new message.
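++ ///
++ /// A usage sketch, mirroring how the tests simulate an unreachable target:
++ /// @code
++ ///     ICMPMsgPtr unreachable = makeUnreachable(IOAddress("127.0.0.5"));
++ ///     mgr_->replyReceived(unreachable);
++ /// @endcode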
++ ICMPMsgPtr makeUnreachable(const IOAddress& target) {
++ // Make the TARGET_UNREACHABLE message first.
++ ICMPMsgPtr msg(new ICMPMsg());
++ msg->setType(ICMPMsg::TARGET_UNREACHABLE);
++ msg->setSource(IOAddress("127.0.0.1"));
++ msg->setDestination(IOAddress("127.0.0.1"));
++
++ // Now embed the ping target's "original" echo into the unreachable
++ // message's payload. This includes the IP header followed by the
++ // ECHO REQUEST. First make the IP header and add it to the payload.
++ // We only set values we care about.
++ struct ip ip_header;
++ memset((void *)(&ip_header), 0x00, sizeof(struct ip));
++ ip_header.ip_v = 4;
++ ip_header.ip_hl = 5; /* 5 32-bit words = 20 bytes */
++ ip_header.ip_len = 48; /* ip_header + echo length */
++ ip_header.ip_dst.s_addr = htonl(target.toUint32());
++ ip_header.ip_src.s_addr = htonl(msg->getSource().toUint32());
++ msg->setPayload((const uint8_t*)(&ip_header), sizeof(struct ip));
++
++ // Now make the ECHO_REQUEST, pack it and add that to the payload.
++ ICMPMsgPtr echo = makeEchoRequest(target);
++ ICMPPtr packed_echo = echo->pack();
++ msg->setPayload((const uint8_t*)(packed_echo.get()), sizeof(struct icmp));
++
++ return (msg);
++ }
++
++ /// @brief Compares a LeaseQueryPair collection to the internal collection
++ /// of pairs created (see makeLeaseQueryPair()).
++ ///
++ /// @param test_collection Collection of pairs to compare against those in
++ /// the creation collection.
++ void compareLeaseQueryPairs(LeaseQueryPairs& test_collection) {
++ // We should have as many entries in the test_collection as in the
++ // creation collection.
++ ASSERT_EQ(test_collection.size(), lease_query_pairs_.size());
++
++ // Order is not guaranteed so we sort both lists then compare.
++ std::sort(test_collection.begin(), test_collection.end(),
++ [](LeaseQueryPair const& a, LeaseQueryPair const& b)
++ { return (a.lease_->addr_ < b.lease_->addr_); });
++
++ std::sort(lease_query_pairs_.begin(), lease_query_pairs_.end(),
++ [](LeaseQueryPair const& a, LeaseQueryPair const& b)
++ { return (a.lease_->addr_ < b.lease_->addr_); });
++
++ auto dpi = test_collection.begin();
++ for (auto const& lqpi : lease_query_pairs_) {
++ ASSERT_EQ((*dpi).lease_->addr_, lqpi.lease_->addr_);
++ ++dpi;
++ }
++ }
++
++ /// @brief Exercises the operational basics: create, start, and stop
++ /// for TestablePingCheckMgr.
++ ///
++ /// @param num_threads number of threads in the thread pool.
++ void testOperationalBasics(size_t num_threads) {
++ SKIP_IF(notRoot());
++
++ // Create manager with the given number of threads.
++ ASSERT_NO_THROW_LOG(createMgr(num_threads));
++ ASSERT_TRUE(mgr_);
++
++ // Should not be running.
++ EXPECT_FALSE(mgr_->isRunning());
++ EXPECT_TRUE(mgr_->isStopped());
++ EXPECT_FALSE(mgr_->isPaused());
++
++ // Channel should not yet exist.
++ ASSERT_FALSE(mgr_->getChannel());
++
++ // Start the manager.
++ ASSERT_NO_THROW_LOG(mgr_->start());
++
++ // Thread pool should exist in MT mode only.
++ if (MultiThreadingMgr::instance().getMode()) {
++ ASSERT_TRUE(mgr_->getThreadPool());
++ } else {
++ ASSERT_FALSE(mgr_->getThreadPool());
++ }
++
++ // Should be running.
++ EXPECT_TRUE(mgr_->isRunning());
++ EXPECT_FALSE(mgr_->isStopped());
++ EXPECT_FALSE(mgr_->isPaused());
++
++ // Channel should exist and be open.
++ auto channel = mgr_->getChannel();
++ ASSERT_TRUE(channel);
++ ASSERT_TRUE(channel->isOpen());
++
++ // Context store should exist and be empty.
++ auto store = mgr_->getStore();
++ ASSERT_TRUE(store);
++ auto pings = store->getAll();
++ ASSERT_EQ(0, pings->size());
++
++ // Destruction should be graceful.
++ ASSERT_NO_THROW_LOG(mgr_.reset());
++ }
++
++ /// @brief Verifies that startPing() creates a new context in the store and
++ /// it can be fetched with the nextToSend() callback.
++ void testStartPing() {
++ SKIP_IF(notRoot());
++
++ // Create manager with thread-pool size of 3, min_echos 2, reply_timeout 250 ms.
++ // ST mode should ignore the requested thread number.
++ ASSERT_NO_THROW_LOG(createMgr(3, 2, 250));
++ ASSERT_TRUE(mgr_);
++
++ // Make a lease and query pair
++ auto lqp1 = makeLeaseQueryPair(IOAddress("127.0.0.101"), 101);
++
++ // Channel isn't open, startPing should throw.
++ ASSERT_THROW_MSG(mgr_->startPing(lqp1.lease_, lqp1.query_, parking_lot_), InvalidOperation,
++ "PingCheckMgr::startPing() - channel isn't open");
++
++ // Start the manager. This will open the channel.
++ ASSERT_NO_THROW_LOG(mgr_->start());
++ ASSERT_TRUE(mgr_->isRunning());
++
++ if (mgr_->getThreadPool()) {
++ // Pause the manager so startPing() will succeed but no events will occur.
++ // This should let us add contexts that sit in WAITING_TO_SEND state.
++ ASSERT_NO_THROW_LOG(mgr_->pause());
++ ASSERT_TRUE(mgr_->isPaused());
++ }
++
++ // Call startPing() again. It should work.
++ ASSERT_NO_THROW_LOG(mgr_->startPing(lqp1.lease_, lqp1.query_, parking_lot_));
++
++ // Calling startPing() on the same lease should fail, duplicates not allowed.
++ ASSERT_THROW_MSG(mgr_->startPing(lqp1.lease_, lqp1.query_, parking_lot_), DuplicateContext,
++ "PingContextStore::addContex: context already exists for: 127.0.0.101");
++
++ // Our context should be present.
++ auto const& store = mgr_->getStore();
++ auto pings = store->getAll();
++ ASSERT_EQ(1, pings->size());
++ PingContextPtr context1;
++ ASSERT_NO_THROW_LOG(context1 = store->getContextByAddress(lqp1.lease_->addr_));
++ ASSERT_TRUE(context1);
++
++ // Verify the context's state.
++ EXPECT_EQ(2, context1->getMinEchos());
++ EXPECT_EQ(250, context1->getReplyTimeout());
++ EXPECT_EQ(0, context1->getEchosSent());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context1->getLastEchoSentTime());
++ EXPECT_LE(test_start_time_, context1->getSendWaitStart());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context1->getNextExpiry());
++ EXPECT_LE(test_start_time_, context1->getCreatedTime());
++ EXPECT_EQ(lqp1.lease_, context1->getLease());
++ EXPECT_EQ(lqp1.query_, context1->getQuery());
++ EXPECT_EQ(PingContext::WAITING_TO_SEND, context1->getState());
++
++ // Sleep a bit to make sure there's a difference in context times.
++ usleep(5);
++
++ // Make a second lease and query pair
++ auto lqp2 = makeLeaseQueryPair(IOAddress("127.0.0.102"), 102);
++
++ // Start a ping for lease2.
++ ASSERT_NO_THROW_LOG(mgr_->startPing(lqp2.lease_, lqp2.query_, parking_lot_));
++
++ // Both contexts should be present.
++ pings = store->getAll();
++ ASSERT_EQ(2, pings->size());
++
++ // Fetch the second context by address.
++ PingContextPtr context2;
++ ASSERT_NO_THROW_LOG(context2 = store->getContextByAddress(lqp2.lease_->addr_));
++ ASSERT_TRUE(context2);
++
++ // Verify the second context's state.
++ EXPECT_EQ(2, context2->getMinEchos());
++ EXPECT_EQ(250, context2->getReplyTimeout());
++ EXPECT_EQ(0, context2->getEchosSent());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context2->getLastEchoSentTime());
++ // Its send_wait_start_time_ should be more recent than context1.
++ EXPECT_LE(context1->getSendWaitStart(), context2->getSendWaitStart());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context2->getNextExpiry());
++ // Its created_time_ should be more recent than context1.
++ EXPECT_LE(context1->getCreatedTime(), context2->getCreatedTime());
++ EXPECT_EQ(lqp2.lease_, context2->getLease());
++ EXPECT_EQ(lqp2.query_, context2->getQuery());
++ EXPECT_EQ(PingContext::WAITING_TO_SEND, context2->getState());
++ }
++
++ /// @brief Exercises PingCheckMgr::nextToSend().
++ void testNextToSend() {
++ SKIP_IF(notRoot());
++
++ // Create a paused manager. 3 threads, 2 echos, 250 ms timeout.
++ // ST mode should ignore the requested thread number.
++ createMgr(3, 2, 250, true);
++
++ // Calling nextToSend() should return false.
++ IOAddress next("0.0.0.0");
++ ASSERT_FALSE(mgr_->nextToSend(next));
++
++ // Now let's start 3 contexts.
++ size_t num_targets = 3;
++ IOAddress target("127.0.0.1");
++ for (size_t i = 0; i < num_targets; ++i) {
++ auto lqp = makeLeaseQueryPair(IOAddress(target), i+1);
++
++ // Call startPing().
++ ASSERT_NO_THROW_LOG(mgr_->startPing(lqp.lease_, lqp.query_, parking_lot_));
++ target = IOAddress::increase(target);
++
++ PingContextPtr context = getContext(lqp);
++ ASSERT_TRUE(context);
++
++ // Verify the context's initial state is correct.
++ EXPECT_EQ(0, context->getEchosSent());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context->getLastEchoSentTime());
++ EXPECT_LE(test_start_time_, context->getSendWaitStart());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context->getNextExpiry());
++ EXPECT_LE(test_start_time_, context->getCreatedTime());
++ EXPECT_EQ(PingContext::WAITING_TO_SEND, context->getState());
++
++ // Sleep a few microseconds before adding the next one to ensure
++ // ordering by time is consistent.
++ usleep(5);
++ }
++
++ // Consecutive calls to nextToSend() should return target addresses
++ // in the order they were created.
++ for (auto const& lqp : lease_query_pairs_) {
++ // Next to send should return the next address to send.
++ ASSERT_TRUE(mgr_->nextToSend(next));
++
++ // It should match the lease as created.
++ ASSERT_EQ(next, lqp.lease_->addr_);
++
++ // Fetch the corresponding context.
++ PingContextPtr context = getContext(next);
++ ASSERT_TRUE(context);
++
++ // Verify the state has properly moved to SENDING.
++ EXPECT_EQ(0, context->getEchosSent());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context->getLastEchoSentTime());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context->getNextExpiry());
++ EXPECT_EQ(PingContext::SENDING, context->getState());
++ }
++
++ // A final call to nextToSend should return false.
++ ASSERT_FALSE(mgr_->nextToSend(next));
++ }
++
++ /// @brief Exercises PingCheckMgr::setNextExpiration.
++ void testSetNextExpiration() {
++ SKIP_IF(notRoot());
++
++ // Create a paused manager. 3 threads, 2 echos, 500 ms timeout.
++ // ST mode should ignore the requested thread number.
++ createMgr(3, 2, 500, true);
++
++ // Should not have an expiration time, timer should not be running.
++ ASSERT_EQ(PingContext::EMPTY_TIME(), mgr_->getNextExpiry());
++ ASSERT_EQ(mgr_->getExpirationTimerInterval(), 0);
++
++ // Now let's start 3 contexts.
++ size_t num_targets = 3;
++ IOAddress target("127.0.0.1");
++ for (size_t i = 0; i < num_targets; ++i) {
++ auto lqp = makeLeaseQueryPair(IOAddress(target), i+1);
++
++ // Call startPing().
++ ASSERT_NO_THROW_LOG(mgr_->startPing(lqp.lease_, lqp.query_, parking_lot_));
++ target = IOAddress::increase(target);
++ }
++
++ // Should still have no expiration time and no running timer.
++ ASSERT_EQ(PingContext::EMPTY_TIME(), mgr_->getNextExpiry());
++ EXPECT_EQ(mgr_->getExpirationTimerInterval(), 0);
++
++ // Simulate a completed send for the second context.
++ PingContextPtr context2;
++ context2 = getContext(lease_query_pairs_[1]);
++ ASSERT_TRUE(context2);
++ context2->beginWaitingForReply(test_start_time_ - milliseconds(50));
++ updateContext(context2);
++
++ // Call setNextExpiration().
++ ASSERT_NO_THROW_LOG(mgr_->setNextExpiration());
++
++ // Refresh the context.
++ context2 = getContext(lease_query_pairs_[1]);
++
++ // Verify the mgr has the same next expiration as the context and
++ // that the expiration timer is running. Allow for some fudge in
++ // the checks.
++ auto original_mgr_expiry = mgr_->getNextExpiry();
++ EXPECT_TRUE(fuzzyEqual(original_mgr_expiry, context2->getNextExpiry()));
++
++ auto original_interval = mgr_->getExpirationTimerInterval();
++ EXPECT_TRUE(fuzzyEqual(original_interval, 450));
++
++ // Simulate a completed send for the third context.
++ PingContextPtr context3;
++ context3 = getContext(lease_query_pairs_[2]);
++ ASSERT_TRUE(context3);
++ context3->beginWaitingForReply();
++ updateContext(context3);
++
++ // Call setNextExpiration().
++ ASSERT_NO_THROW_LOG(mgr_->setNextExpiration());
++
++ // Refresh the context.
++ context3 = getContext(lease_query_pairs_[2]);
++
++ // Context3 should have a later expiration than context2.
++ EXPECT_LT(context2->getNextExpiry(), context3->getNextExpiry());
++
++ // Expiration and timer should still match the original values based on
++ // the second context.
++ EXPECT_TRUE(fuzzyEqual(mgr_->getNextExpiry(), original_mgr_expiry));
++ EXPECT_EQ(mgr_->getExpirationTimerInterval(), original_interval);
++
++ // Simulate a completed send for the first context but use a smaller
++ // timeout and back date it.
++ PingContextPtr context1;
++ context1 = getContext(lease_query_pairs_[0]);
++ ASSERT_TRUE(context1);
++ context1->setReplyTimeout(50);
++ context1->beginWaitingForReply(test_start_time_ - milliseconds(1));
++ updateContext(context1);
++
++ // Call setNextExpiration().
++ ASSERT_NO_THROW_LOG(mgr_->setNextExpiration());
++
++ // Refresh the context.
++ context1 = getContext(lease_query_pairs_[0]);
++
++ // Context1 should have an earlier expiration than context2.
++ EXPECT_LT(context1->getNextExpiry(), context2->getNextExpiry());
++ // Timer interval should be based on context1.
++ EXPECT_TRUE(fuzzyEqual(mgr_->getExpirationTimerInterval(), 50, 20))
++ << " interval: " << mgr_->getExpirationTimerInterval();
++
++ // Move all contexts to TARGET_FREE. This should leave none
++ // still waiting.
++ context1->setState(PingContext::TARGET_FREE);
++ updateContext(context1);
++ context2->setState(PingContext::TARGET_FREE);
++ updateContext(context2);
++ context3->setState(PingContext::TARGET_FREE);
++ updateContext(context3);
++
++ // Call setNextExpiration().
++ ASSERT_NO_THROW_LOG(mgr_->setNextExpiration());
++
++ // Should not have an expiration time, timer should not be running.
++ ASSERT_EQ(PingContext::EMPTY_TIME(), mgr_->getNextExpiry());
++ ASSERT_EQ(mgr_->getExpirationTimerInterval(), 0);
++ }
++
++ /// @brief Exercises PingCheckMgr::sendCompleted.
++ void testSendCompleted() {
++ SKIP_IF(notRoot());
++
++ // Create a paused manager. 3 threads, 2 echos, 500 ms timeout.
++ // ST mode should ignore the requested thread number.
++ createMgr(3, 2, 500, true);
++
++ // Start a ping for an address so we have a context.
++ IOAddress target("127.0.0.2");
++ auto lqp = makeLeaseQueryPair(IOAddress(target), 102);
++
++ // Call startPing().
++ ASSERT_NO_THROW_LOG(mgr_->startPing(lqp.lease_, lqp.query_, parking_lot_));
++
++ // Simulate a completed send for the context.
++ PingContextPtr context;
++ context = getContext(lqp);
++ ASSERT_TRUE(context);
++
++ // Make an ECHO REQUEST packet based on context.
++ ICMPMsgPtr echo_request = makeEchoRequest(context->getLease()->addr_);
++
++ // Invoke sendCompleted() with the fabricated request. Should succeed.
++ ASSERT_NO_THROW_LOG(mgr_->sendCompleted(echo_request, false));
++
++ // Refresh the context.
++ context = getContext(context->getLease()->addr_);
++
++ EXPECT_EQ(PingContext::WAITING_FOR_REPLY, context->getState());
++ EXPECT_EQ(1, context->getEchosSent());
++ EXPECT_GE(context->getLastEchoSentTime(), test_start_time_);
++
++ // Verify the mgr has the same next expiration as the context and
++ // that the expiration timer is running. Allow for some fudge in
++ // the checks.
++ EXPECT_GT(context->getNextExpiry(), test_start_time_);
++ EXPECT_TRUE(fuzzyEqual(mgr_->getNextExpiry(), context->getNextExpiry()));
++ EXPECT_TRUE(fuzzyEqual(mgr_->getExpirationTimerInterval(), 500));
++
++ // Make an ECHO REQUEST packet for an address that has no context.
++ echo_request = makeEchoRequest(IOAddress("192.168.0.1"));
++
++ // Invoking sendCompleted() with a request for a non-existent address should be harmless.
++ ASSERT_NO_THROW_LOG(mgr_->sendCompleted(echo_request, false));
++
++ // Invoking sendCompleted() with an invalid message type should be harmless.
++ echo_request->setType(ICMPMsg::ECHO_REPLY);
++ ASSERT_NO_THROW_LOG(mgr_->sendCompleted(echo_request, false));
++
++ // Invoking sendCompleted() with an empty message should be harmless.
++ echo_request.reset();
++ ASSERT_NO_THROW_LOG(mgr_->sendCompleted(ICMPMsgPtr(), false));
++
++ // Verify expiration values have not been altered.
++ EXPECT_TRUE(fuzzyEqual(mgr_->getNextExpiry(), context->getNextExpiry()));
++ EXPECT_TRUE(fuzzyEqual(mgr_->getExpirationTimerInterval(), 500));
++ }
++
++ /// @brief Exercises PingCheckMgr::replyReceived() for ECHO REPLYs. Note this
++ /// also exercises handleEchoReply().
++ void testReplyReceivedForEchoReply() {
++ SKIP_IF(notRoot());
++
++ // Create a paused manager. 3 threads, 2 echos, 500 ms timeout.
++ // ST mode should ignore the requested thread number.
++ createMgr(3, 2, 500, true);
++
++ // Install a post reply received callback to stop the test if we're done.
++ mgr_->post_reply_received_cb_ =
++ [this](const ICMPMsgPtr& /* reply */) {
++ MultiThreadingLock lock(*mutex_);
++ if (mgr_->parkingLotSize() == 0) {
++ stopTestService();
++ return;
++ }
++ };
++
++ // Turn off loopback routing.
++ mgr_->getChannel()->route_loopback_ = false;
++
++ // Start a ping for an address so we have a context.
++ startTargets(1);
++ auto lqp = lease_query_pairs_[0];
++
++ // Simulate a completed send for the context.
++ PingContextPtr context;
++ context = getContext(lqp);
++ ASSERT_TRUE(context);
++
++ // Make an ECHO REQUEST packet based on context and invoke sendCompleted().
++ ICMPMsgPtr echo_request = makeEchoRequest(context->getLease()->addr_);
++ ASSERT_NO_THROW_LOG(mgr_->sendCompleted(echo_request, false));
++
++ // Should still have one parked query.
++ EXPECT_EQ(1, mgr_->parkingLotSize());
++
++ // Verify the expiration timer is running.
++ EXPECT_TRUE(fuzzyEqual(mgr_->getExpirationTimerInterval(), 500));
++
++ // Make an ECHO REPLY packet based on context and invoke replyReceived().
++ ICMPMsgPtr echo_reply = makeEchoReply(context->getLease()->addr_);
++ ASSERT_NO_THROW_LOG(mgr_->replyReceived(echo_reply));
++
++ // Verify the expiration timer is no longer running.
++ EXPECT_EQ(mgr_->getExpirationTimerInterval(), 0);
++
++ // The context should no longer be in the store.
++ EXPECT_FALSE(getContext(lqp));
++
++ // The query should have been unparked with its address flagged as in use.
++ EXPECT_EQ(mgr_->parkingLotSize(), 0);
++ EXPECT_EQ(unparked_, 1);
++
++ // We should have one decline that matches our lease query pair.
++ compareLeaseQueryPairs(declines_);
++
++ // Make an ECHO REPLY packet for an address that has no context.
++ echo_reply = makeEchoReply(IOAddress("192.168.0.1"));
++
++ // Invoke replyReceived() for a reply with no matching context,
++ // it should not throw.
++ ASSERT_NO_THROW_LOG(mgr_->PingCheckMgr::replyReceived(echo_reply));
++
++ // Invoke replyReceived() with an empty message; it should not throw.
++ // (Bypass the test implementation for this check.)
++ echo_reply.reset();
++ ASSERT_NO_THROW_LOG(mgr_->PingCheckMgr::replyReceived(echo_reply));
++ }
++
++ /// @brief Exercises PingCheckMgr::replyReceived() for UNREACHABLEs. Note this
++ /// also exercises handleTargetUnreachable().
++ void testReplyReceivedForTargetUnreachable() {
++ SKIP_IF(notRoot());
++
++ // Create a paused manager. 3 threads, 2 echos, 500 ms timeout.
++ // ST mode should ignore the requested thread number.
++ createMgr(3, 2, 500, true);
++
++ // Install a post reply received callback to stop the test if we're done.
++ mgr_->post_reply_received_cb_ =
++ [this](const ICMPMsgPtr& /* reply */) {
++ MultiThreadingLock lock(*mutex_);
++ if (mgr_->parkingLotSize() == 0) {
++ stopTestService();
++ return;
++ }
++ };
++
++ // Turn off loopback routing.
++ mgr_->getChannel()->route_loopback_ = false;
++
++ // Start a ping for an address so we have a context.
++ startTargets(1);
++ auto lqp = lease_query_pairs_[0];
++
++ // Simulate a completed send for the context.
++ PingContextPtr context;
++ context = getContext(lqp);
++ ASSERT_TRUE(context);
++
++ // Make an ECHO REQUEST packet based on context and invoke sendCompleted().
++ ICMPMsgPtr echo_request = makeEchoRequest(context->getLease()->addr_);
++ ASSERT_NO_THROW_LOG(mgr_->sendCompleted(echo_request, false));
++
++ // Should still have one parked query.
++ EXPECT_EQ(1, mgr_->parkingLotSize());
++
++ // Verify the expiration timer is running.
++ EXPECT_TRUE(fuzzyEqual(mgr_->getExpirationTimerInterval(), 500));
++
++ // Make a TARGET_UNREACHABLE packet based on context and invoke replyReceived().
++ ICMPMsgPtr unreachable = makeUnreachable(context->getLease()->addr_);
++ ASSERT_NO_THROW_LOG(mgr_->replyReceived(unreachable));
++
++ // Verify the expiration timer is no longer running.
++ EXPECT_EQ(mgr_->getExpirationTimerInterval(), 0);
++
++ // The context should no longer be in the store.
++ EXPECT_FALSE(getContext(lqp));
++
++ // We should have unparked the query from the lot.
++ EXPECT_EQ(mgr_->parkingLotSize(), 0);
++ EXPECT_EQ(unparked_, 1);
++
++ // We should have one free that matches our lease query pair.
++ compareLeaseQueryPairs(frees_);
++
++ // Invoke replyReceived() for an unreachable with no matching context,
++ // it should not throw.
++ unreachable = makeUnreachable(IOAddress("192.168.0.1"));
++ ASSERT_NO_THROW_LOG(mgr_->replyReceived(unreachable));
++ }
++
++ /// @brief Verifies expiration processing by invoking expirationTimedOut().
++ /// This also exercises processExpiredSince(), doNextEcho(), finishFree(),
++ /// and setNextExpiration().
++ void testExpirationProcessing() {
++ SKIP_IF(notRoot());
++
++ // Create a paused manager. 3 threads, 1 echo, 250 ms timeout.
++ // ST mode should ignore the requested thread number.
++ createMgr(3, 1, 250, true);
++
++ // Start four ping checks, then stage them so:
++ //
++ // First context is WAITING_TO_SEND, no expiry.
++ // Second context is WAITING_FOR_REPLY, has expired and has
++ // exhausted min_echos_.
++ // Third context is WAITING_FOR_REPLY, has expired but has
++ // not exhausted min_echos_.
++ // Fourth context is WAITING_FOR_REPLY but has not yet expired.
++ //
++ size_t num_targets = 4;
++
++ // Start the desired number of targets with an unpark callback
++ // that increments the unparked count.
++ startTargets(num_targets);
++
++ // Now establish the desired state for each context.
++ // First context is in WAITING_TO_SEND, no expiry.
++ PingContextPtr context1 = getContext(lease_query_pairs_[0]);
++ ASSERT_TRUE(context1);
++ EXPECT_EQ(context1->getState(), PingContext::WAITING_TO_SEND);
++
++ // Second context setup: expired and has exhausted min_echos_
++ PingContextPtr context2 = getContext(lease_query_pairs_[1]);
++ ASSERT_TRUE(context2);
++ context2->beginWaitingForReply(test_start_time_ - milliseconds(500));
++ updateContext(context2);
++
++ // Third context setup: expired but has not exhausted min_echos_
++ PingContextPtr context3 = getContext(lease_query_pairs_[2]);
++ ASSERT_TRUE(context3);
++ context3->setMinEchos(2);
++ context3->beginWaitingForReply(test_start_time_ - milliseconds(500));
++ updateContext(context3);
++
++ // Fourth context setup: has not yet expired
++ PingContextPtr context4 = getContext(lease_query_pairs_[3]);
++ ASSERT_TRUE(context4);
++ context4->beginWaitingForReply(test_start_time_);
++ updateContext(context4);
++
++ // Now invoke expirationTimedOut().
++ ASSERT_NO_THROW_LOG(mgr_->expirationTimedOut());
++
++ // Verify the contexts are in the expected states.
++ // Context1 should still be WAITING_TO_SEND.
++ context1 = getContext(lease_query_pairs_[0]);
++ ASSERT_TRUE(context1);
++ EXPECT_EQ(context1->getState(), PingContext::WAITING_TO_SEND);
++
++ // Context2 should be gone, having been unparked with its address freed.
++ IOAddress address = lease_query_pairs_[1].lease_->addr_;
++ context2 = getContext(address);
++ ASSERT_FALSE(context2);
++ EXPECT_EQ(unparked_, 1);
++ ASSERT_EQ(frees_.size(), 1);
++ EXPECT_EQ(frees_[0].lease_->addr_, address);
++
++ // Context3 should be in WAITING_TO_SEND.
++ context3 = getContext(lease_query_pairs_[2]);
++ ASSERT_TRUE(context3);
++ EXPECT_EQ(context3->getState(), PingContext::WAITING_TO_SEND);
++
++ // Context4 should still be WAITING_FOR_REPLY.
++ context4 = getContext(lease_query_pairs_[3]);
++ ASSERT_TRUE(context4);
++ EXPECT_EQ(context4->getState(), PingContext::WAITING_FOR_REPLY);
++
++ // Manager's next_expiry_ should be based on context4.
++ EXPECT_TRUE(fuzzyEqual(mgr_->getNextExpiry(), context4->getNextExpiry()));
++ }
++
++ /// @brief Generates a number of ping checks to local loopback addresses.
++ ///
++ /// Pings should all result in ECHO_REPLYs that get "declined". Declined
++ /// addresses are added to a list. Test completion is gated by the parking
++ /// lot becoming empty or the test timing out.
++ void testMultiplePingsWithReply() {
++ SKIP_IF(notRoot());
++
++ // Create manager with thread-pool size of 3, min_echos 1,
++ // reply_timeout 1000 milliseconds. The larger timeout for this test
++ // avoids sporadic expirations, which lead to unaccounted-for UNPARKs.
++ // ST mode should ignore the requested thread number.
++ ASSERT_NO_THROW_LOG(createMgr(3, 1, 1000));
++ ASSERT_TRUE(mgr_);
++
++ // Install a post reply received callback to stop the test if we're done.
++ int num_targets = 25;
++ mgr_->post_reply_received_cb_ =
++ [this, num_targets](const ICMPMsgPtr& /* reply */) {
++ MultiThreadingLock lock(*mutex_);
++ if (unparked_ == num_targets) {
++ stopTestService();
++ return;
++ }
++ };
++
++ // Start the manager. This will open the channel.
++ ASSERT_NO_THROW_LOG(mgr_->start());
++ ASSERT_TRUE(mgr_->isRunning());
++
++ // Start the ping checks.
++ startTargets(num_targets);
++
++ // Run the main thread's IOService until we complete or time out.
++ ASSERT_NO_THROW_LOG(runIOService());
++
++ // Stop the thread pool.
++ ASSERT_NO_THROW_LOG(mgr_->stop());
++ ASSERT_TRUE(mgr_->isStopped());
++
++ // Calling nextToSend() should return false.
++ IOAddress next("0.0.0.0");
++ ASSERT_FALSE(mgr_->nextToSend(next));
++
++ // We should have as many declines as we have pairs created.
++ compareLeaseQueryPairs(declines_);
++ }
++
++ /// @brief Generates a large number of ping checks to local loopback addresses.
++ ///
++ /// A pause is induced approximately halfway through the expected replies,
++ /// at which point the manager is paused and then resumed. This is intended
++ /// to demonstrate the ability to pause and resume the manager gracefully.
++ /// The pings should all result in ECHO_REPLYs that get "declined". Declined
++ /// addresses are added to a list. Test completion is gated by the parking
++ /// lot becoming empty or the test timing out.
++ void testMultiplePingsWithReplyAndPause() {
++ SKIP_IF(notRoot());
++
++ // Create manager with thread-pool size of 3, min_echos 1,
++ // reply_timeout 1000 milliseconds. The larger timeout for this test
++ // avoids sporadic expirations, which lead to unaccounted-for UNPARKs.
++ // ST mode should ignore the requested thread number.
++ ASSERT_NO_THROW_LOG(createMgr(3, 1, 1000));
++ ASSERT_TRUE(mgr_);
++
++ // Generate ping checks to the desired number of targets.
++ // Set up the pause callback to pause at half the number of
++ // expected replies.
++ size_t num_targets = 24;
++ size_t reply_cnt = 0;
++ size_t pause_at = num_targets / 2;
++ bool test_paused = false;
++
++ // Install post reply callback to stop the test thread when we reach
++ // the pause count.
++ mgr_->post_reply_received_cb_ =
++ [this, &reply_cnt, &test_paused, &pause_at](const ICMPMsgPtr& reply) {
++ MultiThreadingLock lock(*mutex_);
++ if (reply->getType() == ICMPMsg::ECHO_REPLY) {
++ ++reply_cnt;
++ if (pause_at && (reply_cnt >= pause_at)) {
++ test_paused = true;
++ stopTestService();
++ pause_at = 0;
++ }
++ }
++ };
++
++ // Start the manager. This will open the channel.
++ ASSERT_NO_THROW_LOG(mgr_->start());
++ ASSERT_TRUE(mgr_->isRunning());
++ ASSERT_NO_THROW_LOG(mgr_->pause());
++
++ // Start half the desired number of ping checks.
++ startTargets(num_targets / 2);
++
++ // Run the main thread's IOService until we pause or time out.
++ ASSERT_NO_THROW_LOG(mgr_->resume());
++ ASSERT_TRUE(mgr_->isRunning());
++ ASSERT_NO_THROW_LOG(runIOService());
++
++ // Manager should still be running. Pause it.
++ ASSERT_TRUE(mgr_->isRunning());
++ if (mgr_->getThreadPool()) {
++ ASSERT_NO_THROW_LOG(mgr_->pause());
++ ASSERT_TRUE(mgr_->isPaused());
++ }
++
++ // Verify that the pause callback is why we stopped, that we
++ // received at least as many replies as expected before the pause,
++ // and that we have more work to do. The check uses a range because
++ // pausing does not happen at exactly the same point from test
++ // run to test run.
++ ASSERT_TRUE(test_paused);
++ ASSERT_TRUE((reply_cnt >= pause_at) && (reply_cnt < num_targets))
++ << "reply_cnt " << reply_cnt
++ << ", pause_at " << pause_at
++ << ", num_targets " << num_targets;
++
++ mgr_->post_reply_received_cb_ =
++ [this, num_targets](const ICMPMsgPtr& /* reply */) {
++ MultiThreadingLock lock(*mutex_);
++ if (unparked_ == num_targets) {
++ stopTestService();
++ return;
++ }
++ };
++
++ // Start second batch of targets.
++ startTargets(num_targets / 2, IOAddress("127.0.0.15"));
++
++ ASSERT_NO_THROW_LOG(mgr_->resume());
++ ASSERT_TRUE(mgr_->isRunning());
++
++ // Restart the main thread's IOService until we complete or time out.
++ ASSERT_NO_THROW_LOG(runIOService());
++
++ ASSERT_NO_THROW_LOG(mgr_->stop());
++ ASSERT_TRUE(mgr_->isStopped());
++
++ // Calling nextToSend() should return false.
++ IOAddress next("0.0.0.0");
++ ASSERT_FALSE(mgr_->nextToSend(next));
++
++ // We should have as many declines as we have pairs created.
++ compareLeaseQueryPairs(declines_);
++ }
++
++ /// @brief Verifies that a recoverable error completion in sendCompleted() results
++ /// in the target address being free to use. In other words, it should have
++ /// the same outcome as receiving a TARGET_UNREACHABLE reply from the OS.
++ void testSendCompletedSendFailed() {
++ SKIP_IF(notRoot());
++
++ // Create manager with thread-pool size of 3, min_echos 1,
++ // reply_timeout 250 milliseconds.
++ // ST mode should ignore the requested thread number.
++ ASSERT_NO_THROW_LOG(createMgr(3, 1, 250));
++ ASSERT_TRUE(mgr_);
++
++ // Install a post send completed callback to stop the test if we're done.
++ mgr_->post_send_completed_cb_ =
++ [this](const ICMPMsgPtr& /* echo */, bool send_failed) {
++ MultiThreadingLock lock(*mutex_);
++ if (send_failed) {
++ stopTestService();
++ }
++ };
++
++ // Start the manager.
++ ASSERT_NO_THROW_LOG(mgr_->start());
++
++ // Set the test channel to complete the first send with a network_unreachable
++ // error. This saves us from trying to determine an address in the test
++ // environment that would cause it.
++ mgr_->getChannel()->ec_on_write_number_ = 1;
++ mgr_->getChannel()->write_error_ec_ = make_error_code(network_unreachable);
++
++ // Start a ping for one target.
++ startTargets(1);
++ auto lqp = lease_query_pairs_[0];
++
++ // Run the main thread's IOService until we complete or time out.
++ ASSERT_NO_THROW_LOG(runIOService());
++
++ // Verify the expiration timer is no longer running.
++ EXPECT_EQ(mgr_->getExpirationTimerInterval(), 0);
++
++ // The context should no longer be in the store.
++ EXPECT_FALSE(getContext(lqp));
++
++ // We should have unparked the query from the lot.
++ EXPECT_EQ(mgr_->parkingLotSize(), 0);
++ EXPECT_EQ(unparked_, 1);
++
++ // We should have one free that matches our lease query pair.
++ compareLeaseQueryPairs(frees_);
++ }
++
++ /// @brief Exercises shouldPing().
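++ ///
++ /// In brief, the outcomes exercised below are:
++ ///   - ping checking disabled, or the channel not open   -> CONTINUE
++ ///   - prior lease held by the same client, recent cltt  -> CONTINUE
++ ///   - no prior lease, stale cltt, or a different client -> PARK
++ ///   - a check already in progress for the address       -> DROP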
++ void testShouldPingTest() {
++ SKIP_IF(notRoot());
++
++ // Create manager with thread-pool size of 3, min_echos 1,
++ // reply_timeout 250 milliseconds.
++ // ST mode should ignore the requested thread number.
++ ASSERT_NO_THROW_LOG(createMgr(3, 1, 250));
++ ASSERT_TRUE(mgr_);
++
++ // Make a default config.
++ PingCheckConfigPtr config(new PingCheckConfig());
++
++ // Make a lease query pair.
++ auto lqp1 = makeLeaseQueryPair(IOAddress("127.0.0.2"), 111);
++ const uint8_t id1[] = { 0x31, 0x32, 0x33, 0x34 };
++ ClientIdPtr cid1(new ClientId(id1, sizeof(id1)));
++ lqp1.lease_->client_id_ = cid1;
++
++ Lease4Ptr empty_lease;
++ CalloutHandle::CalloutNextStep status;
++
++ // Ping checking enabled, no old lease, channel doesn't exist, should return CONTINUE.
++ ASSERT_TRUE(config->getEnablePingCheck());
++ ASSERT_NO_THROW_LOG(status = mgr_->shouldPing(lqp1.lease_, lqp1.query_, empty_lease, config));
++ EXPECT_EQ(status, CalloutHandle::NEXT_STEP_CONTINUE);
++
++ // Start the manager, then pause it. This lets us start pings without
++ // them changing state.
++ ASSERT_NO_THROW_LOG(mgr_->start());
++ ASSERT_NO_THROW_LOG(mgr_->pause());
++
++ // Ping checking disabled, no old lease, should return CONTINUE.
++ config->setEnablePingCheck(false);
++ ASSERT_NO_THROW_LOG(status = mgr_->shouldPing(lqp1.lease_, lqp1.query_, empty_lease, config));
++ EXPECT_EQ(status, CalloutHandle::NEXT_STEP_CONTINUE);
++
++ // Ping checking enabled, no old lease, should return PARK.
++ config->setEnablePingCheck(true);
++ ASSERT_NO_THROW_LOG(status = mgr_->shouldPing(lqp1.lease_, lqp1.query_, empty_lease, config));
++ EXPECT_EQ(status, CalloutHandle::NEXT_STEP_PARK);
++
++ // Make an old lease based on the first lease.
++ time_t now = time(0);
++ Lease4Ptr old_lease(new Lease4(*(lqp1.lease_)));
++
++ // A prior lease belonging to the same client with a cltt more than
++ // ping-cltt-secs in the past should return PARK.
++ old_lease->cltt_ = now - config->getPingClttSecs() * 2;
++ ASSERT_NO_THROW_LOG(status = mgr_->shouldPing(lqp1.lease_, lqp1.query_, old_lease, config));
++ EXPECT_EQ(status, CalloutHandle::NEXT_STEP_PARK);
++
++ // A prior lease belonging to the same client with a cltt less than
++ // ping-cltt-secs in the past should return CONTINUE.
++ old_lease->cltt_ = now - config->getPingClttSecs() / 2;
++ ASSERT_NO_THROW_LOG(status = mgr_->shouldPing(lqp1.lease_, lqp1.query_, old_lease, config));
++ EXPECT_EQ(status, CalloutHandle::NEXT_STEP_CONTINUE);
++
++ // A prior lease belonging to a different client should return PARK.
++ const uint8_t id2[] = { 0x35, 0x36, 0x37, 0x34 };
++ old_lease->client_id_.reset(new ClientId(id2, sizeof(id2)));
++ ASSERT_NO_THROW_LOG(status = mgr_->shouldPing(lqp1.lease_, lqp1.query_, old_lease, config));
++ EXPECT_EQ(status, CalloutHandle::NEXT_STEP_PARK);
++
++ // Now let's start a ping for the lease-query pair.
++ ASSERT_NO_THROW_LOG(mgr_->startPing(lqp1.lease_, lqp1.query_, parking_lot_));
++
++ // Make a second lease query pair. Same address, different client.
++ auto lqp2 = makeLeaseQueryPair(IOAddress("127.0.0.2"), 333);
++ lqp2.lease_->client_id_ = old_lease->client_id_;
++
++ // Trying to start a ping for an address already being checked should return DROP.
++ ASSERT_NO_THROW_LOG(status = mgr_->shouldPing(lqp2.lease_, lqp2.query_, empty_lease, config));
++ EXPECT_EQ(status, CalloutHandle::NEXT_STEP_DROP);
++
++ // Stop the mgr.
++ ASSERT_NO_THROW(mgr_->stop());
++
++ // Ping checking enabled, no old lease, channel isn't open, should return CONTINUE.
++ ASSERT_TRUE(config->getEnablePingCheck());
++ ASSERT_NO_THROW_LOG(status = mgr_->shouldPing(lqp1.lease_, lqp1.query_, empty_lease, config));
++ EXPECT_EQ(status, CalloutHandle::NEXT_STEP_CONTINUE);
++ }
++
++ /// @brief Exercises getScopedConfig().
++ void testGetScopedConfig() {
++ CfgMgr::instance().setFamily(AF_INET);
++
++ // Start with an empty cache; any subnet that hasn't been seen should get parsed
++ // and, if valid, added to the cache.
++ CfgMgr& cfg_mgr = CfgMgr::instance();
++ CfgSubnets4Ptr subnets = cfg_mgr.getStagingCfg()->getCfgSubnets4();
++
++ // Subnet 1 has no ping-check config. Should return global config.
++ ElementPtr user_context = Element::createMap();
++ Subnet4Ptr subnet(new Subnet4(IOAddress("192.0.1.0"), 24, 30, 40, 60, 1));
++ subnet->setContext(user_context);
++ subnets->add(subnet);
++
++ // Subnet 2 has invalid ping-check content. Should return global config.
++ std::string invalid_json_cfg =
++ R"({
++ "ping-check": {
++ "enable-ping-check" : true,
++ "bogus-key-word" : true
++ }
++ })";
++
++ user_context = Element::fromJSON(invalid_json_cfg);
++ subnet.reset(new Subnet4(IOAddress("192.0.2.0"), 24, 30, 40, 60, 2));
++ subnet->setContext(user_context);
++ subnets->add(subnet);
++
++ // Subnet 3 has a valid ping-check config. Should return the subnet config.
++ std::string valid_json_cfg =
++ R"({
++ "ping-check": {
++ "enable-ping-check" : true,
++ "min-ping-requests" : 13
++ }
++ })";
++
++ user_context = Element::fromJSON(valid_json_cfg);
++ subnet.reset(new Subnet4(IOAddress("192.0.3.0"), 24, 30, 40, 60, 3));
++ subnet->setContext(user_context);
++ subnets->add(subnet);
++
++ // Commit the subnet configuration.
++ cfg_mgr.commit();
++
++ // Create manager with thread-pool size of 3, min_echos 2, reply_timeout 250 ms.
++ ASSERT_NO_THROW_LOG(createMgr(3, 2, 250));
++ ASSERT_TRUE(mgr_);
++
++ Lease4Ptr lease(new Lease4());
++ PingCheckConfigPtr config;
++
++ // Should get the global configuration for subnet 1.
++ lease->addr_ = IOAddress("192.0.1.1");
++ lease->subnet_id_ = 1;
++ ASSERT_NO_THROW_LOG(config = mgr_->getScopedConfig(lease));
++ ASSERT_TRUE(config);
++ ASSERT_EQ(config, mgr_->getGlobalConfig());
++
++ // Should get the global configuration for subnet 2.
++ lease->addr_ = IOAddress("192.0.2.1");
++ lease->subnet_id_ = 2;
++ ASSERT_NO_THROW_LOG(config = mgr_->getScopedConfig(lease));
++ ASSERT_TRUE(config);
++ ASSERT_EQ(config, mgr_->getGlobalConfig());
++
++ // Should get subnet configuration for subnet 3.
++ lease->addr_ = IOAddress("192.0.3.1");
++ lease->subnet_id_ = 3;
++ ASSERT_NO_THROW_LOG(config = mgr_->getScopedConfig(lease));
++ ASSERT_TRUE(config);
++ ASSERT_NE(config, mgr_->getGlobalConfig());
++ EXPECT_EQ(config->getMinPingRequests(), 13);
++ }
++
++ /// @brief Exercises checkSuspended().
++ ///
++ /// This is intended to verify that ping checking is suspended and resumed based
++ /// on the DHCP service state, not to verify every place that checkSuspended()
++ /// is called.
++ void testCheckSuspended() {
++ SKIP_IF(notRoot());
++
++ // Create manager with thread-pool size of 3, min_echos 1,
++ // reply_timeout 250 milliseconds.
++ ASSERT_NO_THROW_LOG(createMgr(3, 1, 250));
++ ASSERT_TRUE(mgr_);
++
++ // Make a default config.
++ PingCheckConfigPtr config(new PingCheckConfig());
++
++ // Give the manager a NetworkState instance.
++ NetworkStatePtr network_state(new NetworkState());
++ mgr_->setNetworkState(network_state);
++
++ // Verify that ping checking is not suspended.
++ ASSERT_FALSE(mgr_->checkSuspended());
++
++ // Start the manager, then pause it. This lets us start pings without
++ // them changing state.
++ ASSERT_NO_THROW_LOG(mgr_->start());
++ ASSERT_NO_THROW_LOG(mgr_->pause());
++
++ // Verify the ping store is empty.
++ auto store = mgr_->getStore();
++ ASSERT_TRUE(store);
++ auto pings = store->getAll();
++ ASSERT_EQ(0, pings->size());
++
++ // Make a lease query pair.
++ auto lqp1 = makeLeaseQueryPair(IOAddress("127.0.0.2"), 111);
++ const uint8_t id1[] = { 0x31, 0x32, 0x33, 0x34 };
++ ClientIdPtr cid1(new ClientId(id1, sizeof(id1)));
++ lqp1.lease_->client_id_ = cid1;
++
++ // Now let's try to start a ping for the lease-query pair. It should work.
++ ASSERT_NO_THROW_LOG(mgr_->startPing(lqp1.lease_, lqp1.query_, parking_lot_));
++
++ // Verify we have an entry in the store.
++ pings = store->getAll();
++ ASSERT_EQ(1, pings->size());
++
++ // Disable the DHCP service.
++ network_state->disableService(NetworkState::USER_COMMAND);
++
++ // Make a second lease query pair. Different address, different client.
++ auto lqp2 = makeLeaseQueryPair(IOAddress("127.0.0.3"), 333);
++ const uint8_t id2[] = { 0x31, 0x32, 0x33, 0x35 };
++ ClientIdPtr cid2(new ClientId(id2, sizeof(id2)));
++ lqp2.lease_->client_id_ = cid2;
++
++ // Try to start a ping. We should not be able to do it.
++ ASSERT_THROW_MSG(mgr_->startPing(lqp2.lease_, lqp2.query_, parking_lot_),
++ InvalidOperation,
++ "PingCheckMgr::startPing() - DHCP service is suspended!");
++
++ // Store should be empty, having been flushed by suspension detection.
++ pings = store->getAll();
++ ASSERT_EQ(0, pings->size());
++
++ // Ping checking should report as suspended.
++ ASSERT_TRUE(mgr_->checkSuspended());
++
++ // Re-enable the DHCP service.
++ network_state->enableService(NetworkState::USER_COMMAND);
++
++ // Suspension checking should lift the suspension and we should once again
++ // be able to start a new ping check.
++ ASSERT_NO_THROW_LOG(mgr_->startPing(lqp2.lease_, lqp2.query_, parking_lot_));
++
++ // Store should have one check in it.
++ pings = store->getAll();
++ ASSERT_EQ(1, pings->size());
++
++ // Ping checking should report as not suspended.
++ ASSERT_FALSE(mgr_->checkSuspended());
++ }
++
++ /// @brief Manager instance.
++ TestablePingCheckMgrPtr mgr_;
++
++ /// @brief List of lease/query pairs used during the test, in the order
++ /// they were created.
++ LeaseQueryPairs lease_query_pairs_;
++
++ /// @brief The mutex used to protect internal state.
++ const boost::scoped_ptr<std::mutex> mutex_;
++
++ /// @brief Marks the start time of a test.
++ TimeStamp test_start_time_;
++
++ /// @brief Parking lot where the associated query is parked.
++ /// If empty, parking is not being employed.
++ ParkingLotHandlePtr parking_lot_;
++
++ /// @brief Number of queries unparked during a test.
++ size_t unparked_;
++
++ /// @brief List of leases that were found to be in-use during a test.
++ LeaseQueryPairs declines_;
++
++ /// @brief List of leases that were found to be free to use during a test.
++ LeaseQueryPairs frees_;
++};
++
++TEST_F(PingCheckMgrTest, operationalBasicsST) {
++ testOperationalBasics(0);
++}
++
++TEST_F(PingCheckMgrTest, operationalBasicsMT) {
++ MultiThreadingTest mt;
++ testOperationalBasics(3);
++}
++
++TEST_F(PingCheckMgrTest, startPingST) {
++ testStartPing();
++}
++
++TEST_F(PingCheckMgrTest, startPingMT) {
++ MultiThreadingTest mt;
++ testStartPing();
++}
++
++TEST_F(PingCheckMgrTest, nextToSendST) {
++ testNextToSend();
++}
++
++TEST_F(PingCheckMgrTest, nextToSendMT) {
++ MultiThreadingTest mt;
++ testNextToSend();
++}
++
++TEST_F(PingCheckMgrTest, setNextExpirationST) {
++ testSetNextExpiration();
++}
++
++TEST_F(PingCheckMgrTest, setNextExpirationMT) {
++ MultiThreadingTest mt;
++ testSetNextExpiration();
++}
++
++TEST_F(PingCheckMgrTest, sendCompletedST) {
++ testSendCompleted();
++}
++
++TEST_F(PingCheckMgrTest, sendCompletedMT) {
++ MultiThreadingTest mt;
++ testSendCompleted();
++}
++
++TEST_F(PingCheckMgrTest, replyReceivedForEchoReplyST) {
++ testReplyReceivedForEchoReply();
++}
++
++TEST_F(PingCheckMgrTest, replyReceivedForEchoReplyMT) {
++ MultiThreadingTest mt;
++ testReplyReceivedForEchoReply();
++}
++
++TEST_F(PingCheckMgrTest, replyReceivedForTargetUnreachableST) {
++ testReplyReceivedForTargetUnreachable();
++}
++
++TEST_F(PingCheckMgrTest, replyReceivedForTargetUnreachableMT) {
++ MultiThreadingTest mt;
++ testReplyReceivedForTargetUnreachable();
++}
++
++TEST_F(PingCheckMgrTest, expirationProcessingST) {
++ testExpirationProcessing();
++}
++
++TEST_F(PingCheckMgrTest, expirationProcessingMT) {
++ MultiThreadingTest mt;
++ testExpirationProcessing();
++}
++
++TEST_F(PingCheckMgrTest, multiplePingsWithReplyST) {
++ testMultiplePingsWithReply();
++}
++
++TEST_F(PingCheckMgrTest, multiplePingsWithReplyMT) {
++ MultiThreadingTest mt;
++ testMultiplePingsWithReply();
++}
++
++TEST_F(PingCheckMgrTest, multiplePingsWithReplyAndPauseST) {
++ testMultiplePingsWithReplyAndPause();
++}
++
++TEST_F(PingCheckMgrTest, multiplePingsWithReplyAndPauseMT) {
++ MultiThreadingTest mt;
++ testMultiplePingsWithReplyAndPause();
++}
++
++TEST_F(PingCheckMgrTest, sendCompletedSendFailedST) {
++ testSendCompletedSendFailed();
++}
++
++TEST_F(PingCheckMgrTest, sendCompletedSendFailedMT) {
++ MultiThreadingTest mt;
++ testSendCompletedSendFailed();
++}
++
++TEST_F(PingCheckMgrTest, shouldPingST) {
++ testShouldPingTest();
++}
++
++TEST_F(PingCheckMgrTest, shouldPingMT) {
++ MultiThreadingTest mt;
++ testShouldPingTest();
++}
++
++TEST_F(PingCheckMgrTest, getScopedConfigST) {
++ testGetScopedConfig();
++}
++
++TEST_F(PingCheckMgrTest, getScopedConfigMT) {
++ MultiThreadingTest mt;
++ testGetScopedConfig();
++}
++
++TEST_F(PingCheckMgrTest, checkSuspendedST) {
++ testCheckSuspended();
++}
++
++TEST_F(PingCheckMgrTest, checkSuspendedMT) {
++ MultiThreadingTest mt;
++ testCheckSuspended();
++}
++
++} // end of anonymous namespace
+diff --git a/src/hooks/dhcp/ping_check/tests/ping_context_store_unittests.cc b/src/hooks/dhcp/ping_check/tests/ping_context_store_unittests.cc
+new file mode 100644
+index 0000000000..3a8854eb0e
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/ping_context_store_unittests.cc
+@@ -0,0 +1,467 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/// @file This file contains tests which exercise the PingContextStore class.
++
++#include <config.h>
++#include <ping_context_store.h>
++#include <asiolink/io_address.h>
++#include <testutils/gtest_utils.h>
++#include <testutils/multi_threading_utils.h>
++
++#include <gtest/gtest.h>
++#include <sstream>
++
++using namespace std;
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::dhcp;
++using namespace isc::ping_check;
++using namespace isc::test;
++using namespace std::chrono;
++
++namespace {
++
++/// @brief Test fixture class for @c PingContextStore
++///
++/// In order to facilitate single and multi threaded testing,
++/// individual tests are implemented as methods that are called
++/// from within TEST_F bodies rather than in TEST_F bodies.
++class PingContextStoreTest : public ::testing::Test {
++public:
++
++ /// @brief Constructor
++ PingContextStoreTest() {
++ }
++
++ /// @brief Destructor
++ virtual ~PingContextStoreTest() = default;
++
++ /// @brief Verifies that contexts can be added to the store given valid leases and queries.
++ /// Also verifies that they can be fetched by address.
++ void addContextTest() {
++ PingContextStore store;
++ PingContextPtr context;
++
++ // Add three contexts, one for each lease/query.
++ auto now = PingContext::now();
++ for (int i = 0; i < leases_.size(); ++i) {
++ ASSERT_NO_THROW_LOG(context = store.addContext(leases_[i], queries_[i], 2, 300));
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[i], context->getLease());
++ EXPECT_EQ(queries_[i], context->getQuery());
++
++ // Check initial values.
++ EXPECT_EQ(PingContext::WAITING_TO_SEND, context->getState());
++ EXPECT_LE(now, context->getSendWaitStart());
++ EXPECT_EQ(2, context->getMinEchos());
++ EXPECT_EQ(300, context->getReplyTimeout());
++ }
++
++ // Make sure they can be fetched by address and by query individually.
++ for (int i = 0; i < leases_.size(); ++i) {
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[i]->addr_));
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[i], context->getLease());
++
++ ASSERT_NO_THROW_LOG(context = store.getContextByQuery(queries_[i]));
++ ASSERT_TRUE(context);
++ EXPECT_EQ(queries_[i], context->getQuery());
++ }
++ }
++
++ /// @brief Verifies that the store only allows one entry per IP address.
++ void addContextDuplicateTest() {
++ PingContextStore store;
++ PingContextPtr context;
++
++ ASSERT_NO_THROW_LOG(context = store.addContext(leases_[0], queries_[0], 1, 100));
++ ASSERT_TRUE(context);
++ ASSERT_THROW_MSG(store.addContext(leases_[0], queries_[0], 1, 100), DuplicateContext,
++ "PingContextStore::addContex: context already exists for: 192.0.2.1");
++ }
++
++ /// @brief Verify that addContext fails given invalid input.
++ void addContextInvalidTest() {
++ PingContextStore store;
++
++ // Verify that given an empty lease the add will fail.
++ Lease4Ptr empty_lease;
++ ASSERT_THROW_MSG(store.addContext(empty_lease, queries_[0], 1, 100), BadValue,
++ "PingContextStore::addContext failed:"
++ " PingContext ctor - lease cannot be empty");
++
++ // Verify that given an empty query the add will fail.
++ Pkt4Ptr empty_query;
++ ASSERT_THROW_MSG(store.addContext(leases_[0], empty_query, 1, 100), BadValue,
++ "PingContextStore::addContext failed:"
++ " PingContext ctor - query cannot be empty");
++ }
++
++ /// @brief Verify that contexts can be deleted from the store.
++ void deleteContextTest() {
++ PingContextStore store;
++
++ // Add contexts to store.
++ for (int i = 0; i < leases_.size(); ++i) {
++ PingContextPtr context;
++ ASSERT_NO_THROW_LOG(context = store.addContext(leases_[i], queries_[i], 1, 100));
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[i], context->getLease());
++ EXPECT_EQ(queries_[i], context->getQuery());
++ }
++
++ // Fetch the second context.
++ PingContextPtr orig_context;
++ ASSERT_NO_THROW_LOG(orig_context = store.getContextByAddress(leases_[1]->addr_));
++ ASSERT_TRUE(orig_context);
++ EXPECT_EQ(leases_[1], orig_context->getLease());
++
++ // Delete it.
++ ASSERT_NO_THROW_LOG(store.deleteContext(orig_context));
++
++ // Try to fetch it, shouldn't find it.
++ PingContextPtr context;
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[1]->addr_));
++ ASSERT_FALSE(context);
++
++ // Deleting it again should do no harm.
++ ASSERT_NO_THROW_LOG(store.deleteContext(orig_context));
++ }
++
++ /// @brief Verify that contexts in the store can be updated.
++ void updateContextTest() {
++ PingContextStore store;
++ PingContextPtr context;
++
++ // Try to update a context that doesn't exist. It should throw.
++ ASSERT_NO_THROW_LOG(context.reset(new PingContext(leases_[0], queries_[0])));
++ ASSERT_THROW_MSG(store.updateContext(context), InvalidOperation,
++ "PingContextStore::updateContext failed for address:"
++ " 192.0.2.1, not in store");
++
++ auto test_start = PingContext::now();
++
++ // Add contexts to store.
++ for (int i = 0; i < leases_.size(); ++i) {
++ ASSERT_NO_THROW_LOG(context = store.addContext(leases_[i], queries_[i], 1, 100));
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[i], context->getLease());
++ EXPECT_EQ(queries_[i], context->getQuery());
++ }
++
++ // Fetch the second context.
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[1]->addr_));
++ ASSERT_TRUE(context);
++ ASSERT_EQ(leases_[1], context->getLease());
++ ASSERT_EQ(queries_[1], context->getQuery());
++
++ // Check initial values for state and expiration.
++ EXPECT_EQ(PingContext::WAITING_TO_SEND, context->getState());
++ EXPECT_LE(test_start, context->getSendWaitStart());
++ EXPECT_LE(PingContext::EMPTY_TIME(), context->getNextExpiry());
++
++ // Modify the state and expiration, then update the context.
++ auto wait_start = PingContext::now();
++ context->beginWaitingForReply(wait_start);
++ ASSERT_NO_THROW_LOG(store.updateContext(context));
++
++ // Fetch the context and verify the values are correct.
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[1]->addr_));
++ ASSERT_TRUE(context);
++ EXPECT_EQ(PingContext::WAITING_FOR_REPLY, context->getState());
++ EXPECT_LE(wait_start + milliseconds(context->getReplyTimeout()), context->getNextExpiry());
++ }
++
++ /// @brief Verify that contexts can be fetched based on when they entered WAITING_TO_SEND
++ /// by getNextToSend().
++ void getNextToSendTest() {
++ PingContextStore store;
++ PingContextPtr context;
++
++ // Capture time now.
++ auto start_time = PingContext::now();
++
++ // Add contexts to store.
++ for (int i = 0; i < leases_.size(); ++i) {
++ ASSERT_NO_THROW_LOG(context = store.addContext(leases_[i], queries_[i], 1, 100));
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[i], context->getLease());
++ EXPECT_EQ(queries_[i], context->getQuery());
++ usleep(1000);
++ }
++
++ // Fetching the next context to send should return the first context as
++ // it has the oldest send wait start time.
++ context.reset();
++ ASSERT_NO_THROW(context = store.getNextToSend());
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[0], context->getLease());
++ EXPECT_EQ(queries_[0], context->getQuery());
++ EXPECT_LE(start_time, context->getSendWaitStart());
++
++ // Update the first context's state to TARGET_FREE which should
++ // disqualify it from being returned as next to send.
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[0]->addr_));
++ ASSERT_TRUE(context);
++ ASSERT_EQ(PingContext::WAITING_TO_SEND, context->getState());
++ context->setState(PingContext::TARGET_FREE);
++ ASSERT_NO_THROW_LOG(store.updateContext(context));
++
++ // Update the send wait start of the second context, making it the
++ // newest (most recent) send wait start time.
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[1]->addr_));
++ ASSERT_TRUE(context);
++ ASSERT_EQ(PingContext::WAITING_TO_SEND, context->getState());
++ context->setSendWaitStart(start_time + milliseconds(1000));
++ ASSERT_NO_THROW_LOG(store.updateContext(context));
++
++ // Update the send wait start of the third context, making it the oldest among those still waiting to send.
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[2]->addr_));
++ ASSERT_TRUE(context);
++ ASSERT_EQ(PingContext::WAITING_TO_SEND, context->getState());
++ context->setSendWaitStart(start_time + milliseconds(500));
++ ASSERT_NO_THROW_LOG(store.updateContext(context));
++
++ // Fetching the next context to send should return the third context.
++ context.reset();
++ ASSERT_NO_THROW(context = store.getNextToSend());
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[2], context->getLease());
++ EXPECT_EQ(queries_[2], context->getQuery());
++ EXPECT_EQ(start_time + milliseconds(500), context->getSendWaitStart());
++ }
++
++ /// @brief Verify that contexts can be fetched based on when they expire using
++ /// getExpiresNext() and getExpiredSince().
++ void getByExpirationTest() {
++ PingContextStore store;
++ PingContextPtr context;
++
++ // Add contexts to store.
++ for (int i = 0; i < leases_.size(); ++i) {
++ ASSERT_NO_THROW_LOG(context = store.addContext(leases_[i], queries_[i], 1, 100));
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[i], context->getLease());
++ EXPECT_EQ(queries_[i], context->getQuery());
++ }
++
++ // Capture time now.
++ auto start_time = PingContext::now();
++
++ // Update the state and expiration of the first context.
++ // State set to TARGET_FREE should disqualify it from
++ // fetch by expiration even though it has the soonest expiration
++ // time.
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[0]->addr_));
++ ASSERT_TRUE(context);
++ context->setState(PingContext::TARGET_FREE);
++ context->setNextExpiry(start_time + milliseconds(1));
++ ASSERT_NO_THROW_LOG(store.updateContext(context));
++
++ // Update the state and expiration of the second context, giving it
++ // the latest expiration time.
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[1]->addr_));
++ ASSERT_TRUE(context);
++ context->setState(PingContext::WAITING_FOR_REPLY);
++ context->setNextExpiry(start_time + milliseconds(1000));
++ ASSERT_NO_THROW_LOG(store.updateContext(context));
++
++ // Update the state and expiration of the third context, making it the
++ // soonest qualified expiration time.
++ ASSERT_NO_THROW_LOG(context = store.getContextByAddress(leases_[2]->addr_));
++ ASSERT_TRUE(context);
++ context->setState(PingContext::WAITING_FOR_REPLY);
++ context->setNextExpiry(start_time + milliseconds(500));
++ ASSERT_NO_THROW_LOG(store.updateContext(context));
++
++ // Fetching the context that expires next should return the third context.
++ context.reset();
++ ASSERT_NO_THROW(context = store.getExpiresNext());
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[2], context->getLease());
++ EXPECT_EQ(queries_[2], context->getQuery());
++ EXPECT_EQ(start_time + milliseconds(500), context->getNextExpiry());
++
++ // Fetch all that have expired since current time. Should be none.
++ PingContextCollectionPtr expired_since;
++ ASSERT_NO_THROW_LOG(expired_since = store.getExpiredSince());
++ ASSERT_TRUE(expired_since);
++ EXPECT_EQ(0, expired_since->size());
++
++ // Fetch all that have expired since start time + 750 ms, should be third context.
++ ASSERT_NO_THROW_LOG(expired_since = store.getExpiredSince(start_time + milliseconds(750)));
++ ASSERT_TRUE(expired_since);
++ EXPECT_EQ(1, expired_since->size());
++ context = (*expired_since)[0];
++ EXPECT_EQ(leases_[2], context->getLease());
++ EXPECT_EQ(queries_[2], context->getQuery());
++ EXPECT_EQ(start_time + milliseconds(500), context->getNextExpiry());
++
++ // Fetch all that have expired since start time + 1500 ms.
++ // Should be the third and second contexts.
++ ASSERT_NO_THROW_LOG(expired_since = store.getExpiredSince(start_time + milliseconds(1500)));
++ ASSERT_TRUE(expired_since);
++ EXPECT_EQ(2, expired_since->size());
++
++ // First in list should be the third context.
++ context = (*expired_since)[0];
++ EXPECT_EQ(leases_[2], context->getLease());
++ EXPECT_EQ(queries_[2], context->getQuery());
++ EXPECT_EQ(start_time + milliseconds(500), context->getNextExpiry());
++
++ // The last one in the list should be the second context.
++ context = (*expired_since)[1];
++ EXPECT_EQ(leases_[1], context->getLease());
++ EXPECT_EQ(queries_[1], context->getQuery());
++ EXPECT_EQ(start_time + milliseconds(1000), context->getNextExpiry());
++ }
++
++ /// @brief Verifies that getAll() and clear() work properly.
++ void getAllAndClearTest() {
++ PingContextStore store;
++
++ // Add contexts to store.
++ for (int i = 0; i < leases_.size(); ++i) {
++ PingContextPtr context;
++ ASSERT_NO_THROW_LOG(context = store.addContext(leases_[i], queries_[i], 1, 100));
++ ASSERT_TRUE(context);
++ EXPECT_EQ(leases_[i], context->getLease());
++ EXPECT_EQ(queries_[i], context->getQuery());
++ }
++
++ // Fetch them all.
++ PingContextCollectionPtr contexts;
++ ASSERT_NO_THROW_LOG(contexts = store.getAll());
++ ASSERT_EQ(leases_.size(), contexts->size());
++
++ // Verify we got them all in order.
++ int i = 0;
++ for (auto const& context : *contexts) {
++ EXPECT_EQ(leases_[i], context->getLease());
++ EXPECT_EQ(queries_[i], context->getQuery());
++ ++i;
++ }
++
++ // Now clear the store. Verify it's empty.
++ ASSERT_NO_THROW_LOG(store.clear());
++ ASSERT_NO_THROW_LOG(contexts = store.getAll());
++ ASSERT_EQ(0, contexts->size());
++
++ // Verify clearing an empty store does no harm.
++ ASSERT_NO_THROW_LOG(store.clear());
++ }
++
++private:
++ /// @brief Prepares the class for a test.
++ virtual void SetUp() {
++ Lease4Ptr lease;
++ lease.reset(new Lease4());
++ lease->addr_ = IOAddress("192.0.2.1");
++ leases_.push_back(lease);
++
++ lease.reset(new Lease4());
++ lease->addr_ = IOAddress("192.0.2.2");
++ leases_.push_back(lease);
++
++ lease.reset(new Lease4());
++ lease->addr_ = IOAddress("192.0.2.3");
++ leases_.push_back(lease);
++
++ Pkt4Ptr query;
++ query.reset(new Pkt4(DHCPDISCOVER, 101));
++ queries_.push_back(query);
++
++ query.reset(new Pkt4(DHCPDISCOVER, 102));
++ queries_.push_back(query);
++
++ query.reset(new Pkt4(DHCPDISCOVER, 103));
++ queries_.push_back(query);
++
++ ASSERT_EQ(leases_.size(), queries_.size());
++ }
++
++public:
++ /// @brief List of pre-made leases.
++ std::vector<Lease4Ptr> leases_;
++
++ /// @brief List of pre-made queries.
++ std::vector<Pkt4Ptr> queries_;
++};
++
++TEST_F(PingContextStoreTest, addContext) {
++ addContextTest();
++}
++
++TEST_F(PingContextStoreTest, addContextMultiThreading) {
++ MultiThreadingTest mt;
++ addContextTest();
++}
++
++TEST_F(PingContextStoreTest, addContextDuplicate) {
++ addContextDuplicateTest();
++}
++
++TEST_F(PingContextStoreTest, addContextDuplicateMultiThreading) {
++ MultiThreadingTest mt;
++ addContextDuplicateTest();
++}
++
++TEST_F(PingContextStoreTest, addContextInvalid) {
++ addContextInvalidTest();
++}
++
++TEST_F(PingContextStoreTest, addContextInvalidMultiThreading) {
++ MultiThreadingTest mt;
++ addContextInvalidTest();
++}
++
++TEST_F(PingContextStoreTest, deleteContext) {
++ deleteContextTest();
++}
++
++TEST_F(PingContextStoreTest, deleteContextMultiThreading) {
++ MultiThreadingTest mt;
++ deleteContextTest();
++}
++
++TEST_F(PingContextStoreTest, updateContext) {
++ updateContextTest();
++}
++
++TEST_F(PingContextStoreTest, updateContextMultiThreading) {
++ MultiThreadingTest mt;
++ updateContextTest();
++}
++
++TEST_F(PingContextStoreTest, getNextToSend) {
++ getNextToSendTest();
++}
++
++TEST_F(PingContextStoreTest, getNextToSendMultiThreading) {
++ MultiThreadingTest mt;
++ getNextToSendTest();
++}
++
++TEST_F(PingContextStoreTest, getByExpiration) {
++ getByExpirationTest();
++}
++
++TEST_F(PingContextStoreTest, getByExpirationMultiThreading) {
++ MultiThreadingTest mt;
++ getByExpirationTest();
++}
++
++TEST_F(PingContextStoreTest, getAllAndClear) {
++ getAllAndClearTest();
++}
++
++TEST_F(PingContextStoreTest, getAllAndClearMultiThreading) {
++ MultiThreadingTest mt;
++ getAllAndClearTest();
++}
++
++} // end of anonymous namespace
+diff --git a/src/hooks/dhcp/ping_check/tests/ping_context_unittests.cc b/src/hooks/dhcp/ping_check/tests/ping_context_unittests.cc
+new file mode 100644
+index 0000000000..4a38277ad6
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/ping_context_unittests.cc
+@@ -0,0 +1,146 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++/// @file This file contains tests which exercise the PingContext class.
++
++#include <config.h>
++#include <ping_context.h>
++#include <asiolink/io_address.h>
++#include <testutils/gtest_utils.h>
++
++#include <gtest/gtest.h>
++#include <sstream>
++
++using namespace std;
++using namespace isc;
++using namespace isc::asiolink;
++using namespace isc::dhcp;
++using namespace isc::ping_check;
++using namespace std::chrono;
++
++namespace {
++
++TEST(PingContextTest, validConstruction) {
++ // Make a valid lease and query.
++ Lease4Ptr lease(new Lease4());
++ lease->addr_ = IOAddress("192.0.2.1");
++ Pkt4Ptr query(new Pkt4(DHCPDISCOVER, 1234));
++
++ // Capture time now.
++ auto start_time = PingContext::now();
++
++ // Construct the context.
++ PingContextPtr context;
++ ASSERT_NO_THROW_LOG(context.reset(new PingContext(lease, query)));
++
++ // Verify initial content.
++ EXPECT_EQ(lease->addr_, context->getTarget());
++ EXPECT_EQ(1, context->getMinEchos());
++ EXPECT_EQ(100, context->getReplyTimeout());
++ EXPECT_EQ(0, context->getEchosSent());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context->getLastEchoSentTime());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context->getSendWaitStart());
++ EXPECT_EQ(PingContext::EMPTY_TIME(), context->getNextExpiry());
++ EXPECT_EQ(PingContext::NEW, context->getState());
++
++ // Start time should be less than or equal to created time.
++ EXPECT_LE(start_time, context->getCreatedTime());
++ EXPECT_EQ(lease, context->getLease());
++ EXPECT_EQ(query, context->getQuery());
++}
++
++TEST(PingContextTest, invalidConstruction) {
++ // Make a valid lease and query.
++ Lease4Ptr lease(new Lease4());
++ lease->addr_ = IOAddress("192.0.2.1");
++ Pkt4Ptr query(new Pkt4(DHCPDISCOVER, 1234));
++
++ // Empty lease should throw.
++ Lease4Ptr empty_lease;
++ PingContextPtr context;
++ ASSERT_THROW_MSG(context.reset(new PingContext(empty_lease, query)), BadValue,
++ "PingContext ctor - lease cannot be empty");
++
++ // Empty query should throw.
++ Pkt4Ptr empty_query;
++ ASSERT_THROW_MSG(context.reset(new PingContext(lease, empty_query)), BadValue,
++ "PingContext ctor - query cannot be empty");
++
++ // Empty lease address should throw.
++ lease->addr_ = IOAddress::IPV4_ZERO_ADDRESS();
++ ASSERT_THROW_MSG(context.reset(new PingContext(lease, query)), BadValue,
++ "PingContext ctor - target address cannot be 0.0.0.0");
++}
++
++// Tests conversion of PingContext::State to string and vice-versa.
++TEST(PingContext, stateConversion) {
++ EXPECT_EQ(PingContext::NEW, PingContext::stringToState("NEW"));
++ EXPECT_EQ(PingContext::WAITING_TO_SEND, PingContext::stringToState("WAITING_TO_SEND"));
++ EXPECT_EQ(PingContext::SENDING, PingContext::stringToState("SENDING"));
++ EXPECT_EQ(PingContext::WAITING_FOR_REPLY, PingContext::stringToState("WAITING_FOR_REPLY"));
++ EXPECT_EQ(PingContext::TARGET_FREE, PingContext::stringToState("TARGET_FREE"));
++ EXPECT_EQ(PingContext::TARGET_IN_USE, PingContext::stringToState("TARGET_IN_USE"));
++ ASSERT_THROW_MSG(PingContext::stringToState("bogus"), BadValue,
++ "Invalid PingContext::State: 'bogus'");
++
++ EXPECT_EQ("NEW", PingContext::stateToString(PingContext::NEW));
++ EXPECT_EQ("WAITING_TO_SEND", PingContext::stateToString(PingContext::WAITING_TO_SEND));
++ EXPECT_EQ("SENDING", PingContext::stateToString(PingContext::SENDING));
++ EXPECT_EQ("WAITING_FOR_REPLY", PingContext::stateToString(PingContext::WAITING_FOR_REPLY));
++ EXPECT_EQ("TARGET_FREE", PingContext::stateToString(PingContext::TARGET_FREE));
++ EXPECT_EQ("TARGET_IN_USE", PingContext::stateToString(PingContext::TARGET_IN_USE));
++}
++
++TEST(PingContext, accessors) {
++ // Make a valid lease and query.
++ Lease4Ptr lease(new Lease4());
++ lease->addr_ = IOAddress("192.0.2.1");
++ Pkt4Ptr query(new Pkt4(DHCPDISCOVER, 1234));
++
++ // Capture time now.
++ auto time_now = PingContext::now();
++
++ // Construct a context.
++ PingContextPtr context;
++ ASSERT_NO_THROW_LOG(context.reset(new PingContext(lease, query, 1, 50)));
++
++ EXPECT_NO_THROW_LOG(context->setMinEchos(4));
++ EXPECT_EQ(4, context->getMinEchos());
++
++ EXPECT_NO_THROW_LOG(context->setReplyTimeout(200));
++ EXPECT_EQ(200, context->getReplyTimeout());
++
++ EXPECT_NO_THROW_LOG(context->setEchosSent(7));
++ EXPECT_EQ(7, context->getEchosSent());
++
++ EXPECT_NO_THROW_LOG(context->setLastEchoSentTime(time_now));
++ EXPECT_EQ(time_now, context->getLastEchoSentTime());
++
++ EXPECT_NO_THROW_LOG(context->setState(PingContext::SENDING));
++ EXPECT_EQ(PingContext::SENDING, context->getState());
++
++ time_now += milliseconds(100);
++ EXPECT_NO_THROW_LOG(context->setSendWaitStart(time_now));
++ EXPECT_EQ(time_now, context->getSendWaitStart());
++
++ time_now += milliseconds(100);
++ EXPECT_NO_THROW_LOG(context->setNextExpiry(time_now));
++ EXPECT_EQ(time_now, context->getNextExpiry());
++
++ EXPECT_FALSE(context->isWaitingToSend());
++ time_now += milliseconds(100);
++ ASSERT_NO_THROW_LOG(context->beginWaitingToSend(time_now));
++ EXPECT_EQ(time_now, context->getSendWaitStart());
++ EXPECT_TRUE(context->isWaitingToSend());
++
++ EXPECT_FALSE(context->isWaitingForReply());
++ auto exp_expiry = time_now + milliseconds(context->getReplyTimeout());
++ ASSERT_NO_THROW_LOG(context->beginWaitingForReply(time_now));
++ EXPECT_EQ(exp_expiry, context->getNextExpiry());
++ EXPECT_TRUE(context->isWaitingForReply());
++}
++
++} // end of anonymous namespace
+diff --git a/src/hooks/dhcp/ping_check/tests/ping_test_utils.h b/src/hooks/dhcp/ping_check/tests/ping_test_utils.h
+new file mode 100644
+index 0000000000..df1ede7526
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/ping_test_utils.h
+@@ -0,0 +1,396 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#ifndef PING_TEST_UTILS_H
++#define PING_TEST_UTILS_H
++
++#include <ping_channel.h>
++#include <asiolink/interval_timer.h>
++#include <asiolink/io_service.h>
++#include <asiolink/io_address.h>
++#include <testutils/gtest_utils.h>
++#include <asiolink/io_service_thread_pool.h>
++#include <util/multi_threading_mgr.h>
++#include <mutex>
++
++#include <gtest/gtest.h>
++#include <queue>
++#include <list>
++#include <thread>
++#include <map>
++
++namespace isc {
++namespace ping_check {
++
++/// @brief Test timeout (ms).
++const long TEST_TIMEOUT = 10000;
++
++/// @brief Maps IOAddresses to sequence numbers.
++///
++/// Outbound requests are assigned a unique id and sequence
++/// number. This map is used to track the request's destination
++/// address by its sequence number. The channel can then substitute
++/// the loopback address, 127.0.0.1, as the destination address.
++/// Upon response receipt, the original destination can be found by
++/// the sequence number sent back in the response.
++class LoopbackMap {
++public:
++ /// @brief Constructor.
++ LoopbackMap() : map_(), mutex_(new std::mutex) {
++ }
++
++ /// @brief Destructor.
++ ~LoopbackMap() = default;
++
++ /// @brief Find the IOAddress associated with a sequence number.
++ ///
++ /// @param sequence sequence number to search by
++ ///
++ /// @return address found or IPV4_ZERO_ADDRESS.
++ asiolink::IOAddress find(uint16_t sequence) {
++ util::MultiThreadingLock lock(*mutex_);
++ auto const& iter = map_.find(sequence);
++ if (iter == map_.end()) {
++ return (asiolink::IOAddress::IPV4_ZERO_ADDRESS());
++ }
++
++ return (iter->second);
++ }
++
++ /// @brief Adds an entry for a sequence number and address
++ ///
++ /// @param sequence sequence number associated with the address
++ /// @param address address to add to the map
++ ///
++ /// @return true if the entry was added, false otherwise.
++ bool add(uint16_t sequence, const asiolink::IOAddress& address) {
++ util::MultiThreadingLock lock(*mutex_);
++ if (map_.count(sequence)) {
++ return (false);
++ }
++
++ map_.emplace(sequence, address);
++ return (true);
++ }
++
++ /// @brief Map of addresses by sequence number.
++ std::map<uint16_t, asiolink::IOAddress> map_;
++
++ /// @brief Mutex to protect the map during operations.
++ const boost::scoped_ptr<std::mutex> mutex_;
++};
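++
++// A minimal usage sketch of LoopbackMap (hypothetical values; not part of
++// the tests themselves): the send path records the real target before
++// rewriting the destination to loopback, and the receive path resolves it
++// again from the sequence number echoed back in the reply.
++//
++// LoopbackMap lmap;
++// lmap.add(42, asiolink::IOAddress("192.0.2.7")); // before send
++// asiolink::IOAddress target = lmap.find(42); // on reply receipt
++// if (target == asiolink::IOAddress::IPV4_ZERO_ADDRESS()) {
++// // unknown sequence number; the reply was not one of ours
++// }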
++
++/// @brief Testable derivation of PingChannel
++///
++/// Overrides read and write functions to inject IO errors.
++class TestablePingChannel : public PingChannel {
++public:
++ /// @brief Constructor
++ ///
++ /// Instantiates the channel with its socket closed.
++ ///
++ /// @param io_service pointer to the IOService instance that will manage
++ /// the channel's IO. Must not be empty
++ /// @param next_to_send_cb callback to invoke to fetch the next IOAddress
++ /// to ping
++ /// @param echo_sent_cb callback to invoke when an ECHO send has completed
++ /// @param reply_received_cb callback to invoke when an ICMP reply has been
++ /// received. This callback is passed all inbound ICMP messages (e.g. ECHO
++ /// REPLY, UNREACHABLE, etc...)
++ /// @param shutdown_cb callback to invoke when the channel has shutdown due
++ /// to an error
++ ///
++ /// @throw BadValue if io_service is empty.
++ TestablePingChannel(asiolink::IOServicePtr& io_service,
++ NextToSendCallback next_to_send_cb,
++ EchoSentCallback echo_sent_cb,
++ ReplyReceivedCallback reply_received_cb,
++ ShutdownCallback shutdown_cb = ShutdownCallback())
++ : PingChannel(io_service, next_to_send_cb, echo_sent_cb, reply_received_cb, shutdown_cb),
++ read_number_(0), throw_on_read_number_(0), ec_on_read_number_(0), read_error_ec_(),
++ write_number_(0), throw_on_write_number_(0), ec_on_write_number_(0), write_error_ec_(),
++ route_loopback_(true), loopback_map_(), stopped_(false) {
++ }
++
++ /// @brief Virtual destructor
++ virtual ~TestablePingChannel() {
++ stopped_ = true;
++ }
++
++ /// @brief Schedules the next send.
++ ///
++ /// If the socket is not currently sending it posts a call to @c sendNext()
++ /// to the channel's IOService.
++ virtual void startSend() {
++ if (stopped_) {
++ return;
++ }
++ PingChannel::startSend();
++ }
++
++ /// @brief Perform asynchronous read or feign a read error
++ ///
++ /// This virtual function is provided as a means to inject errors during
++ /// read operations to facilitate testing. It tracks the number of
++ /// reads that have occurred since channel open and instigates an
++ /// error trigger on the trigger read number if a trigger has been set.
++ ///
++ /// @param data buffer to receive incoming message
++ /// @param length length of the data buffer
++ /// @param offset offset into buffer where data is to be put
++ /// @param endpoint source of the communication
++ /// @param callback callback object
++ virtual void asyncReceive(void* data, size_t length, size_t offset,
++ asiolink::IOEndpoint* endpoint, SocketCallback& callback) {
++ if (stopped_) {
++ return;
++ }
++ ++read_number_;
++
++ // If we're set to fail with an exception, do so.
++ if (throw_on_read_number_ && (read_number_ == throw_on_read_number_)) {
++ isc_throw(Unexpected, "Injected read error");
++ }
++
++ // If we're set to fail via the callback, post a call with the
++ // desired error code.
++ if (ec_on_read_number_ && read_number_ == ec_on_read_number_) {
++ getIOService()->post([this]() { socketReadCallback(read_error_ec_, 0); });
++ return;
++ }
++
++ // No scheduled error, proceed with normal read.
++ PingChannel::asyncReceive(data, length, offset, endpoint, callback);
++ }
++
++ /// @brief Perform asynchronous write or feign a write error
++ ///
++ /// This virtual function is provided as a means to inject errors during
++ /// write operations to facilitate testing. It tracks the number of
++ /// writes that have occurred since channel open and instigates an
++ /// error trigger on the trigger write number if a trigger has been set.
++ ///
++ /// @param data buffer of data to write
++ /// @param length length of the data buffer
++ /// @param endpoint destination of the communication
++ /// @param callback callback object
++ virtual void asyncSend(void* data, size_t length, asiolink::IOEndpoint* endpoint,
++ SocketCallback& callback) {
++ if (stopped_) {
++ return;
++ }
++ ++write_number_;
++ if (throw_on_write_number_ && (write_number_ == throw_on_write_number_)) {
++ isc_throw(Unexpected, "Injected write error");
++ }
++
++ if (ec_on_write_number_ && write_number_ == ec_on_write_number_) {
++ ICMPMsgPtr fake_echo(new ICMPMsg());
++ fake_echo->setType(ICMPMsg::ECHO_REQUEST);
++ fake_echo->setDestination(endpoint->getAddress());
++ getIOService()->post([this, fake_echo]() { socketWriteCallback(fake_echo, write_error_ec_, 0); });
++ return;
++ }
++
++ // In order to make testing more predictable, we need to slow writes down a bit.
++ usleep(5000);
++
++ // If loopback routing is enabled, store the destination address by
++ // sequence number in the loopback map, then replace the destination
++ // endpoint with 127.0.0.1 and send it there.
++ if (route_loopback_) {
++ struct icmp* reply = (struct icmp*)(data);
++ auto sequence = (ntohs(reply->icmp_hun.ih_idseq.icd_seq));
++ loopback_map_.add(sequence, endpoint->getAddress());
++ ICMPEndpoint lo_endpoint(asiolink::IOAddress("127.0.0.1"));
++ PingChannel::asyncSend(data, length, &lo_endpoint, callback);
++ return;
++ }
++
++ PingChannel::asyncSend(data, length, endpoint, callback);
++ }
++
++ /// @brief Fetches the PingSocket.
++ ///
++ /// @return pointer to the PingSocket instance.
++ PingSocketPtr getPingSocket() {
++ return (socket_);
++ }
++
++ /// @brief Checks if channel was opened in single-threaded mode.
++ ///
++ /// @return True if channel is single-threaded.
++ bool getSingleThreaded() const {
++ return (single_threaded_);
++ }
++
++ /// @brief Fetch the WatchSocket instance.
++ ///
++ /// @return pointer to the WatchSocket.
++ util::WatchSocketPtr getWatchSocket() const {
++ return (watch_socket_);
++ }
++
++ /// @brief The "write-ready" socket descriptor registered with IfaceMgr.
++ ///
++ /// @return registered socket descriptor.
++ int getRegisteredWriteFd() const {
++ return (registered_write_fd_);
++ }
++
++ /// @brief The "read-ready" socket descriptor registered with IfaceMgr.
++ ///
++ /// @return registered socket descriptor.
++ int getRegisteredReadFd() const {
++ return (registered_read_fd_);
++ }
++
++ /// @brief Tracks the number of reads since the channel was created
++ size_t read_number_;
++
++ /// @brief Read number on which to throw an exception from asyncReceive()
++ size_t throw_on_read_number_;
++
++ /// @brief Read number on which to inject a socketReadCallback with an error code
++ size_t ec_on_read_number_;
++
++ /// @brief Error code to inject on read error trigger
++ boost::system::error_code read_error_ec_;
++
++ /// @brief Tracks the number of writes since the channel was created
++ size_t write_number_;
++
++ /// @brief Write number on which to throw an exception from asyncSend()
++ size_t throw_on_write_number_;
++
++ /// @brief Write number on which to inject a socketWriteCallback with an error code
++ size_t ec_on_write_number_;
++
++ /// @brief Error code to inject on write error trigger
++ boost::system::error_code write_error_ec_;
++
++ /// @brief Enables routing of 127.0.0.x addresses to 127.0.0.1, keyed by sequence number.
++ bool route_loopback_;
++
++ /// @brief Maps sequence numbers to original destination addresses when
++ /// loopback routing is enabled.
++ LoopbackMap loopback_map_;
++
++ /// @brief Flag which indicates that the channel has been stopped.
++ bool stopped_;
++};
++
++/// @brief Defines a pointer to a TestablePingChannel
++typedef boost::shared_ptr<TestablePingChannel> TestablePingChannelPtr;
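++
++// Sketch of arming the error-injection triggers (the io_service and
++// callback variables are assumed to exist in the enclosing test):
++//
++// TestablePingChannelPtr channel(new TestablePingChannel(
++// io_service, next_to_send_cb, echo_sent_cb, reply_received_cb));
++// channel->throw_on_write_number_ = 3; // asyncSend() throws on the 3rd write
++// channel->ec_on_read_number_ = 2; // 2nd read completes with an error:
++// channel->read_error_ec_ = boost::asio::error::operation_aborted;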
++
++/// @brief Defines a callback type for test completion check functions.
++typedef std::function<bool()> TestDoneCallback;
++
++/// @brief Test fixture class which uses an IOService for time management and/or IO
++class IOServiceTest : public ::testing::Test {
++public:
++ /// @brief Constructor.
++ ///
++ /// Starts test timer which detects timeouts.
++ IOServiceTest()
++ : test_io_service_(new asiolink::IOService()),
++ test_timer_(test_io_service_),
++ run_io_service_timer_(test_io_service_),
++ test_done_cb_() {
++ test_timer_.setup(std::bind(&IOServiceTest::timeoutHandler, this, true),
++ TEST_TIMEOUT,
++ asiolink::IntervalTimer::ONE_SHOT);
++ }
++
++ /// @brief Indicates if the current user is not root
++ ///
++ /// @return True if neither the real uid nor the effective
++ /// uid is root.
++ static bool notRoot() {
++ return (getuid() != 0 && geteuid() != 0);
++ }
++
++ /// @brief Destructor.
++ ///
++ /// Removes active clients.
++ virtual ~IOServiceTest() {
++ test_timer_.cancel();
++ run_io_service_timer_.cancel();
++ test_io_service_->stopAndPoll();
++ }
++
++ /// @brief Callback function invoked upon test timeout.
++ ///
++ /// It stops the IO service and reports test timeout.
++ ///
++ /// @param fail_on_timeout Specifies if test failure should be reported.
++ void timeoutHandler(const bool fail_on_timeout) {
++ if (fail_on_timeout) {
++ ADD_FAILURE() << "Timeout occurred while running the test!";
++ }
++
++ test_io_service_->stop();
++ }
++
++ /// @brief Stops the IOService if the criteria for test completion have been met.
++ ///
++ /// Stops the IOService if there is either no test completion callback or the
++ /// callback returns true.
++ void stopIfDone() {
++ // If there is no test done callback, or it returns true, stop the service.
++ if (!test_done_cb_ || (test_done_cb_)()) {
++ test_io_service_->stop();
++ }
++ }
++
++ /// @brief Posts a call to stop the io service to the io service.
++ ///
++ /// This should be used when stopping the service from callbacks on
++ /// thread pool threads.
++ void stopTestService() {
++ if (!test_io_service_->stopped()) {
++ test_io_service_->post([&]() { test_io_service_->stop(); });
++ }
++ }
++
++ /// @brief Runs IO service with optional timeout.
++ ///
++ /// @param timeout number of milliseconds to run the io service. Defaults to
++ /// zero which means run forever.
++ void runIOService(long timeout = 0) {
++ test_io_service_->stop();
++ test_io_service_->restart();
++
++ if (timeout > 0) {
++ run_io_service_timer_.setup(std::bind(&IOServiceTest::timeoutHandler,
++ this, false),
++ timeout,
++ asiolink::IntervalTimer::ONE_SHOT);
++ }
++
++ test_io_service_->run();
++ test_io_service_->stopAndPoll();
++ }
++
++ /// @brief IO service used in the tests.
++ asiolink::IOServicePtr test_io_service_;
++
++ /// @brief Asynchronous timer service to detect timeouts.
++ asiolink::IntervalTimer test_timer_;
++
++ /// @brief Asynchronous timer for running IO service for a specified amount
++ /// of time.
++ asiolink::IntervalTimer run_io_service_timer_;
++
++ /// @brief Callback function which event handlers can use to check if service
++ /// run should stop.
++ TestDoneCallback test_done_cb_;
++};
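++
++// Typical use in a derived fixture (sketch; replies_received_ and expected_
++// are hypothetical members of the derived test class):
++//
++// test_done_cb_ = [this]() { return (replies_received_ >= expected_); };
++// runIOService(2000); // run at most 2000 ms without failing; the
++// // TEST_TIMEOUT timer still fails a truly hung test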
++
++} // end of namespace ping_check
++} // end of namespace isc
++
++#endif
+diff --git a/src/hooks/dhcp/ping_check/tests/run_unittests.cc b/src/hooks/dhcp/ping_check/tests/run_unittests.cc
+new file mode 100644
+index 0000000000..d249e2362e
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/tests/run_unittests.cc
+@@ -0,0 +1,19 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++
++#include <log/logger_support.h>
++#include <gtest/gtest.h>
++
++int
++main(int argc, char* argv[]) {
++ ::testing::InitGoogleTest(&argc, argv);
++ isc::log::initLogger();
++ int result = RUN_ALL_TESTS();
++
++ return (result);
++}
+diff --git a/src/hooks/dhcp/ping_check/version.cc b/src/hooks/dhcp/ping_check/version.cc
+new file mode 100644
+index 0000000000..f2250ab126
+--- /dev/null
++++ b/src/hooks/dhcp/ping_check/version.cc
+@@ -0,0 +1,17 @@
++// Copyright (C) 2023-2025 Internet Systems Consortium, Inc. ("ISC")
++//
++// This Source Code Form is subject to the terms of the Mozilla Public
++// License, v. 2.0. If a copy of the MPL was not distributed with this
++// file, You can obtain one at http://mozilla.org/MPL/2.0/.
++
++#include <config.h>
++#include <hooks/hooks.h>
++
++extern "C" {
++
++/// @brief returns Kea hooks version.
++int version() {
++ return (KEA_HOOKS_VERSION);
++}
++
++}
+--
+2.39.5 (Apple Git-154)
+
diff --git a/scripts/package-build/keepalived/.gitignore b/scripts/package-build/keepalived/.gitignore
index fa96cd3f..b6513f29 100644
--- a/scripts/package-build/keepalived/.gitignore
+++ b/scripts/package-build/keepalived/.gitignore
@@ -1,7 +1 @@
-keepalived/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
+/keepalived/
diff --git a/scripts/package-build/keepalived/package.toml b/scripts/package-build/keepalived/package.toml
index ad1008e6..3f5ec071 100644
--- a/scripts/package-build/keepalived/package.toml
+++ b/scripts/package-build/keepalived/package.toml
@@ -1,4 +1,4 @@
[[packages]]
name = "keepalived"
-commit_id = "debian/1%2.2.8-1"
+commit_id = "debian/1%2.3.2-1"
scm_url = "https://salsa.debian.org/debian/pkg-keepalived.git"
diff --git a/scripts/package-build/keepalived/patches/0001-vrrp-Set-sysctl-arp_ignore-to-1-on-IPv6-VMACs.patch b/scripts/package-build/keepalived/patches/0001-vrrp-Set-sysctl-arp_ignore-to-1-on-IPv6-VMACs.patch
deleted file mode 100644
index b099dc7b..00000000
--- a/scripts/package-build/keepalived/patches/0001-vrrp-Set-sysctl-arp_ignore-to-1-on-IPv6-VMACs.patch
+++ /dev/null
@@ -1,129 +0,0 @@
-From af4aa758c3512bec8233549e138b03741c5404f9 Mon Sep 17 00:00:00 2001
-From: Quentin Armitage <quentin@armitage.org.uk>
-Date: Sat, 14 Oct 2023 15:37:19 +0100
-Subject: [PATCH] vrrp: Set sysctl arp_ignore to 1 on IPv6 VMACs
-
-Setting arp_ignore to 1 ensures that the VMAC interface does not respond
-to ARP requests for IPv4 addresses not configured on the VMAC.
-
-Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
----
- keepalived/include/vrrp_if_config.h | 2 +-
- keepalived/vrrp/vrrp_if_config.c | 28 ++++++++++++++++++++--------
- keepalived/vrrp/vrrp_vmac.c | 5 ++---
- 3 files changed, 23 insertions(+), 12 deletions(-)
-
-diff --git a/keepalived/include/vrrp_if_config.h b/keepalived/include/vrrp_if_config.h
-index 35465cd..c35e56e 100644
---- a/keepalived/include/vrrp_if_config.h
-+++ b/keepalived/include/vrrp_if_config.h
-@@ -34,7 +34,7 @@ extern void set_promote_secondaries(interface_t*);
- extern void reset_promote_secondaries(interface_t*);
- #ifdef _HAVE_VRRP_VMAC_
- extern void restore_rp_filter(void);
--extern void set_interface_parameters(const interface_t*, interface_t*);
-+extern void set_interface_parameters(const interface_t*, interface_t*, sa_family_t);
- extern void reset_interface_parameters(interface_t*);
- extern void link_set_ipv6(const interface_t*, bool);
- #endif
-diff --git a/keepalived/vrrp/vrrp_if_config.c b/keepalived/vrrp/vrrp_if_config.c
-index cfce7e2..fbfd34c 100644
---- a/keepalived/vrrp/vrrp_if_config.c
-+++ b/keepalived/vrrp/vrrp_if_config.c
-@@ -81,6 +81,11 @@ static sysctl_opts_t vmac_sysctl[] = {
- { 0, 0}
- };
-
-+static sysctl_opts_t vmac_sysctl_6[] = {
-+ { IPV4_DEVCONF_ARP_IGNORE, 1 },
-+ { 0, 0}
-+};
-+
- #endif
- #endif
-
-@@ -216,11 +221,14 @@ netlink_set_interface_flags(unsigned ifindex, const sysctl_opts_t *sys_opts)
-
- #ifdef _HAVE_VRRP_VMAC_
- static inline int
--netlink_set_interface_parameters(const interface_t *ifp, interface_t *base_ifp)
-+netlink_set_interface_parameters(const interface_t *ifp, interface_t *base_ifp, sa_family_t family)
- {
-- if (netlink_set_interface_flags(ifp->ifindex, vmac_sysctl))
-+ if (netlink_set_interface_flags(ifp->ifindex, family == AF_INET6 ? vmac_sysctl_6 : vmac_sysctl))
- return -1;
-
-+ if (family == AF_INET6)
-+ return 0;
-+
- /* If the underlying interface is a MACVLAN that has been moved into
- * a separate network namespace from the parent, we can't access the
- * parent. */
-@@ -271,9 +279,9 @@ netlink_reset_interface_parameters(const interface_t* ifp)
- }
-
- static inline void
--set_interface_parameters_devconf(const interface_t *ifp, interface_t *base_ifp)
-+set_interface_parameters_devconf(const interface_t *ifp, interface_t *base_ifp, sa_family_t family)
- {
-- if (netlink_set_interface_parameters(ifp, base_ifp))
-+ if (netlink_set_interface_parameters(ifp, base_ifp, family))
- log_message(LOG_INFO, "Unable to set parameters for %s", ifp->ifname);
- }
-
-@@ -310,11 +318,15 @@ reset_promote_secondaries_devconf(interface_t *ifp)
-
- #ifdef _HAVE_VRRP_VMAC_
- static inline void
--set_interface_parameters_sysctl(const interface_t *ifp, interface_t *base_ifp)
-+set_interface_parameters_sysctl(const interface_t *ifp, interface_t *base_ifp, sa_family_t family)
- {
- unsigned val;
-
- set_sysctl("net/ipv4/conf", ifp->ifname, "arp_ignore", 1);
-+
-+ if (family == AF_INET6)
-+ return;
-+
- set_sysctl("net/ipv4/conf", ifp->ifname, "accept_local", 1);
- set_sysctl("net/ipv4/conf", ifp->ifname, "rp_filter", 0);
-
-@@ -524,15 +536,15 @@ restore_rp_filter(void)
- }
-
- void
--set_interface_parameters(const interface_t *ifp, interface_t *base_ifp)
-+set_interface_parameters(const interface_t *ifp, interface_t *base_ifp, sa_family_t family)
- {
- if (all_rp_filter == UINT_MAX)
- clear_rp_filter();
-
- #ifdef _HAVE_IPV4_DEVCONF_
-- set_interface_parameters_devconf(ifp, base_ifp);
-+ set_interface_parameters_devconf(ifp, base_ifp, family);
- #else
-- set_interface_parameters_sysctl(ifp, base_ifp);
-+ set_interface_parameters_sysctl(ifp, base_ifp, family);
- #endif
- }
-
-diff --git a/keepalived/vrrp/vrrp_vmac.c b/keepalived/vrrp/vrrp_vmac.c
-index e5ff0e9..021953a 100644
---- a/keepalived/vrrp/vrrp_vmac.c
-+++ b/keepalived/vrrp/vrrp_vmac.c
-@@ -407,10 +407,9 @@ netlink_link_add_vmac(vrrp_t *vrrp, const interface_t *old_interface)
- if (!ifp->ifindex)
- return false;
-
-- if (vrrp->family == AF_INET && create_interface) {
-+ if (create_interface) {
- /* Set the necessary kernel parameters to make macvlans work for us */
--// If this saves current base_ifp's settings, we need to be careful if multiple VMACs on same i/f
-- set_interface_parameters(ifp, ifp->base_ifp);
-+ set_interface_parameters(ifp, ifp->base_ifp, vrrp->family);
- }
-
- #ifdef _WITH_FIREWALL_
---
-2.34.1
-
diff --git a/scripts/package-build/libnss-mapuser/.gitignore b/scripts/package-build/libnss-mapuser/.gitignore
new file mode 100644
index 00000000..15657c19
--- /dev/null
+++ b/scripts/package-build/libnss-mapuser/.gitignore
@@ -0,0 +1 @@
+/libnss-mapuser/
diff --git a/scripts/package-build/libnss-mapuser/build.py b/scripts/package-build/libnss-mapuser/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/libnss-mapuser/build.py
@@ -0,0 +1 @@
+../build.py \ No newline at end of file
diff --git a/scripts/package-build/libnss-mapuser/package.toml b/scripts/package-build/libnss-mapuser/package.toml
new file mode 100644
index 00000000..20ff65b4
--- /dev/null
+++ b/scripts/package-build/libnss-mapuser/package.toml
@@ -0,0 +1,9 @@
+[[packages]]
+name = "libnss-mapuser"
+commit_id = "current"
+scm_url = "https://github.com/vyos/libnss-mapuser.git"
+
+[dependencies]
+packages = [
+ "libaudit-dev"
+]
diff --git a/scripts/package-build/libpam-radius-auth/.gitignore b/scripts/package-build/libpam-radius-auth/.gitignore
new file mode 100644
index 00000000..b6ba8742
--- /dev/null
+++ b/scripts/package-build/libpam-radius-auth/.gitignore
@@ -0,0 +1 @@
+/libpam-radius-auth/
diff --git a/scripts/package-build/libpam-radius-auth/build.py b/scripts/package-build/libpam-radius-auth/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/libpam-radius-auth/build.py
@@ -0,0 +1 @@
+../build.py \ No newline at end of file
diff --git a/scripts/package-build/libpam-radius-auth/package.toml b/scripts/package-build/libpam-radius-auth/package.toml
new file mode 100644
index 00000000..d2c760c8
--- /dev/null
+++ b/scripts/package-build/libpam-radius-auth/package.toml
@@ -0,0 +1,10 @@
+[[packages]]
+name = "libpam-radius-auth"
+commit_id = "current"
+scm_url = "https://github.com/vyos/libpam-radius-auth.git"
+
+[dependencies]
+packages = [
+ "libpam-dev",
+ "libaudit-dev"
+]
diff --git a/scripts/package-build/linux-kernel/.gitignore b/scripts/package-build/linux-kernel/.gitignore
index 0a18ea8c..f1fb5374 100644
--- a/scripts/package-build/linux-kernel/.gitignore
+++ b/scripts/package-build/linux-kernel/.gitignore
@@ -9,6 +9,7 @@
/ovpn-dco
/nat-rtsp*
/jool*
+/ipt-netflow*
/qat*
/QAT*
*.tar.gz
@@ -18,13 +19,12 @@
# Intel Driver source
i40e-*/
igb-*/
-ixgbe-*/
-ixgbevf-*/
+ethernet-linux-*/
vyos-intel-*/
vyos-linux-firmware*/
kernel-vars
r8152-*.tar.bz2
-
+ephemeral.*
*.buildinfo
*.build
*.changes
diff --git a/scripts/package-build/linux-kernel/README.md b/scripts/package-build/linux-kernel/README.md
index 56954e5a..927e880c 100644
--- a/scripts/package-build/linux-kernel/README.md
+++ b/scripts/package-build/linux-kernel/README.md
@@ -5,9 +5,9 @@
# About
-VyOS runs on a custom Linux Kernel (which is 4.19) at the time of this writing.
-This repository holds a Jenkins Pipeline which is used to build the Custom
-Kernel (x86_64/amd64 at the moment) and all required out-of tree modules.
+VyOS runs on a custom Linux Kernel (6.6 at the time of this writing).
+This repository holds build scripts that are used to build the Custom Kernel
+(x86_64/amd64 at the moment) and all required out-of-tree modules.
VyOS does not utilize the build in Intel Kernel drivers for its NICs as those
Kernels sometimes lack features e.g. configurable receive-side-scaling queues.
@@ -33,9 +33,3 @@ VyOS utilizes several Out-of-Tree modules (e.g. WireGuard, Accel-PPP and Intel
network interface card drivers). Module source code is retrieved from the
upstream repository and - when needed - patched so it can be build using this
pipeline.
-
-In the past VyOS maintainers had a fork of the Linux Kernel, WireGuard and
-Accel-PPP. This is fine but increases maintenance effort. By utilizing vanilla
-repositories upgrading to new versions is very easy - only the branch/commit/tag
-used when cloning the repository via [Jenkinsfile](Jenkinsfile) needs to be
-adjusted.
diff --git a/scripts/package-build/linux-kernel/arch b/scripts/package-build/linux-kernel/arch
deleted file mode 120000
index f5f81fdc..00000000
--- a/scripts/package-build/linux-kernel/arch
+++ /dev/null
@@ -1 +0,0 @@
-../../../packages/linux-kernel/arch \ No newline at end of file
diff --git a/packages/linux-kernel/arch/arm64/configs/vyos_defconfig b/scripts/package-build/linux-kernel/arch/arm64/configs/vyos_defconfig
index 7b49f05f..e6ea3893 100644
--- a/packages/linux-kernel/arch/arm64/configs/vyos_defconfig
+++ b/scripts/package-build/linux-kernel/arch/arm64/configs/vyos_defconfig
@@ -234,7 +234,7 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
-CONFIG_IO_URING=y
+# CONFIG_IO_URING is not set
CONFIG_ADVISE_SYSCALLS=y
CONFIG_MEMBARRIER=y
CONFIG_KALLSYMS=y
@@ -1735,7 +1735,7 @@ CONFIG_RFKILL_INPUT=y
# CONFIG_CAIF is not set
# CONFIG_CEPH_LIB is not set
# CONFIG_NFC is not set
-# CONFIG_PSAMPLE is not set
+CONFIG_PSAMPLE=y
# CONFIG_NET_IFE is not set
CONFIG_LWTUNNEL=y
CONFIG_LWTUNNEL_BPF=y
@@ -1975,7 +1975,7 @@ CONFIG_RASPBERRYPI_FIRMWARE=y
CONFIG_QCOM_SCM=y
# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set
CONFIG_SYSFB=y
-# CONFIG_SYSFB_SIMPLEFB is not set
+CONFIG_SYSFB_SIMPLEFB=y
CONFIG_TI_SCI_PROTOCOL=y
CONFIG_TURRIS_MOX_RWTM=m
# CONFIG_ARM_FFA_TRANSPORT is not set
@@ -3353,7 +3353,8 @@ CONFIG_SERIAL_8250_TEGRA=y
# Non-8250 serial port support
#
# CONFIG_SERIAL_AMBA_PL010 is not set
-# CONFIG_SERIAL_AMBA_PL011 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set
# CONFIG_SERIAL_MESON is not set
# CONFIG_SERIAL_TEGRA is not set
@@ -3401,7 +3402,7 @@ CONFIG_TTY_PRINTK=m
CONFIG_TTY_PRINTK_LEVEL=6
# CONFIG_PRINTER is not set
# CONFIG_PPDEV is not set
-CONFIG_VIRTIO_CONSOLE=m
+CONFIG_VIRTIO_CONSOLE=y
CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_DMI_DECODE=y
CONFIG_IPMI_PLAT_DATA=y
@@ -4495,8 +4496,141 @@ CONFIG_VIDEO_CMDLINE=y
# CONFIG_AUXDISPLAY is not set
# CONFIG_PANEL is not set
# CONFIG_TEGRA_HOST1X is not set
-# CONFIG_DRM is not set
+CONFIG_DRM=y
+# CONFIG_DRM_DEBUG_MM is not set
+CONFIG_DRM_KMS_HELPER=y
+# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
+CONFIG_DRM_FBDEV_EMULATION=y
+CONFIG_DRM_FBDEV_OVERALLOC=100
+# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
+# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
+CONFIG_DRM_TTM=y
+CONFIG_DRM_TTM_HELPER=y
+CONFIG_DRM_GEM_SHMEM_HELPER=y
+
+#
+# I2C encoder or helper chips
+#
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+# CONFIG_DRM_I2C_NXP_TDA998X is not set
+# CONFIG_DRM_I2C_NXP_TDA9950 is not set
+# end of I2C encoder or helper chips
+
+#
+# ARM devices
+#
+# CONFIG_DRM_HDLCD is not set
+# CONFIG_DRM_MALI_DISPLAY is not set
+# CONFIG_DRM_KOMEDA is not set
+# end of ARM devices
+
+# CONFIG_DRM_RADEON is not set
+# CONFIG_DRM_AMDGPU is not set
+# CONFIG_DRM_NOUVEAU is not set
+# CONFIG_DRM_VGEM is not set
+# CONFIG_DRM_VKMS is not set
+# CONFIG_DRM_ROCKCHIP is not set
+# CONFIG_DRM_VMWGFX is not set
+# CONFIG_DRM_UDL is not set
+# CONFIG_DRM_AST is not set
+# CONFIG_DRM_MGAG200 is not set
+# CONFIG_DRM_RCAR_DU is not set
+# CONFIG_DRM_RZG2L_MIPI_DSI is not set
+# CONFIG_DRM_SHMOBILE is not set
+# CONFIG_DRM_SUN4I is not set
+CONFIG_DRM_QXL=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_DRM_VIRTIO_GPU_KMS=y
+# CONFIG_DRM_MSM is not set
+# CONFIG_DRM_TEGRA is not set
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+# CONFIG_DRM_PANEL_ARM_VERSATILE is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
+# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set
+# end of Display Panels
+
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PANEL_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_CHIPONE_ICN6211 is not set
+# CONFIG_DRM_CHRONTEL_CH7033 is not set
+# CONFIG_DRM_DISPLAY_CONNECTOR is not set
+# CONFIG_DRM_FSL_LDB is not set
+# CONFIG_DRM_ITE_IT6505 is not set
+# CONFIG_DRM_LONTIUM_LT8912B is not set
+# CONFIG_DRM_LONTIUM_LT9211 is not set
+# CONFIG_DRM_LONTIUM_LT9611 is not set
+# CONFIG_DRM_LONTIUM_LT9611UXC is not set
+# CONFIG_DRM_ITE_IT66121 is not set
+# CONFIG_DRM_LVDS_CODEC is not set
+# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set
+# CONFIG_DRM_NWL_MIPI_DSI is not set
+# CONFIG_DRM_NXP_PTN3460 is not set
+# CONFIG_DRM_PARADE_PS8622 is not set
+# CONFIG_DRM_PARADE_PS8640 is not set
+# CONFIG_DRM_SAMSUNG_DSIM is not set
+# CONFIG_DRM_SIL_SII8620 is not set
+# CONFIG_DRM_SII902X is not set
+# CONFIG_DRM_SII9234 is not set
+# CONFIG_DRM_SIMPLE_BRIDGE is not set
+# CONFIG_DRM_THINE_THC63LVD1024 is not set
+# CONFIG_DRM_TOSHIBA_TC358762 is not set
+# CONFIG_DRM_TOSHIBA_TC358764 is not set
+# CONFIG_DRM_TOSHIBA_TC358767 is not set
+# CONFIG_DRM_TOSHIBA_TC358768 is not set
+# CONFIG_DRM_TOSHIBA_TC358775 is not set
+# CONFIG_DRM_TI_DLPC3433 is not set
+# CONFIG_DRM_TI_TFP410 is not set
+# CONFIG_DRM_TI_SN65DSI83 is not set
+# CONFIG_DRM_TI_SN65DSI86 is not set
+# CONFIG_DRM_TI_TPD12S015 is not set
+# CONFIG_DRM_ANALOGIX_ANX6345 is not set
+# CONFIG_DRM_ANALOGIX_ANX78XX is not set
+# CONFIG_DRM_ANALOGIX_ANX7625 is not set
+# CONFIG_DRM_I2C_ADV7511 is not set
+# CONFIG_DRM_CDNS_DSI is not set
+# CONFIG_DRM_CDNS_MHDP8546 is not set
+# CONFIG_DRM_IMX8QM_LDB is not set
+# CONFIG_DRM_IMX8QXP_LDB is not set
+# CONFIG_DRM_IMX8QXP_PIXEL_COMBINER is not set
+# CONFIG_DRM_IMX8QXP_PIXEL_LINK_TO_DPI is not set
+# end of Display Interface Bridges
+
+# CONFIG_DRM_IMX_DCSS is not set
+# CONFIG_DRM_IMX_LCDC is not set
+# CONFIG_DRM_V3D is not set
+# CONFIG_DRM_LOONGSON is not set
+# CONFIG_DRM_ETNAVIV is not set
+# CONFIG_DRM_HISI_HIBMC is not set
+# CONFIG_DRM_HISI_KIRIN is not set
+# CONFIG_DRM_LOGICVC is not set
+# CONFIG_DRM_MXSFB is not set
+# CONFIG_DRM_IMX_LCDIF is not set
+# CONFIG_DRM_MESON is not set
+# CONFIG_DRM_ARCPGU is not set
+# CONFIG_DRM_BOCHS is not set
+# CONFIG_DRM_CIRRUS_QEMU is not set
+# CONFIG_DRM_GM12U320 is not set
+CONFIG_DRM_SIMPLEDRM=y
+# CONFIG_DRM_PL111 is not set
+# CONFIG_DRM_XEN_FRONTEND is not set
+# CONFIG_DRM_LIMA is not set
+# CONFIG_DRM_PANFROST is not set
+# CONFIG_DRM_TIDSS is not set
+# CONFIG_DRM_GUD is not set
+# CONFIG_DRM_SSD130X is not set
+# CONFIG_DRM_HYPERV is not set
+# CONFIG_DRM_LEGACY is not set
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
#
@@ -5362,7 +5496,9 @@ CONFIG_VIRTIO_MEM=m
CONFIG_VIRTIO_INPUT=m
CONFIG_VIRTIO_MMIO=m
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-# CONFIG_VDPA is not set
+CONFIG_VIRTIO_VDPA=m
+CONFIG_VDPA=m
+CONFIG_VHOST_VDPA=m
CONFIG_VHOST_IOTLB=m
CONFIG_VHOST_TASK=y
CONFIG_VHOST=m
@@ -6194,7 +6330,7 @@ CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_CODEPAGE=437
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
CONFIG_FAT_DEFAULT_UTF8=y
-# CONFIG_EXFAT_FS is not set
+CONFIG_EXFAT_FS=m
# CONFIG_NTFS_FS is not set
# CONFIG_NTFS3_FS is not set
# end of DOS/FAT/EXFAT/NT Filesystems
diff --git a/packages/linux-kernel/arch/x86/configs/vyos_defconfig b/scripts/package-build/linux-kernel/arch/x86/configs/vyos_defconfig
index 37becb4c..de3b84aa 100644
--- a/packages/linux-kernel/arch/x86/configs/vyos_defconfig
+++ b/scripts/package-build/linux-kernel/arch/x86/configs/vyos_defconfig
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.6.48 Kernel Configuration
+# Linux/x86 6.6.89 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (Debian 12.2.0-14) 12.2.0"
CONFIG_CC_IS_GCC=y
@@ -122,15 +122,14 @@ CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_DEFAULT_ON=y
# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
# CONFIG_BPF_PRELOAD is not set
+# CONFIG_BPF_LSM is not set
# end of BPF subsystem
-CONFIG_PREEMPT_BUILD=y
+CONFIG_PREEMPT_NONE_BUILD=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
-CONFIG_PREEMPT_COUNT=y
-CONFIG_PREEMPTION=y
-CONFIG_PREEMPT_DYNAMIC=y
+# CONFIG_PREEMPT_DYNAMIC is not set
# CONFIG_SCHED_CORE is not set
#
@@ -154,11 +153,9 @@ CONFIG_CPU_ISOLATION=y
# RCU Subsystem
#
CONFIG_TREE_RCU=y
-CONFIG_PREEMPT_RCU=y
# CONFIG_RCU_EXPERT is not set
CONFIG_TREE_SRCU=y
CONFIG_TASKS_RCU_GENERIC=y
-CONFIG_TASKS_RCU=y
CONFIG_TASKS_TRACE_RCU=y
CONFIG_RCU_STALL_COMMON=y
CONFIG_RCU_NEED_SEGCBLIST=y
@@ -260,7 +257,7 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
-CONFIG_IO_URING=y
+# CONFIG_IO_URING is not set
CONFIG_ADVISE_SYSCALLS=y
CONFIG_MEMBARRIER=y
CONFIG_KALLSYMS=y
@@ -492,7 +489,6 @@ CONFIG_PHYSICAL_ALIGN=0x200000
CONFIG_DYNAMIC_MEMORY_LAYOUT=y
CONFIG_RANDOMIZE_MEMORY=y
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
-# CONFIG_ADDRESS_MASKING is not set
CONFIG_HOTPLUG_CPU=y
# CONFIG_COMPAT_VDSO is not set
# CONFIG_LEGACY_VSYSCALL_XONLY is not set
@@ -846,6 +842,7 @@ CONFIG_FUNCTION_ALIGNMENT=16
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
+CONFIG_MODULE_SIG_FORMAT=y
CONFIG_MODULES=y
# CONFIG_MODULE_DEBUG is not set
CONFIG_MODULE_FORCE_LOAD=y
@@ -855,7 +852,15 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_ASM_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
-# CONFIG_MODULE_SIG is not set
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_ALL=y
+# CONFIG_MODULE_SIG_SHA1 is not set
+# CONFIG_MODULE_SIG_SHA224 is not set
+# CONFIG_MODULE_SIG_SHA256 is not set
+# CONFIG_MODULE_SIG_SHA384 is not set
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_MODULE_SIG_HASH="sha512"
CONFIG_MODULE_COMPRESS_NONE=y
# CONFIG_MODULE_COMPRESS_GZIP is not set
# CONFIG_MODULE_COMPRESS_XZ is not set
@@ -919,7 +924,11 @@ CONFIG_IOSCHED_BFQ=y
CONFIG_PADATA=y
CONFIG_ASN1=y
-CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_INLINE_READ_UNLOCK=y
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_INLINE_WRITE_UNLOCK=y
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
CONFIG_MUTEX_SPIN_ON_OWNER=y
CONFIG_RWSEM_SPIN_ON_OWNER=y
@@ -1176,6 +1185,7 @@ CONFIG_IPV6_SEG6_HMAC=y
CONFIG_IPV6_SEG6_BPF=y
# CONFIG_IPV6_RPL_LWTUNNEL is not set
# CONFIG_IPV6_IOAM6_LWTUNNEL is not set
+# CONFIG_NETLABEL is not set
CONFIG_MPTCP=y
CONFIG_INET_MPTCP_DIAG=m
CONFIG_MPTCP_IPV6=y
@@ -1468,6 +1478,7 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
+# CONFIG_IP_NF_SECURITY is not set
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
@@ -1502,6 +1513,7 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
+# CONFIG_IP6_NF_SECURITY is not set
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
@@ -1768,7 +1780,7 @@ CONFIG_RFKILL_INPUT=y
# CONFIG_CAIF is not set
# CONFIG_CEPH_LIB is not set
# CONFIG_NFC is not set
-# CONFIG_PSAMPLE is not set
+CONFIG_PSAMPLE=y
# CONFIG_NET_IFE is not set
CONFIG_LWTUNNEL=y
CONFIG_LWTUNNEL_BPF=y
@@ -1786,8 +1798,6 @@ CONFIG_ETHTOOL_NETLINK=y
#
# Device Drivers
#
-CONFIG_HAVE_EISA=y
-# CONFIG_EISA is not set
CONFIG_HAVE_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
@@ -4406,6 +4416,7 @@ CONFIG_HID_GENERIC=m
# CONFIG_HID_THRUSTMASTER is not set
# CONFIG_HID_UDRAW_PS3 is not set
# CONFIG_HID_U2FZERO is not set
+# CONFIG_HID_UNIVERSAL_PIDFF is not set
# CONFIG_HID_WACOM is not set
# CONFIG_HID_WIIMOTE is not set
# CONFIG_HID_XINMO is not set
@@ -4413,6 +4424,7 @@ CONFIG_HID_GENERIC=m
# CONFIG_HID_ZYDACRON is not set
# CONFIG_HID_SENSOR_HUB is not set
# CONFIG_HID_ALPS is not set
+# CONFIG_HID_MCP2200 is not set
# CONFIG_HID_MCP2221 is not set
# end of Special HID drivers
@@ -5010,18 +5022,27 @@ CONFIG_VIRTIO_PCI_LIB_LEGACY=m
CONFIG_VIRTIO_MENU=y
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTIO_VDPA=m
# CONFIG_VIRTIO_PMEM is not set
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=m
CONFIG_VIRTIO_MMIO=m
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-# CONFIG_VDPA is not set
+CONFIG_VDPA=m
+# CONFIG_VDPA_USER is not set
+# CONFIG_IFCVF is not set
+# CONFIG_MLX5_VDPA_NET is not set
+# CONFIG_MLX5_VDPA_STEERING_DEBUG is not set
+# CONFIG_VP_VDPA is not set
+# CONFIG_ALIBABA_ENI_VDPA is not set
+# CONFIG_SNET_VDPA is not set
CONFIG_VHOST_IOTLB=m
CONFIG_VHOST_TASK=y
CONFIG_VHOST=m
CONFIG_VHOST_MENU=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_VDPA=m
# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
#
@@ -5399,7 +5420,6 @@ CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY_USER=y
-CONFIG_INOTIFY_STACKFS=y
# CONFIG_FANOTIFY is not set
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=m
@@ -5443,7 +5463,8 @@ CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_CODEPAGE=437
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
CONFIG_FAT_DEFAULT_UTF8=y
-# CONFIG_EXFAT_FS is not set
+CONFIG_EXFAT_FS=m
+CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
# CONFIG_NTFS_FS is not set
# CONFIG_NTFS3_FS is not set
# end of DOS/FAT/EXFAT/NT Filesystems
@@ -5465,9 +5486,9 @@ CONFIG_TMPFS_XATTR=y
# CONFIG_TMPFS_INODE64 is not set
# CONFIG_TMPFS_QUOTA is not set
CONFIG_HUGETLBFS=y
+# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set
CONFIG_HUGETLB_PAGE=y
CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
-# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set
CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
CONFIG_CONFIGFS_FS=m
CONFIG_EFIVAR_FS=m
@@ -5572,7 +5593,6 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
# CONFIG_DLM is not set
# CONFIG_UNICODE is not set
-CONFIG_IO_WQ=y
# end of File systems
#
@@ -5585,12 +5605,31 @@ CONFIG_KEYS=y
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
CONFIG_SECURITY_DMESG_RESTRICT=y
-# CONFIG_SECURITY is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
+CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
+# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_INFINIBAND is not set
+# CONFIG_SECURITY_PATH is not set
# CONFIG_INTEL_TXT is not set
-CONFIG_HARDENED_USERCOPY=y
+# CONFIG_HARDENED_USERCOPY is not set
CONFIG_FORTIFY_SOURCE=y
# CONFIG_STATIC_USERMODEHELPER is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_LOADPIN is not set
+# CONFIG_SECURITY_YAMA is not set
+# CONFIG_SECURITY_SAFESETID is not set
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y
+# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set
+# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set
+# CONFIG_SECURITY_LANDLOCK is not set
+# CONFIG_INTEGRITY is not set
# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
@@ -5888,6 +5927,9 @@ CONFIG_SIGNED_PE_FILE_VERIFICATION=y
#
# Certificates for signature checking
#
+CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
+CONFIG_MODULE_SIG_KEY_TYPE_RSA=y
+# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set
CONFIG_SYSTEM_TRUSTED_KEYRING=y
CONFIG_SYSTEM_TRUSTED_KEYS=""
# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
@@ -6063,7 +6105,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
# end of printk and dmesg options
CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MISC=y
+# CONFIG_DEBUG_MISC is not set
#
# Compile-time checks and compiler options
@@ -6141,7 +6183,7 @@ CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
# CONFIG_DEBUG_VM_PGTABLE is not set
CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
# CONFIG_DEBUG_VIRTUAL is not set
-CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_PER_CPU_MAPS is not set
CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y
# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set
@@ -6192,7 +6234,6 @@ CONFIG_SCHEDSTATS=y
# end of Scheduler Debugging
# CONFIG_DEBUG_TIMEKEEPING is not set
-CONFIG_DEBUG_PREEMPT=y
#
# Lock Debugging (spinlocks, mutexes, etc...)
@@ -6274,7 +6315,6 @@ CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
-# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_HWLAT_TRACER is not set
# CONFIG_OSNOISE_TRACER is not set
@@ -6327,7 +6367,7 @@ CONFIG_IO_DELAY_0X80=y
# CONFIG_CPA_DEBUG is not set
# CONFIG_DEBUG_ENTRY is not set
# CONFIG_DEBUG_NMI_SELFTEST is not set
-CONFIG_X86_DEBUG_FPU=y
+# CONFIG_X86_DEBUG_FPU is not set
# CONFIG_PUNIT_ATOM_DEBUG is not set
CONFIG_UNWINDER_ORC=y
# CONFIG_UNWINDER_FRAME_POINTER is not set
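
With the defconfig changes above, the x86 kernel now enforces module signing: `CONFIG_MODULE_SIG_FORCE=y` means unsigned modules are rejected outright, and SHA-512 is the only accepted hash. Assuming kmod's `modinfo` is available, a built module can be spot-checked like this (the `.ko` path is illustrative):

```python
# Spot-check the signature metadata of a freshly built module; assumes
# kmod's modinfo is installed. The module path is illustrative.
from subprocess import run

def show_signature(module: str) -> None:
    for field in ('signer', 'sig_key', 'sig_hashalgo'):
        value = run(['modinfo', '-F', field, module],
                    capture_output=True, text=True, check=True).stdout.strip()
        print(f'{field}: {value or "<unsigned>"}')

show_signature('ipt_NETFLOW.ko')  # a signed module should report sha512
```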
diff --git a/scripts/package-build/linux-kernel/build-accel-ppp.sh b/scripts/package-build/linux-kernel/build-accel-ppp.sh
index 1685ff8d..a2f8df52 100755
--- a/scripts/package-build/linux-kernel/build-accel-ppp.sh
+++ b/scripts/package-build/linux-kernel/build-accel-ppp.sh
@@ -13,6 +13,10 @@ if [ ! -f ${KERNEL_VAR_FILE} ]; then
exit 1
fi
+cd ${ACCEL_SRC}
+git reset --hard HEAD
+git clean --force -d -x
+
PATCH_DIR=${CWD}/patches/accel-ppp
if [ -d $PATCH_DIR ]; then
cd ${ACCEL_SRC}
@@ -36,6 +40,10 @@ cmake -DBUILD_IPOE_DRIVER=TRUE \
-DMODULES_KDIR=${KERNEL_VERSION}${KERNEL_SUFFIX} \
-DCPACK_TYPE=Debian12 ..
make
+
+# Sign generated Kernel modules
+${CWD}/sign-modules.sh .
+
cpack -G DEB
# rename resulting Debian package according git description
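
Like the other module build scripts in this change, `build-accel-ppp.sh` now resets its source tree before building and runs `sign-modules.sh` over the build output. The helper itself is not shown in this section of the diff; assuming it wraps the kernel's `scripts/sign-file` with the ephemeral key pair that `build-kernel.sh` exports (see its diff further down), a stand-in could look like:

```python
# Hypothetical stand-in for sign-modules.sh, which is not shown in this diff:
# sign every *.ko under a directory with the kernel's scripts/sign-file, using
# the ephemeral key/cert that build-kernel.sh exports via kernel-vars.
import os
from pathlib import Path
from subprocess import run

def sign_modules(root: str) -> None:
    kernel_dir = os.environ['KERNEL_DIR']  # set by sourcing kernel-vars
    key = os.environ['EPHEMERAL_KEY']      # e.g. /tmp/ephemeral.key
    cert = os.environ['EPHEMERAL_CERT']    # e.g. /tmp/ephemeral.pem
    sign_file = Path(kernel_dir) / 'scripts' / 'sign-file'
    for module in Path(root).rglob('*.ko'):
        run([str(sign_file), 'sha512', key, cert, str(module)], check=True)
```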
diff --git a/scripts/package-build/linux-kernel/build-intel-ixgbe.sh b/scripts/package-build/linux-kernel/build-intel-ixgbe.sh
deleted file mode 100755
index 5f45c62a..00000000
--- a/scripts/package-build/linux-kernel/build-intel-ixgbe.sh
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/bin/sh
-CWD=$(pwd)
-KERNEL_VAR_FILE=${CWD}/kernel-vars
-
-if ! dpkg-architecture -iamd64; then
- echo "Intel ixgbe is only buildable on amd64 platforms"
- exit 0
-fi
-
-if [ ! -f ${KERNEL_VAR_FILE} ]; then
- echo "Kernel variable file '${KERNEL_VAR_FILE}' does not exist, run ./build_kernel.sh first"
- exit 1
-fi
-
-. ${KERNEL_VAR_FILE}
-
-url="https://sourceforge.net/projects/e1000/files/ixgbe%20stable/5.20.3/ixgbe-5.20.3.tar.gz"
-
-cd ${CWD}
-
-DRIVER_FILE=$(basename ${url} | sed -e s/tar_0/tar/)
-DRIVER_DIR="${DRIVER_FILE%.tar.gz}"
-DRIVER_NAME="ixgbe"
-DRIVER_VERSION=$(echo ${DRIVER_DIR} | awk -F${DRIVER_NAME} '{print $2}' | sed 's/^-//')
-DRIVER_VERSION_EXTRA=""
-
-# Build up Debian related variables required for packaging
-DEBIAN_ARCH=$(dpkg --print-architecture)
-DEBIAN_DIR="${CWD}/vyos-intel-${DRIVER_NAME}_${DRIVER_VERSION}_${DEBIAN_ARCH}"
-DEBIAN_CONTROL="${DEBIAN_DIR}/DEBIAN/control"
-DEBIAN_POSTINST="${CWD}/vyos-intel-ixgbe.postinst"
-
-# Fetch Intel driver source from SourceForge
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-curl -L -o ${DRIVER_FILE} ${url}
-if [ "$?" -ne "0" ]; then
- exit 1
-fi
-
-# Unpack archive
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-mkdir -p ${DRIVER_DIR}
-tar -C ${DRIVER_DIR} --strip-components=1 -xf ${DRIVER_FILE}
-
-cd ${DRIVER_DIR}/src
-if [ -z $KERNEL_DIR ]; then
- echo "KERNEL_DIR not defined"
- exit 1
-fi
-
-# See https://lore.kernel.org/lkml/f90837d0-810e-5772-7841-28d47c44d260@intel.com/
-echo "I: remove pci_enable_pcie_error_reporting() code no longer present in Kernel"
-sed -i '/.*pci_disable_pcie_error_reporting(pdev);/d' ixgbe_main.c
-sed -i '/.*pci_enable_pcie_error_reporting(pdev);/d' ixgbe_main.c
-
-# See https://vyos.dev/T6155
-echo "I: always enable allow_unsupported_sfp for all NICs by default"
-patch -l -p1 < ../../patches/ixgbe/allow_unsupported_sfp.patch
-
-# See https://vyos.dev/T6162
-echo "I: add 1000BASE-BX support"
-patch -l -p1 < ../../patches/ixgbe/add_1000base-bx_support.patch
-
-echo "I: Compile Kernel module for Intel ${DRIVER_NAME} driver"
-make KSRC=${KERNEL_DIR} INSTALL_MOD_PATH=${DEBIAN_DIR} INSTALL_FW_PATH=${DEBIAN_DIR} -j $(getconf _NPROCESSORS_ONLN) install
-
-if [ "x$?" != "x0" ]; then
- exit 1
-fi
-
-if [ -f ${DEBIAN_DIR}.deb ]; then
- rm ${DEBIAN_DIR}.deb
-fi
-
-# build Debian package
-echo "I: Building Debian package vyos-intel-${DRIVER_NAME}"
-cd ${CWD}
-
-# delete non required files which are also present in the kernel package
-# und thus lead to duplicated files
-find ${DEBIAN_DIR} -name "modules.*" | xargs rm -f
-
-echo "#!/bin/sh" > ${DEBIAN_POSTINST}
-echo "/sbin/depmod -a ${KERNEL_VERSION}${KERNEL_SUFFIX}" >> ${DEBIAN_POSTINST}
-
-fpm --input-type dir --output-type deb --name vyos-intel-${DRIVER_NAME} \
- --version ${DRIVER_VERSION} --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "Vendor based driver for Intel ${DRIVER_NAME}" \
- --depends linux-image-${KERNEL_VERSION}${KERNEL_SUFFIX} \
- --license "GPL2" -C ${DEBIAN_DIR} --after-install ${DEBIAN_POSTINST}
-
-echo "I: Cleanup ${DRIVER_NAME} source"
-cd ${CWD}
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-if [ -d ${DEBIAN_DIR} ]; then
- rm -rf ${DEBIAN_DIR}
-fi
diff --git a/scripts/package-build/linux-kernel/build-intel-ixgbevf.sh b/scripts/package-build/linux-kernel/build-intel-nic.sh
index a965e0de..3e8bbb37 100755
--- a/scripts/package-build/linux-kernel/build-intel-ixgbevf.sh
+++ b/scripts/package-build/linux-kernel/build-intel-nic.sh
@@ -3,7 +3,7 @@ CWD=$(pwd)
KERNEL_VAR_FILE=${CWD}/kernel-vars
if ! dpkg-architecture -iamd64; then
- echo "Intel ixgbevf is only buildable on amd64 platforms"
+ echo "Intel drivers are only buildable on amd64 platforms"
exit 0
fi
@@ -14,51 +14,39 @@ fi
. ${KERNEL_VAR_FILE}
-url="https://sourceforge.net/projects/e1000/files/ixgbevf%20stable/4.18.9/ixgbevf-4.18.9.tar.gz"
+if [ -z $KERNEL_DIR ]; then
+ echo "KERNEL_DIR not defined"
+ exit 1
+fi
-cd ${CWD}
+DRIVER_NAME=$1
+cd ${CWD}/ethernet-linux-${DRIVER_NAME}
+if [ -d .git ]; then
+ git clean --force -d -x
+ git reset --hard origin/main
+fi
-DRIVER_FILE=$(basename ${url} | sed -e s/tar_0/tar/)
-DRIVER_DIR="${DRIVER_FILE%.tar.gz}"
-DRIVER_NAME="ixgbevf"
-DRIVER_VERSION=$(echo ${DRIVER_DIR} | awk -F${DRIVER_NAME} '{print $2}' | sed 's/^-//')
-DRIVER_VERSION_EXTRA=""
+DRIVER_VERSION=$(git describe | sed s/^v//)
# Build up Debian related variables required for packaging
DEBIAN_ARCH=$(dpkg --print-architecture)
DEBIAN_DIR="${CWD}/vyos-intel-${DRIVER_NAME}_${DRIVER_VERSION}_${DEBIAN_ARCH}"
DEBIAN_CONTROL="${DEBIAN_DIR}/DEBIAN/control"
-DEBIAN_POSTINST="${CWD}/vyos-intel-ixgbevf.postinst"
-
-# Fetch Intel driver source from SourceForge
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-curl -L -o ${DRIVER_FILE} ${url}
-if [ "$?" -ne "0" ]; then
- exit 1
-fi
-
-# Unpack archive
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-mkdir -p ${DRIVER_DIR}
-tar -C ${DRIVER_DIR} --strip-components=1 -xf ${DRIVER_FILE}
-
-cd ${DRIVER_DIR}/src
-if [ -z $KERNEL_DIR ]; then
- echo "KERNEL_DIR not defined"
- exit 1
+DEBIAN_POSTINST="${CWD}/vyos-intel-${DRIVER_NAME}.postinst"
+
+# See https://vyos.dev/T6155
+# See https://vyos.dev/T6162
+PATCH_DIR=${CWD}/patches/${DRIVER_NAME}
+if [ -d $PATCH_DIR ]; then
+ for patch in $(ls ${PATCH_DIR})
+ do
+ echo "I: Apply patch: ${PATCH_DIR}/${patch}"
+ patch -p1 < ${PATCH_DIR}/${patch}
+ done
fi
-# See https://lore.kernel.org/lkml/f90837d0-810e-5772-7841-28d47c44d260@intel.com/
-echo "I: remove pci_enable_pcie_error_reporting() code no longer present in Kernel"
-sed -i '/.*pci_disable_pcie_error_reporting(pdev);/d' ixgbevf_main.c
-sed -i '/.*pci_enable_pcie_error_reporting(pdev);/d' ixgbevf_main.c
-
echo "I: Compile Kernel module for Intel ${DRIVER_NAME} driver"
-make KSRC=${KERNEL_DIR} INSTALL_MOD_PATH=${DEBIAN_DIR} INSTALL_FW_PATH=${DEBIAN_DIR} -j $(getconf _NPROCESSORS_ONLN) install
+make KSRC=${KERNEL_DIR} INSTALL_MOD_PATH=${DEBIAN_DIR} INSTALL_FW_PATH=${DEBIAN_DIR} -j $(getconf _NPROCESSORS_ONLN) -C src install
if [ "x$?" != "x0" ]; then
exit 1
@@ -72,6 +60,9 @@ fi
echo "I: Building Debian package vyos-intel-${DRIVER_NAME}"
cd ${CWD}
+# Sign generated Kernel modules
+${CWD}/sign-modules.sh ${DEBIAN_DIR}
+
# delete non-required files which are also present in the kernel package
# and thus lead to duplicated files
find ${DEBIAN_DIR} -name "modules.*" | xargs rm -f
@@ -85,16 +76,3 @@ fpm --input-type dir --output-type deb --name vyos-intel-${DRIVER_NAME} \
--description "Vendor based driver for Intel ${DRIVER_NAME}" \
--depends linux-image-${KERNEL_VERSION}${KERNEL_SUFFIX} \
--license "GPL2" -C ${DEBIAN_DIR} --after-install ${DEBIAN_POSTINST}
-
-echo "I: Cleanup ${DRIVER_NAME} source"
-cd ${CWD}
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-if [ -d ${DEBIAN_DIR} ]; then
- rm -rf ${DEBIAN_DIR}
-fi
-
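
With the SourceForge tarball handling removed, one parameterized script now covers all Intel NIC drivers: the driver name arrives as `$1` and selects both the `ethernet-linux-*` checkout and the patch directory. Driving it looks like this (mirroring `build_intel()` in the `build.py` diff below; each repository must be cloned beforehand):

```python
# How the unified script is invoked, one driver name per call; this mirrors
# build_intel() in build.py further down. Repos must already be cloned.
from subprocess import run

for driver in ('igb', 'ixgbe', 'ixgbevf'):
    run(['./build-intel-nic.sh', driver], check=True)
```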
diff --git a/scripts/package-build/linux-kernel/build-intel-qat.sh b/scripts/package-build/linux-kernel/build-intel-qat.sh
index 765cea3f..c2c364a9 100755
--- a/scripts/package-build/linux-kernel/build-intel-qat.sh
+++ b/scripts/package-build/linux-kernel/build-intel-qat.sh
@@ -14,7 +14,7 @@ fi
. ${KERNEL_VAR_FILE}
-url="https://dev.packages.vyos.net/source-mirror/QAT.L.4.24.0-00005.tar.gz"
+url="https://packages.vyos.net/source-mirror/QAT.L.4.24.0-00005.tar.gz"
cd ${CWD}
@@ -84,6 +84,9 @@ fi
echo "I: Building Debian package vyos-intel-${DRIVER_NAME}"
cd ${CWD}
+# Sign generated Kernel modules
+${CWD}/sign-modules.sh ${DEBIAN_DIR}
+
# delete non-required files which are also present in the kernel package
# and thus lead to duplicated files
find ${DEBIAN_DIR} -name "modules.*" | xargs rm -f
@@ -98,14 +101,17 @@ fpm --input-type dir --output-type deb --name vyos-intel-${DRIVER_NAME} \
--depends linux-image-${KERNEL_VERSION}${KERNEL_SUFFIX} \
--license "GPL2" -C ${DEBIAN_DIR} --after-install ${DEBIAN_POSTINST}
-echo "I: Cleanup ${DRIVER_NAME} source"
-cd ${CWD}
-if [ -e ${DRIVER_FILE} ]; then
- rm -f ${DRIVER_FILE}
-fi
-if [ -d ${DRIVER_DIR} ]; then
- rm -rf ${DRIVER_DIR}
-fi
-if [ -d ${DEBIAN_DIR} ]; then
- rm -rf ${DEBIAN_DIR}
-fi
+# echo "I: Cleanup ${DRIVER_NAME} source"
+# cd ${CWD}
+# if [ -e ${DRIVER_FILE} ]; then
+# rm -f ${DRIVER_FILE}
+# fi
+# if [ -d ${DRIVER_DIR} ]; then
+# rm -rf ${DRIVER_DIR}
+# fi
+# if [ -d ${DEBIAN_DIR} ]; then
+# rm -rf ${DEBIAN_DIR}
+# fi
+# if [ -f ${DEBIAN_POSTINST} ]; then
+# rm -f ${DEBIAN_POSTINST}
+# fi
diff --git a/scripts/package-build/linux-kernel/build-ipt-netflow.sh b/scripts/package-build/linux-kernel/build-ipt-netflow.sh
new file mode 100755
index 00000000..9245a416
--- /dev/null
+++ b/scripts/package-build/linux-kernel/build-ipt-netflow.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+CWD=$(pwd)
+KERNEL_VAR_FILE=${CWD}/kernel-vars
+
+IPT_NETFLOW_SRC=${CWD}/ipt-netflow
+if [ ! -d ${IPT_NETFLOW_SRC} ]; then
+ echo "ipt_NETFLOW source not found"
+ exit 1
+fi
+
+if [ ! -f ${KERNEL_VAR_FILE} ]; then
+ echo "Kernel variable file '${KERNEL_VAR_FILE}' does not exist, run ./build_kernel.sh first"
+ exit 1
+fi
+
+cd ${IPT_NETFLOW_SRC}
+if [ -d .git ]; then
+ git reset --hard HEAD
+ git clean --force -d -x
+fi
+
+. ${KERNEL_VAR_FILE}
+
+DRIVER_VERSION=$(git describe | sed s/^v//)
+
+# Build up Debian related variables required for packaging
+DEBIAN_ARCH=$(dpkg --print-architecture)
+DEBIAN_DIR="tmp/"
+DEBIAN_CONTROL="${DEBIAN_DIR}/DEBIAN/control"
+DEBIAN_POSTINST="${CWD}/vyos-ipt-netflow.postinst"
+
+./configure --enable-aggregation --kdir=${KERNEL_DIR}
+make all
+
+if [ "x$?" != "x0" ]; then
+ exit 1
+fi
+
+if [ -f ${DEBIAN_DIR}.deb ]; then
+ rm ${DEBIAN_DIR}.deb
+fi
+
+if [ ! -d ${DEBIAN_DIR} ]; then
+ mkdir -p ${DEBIAN_DIR}
+fi
+
+# build Debian package
+echo "I: Building Debian package vyos-ipt-netflow"
+cp ipt_NETFLOW.ko ${DEBIAN_DIR}
+cp libipt_NETFLOW.so ${DEBIAN_DIR}
+cp libip6t_NETFLOW.so ${DEBIAN_DIR}
+
+# Sign generated Kernel modules
+${CWD}/sign-modules.sh ${DEBIAN_DIR}
+
+echo "#!/bin/sh" > ${DEBIAN_POSTINST}
+echo "/sbin/depmod -a ${KERNEL_VERSION}${KERNEL_SUFFIX}" >> ${DEBIAN_POSTINST}
+
+cd ${CWD}
+
+fpm --input-type dir --output-type deb --name vyos-ipt-netflow \
+ --version ${DRIVER_VERSION} --deb-compression gz \
+ --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
+ --description "ipt_NETFLOW module" \
+ --depends linux-image-${KERNEL_VERSION}${KERNEL_SUFFIX} \
+ --license "GPL2" -C ${IPT_NETFLOW_SRC}/tmp --after-install ${DEBIAN_POSTINST} \
+ ipt_NETFLOW.ko=/lib/modules/${KERNEL_VERSION}${KERNEL_SUFFIX}/extra/ipt_NETFLOW.ko \
+ libipt_NETFLOW.so=/lib/$(uname -m)-linux-gnu/xtables/libipt_NETFLOW.so \
+ libip6t_NETFLOW.so=/lib/$(uname -m)-linux-gnu/xtables/libip6t_NETFLOW.so
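
The final `fpm` call maps each artifact to its install path with `source=destination` pairs relative to `-C`: the module goes to `extra/` under the kernel's module directory and the xtables plugins land next to the other iptables extensions. A hedged post-install check on a target system might look like this (the kernel string is an assumption, not taken from this diff):

```python
# Illustrative post-install smoke test; requires root on a system running the
# matching VyOS kernel. The kernel version string is an assumption.
from subprocess import run

kernel = '6.6.89-vyos'  # KERNEL_VERSION + KERNEL_SUFFIX on the target system
run(['depmod', '-a', kernel], check=True)              # what the postinst does
run(['modinfo', '-k', kernel, 'ipt_NETFLOW'], check=True)
```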
diff --git a/scripts/package-build/linux-kernel/build-jool.py b/scripts/package-build/linux-kernel/build-jool.py
index 570293f5..3d2c3d6a 100755
--- a/scripts/package-build/linux-kernel/build-jool.py
+++ b/scripts/package-build/linux-kernel/build-jool.py
@@ -29,9 +29,8 @@ def add_depends(package_dir: str, package_name: str,
# find kernel version and source path
arch: str = find_arch()
defaults_file: str = Path('../../../data/defaults.toml').read_text()
-architecture_file: str = Path(f'../../../data/architectures/{arch}.toml').read_text()
KERNEL_VER: str = toml_loads(defaults_file).get('kernel_version')
-KERNEL_FLAVOR: str = toml_loads(architecture_file).get('kernel_flavor')
+KERNEL_FLAVOR: str = toml_loads(defaults_file).get('kernel_flavor')
KERNEL_SRC: str = Path.cwd().as_posix() + '/linux'
# define variables
@@ -66,7 +65,7 @@ MODULES_DIR := extra
# main packaging script based on dh7 syntax
%:
- dh $@
+ dh $@
override_dh_clean:
dh_clean --exclude=debian/{PACKAGE_NAME}.substvars
@@ -88,7 +87,7 @@ override_dh_auto_install:
install -D -m 644 src/mod/common/jool_common.ko ${{PACKAGE_BUILD_DIR}}/lib/modules/${{KVER}}/${{MODULES_DIR}}/jool_common.ko
install -D -m 644 src/mod/nat64/jool.ko ${{PACKAGE_BUILD_DIR}}/lib/modules/${{KVER}}/${{MODULES_DIR}}/jool.ko
install -D -m 644 src/mod/siit/jool_siit.ko ${{PACKAGE_BUILD_DIR}}/lib/modules/${{KVER}}/${{MODULES_DIR}}/jool_siit.ko
-
+ ${{KERNEL_DIR}}/../sign-modules.sh ${{PACKAGE_BUILD_DIR}}/lib
'''
bild_rules = Path(f'{PACKAGE_DIR}/debian/rules')
bild_rules.write_text(build_rules_text)
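
The one-line fix above reflects that `kernel_flavor` now lives in `data/defaults.toml` next to `kernel_version`, rather than in the per-architecture files. Stand-alone, the lookup reduces to the following (example values are placeholders, not taken from `defaults.toml`):

```python
# The lookup build-jool.py performs after this change, shown stand-alone.
# The commented example values are placeholders.
from pathlib import Path
from tomllib import loads as toml_loads

defaults = toml_loads(Path('../../../data/defaults.toml').read_text())
KERNEL_VER = defaults.get('kernel_version')     # e.g. '6.6.89'
KERNEL_FLAVOR = defaults.get('kernel_flavor')   # e.g. 'vyos'
print(f'linux-image-{KERNEL_VER}-{KERNEL_FLAVOR}')  # the package dependency
```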
diff --git a/scripts/package-build/linux-kernel/build-kernel.sh b/scripts/package-build/linux-kernel/build-kernel.sh
index 2c02f5c3..62dd7829 100755
--- a/scripts/package-build/linux-kernel/build-kernel.sh
+++ b/scripts/package-build/linux-kernel/build-kernel.sh
@@ -9,16 +9,20 @@ if [ ! -d ${KERNEL_SRC} ]; then
exit 1
fi
-echo "I: Copy Kernel config (x86_64_vyos_defconfig) to Kernel Source"
-cp -rv arch/ ${KERNEL_SRC}/
-
cd ${KERNEL_SRC}
-echo "I: clean modified files"
-git reset --hard HEAD
+if [ -d .git ]; then
+ echo "I: Clean modified files - reset Git repo"
+ git reset --hard HEAD
+ git clean --force -d -x
+fi
+
+echo "I: Copy Kernel config (x86_64_vyos_defconfig) to Kernel Source"
+cp -rv ${CWD}/arch/ .
KERNEL_VERSION=$(make kernelversion)
-KERNEL_SUFFIX=-$(dpkg --print-architecture)-vyos
+KERNEL_SUFFIX=-$(awk -F "= " '/kernel_flavor/ {print $2}' ../../../../data/defaults.toml | tr -d \")
+KERNEL_CONFIG=arch/x86/configs/vyos_defconfig
# VyOS requires some small Kernel Patches - apply them here
# It's easier to have them here and make use of the upstream
@@ -31,26 +35,54 @@ do
patch -p1 < ${PATCH_DIR}/${patch}
done
+# Change name of Signing Cert
+sed -i -e "s/CN =.*/CN=VyOS Networks build time autogenerated Kernel key/" certs/default_x509.genkey
+
+TRUSTED_KEYS_FILE=trusted_keys.pem
+# start with empty key file
+echo -n "" > $TRUSTED_KEYS_FILE
+GIT_ROOT=$(git rev-parse --show-toplevel)
+CERTS=$(find ${GIT_ROOT}/data/certificates -name "*.pem" -type f || true)
+if [ ! -z "${CERTS}" ]; then
+ # add known public keys to Kernel certificate chain
+ for file in $CERTS; do
+ cat $file >> $TRUSTED_KEYS_FILE
+ done
+ # Force Kernel module signing and embed public keys
+ echo "CONFIG_SYSTEM_TRUSTED_KEYRING=y" >> $KERNEL_CONFIG
+ echo "CONFIG_SYSTEM_TRUSTED_KEYS=\"$TRUSTED_KEYS_FILE\"" >> $KERNEL_CONFIG
+fi
+
echo "I: make vyos_defconfig"
# Select Kernel configuration - currently there is only one
make vyos_defconfig
echo "I: Generate environment file containing Kernel variable"
+EPHEMERAL_KEY="/tmp/ephemeral.key"
+EPHEMERAL_PEM="/tmp/ephemeral.pem"
cat << EOF >${CWD}/kernel-vars
#!/bin/sh
export KERNEL_VERSION=${KERNEL_VERSION}
export KERNEL_SUFFIX=${KERNEL_SUFFIX}
export KERNEL_DIR=${CWD}/${KERNEL_SRC}
+export EPHEMERAL_KEY=${EPHEMERAL_KEY}
+export EPHEMERAL_CERT=${EPHEMERAL_PEM}
EOF
echo "I: Build Debian Kernel package"
touch .scmversion
make bindeb-pkg BUILD_TOOLS=1 LOCALVERSION=${KERNEL_SUFFIX} KDEB_PKGVERSION=${KERNEL_VERSION}-1 -j $(getconf _NPROCESSORS_ONLN)
+# Back to the old Kernel build-scripts directory
cd $CWD
-if [[ $? == 0 ]]; then
- for package in $(ls linux-*.deb)
- do
- ln -sf linux-kernel/$package ..
- done
+EPHEMERAL_KERNEL_KEY=$(grep -E "^CONFIG_MODULE_SIG_KEY=" ${KERNEL_SRC}/$KERNEL_CONFIG | awk -F= '{print $2}' | tr -d \")
+if test -f "${EPHEMERAL_KEY}"; then
+ rm -f ${EPHEMERAL_KEY}
+fi
+if test -f "${EPHEMERAL_PEM}"; then
+ rm -f ${EPHEMERAL_PEM}
+fi
+if test -f "${KERNEL_SRC}/${EPHEMERAL_KERNEL_KEY}"; then
+ openssl rsa -in ${KERNEL_SRC}/${EPHEMERAL_KERNEL_KEY} -out ${EPHEMERAL_KEY}
+ openssl x509 -in ${KERNEL_SRC}/${EPHEMERAL_KERNEL_KEY} -out ${EPHEMERAL_PEM}
fi
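
`build-kernel.sh` now does three related things: it rewrites the CN of the autogenerated signing key, embeds every `*.pem` found under `data/certificates` into `CONFIG_SYSTEM_TRUSTED_KEYS`, and extracts the build-time key into `/tmp/ephemeral.{key,pem}` for the module scripts to reuse. A local developer certificate that the keyring scan would pick up could be generated like this (filenames and CN are illustrative; the private key must stay out of the repository):

```python
# Hypothetical helper: create a local module-signing certificate that the
# data/certificates scan in build-kernel.sh would embed into the kernel's
# trusted keyring. Filenames and CN are illustrative.
from subprocess import run

run(['openssl', 'req', '-new', '-x509', '-newkey', 'rsa:4096', '-nodes',
     '-days', '365',
     '-subj', '/CN=Local VyOS developer module signing key/',
     '-keyout', 'dev-signing.key',  # keep the private key out of Git
     '-out', 'data/certificates/dev-signing.pem'], check=True)
```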
diff --git a/packages/linux-kernel/build-mellanox-ofed.sh b/scripts/package-build/linux-kernel/build-mellanox-ofed.sh
index a157ee61..3f8a50f0 100755
--- a/packages/linux-kernel/build-mellanox-ofed.sh
+++ b/scripts/package-build/linux-kernel/build-mellanox-ofed.sh
@@ -117,6 +117,18 @@ cp $(find $CWD/$DRIVER_DIR/DEBS/$DEB_DISTRO -type f | grep '\.deb$') "$CWD/"
echo "I: Cleanup ${DRIVER_NAME} source"
cd ${CWD}
+
+# Sign modules
+DEB_NAME=$(ls mlnx-ofed-kernel-modules_*)
+TMP_DIR="tmp-ofed-sign"
+dpkg-deb --raw-extract ${DEB_NAME} ${TMP_DIR}
+# Sign generated Kernel modules
+${CWD}/sign-modules.sh ${TMP_DIR}
+# Cleanup and repack DEB
+rm -f ${DEB_NAME}
+dpkg-deb --build ${TMP_DIR} ${DEB_NAME}
+rm -rf ${TMP_DIR}
+
if [ -f ${DRIVER_FILE} ]; then
rm -f ${DRIVER_FILE}
fi
diff --git a/scripts/package-build/linux-kernel/build-nat-rtsp.sh b/scripts/package-build/linux-kernel/build-nat-rtsp.sh
index ec7d19a6..33f1311d 100755
--- a/scripts/package-build/linux-kernel/build-nat-rtsp.sh
+++ b/scripts/package-build/linux-kernel/build-nat-rtsp.sh
@@ -15,7 +15,10 @@ fi
. ${KERNEL_VAR_FILE}
-cd ${SRC} && make KERNELDIR=$KERNEL_DIR
+cd ${SRC}
+git reset --hard HEAD
+git clean --force -d -x
+make KERNELDIR=$KERNEL_DIR
# Copy binary to package directory
DEBIAN_DIR=tmp/lib/modules/${KERNEL_VERSION}${KERNEL_SUFFIX}/extra
@@ -26,6 +29,9 @@ DEBIAN_POSTINST="${CWD}/vyos-nat-rtsp.postinst"
echo "#!/bin/sh" > ${DEBIAN_POSTINST}
echo "/sbin/depmod -a ${KERNEL_VERSION}${KERNEL_SUFFIX}" >> ${DEBIAN_POSTINST}
+# Sign generated Kernel modules
+${CWD}/sign-modules.sh ${DEBIAN_DIR}
+
# Build Debian Package
fpm --input-type dir --output-type deb --name nat-rtsp \
--version $(git describe --tags --always) --deb-compression gz \
@@ -36,3 +42,7 @@ fpm --input-type dir --output-type deb --name nat-rtsp \
--license "GPL2" --chdir tmp
mv *.deb ..
+
+if [ -f ${DEBIAN_POSTINST} ]; then
+ rm -f ${DEBIAN_POSTINST}
+fi
diff --git a/scripts/package-build/linux-kernel/build-openvpn-dco.sh b/scripts/package-build/linux-kernel/build-openvpn-dco.sh
index fd427825..518729ee 100755
--- a/scripts/package-build/linux-kernel/build-openvpn-dco.sh
+++ b/scripts/package-build/linux-kernel/build-openvpn-dco.sh
@@ -15,13 +15,19 @@ fi
. ${KERNEL_VAR_FILE}
-cd ${SRC} && make KERNEL_SRC=$KERNEL_DIR
+cd ${SRC}
+git reset --hard HEAD
+git clean --force -d -x
+make KERNEL_SRC=$KERNEL_DIR
# Copy binary to package directory
DEBIAN_DIR=tmp/lib/modules/${KERNEL_VERSION}${KERNEL_SUFFIX}/extra
mkdir -p ${DEBIAN_DIR}
cp drivers/net/ovpn-dco/ovpn-dco-v2.ko ${DEBIAN_DIR}
+# Sign generated Kernel modules
+${CWD}/sign-modules.sh ${DEBIAN_DIR}
+
# Build Debian Package
fpm --input-type dir --output-type deb --name openvpn-dco \
--version $(git describe | sed s/^v//) --deb-compression gz \
diff --git a/packages/linux-kernel/build-jool.py b/scripts/package-build/linux-kernel/build-realtek-r8152.py
index 3f8fd3a5..0113eafc 100755
--- a/packages/linux-kernel/build-jool.py
+++ b/scripts/package-build/linux-kernel/build-realtek-r8152.py
@@ -1,19 +1,17 @@
#!/usr/bin/env python3
+import os
from tomllib import loads as toml_loads
from requests import get
from pathlib import Path
from subprocess import run
-def find_arch() -> str:
- tmp=run(['dpkg-architecture', '-q', 'DEB_HOST_ARCH'], capture_output=True)
- return tmp.stdout.decode().strip()
+CWD = os.getcwd()
# dependency modifier
def add_depends(package_dir: str, package_name: str,
depends: list[str]) -> None:
"""Add dependencies to a package
-
Args:
package_dir (str): a directory where package sources are located
package_name (str): a name of package
@@ -27,18 +25,17 @@ def add_depends(package_dir: str, package_name: str,
# find kernel version and source path
-arch: str = find_arch()
-defaults_file: str = Path('../../data/defaults.toml').read_text()
+defaults_file: str = Path('../../../data/defaults.toml').read_text()
+architecture_file: str = Path('../../../data/architectures/amd64.toml').read_text()
KERNEL_VER: str = toml_loads(defaults_file).get('kernel_version')
KERNEL_FLAVOR: str = toml_loads(defaults_file).get('kernel_flavor')
KERNEL_SRC: str = Path.cwd().as_posix() + '/linux'
-
# define variables
-PACKAGE_NAME: str = 'jool'
-PACKAGE_VERSION: str = '4.1.9+bf4c7e3669'
+PACKAGE_NAME: str = 'vyos-drivers-realtek-r8152'
+PACKAGE_VERSION: str = '2.18.1'
PACKAGE_DIR: str = f'{PACKAGE_NAME}-{PACKAGE_VERSION}'
-SOURCES_ARCHIVE: str = 'jool-4.1.9+bf4c7e3669.tar.gz'
-SOURCES_URL: str = f'https://github.com/NICMx/Jool/archive/7f08c42c615ed63cf0fdc1522d91aa0809f6d990.tar.gz'
+SOURCES_ARCHIVE: str = 'r8152-2.18.1.tar.bz2'
+SOURCES_URL: str = f'https://packages.vyos.net/source-mirror/r8152-2.18.1.tar.bz2'
# download sources
sources_archive = Path(SOURCES_ARCHIVE)
@@ -56,42 +53,43 @@ add_depends(PACKAGE_DIR, PACKAGE_NAME,
[f'linux-image-{KERNEL_VER}-{KERNEL_FLAVOR}'])
# configure build rules
-build_rules_text: str = f'''#!/usr/bin/make -f
+build_rules_text: str = '''#!/usr/bin/make -f
# config
-export KERNEL_DIR := {KERNEL_SRC}
+export KERNELDIR := {KERNEL_SRC}
PACKAGE_BUILD_DIR := debian/{PACKAGE_NAME}
KVER := {KERNEL_VER}-{KERNEL_FLAVOR}
-MODULES_DIR := extra
-
+MODULES_DIR := updates/drivers/net/usb
# main packaging script based on dh7 syntax
%:
- dh $@
+\tdh $@
override_dh_clean:
- dh_clean --exclude=debian/{PACKAGE_NAME}.substvars
+\tdh_clean --exclude=debian/{PACKAGE_NAME}.substvars
override_dh_prep:
- dh_prep --exclude=debian/{PACKAGE_NAME}.substvars
+\tdh_prep --exclude=debian/{PACKAGE_NAME}.substvars
-# override_dh_auto_clean:
-# make -C src/mod clean
+override_dh_auto_clean:
+\tmake clean
override_dh_auto_build:
- dh_auto_build $@
- make -C ${{KERNEL_DIR}} M=$$PWD/src/mod/common modules
- make -C ${{KERNEL_DIR}} M=$$PWD/src/mod/nat64 modules
- make -C ${{KERNEL_DIR}} M=$$PWD/src/mod/siit modules
+\techo "KERNELDIR=${{KERNELDIR}}"
+\techo "CURDIR=${{CURDIR}}"
+\tmake -C ${{KERNELDIR}} M=${{CURDIR}} modules
override_dh_auto_install:
- dh_auto_install $@
- install -D -m 644 src/mod/common/jool_common.ko ${{PACKAGE_BUILD_DIR}}/lib/modules/${{KVER}}/${{MODULES_DIR}}/jool_common.ko
- install -D -m 644 src/mod/nat64/jool.ko ${{PACKAGE_BUILD_DIR}}/lib/modules/${{KVER}}/${{MODULES_DIR}}/jool.ko
- install -D -m 644 src/mod/siit/jool_siit.ko ${{PACKAGE_BUILD_DIR}}/lib/modules/${{KVER}}/${{MODULES_DIR}}/jool_siit.ko
+\tinstall -D -m 644 r8152.ko ${{PACKAGE_BUILD_DIR}}/lib/modules/${{KVER}}/${{MODULES_DIR}}/r8152.ko
+\t${{KERNELDIR}}/../sign-modules.sh ${{PACKAGE_BUILD_DIR}}/lib
+\tinstall -D -m 644 50-usb-realtek-net.rules ${{PACKAGE_BUILD_DIR}}/etc/udev/rules.d/50-usb-realtek-net.rules
+'''.format(KERNEL_SRC=KERNEL_SRC, PACKAGE_NAME=PACKAGE_NAME, KERNEL_VER=KERNEL_VER, KERNEL_FLAVOR=KERNEL_FLAVOR)
-'''
-bild_rules = Path(f'{PACKAGE_DIR}/debian/rules')
-bild_rules.write_text(build_rules_text)
+build_rules_path = Path(f'{PACKAGE_DIR}/debian/rules')
+build_rules_path.write_text(build_rules_text, encoding='utf-8')
# build a package
debuild_cmd: list[str] = ['debuild']
-run(debuild_cmd, cwd=PACKAGE_DIR)
+run(debuild_cmd, cwd=PACKAGE_DIR, check=True)
+
+# Clean up the extracted package sources after a successful build
+clean_cmd: list[str] = ['rm', '-rf', PACKAGE_DIR]
+run(clean_cmd, cwd=CWD, check=True)
diff --git a/scripts/package-build/linux-kernel/build.py b/scripts/package-build/linux-kernel/build.py
index 1bcab686..af610079 100755
--- a/scripts/package-build/linux-kernel/build.py
+++ b/scripts/package-build/linux-kernel/build.py
@@ -63,6 +63,40 @@ def clone_or_update_repo(repo_dir: Path, scm_url: str, commit_id: str) -> None:
run(['git', 'checkout', commit_id], cwd=repo_dir, check=True)
+def create_tarball(package_name, source_dir=None):
+ """Creates a .tar.gz archive of the specified directory.
+
+ Args:
+ package_name (str): The name of the package. This will also be the name of the output tarball.
+ source_dir (str, optional): The directory to be archived. If not provided, defaults to `package_name`.
+
+ Raises:
+ FileNotFoundError: If the specified `source_dir` does not exist.
+ Exception: If an error occurs during tarball creation.
+
+ Example:
+ >>> create_tarball("linux-6.6.56")
+ I: Tarball created: linux-6.6.56.tar.gz
+
+ >>> create_tarball("my-package", "/path/to/source")
+ I: Tarball created: my-package.tar.gz
+ """
+ # Use package_name as the source directory if source_dir is not provided
+ source_dir = source_dir or package_name
+ output_tarball = f"{package_name}.tar.gz"
+
+ # Check if the source directory exists
+ if not os.path.isdir(source_dir):
+ raise FileNotFoundError(f"Directory '{source_dir}' does not exist.")
+
+ # Create the tarball
+ try:
+ shutil.make_archive(base_name=output_tarball.replace('.tar.gz', ''), format='gztar', root_dir=source_dir)
+ print(f"I: Tarball created: {output_tarball}")
+ except Exception as e:
+ print(f"I: Failed to create tarball for {package_name}: {e}")
+
+
def build_package(package: dict, dependencies: list) -> None:
"""Build a package from the repository
@@ -88,20 +122,32 @@ def build_package(package: dict, dependencies: list) -> None:
# Execute the build command
if package['build_cmd'] == 'build_kernel':
build_kernel(package['kernel_version'])
+ create_tarball(f'{package["name"]}-{package["kernel_version"]}', f'linux-{package["kernel_version"]}')
elif package['build_cmd'] == 'build_linux_firmware':
build_linux_firmware(package['commit_id'], package['scm_url'])
+ create_tarball(f'{package["name"]}-{package["commit_id"]}', f'{package["name"]}')
elif package['build_cmd'] == 'build_accel_ppp':
build_accel_ppp(package['commit_id'], package['scm_url'])
+ create_tarball(f'{package["name"]}-{package["commit_id"]}', f'{package["name"]}')
elif package['build_cmd'] == 'build_intel_qat':
build_intel_qat()
+ elif package['build_cmd'] == 'build_intel_igb':
+ build_intel(package['name'], package['commit_id'], package['scm_url'])
elif package['build_cmd'] == 'build_intel_ixgbe':
- build_intel_ixgbe()
+ build_intel(package['name'], package['commit_id'], package['scm_url'])
elif package['build_cmd'] == 'build_intel_ixgbevf':
- build_intel_ixgbevf()
+ build_intel(package['name'], package['commit_id'], package['scm_url'])
+ elif package['build_cmd'] == 'build_mellanox_ofed':
+ build_mellanox_ofed()
+ elif package['build_cmd'] == 'build_realtek_r8152':
+ build_realtek_r8152()
elif package['build_cmd'] == 'build_jool':
build_jool()
+ elif package['build_cmd'] == 'build_ipt_netflow':
+ build_ipt_netflow(package['commit_id'], package['scm_url'])
elif package['build_cmd'] == 'build_openvpn_dco':
build_openvpn_dco(package['commit_id'], package['scm_url'])
+ create_tarball(f'{package["name"]}-{package["commit_id"]}', f'{package["name"]}')
elif package['build_cmd'] == 'build_nat_rtsp':
build_nat_rtsp(package['commit_id'], package['scm_url'])
else:
@@ -173,20 +219,32 @@ def build_intel_qat():
run(['./build-intel-qat.sh'], check=True)
-def build_intel_ixgbe():
- """Build Intel IXGBE"""
- run(['./build-intel-ixgbe.sh'], check=True)
+def build_intel(driver_name: str, commit_id: str, scm_url: str):
+ """Build Intel driver from Git repository"""
+ repo_dir = Path(f'ethernet-linux-{driver_name}')
+ clone_or_update_repo(repo_dir, scm_url, commit_id)
+ run(['./build-intel-nic.sh', driver_name], check=True)
+
+def build_mellanox_ofed():
+ """Build Mellanox OFED"""
+ run(['sudo', './build-mellanox-ofed.sh'], check=True)
-def build_intel_ixgbevf():
- """Build Intel IXGBEVF"""
- run(['./build-intel-ixgbevf.sh'], check=True)
+
+def build_realtek_r8152():
+ """Build Realtek r8152"""
+ run(['sudo', './build-realtek-r8152.py'], check=True)
def build_jool():
"""Build Jool"""
run(['echo y | ./build-jool.py'], check=True, shell=True)
+def build_ipt_netflow(commit_id, scm_url):
+ """Build ipt_NETFLOW"""
+ repo_dir = Path('ipt-netflow')
+ clone_or_update_repo(repo_dir, scm_url, commit_id)
+ run(['./build-ipt-netflow.sh'], check=True)
def build_openvpn_dco(commit_id, scm_url):
"""Build OpenVPN DCO"""
diff --git a/scripts/package-build/linux-kernel/package.toml b/scripts/package-build/linux-kernel/package.toml
index 8b030da0..0bbd6785 100644
--- a/scripts/package-build/linux-kernel/package.toml
+++ b/scripts/package-build/linux-kernel/package.toml
@@ -22,7 +22,6 @@ commit_id = "1.13.0"
scm_url = "https://github.com/accel-ppp/accel-ppp.git"
build_cmd = "build_accel_ppp"
-
[[packages]]
name = "ovpn-dco"
commit_id = "v0.2.20231117"
@@ -35,7 +34,6 @@ commit_id = "475af0a"
scm_url = "https://github.com/maru-sama/rtsp-linux.git"
build_cmd = "build_nat_rtsp"
-
[[packages]]
name = "qat"
commit_id = ""
@@ -43,15 +41,21 @@ scm_url = ""
build_cmd = "build_intel_qat"
[[packages]]
+name = "igb"
+commit_id = "v5.18.7"
+scm_url = "https://github.com/intel/ethernet-linux-igb"
+build_cmd = "build_intel_igb"
+
+[[packages]]
name = "ixgbe"
-commit_id = ""
-scm_url = ""
+commit_id = "v6.0.5"
+scm_url = "https://github.com/intel/ethernet-linux-ixgbe"
build_cmd = "build_intel_ixgbe"
[[packages]]
name = "ixgbevf"
-commit_id = ""
-scm_url = ""
+commit_id = "v5.0.2"
+scm_url = "https://github.com/intel/ethernet-linux-ixgbevf"
build_cmd = "build_intel_ixgbevf"
[[packages]]
@@ -60,3 +64,20 @@ commit_id = ""
scm_url = ""
build_cmd = "build_jool"
+[[packages]]
+name = "mlnx"
+commit_id = ""
+scm_url = ""
+build_cmd = "build_mellanox_ofed"
+
+[[packages]]
+name = "realtek-r8152"
+commit_id = ""
+scm_url = ""
+build_cmd = "build_realtek_r8152"
+
+[[packages]]
+name = "ipt-netflow"
+commit_id = "0eb2092e93"
+scm_url = "https://github.com/aabc/ipt-netflow"
+build_cmd = "build_ipt_netflow"
diff --git a/scripts/package-build/linux-kernel/patches b/scripts/package-build/linux-kernel/patches
deleted file mode 120000
index fd016d35..00000000
--- a/scripts/package-build/linux-kernel/patches
+++ /dev/null
@@ -1 +0,0 @@
-../../../packages/linux-kernel/patches
\ No newline at end of file
diff --git a/packages/linux-kernel/patches/accel-ppp/0001-L2TP-Include-Calling-Number-to-Calling-Station-ID-RA.patch b/scripts/package-build/linux-kernel/patches/accel-ppp/0001-L2TP-Include-Calling-Number-to-Calling-Station-ID-RA.patch
index 0c3141a0..0c3141a0 100644
--- a/packages/linux-kernel/patches/accel-ppp/0001-L2TP-Include-Calling-Number-to-Calling-Station-ID-RA.patch
+++ b/scripts/package-build/linux-kernel/patches/accel-ppp/0001-L2TP-Include-Calling-Number-to-Calling-Station-ID-RA.patch
diff --git a/scripts/package-build/linux-kernel/patches/accel-ppp/0002-Radius-Dns-Server-IPv6-Address.patch b/scripts/package-build/linux-kernel/patches/accel-ppp/0002-Radius-Dns-Server-IPv6-Address.patch
new file mode 100644
index 00000000..a8991801
--- /dev/null
+++ b/scripts/package-build/linux-kernel/patches/accel-ppp/0002-Radius-Dns-Server-IPv6-Address.patch
@@ -0,0 +1,195 @@
+From: Ben Hardill <ben@hardill.me.uk>
+Date: Tue, 13 Mar 2025 05:00:00 +0000
+Subject: [PATCH] PPPoE: IPv6 DNS from Radius - managing the DNS-Server-IPv6-Address attribute
+
+Patch authored by Ben Hardill from
+https://github.com/accel-ppp/accel-ppp/pull/69
+---
+diff --git a/accel-pppd/include/ap_session.h b/accel-pppd/include/ap_session.h
+index 70515133..507eae04 100644
+--- a/accel-pppd/include/ap_session.h
++++ b/accel-pppd/include/ap_session.h
+@@ -84,6 +84,7 @@ struct ap_session
+ struct ipv4db_item_t *ipv4;
+ struct ipv6db_item_t *ipv6;
+ struct ipv6db_prefix_t *ipv6_dp;
++ struct ipv6db_item_t *ipv6_dns;
+ char *ipv4_pool_name;
+ char *ipv6_pool_name;
+ char *dpv6_pool_name;
+diff --git a/accel-pppd/ipv6/dhcpv6.c b/accel-pppd/ipv6/dhcpv6.c
+index 158771b1..1ef48132 100644
+--- a/accel-pppd/ipv6/dhcpv6.c
++++ b/accel-pppd/ipv6/dhcpv6.c
+@@ -214,19 +214,41 @@ static void insert_status(struct dhcpv6_packet *pkt, struct dhcpv6_option *opt,
+ status->code = htons(code);
+ }
+
+-static void insert_oro(struct dhcpv6_packet *reply, struct dhcpv6_option *opt)
++static void insert_oro(struct dhcpv6_packet *reply, struct dhcpv6_option *opt, struct ap_session *ses)
+ {
+ struct dhcpv6_option *opt1;
+- int i, j;
++ int i = 0, j = 0, k = 0;
+ uint16_t *ptr;
+ struct in6_addr addr, *addr_ptr;
++ struct ipv6db_addr_t *dns;
+
+ for (i = ntohs(opt->hdr->len) / 2, ptr = (uint16_t *)opt->hdr->data; i; i--, ptr++) {
+ if (ntohs(*ptr) == D6_OPTION_DNS_SERVERS) {
+- if (conf_dns_count) {
+- opt1 = dhcpv6_option_alloc(reply, D6_OPTION_DNS_SERVERS, conf_dns_count * sizeof(addr));
+- for (j = 0, addr_ptr = (struct in6_addr *)opt1->hdr->data; j < conf_dns_count; j++, addr_ptr++)
+- memcpy(addr_ptr, conf_dns + j, sizeof(addr));
++ if (ses->ipv6_dns && !list_empty(&ses->ipv6_dns->addr_list)) {
++ list_for_each_entry(dns, &ses->ipv6_dns->addr_list, entry) {
++ j++;
++ }
++ if (j >= 3) {
++ j = 3;
++ }
++ opt1 = dhcpv6_option_alloc(reply, D6_OPTION_DNS_SERVERS, j * sizeof(addr));
++ addr_ptr = (struct in6_addr *)opt1->hdr->data;
++ list_for_each_entry(dns, &ses->ipv6_dns->addr_list, entry) {
++ if (k < j) {
++ memcpy(addr_ptr, &dns->addr, sizeof(addr));
++ k++;
++ addr_ptr++;
++ } else {
++ break;
++ }
++ }
++
++ } else {
++ if (conf_dns_count) {
++ opt1 = dhcpv6_option_alloc(reply, D6_OPTION_DNS_SERVERS, conf_dns_count * sizeof(addr));
++ for (j = 0, addr_ptr = (struct in6_addr *)opt1->hdr->data; j < conf_dns_count; j++, addr_ptr++)
++ memcpy(addr_ptr, conf_dns + j, sizeof(addr));
++ }
+ }
+ } else if (ntohs(*ptr) == D6_OPTION_DOMAIN_LIST) {
+ if (conf_dnssl_size) {
+@@ -434,7 +456,10 @@ static void dhcpv6_send_reply(struct dhcpv6_packet *req, struct dhcpv6_pd *pd, i
+
+ // Option Request
+ } else if (ntohs(opt->hdr->code) == D6_OPTION_ORO) {
+- insert_oro(reply, opt);
++ if (ses->ipv6_dns &&!list_empty(&ses->ipv6_dns->addr_list)) {
++ log_ppp_info2("User specific IPv6 DNS entries\n");
++ }
++ insert_oro(reply, opt, ses);
+
+ } else if (ntohs(opt->hdr->code) == D6_OPTION_RAPID_COMMIT) {
+ if (req->hdr->type == D6_SOLICIT)
+@@ -594,7 +619,7 @@ static void dhcpv6_send_reply2(struct dhcpv6_packet *req, struct dhcpv6_pd *pd,
+ }
+ // Option Request
+ } else if (ntohs(opt->hdr->code) == D6_OPTION_ORO)
+- insert_oro(reply, opt);
++ insert_oro(reply, opt, ses);
+ }
+
+ opt1 = dhcpv6_option_alloc(reply, D6_OPTION_PREFERENCE, 1);
+diff --git a/accel-pppd/ipv6/nd.c b/accel-pppd/ipv6/nd.c
+index 297e4d63..b3054274 100644
+--- a/accel-pppd/ipv6/nd.c
++++ b/accel-pppd/ipv6/nd.c
+@@ -174,7 +174,32 @@ static void ipv6_nd_send_ra(struct ipv6_nd_handler_t *h, struct sockaddr_in6 *ds
+ rinfo++;
+ }*/
+
+- if (conf_dns_count) {
++ if (ses->ipv6_dns && !list_empty(&ses->ipv6_dns->addr_list)) {
++ int i = 0, j = 0;
++ struct ipv6db_addr_t *dns;
++
++ list_for_each_entry(dns, &ses->ipv6_dns->addr_list, entry) {
++ i++;
++ }
++ if (i >= 3) {
++ i = 3;
++ }
++ rdnssinfo = (struct nd_opt_rdnss_info_local *)pinfo;
++ memset(rdnssinfo, 0, sizeof(*rdnssinfo));
++ rdnssinfo->nd_opt_rdnssi_type = ND_OPT_RDNSS_INFORMATION;
++ rdnssinfo->nd_opt_rdnssi_len = 1 + 2 * i;
++ rdnssinfo->nd_opt_rdnssi_lifetime = htonl(conf_rdnss_lifetime);
++ rdnss_addr = (struct in6_addr *)rdnssinfo->nd_opt_rdnssi;
++ list_for_each_entry(dns, &ses->ipv6_dns->addr_list, entry) {
++ if (j < i) {
++ memcpy(rdnss_addr, &dns->addr, sizeof(*rdnss_addr));
++ j++;
++ rdnss_addr++;
++ } else {
++ break;
++ }
++ }
++ } else if (conf_dns_count) {
+ rdnssinfo = (struct nd_opt_rdnss_info_local *)pinfo;
+ memset(rdnssinfo, 0, sizeof(*rdnssinfo));
+ rdnssinfo->nd_opt_rdnssi_type = ND_OPT_RDNSS_INFORMATION;
+diff --git a/accel-pppd/radius/radius.c b/accel-pppd/radius/radius.c
+index 786faa56..1379b0b2 100644
+--- a/accel-pppd/radius/radius.c
++++ b/accel-pppd/radius/radius.c
+@@ -403,6 +403,12 @@ int rad_proc_attrs(struct rad_req_t *req)
+ case Framed_IPv6_Route:
+ rad_add_framed_ipv6_route(attr->val.string, rpd);
+ break;
++ case DNS_Server_IPv6_Address:
++ a = _malloc(sizeof(*a));
++ memset(a, 0, sizeof(*a));
++ a->addr = attr->val.ipv6addr;
++ list_add_tail(&a->entry, &rpd->ipv6_dns.addr_list);
++ break;
+ }
+ }
+
+@@ -420,6 +426,9 @@ int rad_proc_attrs(struct rad_req_t *req)
+ if (!rpd->ses->ipv6_dp && !list_empty(&rpd->ipv6_dp.prefix_list))
+ rpd->ses->ipv6_dp = &rpd->ipv6_dp;
+
++ if (!rpd->ses->ipv6_dns && !list_empty(&rpd->ipv6_dns.addr_list))
++ rpd->ses->ipv6_dns = &rpd->ipv6_dns;
++
+ return res;
+ }
+
+@@ -584,10 +593,12 @@ static void ses_starting(struct ap_session *ses)
+ INIT_LIST_HEAD(&rpd->plugin_list);
+ INIT_LIST_HEAD(&rpd->ipv6_addr.addr_list);
+ INIT_LIST_HEAD(&rpd->ipv6_dp.prefix_list);
++ INIT_LIST_HEAD(&rpd->ipv6_dns.addr_list);
+
+ rpd->ipv4_addr.owner = &ipdb;
+ rpd->ipv6_addr.owner = &ipdb;
+ rpd->ipv6_dp.owner = &ipdb;
++ rpd->ipv6_dns.owner = &ipdb;
+
+ list_add_tail(&rpd->pd.entry, &ses->pd_list);
+
+@@ -764,6 +775,12 @@ static void ses_finished(struct ap_session *ses)
+ _free(a);
+ }
+
++ while (!list_empty(&rpd->ipv6_dns.addr_list)) {
++ a = list_entry(rpd->ipv6_dns.addr_list.next, typeof(*a), entry);
++ list_del(&a->entry);
++ _free(a);
++ }
++
+ fr6 = rpd->fr6;
+ while (fr6) {
+ struct framed_ip6_route *next = fr6->next;
+diff --git a/accel-pppd/radius/radius_p.h b/accel-pppd/radius/radius_p.h
+index 988f154f..eaa5acb0 100644
+--- a/accel-pppd/radius/radius_p.h
++++ b/accel-pppd/radius/radius_p.h
+@@ -65,6 +65,7 @@ struct radius_pd_t {
+ struct ipv4db_item_t ipv4_addr;
+ struct ipv6db_item_t ipv6_addr;
+ struct ipv6db_prefix_t ipv6_dp;
++ struct ipv6db_item_t ipv6_dns;
+ int acct_interim_interval;
+ int acct_interim_jitter;
+
diff --git a/packages/linux-kernel/patches/ixgbe/allow_unsupported_sfp.patch b/scripts/package-build/linux-kernel/patches/ixgbe/0001-ixgbe-always-enable-support-for-unsupported-SFP-modu.patch
index 647fe4d5..3f2cbb4f 100644
--- a/packages/linux-kernel/patches/ixgbe/allow_unsupported_sfp.patch
+++ b/scripts/package-build/linux-kernel/patches/ixgbe/0001-ixgbe-always-enable-support-for-unsupported-SFP-modu.patch
@@ -1,16 +1,16 @@
-From 4f6c1dc3c48a1b2fa7c06206e6366bcfaa33f3f7 Mon Sep 17 00:00:00 2001
+From a3ebb453f4a8c95fe3674d09646edb93946d450a Mon Sep 17 00:00:00 2001
From: Christian Breunig <christian@breunig.cc>
-Date: Fri, 22 Mar 2024 11:33:27 +0000
+Date: Sat, 15 Feb 2025 09:17:10 +0100
Subject: [PATCH] ixgbe: always enable support for unsupported SFP+ modules
---
- ixgbe_param.c | 10 +++++++---
+ src/ixgbe_param.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
-diff --git a/ixgbe_param.c b/ixgbe_param.c
-index 71197b7..dac33ca 100644
---- a/ixgbe_param.c
-+++ b/ixgbe_param.c
+diff --git a/src/ixgbe_param.c b/src/ixgbe_param.c
+index bba03ae..3f29492 100644
+--- a/src/ixgbe_param.c
++++ b/src/ixgbe_param.c
@@ -307,7 +307,7 @@ IXGBE_PARAM(LRO, "Large Receive Offload (0,1), default 0 = off");
* Default Value: 0
*/
@@ -20,7 +20,7 @@ index 71197b7..dac33ca 100644
/* Enable/disable support for DMA coalescing
*
-@@ -1133,8 +1133,8 @@ void ixgbe_check_options(struct ixgbe_adapter *adapter)
+@@ -1135,8 +1135,8 @@ void ixgbe_check_options(struct ixgbe_adapter *adapter)
struct ixgbe_option opt = {
.type = enable_option,
.name = "allow_unsupported_sfp",
@@ -31,7 +31,7 @@ index 71197b7..dac33ca 100644
};
#ifdef module_param_array
if (num_allow_unsupported_sfp > bd) {
-@@ -1150,7 +1150,11 @@ void ixgbe_check_options(struct ixgbe_adapter *adapter)
+@@ -1152,7 +1152,11 @@ void ixgbe_check_options(struct ixgbe_adapter *adapter)
}
#ifdef module_param_array
} else {
@@ -44,5 +44,5 @@ index 71197b7..dac33ca 100644
#endif
}
--
-2.39.2
+2.39.5
diff --git a/packages/linux-kernel/patches/ixgbe/add_1000base-bx_support.patch b/scripts/package-build/linux-kernel/patches/ixgbe/0002-BACKPORT-linux-v6.9-PATCH-ixgbe-Add-1000BASE-BX-supp.patch
index 6c536c38..924c248b 100644
--- a/packages/linux-kernel/patches/ixgbe/add_1000base-bx_support.patch
+++ b/scripts/package-build/linux-kernel/patches/ixgbe/0002-BACKPORT-linux-v6.9-PATCH-ixgbe-Add-1000BASE-BX-supp.patch
@@ -1,7 +1,7 @@
-From 02491fc5cb9bfd0905cfa481d3a6156167fa1720 Mon Sep 17 00:00:00 2001
-From: Ernesto Castellotti <ernesto@castellotti.net>
-Date: Sat, 23 Mar 2024 12:57:56 +0100
-Subject: [BACKPORT linux v6.9] [PATCH] ixgbe: Add 1000BASE-BX support
+From 0ef6088d0d93fcda7adee59fe675f96bcae36c13 Mon Sep 17 00:00:00 2001
+From: Christian Breunig <christian@breunig.cc>
+Date: Sat, 15 Feb 2025 09:17:35 +0100
+Subject: [PATCH] [BACKPORT linux v6.9] [PATCH] ixgbe: Add 1000BASE-BX support
Added support for 1000BASE-BX, i.e. Gigabit Ethernet over single strand
of single-mode fiber.
@@ -94,17 +94,17 @@ Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Link: https://lore.kernel.org/r/20240301184806.2634508-3-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
- ixgbe_82599.c | 4 +++-
- ixgbe_ethtool.c | 4 ++++
- ixgbe_phy.c | 33 +++++++++++++++++++++++++++++----
- ixgbe_phy.h | 2 ++
- ixgbe_type.h | 2 ++
+ src/ixgbe_82599.c | 4 +++-
+ src/ixgbe_ethtool.c | 4 ++++
+ src/ixgbe_phy.c | 33 +++++++++++++++++++++++++++++----
+ src/ixgbe_phy.h | 2 ++
+ src/ixgbe_type.h | 2 ++
5 files changed, 40 insertions(+), 5 deletions(-)
-diff --git a/ixgbe_82599.c b/ixgbe_82599.c
-index 75e368f..b0a10de 100644
---- a/ixgbe_82599.c
-+++ b/ixgbe_82599.c
+diff --git a/src/ixgbe_82599.c b/src/ixgbe_82599.c
+index c95fc4f..a5c74df 100644
+--- a/src/ixgbe_82599.c
++++ b/src/ixgbe_82599.c
@@ -395,7 +395,9 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
@@ -116,10 +116,10 @@ index 75e368f..b0a10de 100644
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
goto out;
-diff --git a/ixgbe_ethtool.c b/ixgbe_ethtool.c
-index 7ada455..fb16f3c 100644
---- a/ixgbe_ethtool.c
-+++ b/ixgbe_ethtool.c
+diff --git a/src/ixgbe_ethtool.c b/src/ixgbe_ethtool.c
+index e983035..7dc9343 100644
+--- a/src/ixgbe_ethtool.c
++++ b/src/ixgbe_ethtool.c
@@ -412,6 +412,8 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
@@ -138,11 +138,11 @@ index 7ada455..fb16f3c 100644
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
-diff --git a/ixgbe_phy.c b/ixgbe_phy.c
-index 647fdba..0f39fd8 100644
---- a/ixgbe_phy.c
-+++ b/ixgbe_phy.c
-@@ -1266,6 +1266,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+diff --git a/src/ixgbe_phy.c b/src/ixgbe_phy.c
+index 3d99a88..3632234 100644
+--- a/src/ixgbe_phy.c
++++ b/src/ixgbe_phy.c
+@@ -1268,6 +1268,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
u8 comp_codes_1g = 0;
u8 comp_codes_10g = 0;
u8 oui_bytes[3] = {0, 0, 0};
@@ -150,7 +150,7 @@ index 647fdba..0f39fd8 100644
u8 cable_tech = 0;
u8 cable_spec = 0;
u16 enforce_sfp = 0;
-@@ -1309,6 +1310,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+@@ -1311,6 +1312,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
@@ -163,7 +163,7 @@ index 647fdba..0f39fd8 100644
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
-@@ -1391,6 +1398,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+@@ -1393,6 +1400,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_lx_core1;
@@ -182,7 +182,7 @@ index 647fdba..0f39fd8 100644
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
-@@ -1481,7 +1500,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+@@ -1483,7 +1502,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
@@ -193,7 +193,7 @@ index 647fdba..0f39fd8 100644
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
-@@ -1500,7 +1521,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+@@ -1502,7 +1523,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
@@ -204,7 +204,7 @@ index 647fdba..0f39fd8 100644
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = IXGBE_SUCCESS;
-@@ -1819,12 +1842,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+@@ -1821,12 +1844,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
@@ -221,10 +221,10 @@ index 647fdba..0f39fd8 100644
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
-diff --git a/ixgbe_phy.h b/ixgbe_phy.h
-index 3ece00f..60c7574 100644
---- a/ixgbe_phy.h
-+++ b/ixgbe_phy.h
+diff --git a/src/ixgbe_phy.h b/src/ixgbe_phy.h
+index b6ddb2e..29c4645 100644
+--- a/src/ixgbe_phy.h
++++ b/src/ixgbe_phy.h
@@ -18,6 +18,7 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
@@ -241,11 +241,11 @@ index 3ece00f..60c7574 100644
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
-diff --git a/ixgbe_type.h b/ixgbe_type.h
-index d85bd9b..fbe2e66 100644
---- a/ixgbe_type.h
-+++ b/ixgbe_type.h
-@@ -3705,6 +3705,8 @@ enum ixgbe_sfp_type {
+diff --git a/src/ixgbe_type.h b/src/ixgbe_type.h
+index 1700599..403687c 100644
+--- a/src/ixgbe_type.h
++++ b/src/ixgbe_type.h
+@@ -3722,6 +3722,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_1g_lx_core0 = 13,
ixgbe_sfp_type_1g_lx_core1 = 14,
@@ -255,5 +255,5 @@ index d85bd9b..fbe2e66 100644
ixgbe_sfp_type_unknown = 0xFFFF
};
--
-2.44.0
+2.39.5
diff --git a/packages/linux-kernel/patches/kernel/0001-linkstate-ip-device-attribute.patch b/scripts/package-build/linux-kernel/patches/kernel/0001-linkstate-ip-device-attribute.patch
index bedcec6e..7bd0b04b 100644
--- a/packages/linux-kernel/patches/kernel/0001-linkstate-ip-device-attribute.patch
+++ b/scripts/package-build/linux-kernel/patches/kernel/0001-linkstate-ip-device-attribute.patch
@@ -88,10 +88,10 @@ index cf592d7b630f..e8915701aa73 100644
};
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
-index bc74f131fe4d..9cdd5b50f9b2 100644
+index c33b1ecc591e..7576d51cd16d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
-@@ -2595,6 +2595,7 @@ static struct devinet_sysctl_table {
+@@ -2609,6 +2609,7 @@ static struct devinet_sysctl_table {
"route_localnet"),
DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
"drop_unicast_in_l2_multicast"),
@@ -100,10 +100,10 @@ index bc74f131fe4d..9cdd5b50f9b2 100644
};
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
-index a9358c796a81..7e39846f556b 100644
+index 8360939acf85..b13832a08d28 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
-@@ -5657,6 +5657,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
+@@ -5674,6 +5674,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
@@ -111,7 +111,7 @@ index a9358c796a81..7e39846f556b 100644
}
static inline size_t inet6_ifla6_size(void)
-@@ -7086,6 +7087,13 @@ static const struct ctl_table addrconf_sysctl[] = {
+@@ -7103,6 +7104,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.extra1 = (void *)SYSCTL_ZERO,
.extra2 = (void *)SYSCTL_ONE,
},
@@ -126,10 +126,10 @@ index a9358c796a81..7e39846f556b 100644
.procname = "ioam6_id",
.data = &ipv6_devconf.ioam6_id,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
-index eb3afaee62e8..0f8670e74cc7 100644
+index 5715d54f3d0b..e88971b512ba 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
-@@ -679,6 +679,14 @@ static inline void rt6_probe(struct fib6_nh *fib6_nh)
+@@ -682,6 +682,14 @@ static inline void rt6_probe(struct fib6_nh *fib6_nh)
}
#endif
@@ -144,7 +144,7 @@ index eb3afaee62e8..0f8670e74cc7 100644
/*
* Default Router Selection (RFC 2461 6.3.6)
*/
-@@ -720,6 +728,8 @@ static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
+@@ -723,6 +731,8 @@ static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
if (!m && (strict & RT6_LOOKUP_F_IFACE))
return RT6_NUD_FAIL_HARD;
@@ -154,5 +154,5 @@ index eb3afaee62e8..0f8670e74cc7 100644
m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
--
-2.39.2
+2.39.5
diff --git a/packages/linux-kernel/patches/kernel/0002-inotify-support-for-stackable-filesystems.patch b/scripts/package-build/linux-kernel/patches/kernel/0002-inotify-support-for-stackable-filesystems.patch
index b19a8d25..b19a8d25 100644
--- a/packages/linux-kernel/patches/kernel/0002-inotify-support-for-stackable-filesystems.patch
+++ b/scripts/package-build/linux-kernel/patches/kernel/0002-inotify-support-for-stackable-filesystems.patch
diff --git a/scripts/package-build/linux-kernel/sign-modules.sh b/scripts/package-build/linux-kernel/sign-modules.sh
new file mode 100755
index 00000000..cfb368eb
--- /dev/null
+++ b/scripts/package-build/linux-kernel/sign-modules.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+BASE_DIR=$(dirname "$0")
+MODULE_DIR="$1"
+. "${BASE_DIR}/kernel-vars"
+
+SIGN_FILE="${KERNEL_DIR}/scripts/sign-file"
+
+if [ -f "${EPHEMERAL_KEY}" ] && [ -f "${EPHEMERAL_CERT}" ]; then
+    find "${MODULE_DIR}" -type f -name \*.ko | while read -r MODULE; do
+        echo "I: Signing ${MODULE} ..."
+        "${SIGN_FILE}" sha512 "${EPHEMERAL_KEY}" "${EPHEMERAL_CERT}" "${MODULE}"
+ done
+fi
+
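A minimal usage sketch for the new helper, assuming kernel-vars exports KERNEL_DIR, EPHEMERAL_KEY and EPHEMERAL_CERT (the script sources it for exactly those names); the module path below is illustrative:

    # Sign every .ko below the staging directory with the ephemeral key;
    # silently a no-op when the key pair was not generated.
    ./sign-modules.sh /path/to/staged/lib/modules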
diff --git a/scripts/package-build/ndppd/.gitignore b/scripts/package-build/ndppd/.gitignore
index 2b71e9fb..4983088e 100644
--- a/scripts/package-build/ndppd/.gitignore
+++ b/scripts/package-build/ndppd/.gitignore
@@ -1,7 +1 @@
-ndppd/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
+/ndppd/
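The leading slash anchors the pattern to this .gitignore's own directory, so only the checked-out ndppd/ source tree is ignored rather than any same-named directory deeper in the tree; the per-package artifact globs (*.deb and friends) drop out here, presumably consolidated higher up. A quick check of the effect:

    git check-ignore -v scripts/package-build/ndppd/ndppd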
diff --git a/scripts/package-build/ndppd/patches/0001-skip-route-table-if-there-is-no-auto-rule.patch b/scripts/package-build/ndppd/patches/0001-skip-route-table-if-there-is-no-auto-rule.patch
deleted file mode 100644
index df6d2e5c..00000000
--- a/scripts/package-build/ndppd/patches/0001-skip-route-table-if-there-is-no-auto-rule.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From b148ba055245cec5007ee91dd3ffbfeb58d49c5a Mon Sep 17 00:00:00 2001
-From: Henning Surmeier <me@hensur.de>
-Date: Sun, 9 Jan 2022 20:35:15 +0100
-Subject: [PATCH 1/2] skip route table if there is no auto rule
-
----
- src/ndppd.cc | 3 ++-
- src/rule.cc | 8 ++++++++
- src/rule.h | 4 ++++
- 3 files changed, 14 insertions(+), 1 deletion(-)
-
-diff --git a/src/ndppd.cc b/src/ndppd.cc
-index bec9656..b303721 100644
---- a/src/ndppd.cc
-+++ b/src/ndppd.cc
-@@ -304,7 +304,8 @@ int main(int argc, char* argv[], char* env[])
- t1.tv_sec = t2.tv_sec;
- t1.tv_usec = t2.tv_usec;
-
-- route::update(elapsed_time);
-+ if (rule::any_auto())
-+ route::update(elapsed_time);
- session::update_all(elapsed_time);
- }
-
-diff --git a/src/rule.cc b/src/rule.cc
-index 9e72480..a1e8376 100644
---- a/src/rule.cc
-+++ b/src/rule.cc
-@@ -24,6 +24,8 @@
-
- NDPPD_NS_BEGIN
-
-+bool rule::_any_aut = false;
-+
- rule::rule()
- {
- }
-@@ -49,6 +51,7 @@ ptr<rule> rule::create(const ptr<proxy>& pr, const address& addr, bool aut)
- ru->_pr = pr;
- ru->_addr = addr;
- ru->_aut = aut;
-+ _any_aut = _any_aut || aut;
-
- logger::debug()
- << "rule::create() if=" << pr->ifa()->name().c_str() << ", addr=" << addr
-@@ -57,6 +60,11 @@ ptr<rule> rule::create(const ptr<proxy>& pr, const address& addr, bool aut)
- return ru;
- }
-
-+bool rule::any_auto()
-+{
-+ return _any_aut;
-+}
-+
- const address& rule::addr() const
- {
- return _addr;
-diff --git a/src/rule.h b/src/rule.h
-index 6663066..ca2aa36 100644
---- a/src/rule.h
-+++ b/src/rule.h
-@@ -42,6 +42,8 @@ public:
-
- bool check(const address& addr) const;
-
-+ static bool any_auto();
-+
- private:
- weak_ptr<rule> _ptr;
-
-@@ -53,6 +55,8 @@ private:
-
- bool _aut;
-
-+ static bool _any_aut;
-+
- rule();
- };
-
---
-2.34.1
-
diff --git a/scripts/package-build/ndppd/patches/0002-set-vyos-version.patch b/scripts/package-build/ndppd/patches/0002-set-vyos-version.patch
deleted file mode 100644
index 3fef87c4..00000000
--- a/scripts/package-build/ndppd/patches/0002-set-vyos-version.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From b0789cf679b0179d37e22f5a936af273d982abeb Mon Sep 17 00:00:00 2001
-From: Henning Surmeier <me@hensur.de>
-Date: Tue, 11 Jan 2022 13:05:47 +0100
-Subject: [PATCH 2/2] set -vyos version
-
----
- src/ndppd.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/src/ndppd.h b/src/ndppd.h
-index 008726c..61ed950 100644
---- a/src/ndppd.h
-+++ b/src/ndppd.h
-@@ -21,7 +21,7 @@
- #define NDPPD_NS_BEGIN namespace ndppd {
- #define NDPPD_NS_END }
-
--#define NDPPD_VERSION "0.2.4"
-+#define NDPPD_VERSION "0.2.5-vyos"
-
- #include <assert.h>
-
---
-2.34.1
-
diff --git a/packages/ndppd/patches/0001-skip-route-table-if-there-is-no-auto-rule.patch b/scripts/package-build/ndppd/patches/ndppd/0001-skip-route-table-if-there-is-no-auto-rule.patch
index df6d2e5c..df6d2e5c 100644
--- a/packages/ndppd/patches/0001-skip-route-table-if-there-is-no-auto-rule.patch
+++ b/scripts/package-build/ndppd/patches/ndppd/0001-skip-route-table-if-there-is-no-auto-rule.patch
diff --git a/packages/ndppd/patches/0002-set-vyos-version.patch b/scripts/package-build/ndppd/patches/ndppd/0002-set-vyos-version.patch
index 3fef87c4..3fef87c4 100644
--- a/packages/ndppd/patches/0002-set-vyos-version.patch
+++ b/scripts/package-build/ndppd/patches/ndppd/0002-set-vyos-version.patch
diff --git a/scripts/package-build/net-snmp/.gitignore b/scripts/package-build/net-snmp/.gitignore
index 67811e63..ce30b515 100644
--- a/scripts/package-build/net-snmp/.gitignore
+++ b/scripts/package-build/net-snmp/.gitignore
@@ -1,6 +1 @@
-net-snmp/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/net-snmp/
diff --git a/scripts/package-build/net-snmp/patches/add-linux-6.7-compatibility-parsing.patch b/scripts/package-build/net-snmp/patches/add-linux-6.7-compatibility-parsing.patch
deleted file mode 100644
index b6dcd77a..00000000
--- a/scripts/package-build/net-snmp/patches/add-linux-6.7-compatibility-parsing.patch
+++ /dev/null
@@ -1,119 +0,0 @@
-From f5ae6baf0018abda9dedc368fe6d52c0d7a8ab8f Mon Sep 17 00:00:00 2001
-From: Philippe Troin <phil+github-commits@fifi.org>
-Date: Sat, 3 Feb 2024 10:30:30 -0800
-Subject: [PATCH] Add Linux 6.7 compatibility parsing /proc/net/snmp
-
-Linux 6.7 adds a new OutTransmits field to Ip in /proc/net/snmp.
-This breaks the hard-coded assumptions about the Ip line length.
-Add compatibility to parse Linux 6.7 Ip header while keep support
-for previous versions.
----
- .../ip-mib/data_access/systemstats_linux.c | 46 +++++++++++++++----
- 1 file changed, 37 insertions(+), 9 deletions(-)
-
-diff --git a/agent/mibgroup/ip-mib/data_access/systemstats_linux.c b/agent/mibgroup/ip-mib/data_access/systemstats_linux.c
-index 49e0a34d5c..f04e828a94 100644
---- a/agent/mibgroup/ip-mib/data_access/systemstats_linux.c
-+++ b/agent/mibgroup/ip-mib/data_access/systemstats_linux.c
-@@ -36,7 +36,7 @@ netsnmp_access_systemstats_arch_init(void)
- }
-
- /*
-- /proc/net/snmp
-+ /proc/net/snmp - Linux 6.6 and lower
-
- Ip: Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates
- Ip: 2 64 7083534 0 0 0 0 0 6860233 6548963 0 0 1 286623 63322 1 259920 0 0
-@@ -49,6 +49,26 @@ netsnmp_access_systemstats_arch_init(void)
-
- Udp: InDatagrams NoPorts InErrors OutDatagrams
- Udp: 1491094 122 0 1466178
-+*
-+ /proc/net/snmp - Linux 6.7 and higher
-+
-+ Ip: Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates OutTransmits
-+ Ip: 1 64 50859058 496 0 37470604 0 0 20472980 7515791 1756 0 0 7264 3632 0 3548 0 7096 44961424
-+
-+ Icmp: InMsgs InErrors InCsumErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks InAddrMaskReps OutMsgs OutErrors OutRateLimitGlobal OutRateLimitHost OutDestUnreachs OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps
-+ Icmp: 114447 2655 0 17589 0 0 0 0 66905 29953 0 0 0 0 143956 0 0 572 16610 484 0 0 0 59957 66905 0 0 0 0
-+
-+ IcmpMsg: InType0 InType3 InType8 OutType0 OutType3 OutType8 OutType11
-+ IcmpMsg: 29953 17589 66905 66905 16610 59957 484
-+
-+ Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts InCsumErrors
-+ Tcp: 1 200 120000 -1 17744 13525 307 3783 6 18093137 9277788 3499 8 7442 0
-+
-+ Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti MemErrors
-+ Udp: 2257832 1422 0 2252835 0 0 0 84 0
-+
-+ UdpLite: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti MemErrors
-+ UdpLite: 0 0 0 0 0 0 0 0 0
- */
-
-
-@@ -101,10 +121,10 @@ _systemstats_v4(netsnmp_container* container, u_int load_flags)
- FILE *devin;
- char line[1024];
- netsnmp_systemstats_entry *entry = NULL;
-- int scan_count;
-+ int scan_count, expected_scan_count;
- char *stats, *start = line;
- int len;
-- unsigned long long scan_vals[19];
-+ unsigned long long scan_vals[20];
-
- DEBUGMSGTL(("access:systemstats:container:arch", "load v4 (flags %x)\n",
- load_flags));
-@@ -126,10 +146,17 @@ _systemstats_v4(netsnmp_container* container, u_int load_flags)
- */
- NETSNMP_IGNORE_RESULT(fgets(line, sizeof(line), devin));
- len = strlen(line);
-- if (224 != len) {
-+ switch (len) {
-+ case 224:
-+ expected_scan_count = 19;
-+ break;
-+ case 237:
-+ expected_scan_count = 20;
-+ break;
-+ default:
- fclose(devin);
- snmp_log(LOG_ERR, "systemstats_linux: unexpected header length in /proc/net/snmp."
-- " %d != 224\n", len);
-+ " %d not in { 224, 237 } \n", len);
- return -4;
- }
-
-@@ -178,20 +205,20 @@ _systemstats_v4(netsnmp_container* container, u_int load_flags)
- memset(scan_vals, 0x0, sizeof(scan_vals));
- scan_count = sscanf(stats,
- "%llu %llu %llu %llu %llu %llu %llu %llu %llu %llu"
-- "%llu %llu %llu %llu %llu %llu %llu %llu %llu",
-+ "%llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
- &scan_vals[0],&scan_vals[1],&scan_vals[2],
- &scan_vals[3],&scan_vals[4],&scan_vals[5],
- &scan_vals[6],&scan_vals[7],&scan_vals[8],
- &scan_vals[9],&scan_vals[10],&scan_vals[11],
- &scan_vals[12],&scan_vals[13],&scan_vals[14],
- &scan_vals[15],&scan_vals[16],&scan_vals[17],
-- &scan_vals[18]);
-+ &scan_vals[18],&scan_vals[19]);
- DEBUGMSGTL(("access:systemstats", " read %d values\n", scan_count));
-
-- if(scan_count != 19) {
-+ if(scan_count != expected_scan_count) {
- snmp_log(LOG_ERR,
- "error scanning systemstats data (expected %d, got %d)\n",
-- 19, scan_count);
-+ expected_scan_count, scan_count);
- netsnmp_access_systemstats_entry_free(entry);
- return -4;
- }
-@@ -223,6 +250,7 @@ _systemstats_v4(netsnmp_container* container, u_int load_flags)
- entry->stats.HCOutFragFails.high = scan_vals[17] >> 32;
- entry->stats.HCOutFragCreates.low = scan_vals[18] & 0xffffffff;
- entry->stats.HCOutFragCreates.high = scan_vals[18] >> 32;
-+ /* entry->stats. = scan_vals[19]; / * OutTransmits */
-
- entry->stats.columnAvail[IPSYSTEMSTATSTABLE_HCINRECEIVES] = 1;
- entry->stats.columnAvail[IPSYSTEMSTATSTABLE_INHDRERRORS] = 1;
diff --git a/packages/net-snmp/patches/add-linux-6.7-compatibility-parsing.patch b/scripts/package-build/net-snmp/patches/net-snmp/add-linux-6.7-compatibility-parsing.patch
index b6dcd77a..b6dcd77a 100644
--- a/packages/net-snmp/patches/add-linux-6.7-compatibility-parsing.patch
+++ b/scripts/package-build/net-snmp/patches/net-snmp/add-linux-6.7-compatibility-parsing.patch
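The patch itself moves unchanged (identical blob hash b6dcd77a on both sides). As its commit message explains, it keys the /proc/net/snmp parser on the length of the Ip header line: 224 bytes through Linux 6.6, 237 from Linux 6.7 once OutTransmits was added. Checking which layout a running kernel exposes:

    # byte count of the Ip header line, newline included (matches the
    # strlen() the patch compares against)
    head -1 /proc/net/snmp | wc -c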
diff --git a/scripts/package-build/netfilter/.gitignore b/scripts/package-build/netfilter/.gitignore
index 9bf39f82..ea401bf3 100644
--- a/scripts/package-build/netfilter/.gitignore
+++ b/scripts/package-build/netfilter/.gitignore
@@ -1,8 +1,2 @@
/pkg-libnftnl/
/pkg-nftables/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
diff --git a/scripts/package-build/netfilter/build.py b/scripts/package-build/netfilter/build.py
index 9737b7d3..3c76af73 100755..120000
--- a/scripts/package-build/netfilter/build.py
+++ b/scripts/package-build/netfilter/build.py
@@ -1,189 +1 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2024 VyOS maintainers and contributors
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 or later as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import glob
-import shutil
-import toml
-import os
-
-from argparse import ArgumentParser
-from pathlib import Path
-from subprocess import run, CalledProcessError
-
-
-def ensure_dependencies(dependencies: list) -> None:
- """Ensure Debian build dependencies are met"""
- if not dependencies:
- print("I: No additional dependencies to install")
- return
-
- print("I: Ensure Debian build dependencies are met")
- run(['sudo', 'apt-get', 'update'], check=True)
- run(['sudo', 'apt-get', 'install', '-y'] + dependencies, check=True)
-
-
-def apply_patches(repo_dir: Path, patch_dir: Path, package_name: str) -> None:
- """Apply patches from the patch directory to the repository"""
- package_patch_dir = patch_dir / package_name
- if package_patch_dir.exists() and package_patch_dir.is_dir():
- patches = list(package_patch_dir.glob('*'))
- else:
- print(f"I: No patch directory found for {package_name} in {patch_dir}")
- return
-
- # Filter out directories from patches list
- patches = [patch for patch in patches if patch.is_file()]
-
- if not patches:
- print(f"I: No patches found in {package_patch_dir}")
- return
-
- debian_patches_dir = repo_dir / 'debian/patches'
- debian_patches_dir.mkdir(parents=True, exist_ok=True)
-
- series_file = debian_patches_dir / 'series'
- with series_file.open('a') as series:
- for patch in patches:
- patch_dest = debian_patches_dir / patch.name
- try:
- # Ensure the patch file exists before copying
- if patch.exists():
- shutil.copy(patch, patch_dest)
- series.write(patch.name + '\n')
- print(f"I: Applied patch: {patch.name}")
- else:
- print(f"W: Patch file {patch} not found, skipping")
- except FileNotFoundError:
- print(f"W: Patch file {patch} not found, skipping")
-
-
-def prepare_package(repo_dir: Path, install_data: str) -> None:
- """Prepare a package"""
- if not install_data:
- print("I: No install data provided, skipping package preparation")
- return
-
- try:
- install_file = repo_dir / 'debian/install'
- install_file.parent.mkdir(parents=True, exist_ok=True)
- install_file.write_text(install_data)
- print("I: Prepared package")
- except Exception as e:
- print(f"Failed to prepare package: {e}")
- raise
-
-
-def build_package(package: dict, dependencies: list, patch_dir: Path) -> None:
- """Build a package from the repository
-
- Args:
- package (dict): Package information
- dependencies (list): List of additional dependencies
- patch_dir (Path): Directory containing patches
- """
- repo_name = package['name']
- repo_dir = Path(repo_name)
-
- try:
- # Clone the repository if it does not exist
- if not repo_dir.exists():
- run(['git', 'clone', package['scm_url'], str(repo_dir)], check=True)
-
- # Check out the specific commit
- run(['git', 'checkout', package['commit_id']], cwd=repo_dir, check=True)
-
- # Ensure dependencies
- ensure_dependencies(dependencies)
-
- # Apply patches if any
- apply_patches(repo_dir, patch_dir, repo_name)
-
- # Prepare the package if required
- if package.get('prepare_package', False):
- prepare_package(repo_dir, package.get('install_data', ''))
-
- # Build dependency package and install it
- if (repo_dir / 'debian/control').exists():
- try:
- run('sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"', cwd=repo_dir, check=True, shell=True)
- run('sudo dpkg -i *build-deps*.deb', cwd=repo_dir, check=True, shell=True)
- except CalledProcessError as e:
- print(f"Failed to build package {repo_name}: {e}")
-
- # Build the package, check if we have build_cmd in the package.toml
- build_cmd = package.get('build_cmd', 'dpkg-buildpackage -uc -us -tc -b')
- run(build_cmd, cwd=repo_dir, check=True, shell=True)
-
- except CalledProcessError as e:
- print(f"Failed to build package {repo_name}: {e}")
- finally:
- # Clean up repository directory
- # shutil.rmtree(repo_dir, ignore_errors=True)
- pass
-
-
-def cleanup_build_deps(repo_dir: Path) -> None:
- """Clean up build dependency packages"""
- try:
- if repo_dir.exists():
- for file in glob.glob(str(repo_dir / '*build-deps*.deb')):
- os.remove(file)
- print("Cleaned up build dependency packages")
- except Exception as e:
- print(f"Error cleaning up build dependencies: {e}")
-
-
-def copy_packages(repo_dir: Path) -> None:
- """Copy generated .deb packages to the parent directory"""
- try:
- deb_files = glob.glob(str(repo_dir / '*.deb'))
- for deb_file in deb_files:
- shutil.copy(deb_file, repo_dir.parent)
- print(f'I: copy generated "{deb_file}" package')
- except Exception as e:
- print(f"Error copying packages: {e}")
-
-
-if __name__ == '__main__':
- # Prepare argument parser
- arg_parser = ArgumentParser()
- arg_parser.add_argument('--config',
- default='package.toml',
- help='Path to the package configuration file')
- arg_parser.add_argument('--patch-dir',
- default='patches',
- help='Path to the directory containing patches')
- args = arg_parser.parse_args()
-
- # Load package configuration
- with open(args.config, 'r') as file:
- config = toml.load(file)
-
- packages = config['packages']
- patch_dir = Path(args.patch_dir)
-
- for package in packages:
- dependencies = package.get('dependencies', {}).get('packages', [])
-
- # Build the package
- build_package(package, dependencies, patch_dir)
-
- # Clean up build dependency packages after build
- cleanup_build_deps(Path(package['name']))
-
- # Copy generated .deb packages to parent directory
- copy_packages(Path(package['name']))
+../build.py
\ No newline at end of file
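The 189-line per-package driver becomes a symlink to the shared scripts/package-build/build.py. Assuming the shared copy keeps the CLI of the removed one (which defaulted --config to package.toml and --patch-dir to patches), a netfilter build is simply:

    cd scripts/package-build/netfilter
    ./build.py    # same as: ./build.py --config package.toml --patch-dir patches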
diff --git a/scripts/package-build/node_exporter/.gitignore b/scripts/package-build/node_exporter/.gitignore
new file mode 100644
index 00000000..25d6ffd3
--- /dev/null
+++ b/scripts/package-build/node_exporter/.gitignore
@@ -0,0 +1 @@
+node_exporter/
diff --git a/scripts/package-build/node_exporter/build.py b/scripts/package-build/node_exporter/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/node_exporter/build.py
@@ -0,0 +1 @@
+../build.py
\ No newline at end of file
diff --git a/scripts/package-build/node_exporter/package.toml b/scripts/package-build/node_exporter/package.toml
new file mode 100644
index 00000000..4540bc82
--- /dev/null
+++ b/scripts/package-build/node_exporter/package.toml
@@ -0,0 +1,21 @@
+[[packages]]
+name = "node_exporter"
+commit_id = "v1.9.1"
+scm_url = "https://github.com/prometheus/node_exporter"
+
+build_cmd = """
+
+# Create the install directory
+mkdir -p debian/usr/sbin
+make build
+
+# Move the node_exporter binary to the install directory
+mv node_exporter debian/usr/sbin
+
+# Build the Debian package
+fpm --input-type dir --output-type deb --name node-exporter \
+ --version $(git describe --tags --always | cut -c2-) --deb-compression gz \
+ --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
+ --description "Prometheus exporter for machine metrics" \
+ --license Apache-2.0 -C debian --package ..
+"""
diff --git a/scripts/package-build/opennhrp/package.toml b/scripts/package-build/opennhrp/package.toml
deleted file mode 100644
index d647c072..00000000
--- a/scripts/package-build/opennhrp/package.toml
+++ /dev/null
@@ -1,21 +0,0 @@
-[[packages]]
-name = "opennhrp"
-commit_id = "613277f"
-scm_url = "https://git.code.sf.net/p/opennhrp/code"
-
-build_cmd = """
-make clean
-make
-
-install --directory debian/etc debian/usr/sbin
-install --mode 0644 etc/racoon-ph1dead.sh debian/etc
-install --mode 0644 etc/racoon-ph1down.sh debian/etc
-install --strip --mode 0755 nhrp/opennhrp debian/usr/sbin
-install --strip --mode 0755 nhrp/opennhrpctl debian/usr/sbin
-
-fpm --input-type dir --output-type deb --name opennhrp \
- --version $(git describe --always | cut -c2-) --deb-compression gz \
- --maintainer "VyOS Package Maintainers <maintainers@vyos.net>" \
- --description "NBMA Next Hop Resolution Protocol daemon" \
- --license "MIT" -C debian --package ..
-"""
diff --git a/scripts/package-build/openvpn-otp/.gitignore b/scripts/package-build/openvpn-otp/.gitignore
index 7f89da2b..90268525 100644
--- a/scripts/package-build/openvpn-otp/.gitignore
+++ b/scripts/package-build/openvpn-otp/.gitignore
@@ -1,6 +1 @@
-openvpn-otp/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/openvpn-otp/
diff --git a/scripts/package-build/openvpn-otp/package.toml b/scripts/package-build/openvpn-otp/package.toml
index 72209ad1..bdbc6d9d 100644
--- a/scripts/package-build/openvpn-otp/package.toml
+++ b/scripts/package-build/openvpn-otp/package.toml
@@ -1,6 +1,6 @@
[[packages]]
name = "openvpn-otp"
-commit_id = "master"
+commit_id = "9781ff1"
scm_url = "https://github.com/evgeny-gridasov/openvpn-otp"
# build_cmd = "cd ..; ./build-openvpn-otp.sh"
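Pinning commit_id to 9781ff1 instead of the moving master branch makes the checkout reproducible; it mirrors the clone-then-checkout the build driver performs:

    git clone https://github.com/evgeny-gridasov/openvpn-otp
    git -C openvpn-otp checkout 9781ff1    # fixed commit, not a branch tip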
diff --git a/scripts/package-build/owamp/.gitignore b/scripts/package-build/owamp/.gitignore
index 4a97524e..c6efde63 100644
--- a/scripts/package-build/owamp/.gitignore
+++ b/scripts/package-build/owamp/.gitignore
@@ -1,6 +1 @@
-owamp/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/owamp/
diff --git a/scripts/package-build/pam_tacplus/.gitignore b/scripts/package-build/pam_tacplus/.gitignore
deleted file mode 100644
index 04e8d4e9..00000000
--- a/scripts/package-build/pam_tacplus/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-pam_tacplus/
-pam_tacplus-debian/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
diff --git a/scripts/package-build/pam_tacplus/package.toml b/scripts/package-build/pam_tacplus/package.toml
deleted file mode 100644
index 79b28544..00000000
--- a/scripts/package-build/pam_tacplus/package.toml
+++ /dev/null
@@ -1,19 +0,0 @@
-[[packages]]
-name = "pam_tacplus-debian"
-commit_id = "50c6fd7"
-scm_url = "https://github.com/kravietz/pam_tacplus-debian"
-build_cmd = "/bin/true"
-
-[[packages]]
-name = "pam_tacplus"
-#commit_id = "4f91b0d" # This commit cannot build the package
-commit_id = "b839c44"
-scm_url = "https://github.com/kravietz/pam_tacplus"
-
-#build_cmd = "sudo mk-build-deps --install --tool 'apt-get --yes --no-install-recommends'; cd ..; ./build.sh"
-build_cmd = """
-cp -a ../pam_tacplus-debian debian
-rm -f debian/compat
-sudo mk-build-deps --install --tool 'apt-get --yes --no-install-recommends'
-dpkg-buildpackage -uc -us -tc -b -d
-"""
diff --git a/scripts/package-build/pmacct/.gitignore b/scripts/package-build/pmacct/.gitignore
index 7007417a..65042174 100644
--- a/scripts/package-build/pmacct/.gitignore
+++ b/scripts/package-build/pmacct/.gitignore
@@ -1,6 +1 @@
-pmacct/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/pmacct/
diff --git a/scripts/package-build/pmacct/patches/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch b/scripts/package-build/pmacct/patches/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch
deleted file mode 100644
index cb5f7399..00000000
--- a/scripts/package-build/pmacct/patches/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From 58900c9d0f98f224577c28dc2323061d33823f39 Mon Sep 17 00:00:00 2001
-From: Paolo Lucente <pl+github@pmacct.net>
-Date: Fri, 4 Mar 2022 22:07:29 +0000
-Subject: [PATCH] * fix, pmacctd: SEGV when ICMP/ICMPv6 traffic was processed
- and 'flows' primitive was enabled. To address Issue #586
-
----
- src/nl.c | 12 +++---------
- 1 file changed, 3 insertions(+), 9 deletions(-)
-
-diff --git a/src/nl.c b/src/nl.c
-index c42689ed..6a3da94b 100644
---- a/src/nl.c
-+++ b/src/nl.c
-@@ -1,6 +1,6 @@
- /*
- pmacct (Promiscuous mode IP Accounting package)
-- pmacct is Copyright (C) 2003-2021 by Paolo Lucente
-+ pmacct is Copyright (C) 2003-2022 by Paolo Lucente
- */
-
- /*
-@@ -293,10 +293,7 @@ int ip_handler(register struct packet_ptrs *pptrs)
- }
- }
- else {
-- if (pptrs->l4_proto != IPPROTO_ICMP) {
-- pptrs->tlh_ptr = dummy_tlhdr;
-- }
--
-+ pptrs->tlh_ptr = dummy_tlhdr;
- if (off < caplen) pptrs->payload_ptr = ptr;
- }
-
-@@ -479,10 +476,7 @@ int ip6_handler(register struct packet_ptrs *pptrs)
- }
- }
- else {
-- if (pptrs->l4_proto != IPPROTO_ICMPV6) {
-- pptrs->tlh_ptr = dummy_tlhdr;
-- }
--
-+ pptrs->tlh_ptr = dummy_tlhdr;
- if (off < caplen) pptrs->payload_ptr = ptr;
- }
-
---
-2.34.1
-
diff --git a/packages/pmacct/patches/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch b/scripts/package-build/pmacct/patches/pmacct/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch
index cb5f7399..cb5f7399 100644
--- a/packages/pmacct/patches/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch
+++ b/scripts/package-build/pmacct/patches/pmacct/0001-fix-pmacctd-SEGV-when-ICMP-ICMPv6-traffic-was-proces.patch
diff --git a/scripts/package-build/podman/.gitignore b/scripts/package-build/podman/.gitignore
index 22c40b0e..dfba60a6 100644
--- a/scripts/package-build/podman/.gitignore
+++ b/scripts/package-build/podman/.gitignore
@@ -1,7 +1 @@
-podman/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
+/podman/
diff --git a/scripts/package-build/podman/package.toml b/scripts/package-build/podman/package.toml
index 952af518..707f3d7e 100644
--- a/scripts/package-build/podman/package.toml
+++ b/scripts/package-build/podman/package.toml
@@ -20,7 +20,7 @@ fpm --input-type dir --output-type deb --name podman \
--license "Apache License 2.0" -C podman-v$VERSION --package ..
"""
-[packages.dependencies]
+[dependencies]
packages = [
"libseccomp-dev",
"libgpgme-dev"
diff --git a/scripts/package-build/pyhumps/.gitignore b/scripts/package-build/pyhumps/.gitignore
index 6a90d1c9..27979294 100644
--- a/scripts/package-build/pyhumps/.gitignore
+++ b/scripts/package-build/pyhumps/.gitignore
@@ -1,7 +1 @@
-humps/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-
+/humps/
diff --git a/scripts/package-build/radvd/.gitignore b/scripts/package-build/radvd/.gitignore
index 9c37832b..b3761965 100644
--- a/scripts/package-build/radvd/.gitignore
+++ b/scripts/package-build/radvd/.gitignore
@@ -1,6 +1 @@
-radvd/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/radvd/
diff --git a/scripts/package-build/radvd/package.toml b/scripts/package-build/radvd/package.toml
index e44afa18..83b9936b 100644
--- a/scripts/package-build/radvd/package.toml
+++ b/scripts/package-build/radvd/package.toml
@@ -1,6 +1,6 @@
[[packages]]
name = "radvd"
-commit_id = "f2de4764559"
+commit_id = "v2.20"
scm_url = "https://github.com/radvd-project/radvd"
#build_cmd = "cd ..; ./build.sh"
diff --git a/scripts/package-build/strongswan/.gitignore b/scripts/package-build/strongswan/.gitignore
index ec612740..e4c36e8f 100644
--- a/scripts/package-build/strongswan/.gitignore
+++ b/scripts/package-build/strongswan/.gitignore
@@ -1,6 +1 @@
-strongswan/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/strongswan/
diff --git a/scripts/package-build/strongswan/build-vici.sh b/scripts/package-build/strongswan/build-vici.sh
index 5ad0ee80..75b180f0 100755
--- a/scripts/package-build/strongswan/build-vici.sh
+++ b/scripts/package-build/strongswan/build-vici.sh
@@ -4,7 +4,7 @@ set -e
SRC="strongswan/src/libcharon/plugins/vici/python"
if [ ! -d ${SRC} ]; then
- echo "Source directory does not exists, please 'git clone'"
+ echo "Source directory does not exist, please 'git clone'"
exit 1
fi
@@ -28,30 +28,31 @@ Depends: \${misc:Depends}, \${python3:Depends}
Description: Native Python interface for strongSwan's VICI protocol
EOF
-
# Create rules file
-echo "I: create $SRC/rules"
+echo "I: create $SRC/debian/rules"
cat <<EOF > debian/rules
#!/usr/bin/make -f
%:
dh \$@ --with python3
EOF
-# Make the rules file executable
chmod +x debian/rules
echo '10' > debian/compat
+# Add the 'install' file to copy the vici package to the correct directory
+echo "I: create $SRC/debian/install"
+cat <<EOF > debian/install
+vici /usr/lib/python3/dist-packages/
+EOF
+
# Copy changelog
cp ../../../../../debian/changelog debian/
-
-ls -la
-pwd
-
-
+# Build the package
echo "I: Build Debian Package"
dpkg-buildpackage -uc -us -tc -b -d
+# Copy the resulting .deb packages
echo "I: copy packages"
-cp ../*.deb ../../../../../../
+cp ../*.deb ../../../../../../
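The new debian/install file is a standard dh_install map: the vici source directory is copied into /usr/lib/python3/dist-packages/ inside the staged package. A quick verification sketch (the resulting package file name is illustrative):

    dpkg-deb --contents python3-vici_*.deb | grep dist-packages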
diff --git a/scripts/package-build/strongswan/package.toml b/scripts/package-build/strongswan/package.toml
index 8cedd4ac..a5722062 100644
--- a/scripts/package-build/strongswan/package.toml
+++ b/scripts/package-build/strongswan/package.toml
@@ -13,7 +13,7 @@ dpkg-buildpackage -uc -us -tc -b -d
cd ..; ./build-vici.sh
"""
-[packages.dependencies]
+[dependencies]
packages = [
"bison",
"bzip2",
diff --git a/scripts/package-build/strongswan/patches/0001-charon-add-optional-source-and-remote-overrides-for-.patch b/scripts/package-build/strongswan/patches/0001-charon-add-optional-source-and-remote-overrides-for-.patch
deleted file mode 100644
index ceb47350..00000000
--- a/scripts/package-build/strongswan/patches/0001-charon-add-optional-source-and-remote-overrides-for-.patch
+++ /dev/null
@@ -1,579 +0,0 @@
-From db627ec8a8e72bc6b23dc8ab00f4e6b4f448d01c Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
-Date: Mon, 21 Sep 2015 13:41:58 +0300
-Subject: [PATCH 1/3] charon: add optional source and remote overrides for
- initiate
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This introduces support for specifying optional IKE SA specific
-source and remote address for child sa initiation. This allows
-to initiate wildcard connection for known address via vici.
-
-In addition this allows impler implementation of trap-any patches
-and is a prerequisite for dmvpn support.
-
-Signed-off-by: Timo Teräs <timo.teras@iki.fi>
----
- src/charon-cmd/cmd/cmd_connection.c | 2 +-
- src/libcharon/control/controller.c | 42 +++++++++++-
- src/libcharon/control/controller.h | 3 +
- src/libcharon/plugins/stroke/stroke_control.c | 5 +-
- src/libcharon/plugins/vici/vici_config.c | 2 +-
- src/libcharon/plugins/vici/vici_control.c | 64 ++++++++++++++++---
- .../processing/jobs/start_action_job.c | 2 +-
- src/libcharon/sa/ike_sa_manager.c | 50 ++++++++++++++-
- src/libcharon/sa/ike_sa_manager.h | 8 ++-
- src/libcharon/sa/trap_manager.c | 44 +++++--------
- src/swanctl/commands/initiate.c | 40 +++++++++++-
- 11 files changed, 215 insertions(+), 47 deletions(-)
-
-diff --git a/src/charon-cmd/cmd/cmd_connection.c b/src/charon-cmd/cmd/cmd_connection.c
-index 2e2cb3c..b9369a8 100644
---- a/src/charon-cmd/cmd/cmd_connection.c
-+++ b/src/charon-cmd/cmd/cmd_connection.c
-@@ -439,7 +439,7 @@ static job_requeue_t initiate(private_cmd_connection_t *this)
- child_cfg = create_child_cfg(this, peer_cfg);
-
- if (charon->controller->initiate(charon->controller, peer_cfg, child_cfg,
-- controller_cb_empty, NULL, LEVEL_SILENT, 0, FALSE) != SUCCESS)
-+ NULL, NULL, controller_cb_empty, NULL, LEVEL_SILENT, 0, FALSE) != SUCCESS)
- {
- terminate(pid);
- }
-diff --git a/src/libcharon/control/controller.c b/src/libcharon/control/controller.c
-index 027f48e..4ce8616 100644
---- a/src/libcharon/control/controller.c
-+++ b/src/libcharon/control/controller.c
-@@ -15,6 +15,28 @@
- * for more details.
- */
-
-+/*
-+ * Copyright (C) 2014 Timo Teräs <timo.teras@iki.fi>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to deal
-+ * in the Software without restriction, including without limitation the rights
-+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+ * copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-+ * THE SOFTWARE.
-+ */
-+
- #include "controller.h"
-
- #include <sys/types.h>
-@@ -107,6 +129,16 @@ struct interface_listener_t {
- */
- ike_sa_t *ike_sa;
-
-+ /**
-+ * Our host hint.
-+ */
-+ host_t *my_host;
-+
-+ /**
-+ * Other host hint.
-+ */
-+ host_t *other_host;
-+
- /**
- * unique ID, used for various methods
- */
-@@ -417,10 +449,15 @@ METHOD(job_t, initiate_execute, job_requeue_t,
- ike_sa_t *ike_sa;
- interface_listener_t *listener = &job->listener;
- peer_cfg_t *peer_cfg = listener->peer_cfg;
-+ host_t *my_host = listener->my_host;
-+ host_t *other_host = listener->other_host;
-
- ike_sa = charon->ike_sa_manager->checkout_by_config(charon->ike_sa_manager,
-- peer_cfg);
-+ peer_cfg, my_host, other_host);
- peer_cfg->destroy(peer_cfg);
-+ DESTROY_IF(my_host);
-+ DESTROY_IF(other_host);
-+
- if (!ike_sa)
- {
- DESTROY_IF(listener->child_cfg);
-@@ -499,6 +536,7 @@ METHOD(job_t, initiate_execute, job_requeue_t,
-
- METHOD(controller_t, initiate, status_t,
- private_controller_t *this, peer_cfg_t *peer_cfg, child_cfg_t *child_cfg,
-+ host_t *my_host, host_t *other_host,
- controller_cb_t callback, void *param, level_t max_level, u_int timeout,
- bool limits)
- {
-@@ -523,6 +561,8 @@ METHOD(controller_t, initiate, status_t,
- .status = FAILED,
- .child_cfg = child_cfg,
- .peer_cfg = peer_cfg,
-+ .my_host = my_host ? my_host->clone(my_host) : NULL,
-+ .other_host = other_host ? other_host->clone(other_host) : NULL,
- .lock = spinlock_create(),
- .options.limits = limits,
- },
-diff --git a/src/libcharon/control/controller.h b/src/libcharon/control/controller.h
-index 36a1d46..a130fbb 100644
---- a/src/libcharon/control/controller.h
-+++ b/src/libcharon/control/controller.h
-@@ -81,6 +81,8 @@ struct controller_t {
- *
- * @param peer_cfg peer_cfg to use for IKE_SA setup
- * @param child_cfg optional child_cfg to set up CHILD_SA from
-+ * @param my_host optional address hint for source
-+ * @param other_host optional address hint for destination
- * @param cb logging callback
- * @param param parameter to include in each call of cb
- * @param max_level maximum log level for which cb is invoked
-@@ -95,6 +97,7 @@ struct controller_t {
- */
- status_t (*initiate)(controller_t *this,
- peer_cfg_t *peer_cfg, child_cfg_t *child_cfg,
-+ host_t *my_host, host_t *other_host,
- controller_cb_t callback, void *param,
- level_t max_level, u_int timeout, bool limits);
-
-diff --git a/src/libcharon/plugins/stroke/stroke_control.c b/src/libcharon/plugins/stroke/stroke_control.c
-index 2824c93..21ff6b3 100644
---- a/src/libcharon/plugins/stroke/stroke_control.c
-+++ b/src/libcharon/plugins/stroke/stroke_control.c
-@@ -109,7 +109,7 @@ static void charon_initiate(private_stroke_control_t *this, peer_cfg_t *peer_cfg
- if (msg->output_verbosity < 0)
- {
- charon->controller->initiate(charon->controller, peer_cfg, child_cfg,
-- NULL, NULL, 0, 0, FALSE);
-+ NULL, NULL, NULL, NULL, 0, 0, FALSE);
- }
- else
- {
-@@ -117,7 +117,8 @@ static void charon_initiate(private_stroke_control_t *this, peer_cfg_t *peer_cfg
- status_t status;
-
- status = charon->controller->initiate(charon->controller,
-- peer_cfg, child_cfg, (controller_cb_t)stroke_log,
-+ peer_cfg, child_cfg, NULL, NULL,
-+ (controller_cb_t)stroke_log,
- &info, msg->output_verbosity, this->timeout, FALSE);
- switch (status)
- {
-diff --git a/src/libcharon/plugins/vici/vici_config.c b/src/libcharon/plugins/vici/vici_config.c
-index 5221225..b1486e3 100644
---- a/src/libcharon/plugins/vici/vici_config.c
-+++ b/src/libcharon/plugins/vici/vici_config.c
-@@ -2252,7 +2252,7 @@ static void run_start_action(private_vici_config_t *this, peer_cfg_t *peer_cfg,
- DBG1(DBG_CFG, "initiating '%s'", child_cfg->get_name(child_cfg));
- charon->controller->initiate(charon->controller,
- peer_cfg->get_ref(peer_cfg), child_cfg->get_ref(child_cfg),
-- NULL, NULL, 0, 0, FALSE);
-+ NULL, NULL, NULL, NULL, 0, 0, FALSE);
- }
- }
-
-diff --git a/src/libcharon/plugins/vici/vici_control.c b/src/libcharon/plugins/vici/vici_control.c
-index 1c236d2..811d8db 100644
---- a/src/libcharon/plugins/vici/vici_control.c
-+++ b/src/libcharon/plugins/vici/vici_control.c
-@@ -15,6 +15,28 @@
- * for more details.
- */
-
-+/*
-+ * Copyright (C) 2014 Timo Teräs <timo.teras@iki.fi>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to deal
-+ * in the Software without restriction, including without limitation the rights
-+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+ * copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-+ * THE SOFTWARE.
-+ */
-+
- #include "vici_control.h"
- #include "vici_builder.h"
-
-@@ -173,9 +195,11 @@ static child_cfg_t* find_child_cfg(char *name, char *pname, peer_cfg_t **out)
- CALLBACK(initiate, vici_message_t*,
- private_vici_control_t *this, char *name, u_int id, vici_message_t *request)
- {
-+ vici_message_t* msg;
- peer_cfg_t *peer_cfg = NULL;
- child_cfg_t *child_cfg;
-- char *child, *ike, *type, *sa;
-+ host_t *my_host = NULL, *other_host = NULL;
-+ char *child, *ike, *type, *sa, *my_host_str, *other_host_str;
- int timeout;
- bool limits;
- controller_cb_t log_cb = NULL;
-@@ -189,6 +213,8 @@ CALLBACK(initiate, vici_message_t*,
- timeout = request->get_int(request, 0, "timeout");
- limits = request->get_bool(request, FALSE, "init-limits");
- log.level = request->get_int(request, 1, "loglevel");
-+ my_host_str = request->get_str(request, NULL, "my-host");
-+ other_host_str = request->get_str(request, NULL, "other-host");
-
- if (!child && !ike)
- {
-@@ -199,31 +225,52 @@ CALLBACK(initiate, vici_message_t*,
- log_cb = (controller_cb_t)log_vici;
- }
-
-+ if (my_host_str)
-+ {
-+ my_host = host_create_from_string(my_host_str, 0);
-+ }
-+ if (other_host_str)
-+ {
-+ other_host = host_create_from_string(other_host_str, 0);
-+ }
-+
-+
- type = child ? "CHILD_SA" : "IKE_SA";
- sa = child ?: ike;
-
- child_cfg = find_child_cfg(child, ike, &peer_cfg);
-
-- DBG1(DBG_CFG, "vici initiate %s '%s'", type, sa);
-+ DBG1(DBG_CFG, "vici initiate %s '%s', me %H, other %H, limits %d", type, sa, my_host, other_host, limits);
- if (!peer_cfg)
- {
-- return send_reply(this, "%s config '%s' not found", type, sa);
-+ msg = send_reply(this, "%s config '%s' not found", type, sa);
-+ goto ret;
- }
-- switch (charon->controller->initiate(charon->controller, peer_cfg, child_cfg,
-- log_cb, &log, log.level, timeout, limits))
-+ switch (charon->controller->initiate(charon->controller,
-+ peer_cfg, child_cfg,
-+ my_host, other_host,
-+ log_cb, &log, log.level, timeout, limits))
- {
- case SUCCESS:
-- return send_reply(this, NULL);
-+ msg = send_reply(this, NULL);
-+ break;
- case OUT_OF_RES:
-- return send_reply(this, "%s '%s' not established after %dms", type,
-+ msg = send_reply(this, "%s '%s' not established after %dms", type,
- sa, timeout);
-+ break;
- case INVALID_STATE:
-- return send_reply(this, "establishing %s '%s' not possible at the "
-+ msg = send_reply(this, "establishing %s '%s' not possible at the "
- "moment due to limits", type, sa);
-+ break;
- case FAILED:
- default:
-- return send_reply(this, "establishing %s '%s' failed", type, sa);
-+ msg = send_reply(this, "establishing %s '%s' failed", type, sa);
-+ break;
- }
-+ret:
-+ if (my_host) my_host->destroy(my_host);
-+ if (other_host) other_host->destroy(other_host);
-+ return msg;
- }
-
- /**
-diff --git a/src/libcharon/processing/jobs/start_action_job.c b/src/libcharon/processing/jobs/start_action_job.c
-index 122e5ce..dec458c 100644
---- a/src/libcharon/processing/jobs/start_action_job.c
-+++ b/src/libcharon/processing/jobs/start_action_job.c
-@@ -84,7 +84,7 @@ METHOD(job_t, execute, job_requeue_t,
- charon->controller->initiate(charon->controller,
- peer_cfg->get_ref(peer_cfg),
- child_cfg->get_ref(child_cfg),
-- NULL, NULL, 0, 0, FALSE);
-+ NULL, NULL, NULL, NULL, 0, 0, FALSE);
- }
- }
- children->destroy(children);
-diff --git a/src/libcharon/sa/ike_sa_manager.c b/src/libcharon/sa/ike_sa_manager.c
-index fc31c2a..51e28bc 100644
---- a/src/libcharon/sa/ike_sa_manager.c
-+++ b/src/libcharon/sa/ike_sa_manager.c
-@@ -16,6 +16,28 @@
- * for more details.
- */
-
-+/*
-+ * Copyright (C) 2014 Timo Teräs <timo.teras@iki.fi>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to deal
-+ * in the Software without restriction, including without limitation the rights
-+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+ * copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-+ * THE SOFTWARE.
-+ */
-+
- #include <string.h>
- #include <inttypes.h>
-
-@@ -1497,7 +1519,8 @@ typedef struct {
- } config_entry_t;
-
- METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
-- private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
-+ private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg,
-+ host_t *my_host, host_t *other_host)
- {
- enumerator_t *enumerator;
- entry_t *entry;
-@@ -1508,7 +1531,17 @@ METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
- u_int segment;
- int i;
-
-- DBG2(DBG_MGR, "checkout IKE_SA by config");
-+ if (my_host && my_host->get_port(my_host) == 0)
-+ {
-+ my_host->set_port(my_host, IKEV2_UDP_PORT);
-+ }
-+ if (other_host && other_host->get_port(other_host) == 0)
-+ {
-+ other_host->set_port(other_host, IKEV2_UDP_PORT);
-+ }
-+
-+ DBG2(DBG_MGR, "checkout IKE_SA by config '%s', me %H, other %H",
-+ peer_cfg->get_name(peer_cfg), my_host, other_host);
-
- if (!this->reuse_ikesa && peer_cfg->get_ike_version(peer_cfg) != IKEV1)
- { /* IKE_SA reuse disabled by config (not possible for IKEv1) */
-@@ -1566,6 +1599,15 @@ METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
- continue;
- }
-
-+ if (my_host && !my_host->ip_equals(my_host, entry->ike_sa->get_my_host(entry->ike_sa)))
-+ {
-+ continue;
-+ }
-+ if (other_host && !other_host->ip_equals(other_host, entry->ike_sa->get_other_host(entry->ike_sa)))
-+ {
-+ continue;
-+ }
-+
- current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
- if (current_peer && current_peer->equals(current_peer, peer_cfg))
- {
-@@ -1592,6 +1634,10 @@ METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
- {
- ike_sa->set_peer_cfg(ike_sa, peer_cfg);
- checkout_new(this, ike_sa);
-+ if (my_host || other_host)
-+ {
-+ ike_sa->update_hosts(ike_sa, my_host, other_host, TRUE);
-+ }
- }
- }
- charon->bus->set_sa(charon->bus, ike_sa);
-diff --git a/src/libcharon/sa/ike_sa_manager.h b/src/libcharon/sa/ike_sa_manager.h
-index 004cc22..50f8246 100644
---- a/src/libcharon/sa/ike_sa_manager.h
-+++ b/src/libcharon/sa/ike_sa_manager.h
-@@ -123,7 +123,8 @@ struct ike_sa_manager_t {
- ike_sa_t* (*checkout_by_message) (ike_sa_manager_t* this, message_t *message);
-
- /**
-- * Checkout an IKE_SA for initiation by a peer_config.
-+ * Checkout an IKE_SA for initiation by a peer_config and optional
-+ * source and remote host addresses.
- *
- * To initiate, a CHILD_SA may be established within an existing IKE_SA.
- * This call checks for an existing IKE_SA by comparing the configuration.
-@@ -136,9 +137,12 @@ struct ike_sa_manager_t {
- * @note The peer_config is always set on the returned IKE_SA.
- *
- * @param peer_cfg configuration used to find an existing IKE_SA
-+ * @param my_host source host address for wildcard peer_cfg
-+ * @param other_host remote host address for wildcard peer_cfg
- * @return checked out/created IKE_SA
- */
-- ike_sa_t *(*checkout_by_config)(ike_sa_manager_t* this, peer_cfg_t *peer_cfg);
-+ ike_sa_t *(*checkout_by_config)(ike_sa_manager_t* this, peer_cfg_t *peer_cfg,
-+ host_t *my_host, host_t *other_host);
-
- /**
- * Reset initiator SPI.
-diff --git a/src/libcharon/sa/trap_manager.c b/src/libcharon/sa/trap_manager.c
-index d8d8a42..e7c906e 100644
---- a/src/libcharon/sa/trap_manager.c
-+++ b/src/libcharon/sa/trap_manager.c
-@@ -523,7 +523,7 @@ METHOD(trap_manager_t, acquire, void,
- peer_cfg_t *peer;
- child_cfg_t *child;
- ike_sa_t *ike_sa;
-- host_t *host;
-+ host_t *host, *my_host = NULL, *other_host = NULL;
- bool wildcard, ignore = FALSE;
-
- this->lock->read_lock(this->lock);
-@@ -600,37 +600,27 @@ METHOD(trap_manager_t, acquire, void,
- this->lock->unlock(this->lock);
-
- if (wildcard)
-- { /* the peer config would match IKE_SAs with other peers */
-- ike_sa = charon->ike_sa_manager->create_new(charon->ike_sa_manager,
-- peer->get_ike_version(peer), TRUE);
-- if (ike_sa)
-- {
-- ike_cfg_t *ike_cfg;
-- uint16_t port;
-- uint8_t mask;
--
-- ike_sa->set_peer_cfg(ike_sa, peer);
-- ike_cfg = ike_sa->get_ike_cfg(ike_sa);
--
-- port = ike_cfg->get_other_port(ike_cfg);
-- data->dst->to_subnet(data->dst, &host, &mask);
-- host->set_port(host, port);
-- ike_sa->set_other_host(ike_sa, host);
--
-- port = ike_cfg->get_my_port(ike_cfg);
-- data->src->to_subnet(data->src, &host, &mask);
-- host->set_port(host, port);
-- ike_sa->set_my_host(ike_sa, host);
--
-- charon->bus->set_sa(charon->bus, ike_sa);
-- }
-- }
-- else
- {
-- ike_sa = charon->ike_sa_manager->checkout_by_config(
-- charon->ike_sa_manager, peer);
-+ ike_cfg_t *ike_cfg;
-+ uint16_t port;
-+ uint8_t mask;
-+
-+ ike_cfg = peer->get_ike_cfg(peer);
-+
-+ port = ike_cfg->get_other_port(ike_cfg);
-+ data->dst->to_subnet(data->dst, &other_host, &mask);
-+ other_host->set_port(other_host, port);
-+
-+ port = ike_cfg->get_my_port(ike_cfg);
-+ data->src->to_subnet(data->src, &my_host, &mask);
-+ my_host->set_port(my_host, port);
- }
-+ ike_sa = charon->ike_sa_manager->checkout_by_config(
-+ charon->ike_sa_manager, peer,
-+ my_host, other_host);
- peer->destroy(peer);
-+ DESTROY_IF(my_host);
-+ DESTROY_IF(other_host);
-
- if (ike_sa)
- {
-diff --git a/src/swanctl/commands/initiate.c b/src/swanctl/commands/initiate.c
-index e0fffb9..dcaded5 100644
---- a/src/swanctl/commands/initiate.c
-+++ b/src/swanctl/commands/initiate.c
-@@ -14,6 +14,28 @@
- * for more details.
- */
-
-+/*
-+ * Copyright (C) 2014 Timo Teräs <timo.teras@iki.fi>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to deal
-+ * in the Software without restriction, including without limitation the rights
-+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+ * copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-+ * THE SOFTWARE.
-+ */
-+
- #include "command.h"
-
- #include <errno.h>
-@@ -38,7 +60,7 @@ static int initiate(vici_conn_t *conn)
- vici_req_t *req;
- vici_res_t *res;
- command_format_options_t format = COMMAND_FORMAT_NONE;
-- char *arg, *child = NULL, *ike = NULL;
-+ char *arg, *child = NULL, *ike = NULL, *my_host = NULL, *other_host = NULL;
- int ret = 0, timeout = 0, level = 1;
-
- while (TRUE)
-@@ -65,6 +87,12 @@ static int initiate(vici_conn_t *conn)
- case 'l':
- level = atoi(arg);
- continue;
-+ case 'S':
-+ my_host = arg;
-+ continue;
-+ case 'R':
-+ other_host = arg;
-+ continue;
- case EOF:
- break;
- default:
-@@ -88,6 +116,14 @@ static int initiate(vici_conn_t *conn)
- {
- vici_add_key_valuef(req, "ike", "%s", ike);
- }
-+ if (my_host)
-+ {
-+ vici_add_key_valuef(req, "my-host", "%s", my_host);
-+ }
-+ if (other_host)
-+ {
-+ vici_add_key_valuef(req, "other-host", "%s", other_host);
-+ }
- if (timeout)
- {
- vici_add_key_valuef(req, "timeout", "%d", timeout * 1000);
-@@ -134,6 +170,8 @@ static void __attribute__ ((constructor))reg()
- {"help", 'h', 0, "show usage information"},
- {"child", 'c', 1, "initiate a CHILD_SA configuration"},
- {"ike", 'i', 1, "initiate an IKE_SA, or name of child's parent"},
-+ {"source", 'S', 1, "override source address"},
-+ {"remote", 'R', 1, "override remote address"},
- {"timeout", 't', 1, "timeout in seconds before detaching"},
- {"raw", 'r', 0, "dump raw response message"},
- {"pretty", 'P', 0, "dump raw response message in pretty print"},
diff --git a/scripts/package-build/strongswan/patches/0002-vici-send-certificates-for-ike-sa-events.patch b/scripts/package-build/strongswan/patches/0002-vici-send-certificates-for-ike-sa-events.patch
deleted file mode 100644
index 13e657e9..00000000
--- a/scripts/package-build/strongswan/patches/0002-vici-send-certificates-for-ike-sa-events.patch
+++ /dev/null
@@ -1,140 +0,0 @@
-From 39d537b875e907c63a54d5de8ba6d2ea0ede4604 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
-Date: Mon, 21 Sep 2015 13:42:05 +0300
-Subject: [PATCH 2/3] vici: send certificates for ike-sa events
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Signed-off-by: Timo Teräs <timo.teras@iki.fi>
----
- src/libcharon/plugins/vici/vici_query.c | 50 +++++++++++++++++++++----
- 1 file changed, 42 insertions(+), 8 deletions(-)
-
-diff --git a/src/libcharon/plugins/vici/vici_query.c b/src/libcharon/plugins/vici/vici_query.c
-index bacb7b101..19acc0789 100644
---- a/src/libcharon/plugins/vici/vici_query.c
-+++ b/src/libcharon/plugins/vici/vici_query.c
-@@ -402,7 +402,7 @@ static void list_vips(private_vici_query_t *this, vici_builder_t *b,
- * List details of an IKE_SA
- */
- static void list_ike(private_vici_query_t *this, vici_builder_t *b,
-- ike_sa_t *ike_sa, time_t now)
-+ ike_sa_t *ike_sa, time_t now, bool add_certs)
- {
- time_t t;
- ike_sa_id_t *id;
-@@ -411,6 +411,8 @@ static void list_ike(private_vici_query_t *this, vici_builder_t *b,
- uint32_t if_id;
- uint16_t alg, ks;
- host_t *host;
-+ auth_cfg_t *auth_cfg;
-+ enumerator_t *enumerator;
-
- b->add_kv(b, "uniqueid", "%u", ike_sa->get_unique_id(ike_sa));
- b->add_kv(b, "version", "%u", ike_sa->get_version(ike_sa));
-@@ -420,11 +422,43 @@ static void list_ike(private_vici_query_t *this, vici_builder_t *b,
- b->add_kv(b, "local-host", "%H", host);
- b->add_kv(b, "local-port", "%d", host->get_port(host));
- b->add_kv(b, "local-id", "%Y", ike_sa->get_my_id(ike_sa));
-+ if (add_certs)
-+ {
-+ enumerator = ike_sa->create_auth_cfg_enumerator(ike_sa, TRUE);
-+ if (enumerator->enumerate(enumerator, &auth_cfg))
-+ {
-+ certificate_t *cert = auth_cfg->get(auth_cfg, AUTH_RULE_SUBJECT_CERT);
-+ chunk_t encoding;
-+
-+ if (cert && cert->get_encoding(cert, CERT_ASN1_DER, &encoding))
-+ {
-+ b->add(b, VICI_KEY_VALUE, "local-cert-data", encoding);
-+ free(encoding.ptr);
-+ }
-+ }
-+ enumerator->destroy(enumerator);
-+ }
-
- host = ike_sa->get_other_host(ike_sa);
- b->add_kv(b, "remote-host", "%H", host);
- b->add_kv(b, "remote-port", "%d", host->get_port(host));
- b->add_kv(b, "remote-id", "%Y", ike_sa->get_other_id(ike_sa));
-+ if (add_certs)
-+ {
-+ enumerator = ike_sa->create_auth_cfg_enumerator(ike_sa, FALSE);
-+ if (enumerator->enumerate(enumerator, &auth_cfg))
-+ {
-+ certificate_t *cert = auth_cfg->get(auth_cfg, AUTH_RULE_SUBJECT_CERT);
-+ chunk_t encoding;
-+
-+ if (cert && cert->get_encoding(cert, CERT_ASN1_DER, &encoding))
-+ {
-+ b->add(b, VICI_KEY_VALUE, "remote-cert-data", encoding);
-+ free(encoding.ptr);
-+ }
-+ }
-+ enumerator->destroy(enumerator);
-+ }
-
- eap = ike_sa->get_other_eap_id(ike_sa);
-
-@@ -556,7 +590,7 @@ CALLBACK(list_sas, vici_message_t*,
- b = vici_builder_create();
- b->begin_section(b, ike_sa->get_name(ike_sa));
-
-- list_ike(this, b, ike_sa, now);
-+ list_ike(this, b, ike_sa, now, TRUE);
-
- b->begin_section(b, "child-sas");
- csas = ike_sa->create_child_sa_enumerator(ike_sa);
-@@ -1774,7 +1808,7 @@ METHOD(listener_t, ike_updown, bool,
- }
-
- b->begin_section(b, ike_sa->get_name(ike_sa));
-- list_ike(this, b, ike_sa, now);
-+ list_ike(this, b, ike_sa, now, up);
- b->end_section(b);
-
- this->dispatcher->raise_event(this->dispatcher,
-@@ -1799,10 +1833,10 @@ METHOD(listener_t, ike_rekey, bool,
- b = vici_builder_create();
- b->begin_section(b, old->get_name(old));
- b->begin_section(b, "old");
-- list_ike(this, b, old, now);
-+ list_ike(this, b, old, now, TRUE);
- b->end_section(b);
- b->begin_section(b, "new");
-- list_ike(this, b, new, now);
-+ list_ike(this, b, new, now, TRUE);
- b->end_section(b);
- b->end_section(b);
-
-@@ -1833,7 +1867,7 @@ METHOD(listener_t, ike_update, bool,
- b->add_kv(b, "remote-port", "%d", remote->get_port(remote));
-
- b->begin_section(b, ike_sa->get_name(ike_sa));
-- list_ike(this, b, ike_sa, now);
-+ list_ike(this, b, ike_sa, now, TRUE);
- b->end_section(b);
-
- this->dispatcher->raise_event(this->dispatcher,
-@@ -1863,7 +1897,7 @@ METHOD(listener_t, child_updown, bool,
- }
-
- b->begin_section(b, ike_sa->get_name(ike_sa));
-- list_ike(this, b, ike_sa, now);
-+ list_ike(this, b, ike_sa, now, up);
- b->begin_section(b, "child-sas");
-
- snprintf(buf, sizeof(buf), "%s-%u", child_sa->get_name(child_sa),
-@@ -1898,7 +1932,7 @@ METHOD(listener_t, child_rekey, bool,
- b = vici_builder_create();
-
- b->begin_section(b, ike_sa->get_name(ike_sa));
-- list_ike(this, b, ike_sa, now);
-+ list_ike(this, b, ike_sa, now, TRUE);
- b->begin_section(b, "child-sas");
-
- b->begin_section(b, old->get_name(old));
---
-2.38.1
-
diff --git a/scripts/package-build/strongswan/patches/0003-vici-add-support-for-individual-sa-state-changes.patch b/scripts/package-build/strongswan/patches/0003-vici-add-support-for-individual-sa-state-changes.patch
deleted file mode 100644
index 45aadc72..00000000
--- a/scripts/package-build/strongswan/patches/0003-vici-add-support-for-individual-sa-state-changes.patch
+++ /dev/null
@@ -1,159 +0,0 @@
-From df6b501ed29b838efde0f1cb1c906ab9befc7b45 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
-Date: Mon, 21 Sep 2015 13:42:11 +0300
-Subject: [PATCH 3/3] vici: add support for individual sa state changes
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Useful for monitoring and tracking full SA.
-
-Signed-off-by: Timo Teräs <timo.teras@iki.fi>
----
- src/libcharon/plugins/vici/vici_query.c | 105 ++++++++++++++++++++++++
- 1 file changed, 105 insertions(+)
-
-diff --git a/src/libcharon/plugins/vici/vici_query.c b/src/libcharon/plugins/vici/vici_query.c
-index 19acc0789..e008885f7 100644
---- a/src/libcharon/plugins/vici/vici_query.c
-+++ b/src/libcharon/plugins/vici/vici_query.c
-@@ -1774,8 +1774,16 @@ static void manage_commands(private_vici_query_t *this, bool reg)
- this->dispatcher->manage_event(this->dispatcher, "ike-updown", reg);
- this->dispatcher->manage_event(this->dispatcher, "ike-rekey", reg);
- this->dispatcher->manage_event(this->dispatcher, "ike-update", reg);
-+ this->dispatcher->manage_event(this->dispatcher, "ike-state-established", reg);
-+ this->dispatcher->manage_event(this->dispatcher, "ike-state-destroying", reg);
- this->dispatcher->manage_event(this->dispatcher, "child-updown", reg);
- this->dispatcher->manage_event(this->dispatcher, "child-rekey", reg);
-+ this->dispatcher->manage_event(this->dispatcher, "child-state-installing", reg);
-+ this->dispatcher->manage_event(this->dispatcher, "child-state-installed", reg);
-+ this->dispatcher->manage_event(this->dispatcher, "child-state-updating", reg);
-+ this->dispatcher->manage_event(this->dispatcher, "child-state-rekeying", reg);
-+ this->dispatcher->manage_event(this->dispatcher, "child-state-rekeyed", reg);
-+ this->dispatcher->manage_event(this->dispatcher, "child-state-destroying", reg);
- manage_command(this, "list-sas", list_sas, reg);
- manage_command(this, "list-policies", list_policies, reg);
- manage_command(this, "list-conns", list_conns, reg);
-@@ -1876,6 +1884,45 @@ METHOD(listener_t, ike_update, bool,
- return TRUE;
- }
-
-+METHOD(listener_t, ike_state_change, bool,
-+ private_vici_query_t *this, ike_sa_t *ike_sa, ike_sa_state_t state)
-+{
-+ char *event;
-+ vici_builder_t *b;
-+ time_t now;
-+
-+ switch (state)
-+ {
-+ case IKE_ESTABLISHED:
-+ event = "ike-state-established";
-+ break;
-+ case IKE_DESTROYING:
-+ event = "ike-state-destroying";
-+ break;
-+ default:
-+ return TRUE;
-+ }
-+
-+ if (!this->dispatcher->has_event_listeners(this->dispatcher, event))
-+ {
-+ return TRUE;
-+ }
-+
-+ now = time_monotonic(NULL);
-+
-+ b = vici_builder_create();
-+ b->begin_section(b, ike_sa->get_name(ike_sa));
-+ list_ike(this, b, ike_sa, now, state != IKE_DESTROYING);
-+ b->begin_section(b, "child-sas");
-+ b->end_section(b);
-+ b->end_section(b);
-+
-+ this->dispatcher->raise_event(this->dispatcher,
-+ event, 0, b->finalize(b));
-+
-+ return TRUE;
-+}
-+
- METHOD(listener_t, child_updown, bool,
- private_vici_query_t *this, ike_sa_t *ike_sa, child_sa_t *child_sa, bool up)
- {
-@@ -1955,6 +2002,62 @@ METHOD(listener_t, child_rekey, bool,
- return TRUE;
- }
-
-+METHOD(listener_t, child_state_change, bool,
-+ private_vici_query_t *this, ike_sa_t *ike_sa, child_sa_t *child_sa, child_sa_state_t state)
-+{
-+ char *event;
-+ vici_builder_t *b;
-+ time_t now;
-+
-+ switch (state)
-+ {
-+ case CHILD_INSTALLING:
-+ event = "child-state-installing";
-+ break;
-+ case CHILD_INSTALLED:
-+ event = "child-state-installed";
-+ break;
-+ case CHILD_UPDATING:
-+ event = "child-state-updating";
-+ break;
-+ case CHILD_REKEYING:
-+ event = "child-state-rekeying";
-+ break;
-+ case CHILD_REKEYED:
-+ event = "child-state-rekeyed";
-+ break;
-+ case CHILD_DESTROYING:
-+ event = "child-state-destroying";
-+ break;
-+ default:
-+ return TRUE;
-+ }
-+
-+ if (!this->dispatcher->has_event_listeners(this->dispatcher, event))
-+ {
-+ return TRUE;
-+ }
-+
-+ now = time_monotonic(NULL);
-+
-+ b = vici_builder_create();
-+ b->begin_section(b, ike_sa->get_name(ike_sa));
-+ list_ike(this, b, ike_sa, now, state != CHILD_DESTROYING);
-+ b->begin_section(b, "child-sas");
-+
-+ b->begin_section(b, child_sa->get_name(child_sa));
-+ list_child(this, b, child_sa, now);
-+ b->end_section(b);
-+
-+ b->end_section(b);
-+ b->end_section(b);
-+
-+ this->dispatcher->raise_event(this->dispatcher,
-+ event, 0, b->finalize(b));
-+
-+ return TRUE;
-+}
-+
- METHOD(vici_query_t, destroy, void,
- private_vici_query_t *this)
- {
-@@ -1975,8 +2078,10 @@ vici_query_t *vici_query_create(vici_dispatcher_t *dispatcher)
- .ike_updown = _ike_updown,
- .ike_rekey = _ike_rekey,
- .ike_update = _ike_update,
-+ .ike_state_change = _ike_state_change,
- .child_updown = _child_updown,
- .child_rekey = _child_rekey,
-+ .child_state_change = _child_state_change,
- },
- .destroy = _destroy,
- },
---
-2.38.1
-
diff --git a/scripts/package-build/strongswan/patches/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch b/scripts/package-build/strongswan/patches/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch
deleted file mode 100644
index 57a622e8..00000000
--- a/scripts/package-build/strongswan/patches/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch
+++ /dev/null
@@ -1,115 +0,0 @@
-From ee6c0b3ff6e3df5c7aef628621e19a813ff308ed Mon Sep 17 00:00:00 2001
-From: Christian Poessinger <christian@poessinger.com>
-Date: Tue, 27 Dec 2022 13:36:43 +0000
-Subject: [PATCH] VyOS: disable options enabled by Debian that are unused
-
-VyOS does not implement CLI options for all options exposed by Debian.
-
-The following options need to be disabled for the DMVPN patchset:
- - mediation
- - nm
-
-In addition, we have no LED, LDAP and SQL configuration knobs, thus we spare
-the plugins.
----
- debian/libcharon-extra-plugins.install | 3 ---
- debian/libstrongswan-extra-plugins.install | 3 ---
- debian/rules | 11 ++++++++++-
- debian/strongswan-nm.install | 2 --
- 4 files changed, 10 insertions(+), 9 deletions(-)
-
-diff --git a/debian/libcharon-extra-plugins.install b/debian/libcharon-extra-plugins.install
-index 94fbabd88..068708ecb 100644
---- a/debian/libcharon-extra-plugins.install
-+++ b/debian/libcharon-extra-plugins.install
-@@ -13,7 +13,6 @@ usr/lib/ipsec/plugins/libstrongswan-error-notify.so
- usr/lib/ipsec/plugins/libstrongswan-forecast.so
- usr/lib/ipsec/plugins/libstrongswan-ha.so
- usr/lib/ipsec/plugins/libstrongswan-kernel-libipsec.so
--usr/lib/ipsec/plugins/libstrongswan-led.so
- usr/lib/ipsec/plugins/libstrongswan-lookip.so
- #usr/lib/ipsec/plugins/libstrongswan-medsrv.so
- #usr/lib/ipsec/plugins/libstrongswan-medcli.so
-@@ -36,7 +35,6 @@ usr/share/strongswan/templates/config/plugins/error-notify.conf
- usr/share/strongswan/templates/config/plugins/forecast.conf
- usr/share/strongswan/templates/config/plugins/ha.conf
- usr/share/strongswan/templates/config/plugins/kernel-libipsec.conf
--usr/share/strongswan/templates/config/plugins/led.conf
- usr/share/strongswan/templates/config/plugins/lookip.conf
- #usr/share/strongswan/templates/config/plugins/medsrv.conf
- #usr/share/strongswan/templates/config/plugins/medcli.conf
-@@ -60,7 +58,6 @@ etc/strongswan.d/charon/error-notify.conf
- etc/strongswan.d/charon/forecast.conf
- etc/strongswan.d/charon/ha.conf
- etc/strongswan.d/charon/kernel-libipsec.conf
--etc/strongswan.d/charon/led.conf
- etc/strongswan.d/charon/lookip.conf
- #etc/strongswan.d/charon/medsrv.conf
- #etc/strongswan.d/charon/medcli.conf
-diff --git a/debian/libstrongswan-extra-plugins.install b/debian/libstrongswan-extra-plugins.install
-index 2846e2155..00cd0a146 100644
---- a/debian/libstrongswan-extra-plugins.install
-+++ b/debian/libstrongswan-extra-plugins.install
-@@ -8,7 +8,6 @@ usr/lib/ipsec/plugins/libstrongswan-ctr.so
- usr/lib/ipsec/plugins/libstrongswan-curl.so
- usr/lib/ipsec/plugins/libstrongswan-curve25519.so
- usr/lib/ipsec/plugins/libstrongswan-gcrypt.so
--usr/lib/ipsec/plugins/libstrongswan-ldap.so
- usr/lib/ipsec/plugins/libstrongswan-pkcs11.so
- usr/lib/ipsec/plugins/libstrongswan-test-vectors.so
- usr/lib/ipsec/plugins/libstrongswan-tpm.so
-@@ -20,7 +19,6 @@ usr/share/strongswan/templates/config/plugins/ctr.conf
- usr/share/strongswan/templates/config/plugins/curl.conf
- usr/share/strongswan/templates/config/plugins/curve25519.conf
- usr/share/strongswan/templates/config/plugins/gcrypt.conf
--usr/share/strongswan/templates/config/plugins/ldap.conf
- usr/share/strongswan/templates/config/plugins/pkcs11.conf
- usr/share/strongswan/templates/config/plugins/test-vectors.conf
- usr/share/strongswan/templates/config/plugins/tpm.conf
-@@ -31,7 +29,6 @@ etc/strongswan.d/charon/ctr.conf
- etc/strongswan.d/charon/curl.conf
- etc/strongswan.d/charon/curve25519.conf
- etc/strongswan.d/charon/gcrypt.conf
--etc/strongswan.d/charon/ldap.conf
- etc/strongswan.d/charon/pkcs11.conf
- etc/strongswan.d/charon/test-vectors.conf
- etc/strongswan.d/charon/tpm.conf
-diff --git a/debian/rules b/debian/rules
-index 2fed1f10f..fa0d21a0c 100755
---- a/debian/rules
-+++ b/debian/rules
-@@ -3,6 +3,15 @@ export DEB_LDFLAGS_MAINT_APPEND=-Wl,-O1
- #export DEB_LDFLAGS_MAINT_APPEND=-Wl,--as-needed -Wl,-O1 -Wl,-z,defs
- export DEB_BUILD_MAINT_OPTIONS=hardening=+all
-
-+CONFIGUREARGS_VYOS := --disable-warnings \
-+ --disable-ldap \
-+ --disable-led \
-+ --disable-nm \
-+ --disable-mediation \
-+ --disable-mysql \
-+ --disable-sqlite \
-+ --disable-sql
-+
- CONFIGUREARGS := --libdir=/usr/lib --libexecdir=/usr/lib \
- --enable-addrblock \
- --enable-agent \
-@@ -88,7 +97,7 @@ ifeq ($(DEB_HOST_ARCH_OS),kfreebsd)
- deb_systemdsystemunitdir = $(shell pkg-config --variable=systemdsystemunitdir systemd | sed s,^/,,)
-
- override_dh_auto_configure:
-- dh_auto_configure -- $(CONFIGUREARGS)
-+ dh_auto_configure -- $(CONFIGUREARGS) $(CONFIGUREARGS_VYOS)
-
- override_dh_auto_clean:
- dh_auto_clean
-diff --git a/debian/strongswan-nm.install b/debian/strongswan-nm.install
-index b0c05d94f..e69de29bb 100644
---- a/debian/strongswan-nm.install
-+++ b/debian/strongswan-nm.install
-@@ -1,2 +0,0 @@
--usr/lib/ipsec/charon-nm
--usr/share/dbus-1/system.d/nm-strongswan-service.conf
---
-2.30.2
-
diff --git a/packages/strongswan/patches/0001-charon-add-optional-source-and-remote-overrides-for-.patch b/scripts/package-build/strongswan/patches/strongswan/0001-charon-add-optional-source-and-remote-overrides-for-.patch
index ceb47350..ceb47350 100644
--- a/packages/strongswan/patches/0001-charon-add-optional-source-and-remote-overrides-for-.patch
+++ b/scripts/package-build/strongswan/patches/strongswan/0001-charon-add-optional-source-and-remote-overrides-for-.patch
diff --git a/packages/strongswan/patches/0002-vici-send-certificates-for-ike-sa-events.patch b/scripts/package-build/strongswan/patches/strongswan/0002-vici-send-certificates-for-ike-sa-events.patch
index 13e657e9..13e657e9 100644
--- a/packages/strongswan/patches/0002-vici-send-certificates-for-ike-sa-events.patch
+++ b/scripts/package-build/strongswan/patches/strongswan/0002-vici-send-certificates-for-ike-sa-events.patch
diff --git a/packages/strongswan/patches/0003-vici-add-support-for-individual-sa-state-changes.patch b/scripts/package-build/strongswan/patches/strongswan/0003-vici-add-support-for-individual-sa-state-changes.patch
index 45aadc72..45aadc72 100644
--- a/packages/strongswan/patches/0003-vici-add-support-for-individual-sa-state-changes.patch
+++ b/scripts/package-build/strongswan/patches/strongswan/0003-vici-add-support-for-individual-sa-state-changes.patch
diff --git a/packages/strongswan/patches/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch b/scripts/package-build/strongswan/patches/strongswan/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch
index 57a622e8..57a622e8 100644
--- a/packages/strongswan/patches/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch
+++ b/scripts/package-build/strongswan/patches/strongswan/0004-VyOS-disable-options-enabled-by-Debian-that-are-unus.patch
diff --git a/scripts/package-build/tacacs/.gitignore b/scripts/package-build/tacacs/.gitignore
new file mode 100644
index 00000000..3579fc4d
--- /dev/null
+++ b/scripts/package-build/tacacs/.gitignore
@@ -0,0 +1,3 @@
+/libnss-tacplus/
+/libpam-tacplus/
+/libtacplus-map/
diff --git a/scripts/package-build/tacacs/build.py b/scripts/package-build/tacacs/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/tacacs/build.py
@@ -0,0 +1 @@
+../build.py
\ No newline at end of file
diff --git a/scripts/package-build/tacacs/package.toml b/scripts/package-build/tacacs/package.toml
new file mode 100644
index 00000000..fde9df6e
--- /dev/null
+++ b/scripts/package-build/tacacs/package.toml
@@ -0,0 +1,24 @@
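+# Build order matters: each later build_cmd installs the .deb files produced by the packages above it.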
+[[packages]]
+name = "libtacplus-map"
+commit_id = "master"
+scm_url = "https://github.com/vyos/libtacplus-map.git"
+build_cmd = "dpkg-buildpackage -us -uc -tc -b"
+
+[[packages]]
+name = "libpam-tacplus"
+commit_id = "master"
+scm_url = "https://github.com/vyos/libpam-tacplus.git"
+build_cmd = "sudo dpkg -i ../libtacplus-map*.deb; dpkg-buildpackage -us -uc -tc -b"
+
+[[packages]]
+name = "libnss-tacplus"
+commit_id = "master"
+scm_url = "https://github.com/vyos/libnss-tacplus.git"
+build_cmd = "sudo dpkg -i ../libtac*.deb ../libpam-tacplus*.deb; dpkg-buildpackage -us -uc -tc -b"
+
+[dependencies]
+packages = [
+ "libpam-dev",
+ "autoconf-archive",
+ "libaudit-dev"
+]
diff --git a/scripts/package-build/telegraf/.gitignore b/scripts/package-build/telegraf/.gitignore
index bf2fcf43..f634da68 100644
--- a/scripts/package-build/telegraf/.gitignore
+++ b/scripts/package-build/telegraf/.gitignore
@@ -1,6 +1 @@
-telegraf/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
+/telegraf/
diff --git a/scripts/package-build/vpp/.gitignore b/scripts/package-build/vpp/.gitignore
new file mode 100644
index 00000000..38768675
--- /dev/null
+++ b/scripts/package-build/vpp/.gitignore
@@ -0,0 +1,2 @@
+/vpp/
+/vyos-vpp-patches/
diff --git a/scripts/package-build/vpp/build.py b/scripts/package-build/vpp/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/vpp/build.py
@@ -0,0 +1 @@
+../build.py
\ No newline at end of file
diff --git a/scripts/package-build/vpp/package.toml b/scripts/package-build/vpp/package.toml
new file mode 100644
index 00000000..1104e184
--- /dev/null
+++ b/scripts/package-build/vpp/package.toml
@@ -0,0 +1,35 @@
+[[packages]]
+name = "vyos-vpp-patches"
+commit_id = "current"
+scm_url = "https://github.com/vyos/vyos-vpp-patches"
+build_cmd = "/bin/true"
+apply_patches = false
+
+[[packages]]
+name = "vpp"
+commit_id = "stable/2410"
+scm_url = "https://github.com/FDio/vpp"
+# Skip patch application in build.py; the patches are applied in build_cmd below
+apply_patches = false
+
+pre_build_hook = """
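+# Stage the patches shipped in vyos-vpp-patches so the build_cmd below can apply them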
+mkdir -p ../patches/vpp/
+rsync -av ../vyos-vpp-patches/patches/vpp/ ../patches/vpp/
+"""
+
+build_cmd = """
+# Patches for vpp are applied here
+for patch in ../patches/vpp/*.patch; do
+ echo "I: build_cmd applying patch $patch..."
+ git -c user.email=maintainers@vyos.net -c user.name=vyos am "$patch" || { echo "Failed to apply patch $patch"; exit 1; }
+done
+
+make UNATTENDED=yes install-dep
+make pkg-deb
+cp build-root/*.deb ../
+"""
+
+[dependencies]
+packages = [
+ "llvm"
+]
diff --git a/scripts/package-build/vyos-1x/.gitignore b/scripts/package-build/vyos-1x/.gitignore
new file mode 100644
index 00000000..990c6351
--- /dev/null
+++ b/scripts/package-build/vyos-1x/.gitignore
@@ -0,0 +1,2 @@
+/vyos-1x/
+/vyos-vpp/
diff --git a/scripts/package-build/vyos-1x/build.py b/scripts/package-build/vyos-1x/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/vyos-1x/build.py
@@ -0,0 +1 @@
+../build.py
\ No newline at end of file
diff --git a/scripts/package-build/vyos-1x/package.toml b/scripts/package-build/vyos-1x/package.toml
new file mode 100644
index 00000000..6dcbadb3
--- /dev/null
+++ b/scripts/package-build/vyos-1x/package.toml
@@ -0,0 +1,11 @@
+[[packages]]
+name = "vyos-vpp"
+commit_id = "current"
+scm_url = "https://github.com/vyos/vyos-vpp.git"
+build_cmd = "/bin/true"
+
+[[packages]]
+name = "vyos-1x"
+commit_id = "current"
+scm_url = "https://github.com/vyos/vyos-1x.git"
+build_cmd = "rsync -av --exclude='.git' --exclude='.github' --exclude='README*' --exclude='LICENSE' --exclude='*.md' ../vyos-vpp/ ./; dpkg-buildpackage -us -uc -F"
diff --git a/scripts/package-build/waagent/.gitignore b/scripts/package-build/waagent/.gitignore
index 80401271..a91839ef 100644
--- a/scripts/package-build/waagent/.gitignore
+++ b/scripts/package-build/waagent/.gitignore
@@ -1,8 +1 @@
-waagent/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-*.tar.gz
-*.tar.xz
+/waagent/
diff --git a/scripts/package-build/waagent/package.toml b/scripts/package-build/waagent/package.toml
index d7343a7a..1a382baa 100644
--- a/scripts/package-build/waagent/package.toml
+++ b/scripts/package-build/waagent/package.toml
@@ -3,5 +3,5 @@ name = "waagent"
commit_id = "debian/2.9.1.1-2"
scm_url = "https://salsa.debian.org/cloud-team/waagent.git"
-[packages.dependencies]
+[dependencies]
packages = ["dpkg-source-gitarchive"]
diff --git a/scripts/package-build/wide-dhcpv6/.gitignore b/scripts/package-build/wide-dhcpv6/.gitignore
index 990f3c6c..b7f6e063 100644
--- a/scripts/package-build/wide-dhcpv6/.gitignore
+++ b/scripts/package-build/wide-dhcpv6/.gitignore
@@ -1,7 +1 @@
-wide-dhcpv6/
-*.buildinfo
-*.build
-*.changes
-*.deb
-*.dsc
-*.udeb
+/wide-dhcpv6/
diff --git a/scripts/package-build/wide-dhcpv6/patches/0023-dhcpc6-support-per-interface-client-DUIDs.patch b/scripts/package-build/wide-dhcpv6/patches/0023-dhcpc6-support-per-interface-client-DUIDs.patch
deleted file mode 100644
index c1e71f0c..00000000
--- a/scripts/package-build/wide-dhcpv6/patches/0023-dhcpc6-support-per-interface-client-DUIDs.patch
+++ /dev/null
@@ -1,230 +0,0 @@
-From 1e4a9a7b61090043924f2aa9359dcbc9f5e11bfc Mon Sep 17 00:00:00 2001
-From: Brandon Stepler <brandon@stepler.net>
-Date: Mon, 25 Jan 2021 14:18:57 +0000
-Subject: [PATCH] dhcpc6: support per-interface client DUIDs
-
----
- cfparse.y | 13 +++++++++++--
- cftoken.l | 10 ++++++++++
- config.c | 27 +++++++++++++++++++++++++++
- config.h | 3 ++-
- dhcp6c.c | 11 ++++++++---
- dhcp6c.conf.5 | 6 ++++++
- 6 files changed, 64 insertions(+), 6 deletions(-)
-
-diff --git a/cfparse.y b/cfparse.y
-index 9e685f4..244987c 100644
---- a/cfparse.y
-+++ b/cfparse.y
-@@ -116,6 +116,7 @@ static void cleanup_cflist __P((struct cf_list *));
- %token BCMCS_SERVERS BCMCS_NAME
- %token INFO_ONLY
- %token SCRIPT DELAYEDKEY
-+%token CLIENT_ID CLIENT_ID_DUID
- %token AUTHENTICATION PROTOCOL ALGORITHM DELAYED RECONFIG HMACMD5 MONOCOUNTER
- %token AUTHNAME RDM KEY
- %token KEYINFO REALM KEYID SECRET KEYNAME EXPIRE
-@@ -134,8 +135,8 @@ static void cleanup_cflist __P((struct cf_list *));
- struct dhcp6_poolspec *pool;
- }
-
--%type <str> IFNAME HOSTNAME AUTHNAME KEYNAME DUID_ID STRING QSTRING IAID
--%type <str> POOLNAME PROFILENAME
-+%type <str> IFNAME HOSTNAME CLIENT_ID_DUID AUTHNAME KEYNAME DUID_ID
-+%type <str> STRING QSTRING IAID POOLNAME PROFILENAME
- %type <num> NUMBER duration authproto authalg authrdm
- %type <list> declaration declarations dhcpoption ifparam ifparams
- %type <list> address_list address_list_ent dhcpoption_list
-@@ -639,6 +640,14 @@ dhcpoption:
- /* no value */
- $$ = l;
- }
-+ | CLIENT_ID CLIENT_ID_DUID
-+ {
-+ struct cf_list *l;
-+
-+ MAKE_CFLIST(l, DHCPOPT_CLIENT_ID, NULL, NULL);
-+ l->ptr = $2;
-+ $$ = l;
-+ }
- | AUTHENTICATION AUTHNAME
- {
- struct cf_list *l;
-diff --git a/cftoken.l b/cftoken.l
-index e266ac2..d7edd1f 100644
---- a/cftoken.l
-+++ b/cftoken.l
-@@ -119,6 +119,7 @@ ecl \}
- %s S_HOST
- %s S_DUID
- %s S_IA
-+%s S_CID
- %s S_AUTH
- %s S_KEY
- %s S_SECRET
-@@ -249,6 +250,15 @@ ecl \}
- /* duration */
- <S_CNF>infinity { DECHO; return (INFINITY); }
-
-+ /* client-id option */
-+<S_CNF>client-id { DECHO; BEGIN S_CID; return (CLIENT_ID); }
-+<S_CID>{duid} {
-+ DECHO;
-+ yylval.str = strdup(yytext);
-+ BEGIN S_CNF;
-+ return (CLIENT_ID_DUID);
-+}
-+
- /* authentication option */
- <S_CNF>authentication { DECHO; BEGIN S_AUTH; return (AUTHENTICATION); }
- <S_AUTH>{string} {
-diff --git a/config.c b/config.c
-index 70f6287..0cbe631 100644
---- a/config.c
-+++ b/config.c
-@@ -100,6 +100,7 @@ struct dhcp6_ifconf {
- struct dhcp6_ifconf *next;
-
- char *ifname;
-+ struct duid duid;
-
- /* configuration flags */
- u_long send_flags;
-@@ -1366,6 +1367,7 @@ configure_commit()
- /* commit interface configuration */
- for (ifp = dhcp6_if; ifp; ifp = ifp->next) {
- /* re-initialization */
-+ duidfree(&ifp->duid);
- ifp->send_flags = 0;
- ifp->allow_flags = 0;
- dhcp6_clear_list(&ifp->reqopt_list);
-@@ -1395,6 +1397,8 @@ configure_commit()
- }
-
- /* copy new configuration */
-+ ifp->duid = ifc->duid;
-+ ifc->duid.duid_id = NULL;
- ifp->send_flags = ifc->send_flags;
- ifp->allow_flags = ifc->allow_flags;
- dhcp6_copy_list(&ifp->reqopt_list, &ifc->reqopt_list);
-@@ -1505,6 +1509,7 @@ clear_ifconf(iflist)
- ifc_next = ifc->next;
-
- free(ifc->ifname);
-+ duidfree(&ifc->duid);
- dhcp6_clear_list(&ifc->reqopt_list);
-
- clear_iaconf(&ifc->iaconf_list);
-@@ -1635,6 +1640,28 @@ add_options(opcode, ifc, cfl0)
- return (-1);
- }
- break;
-+ case DHCPOPT_CLIENT_ID:
-+ if (opcode != DHCPOPTCODE_SEND) {
-+ debug_printf(LOG_ERR, FNAME,
-+ "invalid operation (%d) "
-+ "for option type (%d)",
-+ opcode, cfl->type);
-+ return (-1);
-+ }
-+ if (ifc->duid.duid_id != NULL) {
-+ debug_printf(LOG_ERR, FNAME, "%s:%d "
-+ "client-id is doubly specified on %s",
-+ configfilename, cfl->line, ifc->ifname);
-+ return (-1);
-+ }
-+ if ((configure_duid((char *)cfl->ptr,
-+ &ifc->duid)) != 0) {
-+ debug_printf(LOG_ERR, FNAME, "%s:%d "
-+ "failed to configure DUID for %s",
-+ configfilename, cfl->line, ifc->ifname);
-+ return (-1);
-+ }
-+ break;
- case DHCPOPT_AUTHINFO:
- if (opcode != DHCPOPTCODE_SEND) {
- debug_printf(LOG_ERR, FNAME,
-diff --git a/config.h b/config.h
-index 36a5aa3..cfcfdd5 100644
---- a/config.h
-+++ b/config.h
-@@ -69,6 +69,7 @@ struct dhcp6_if {
- u_int32_t linkid; /* to send link-local packets */
- /* multiple global address configuration is not supported now */
- struct in6_addr addr; /* global address */
-+ struct duid duid;
-
- /* configuration parameters */
- u_long send_flags;
-@@ -267,7 +268,7 @@ enum { DECL_SEND, DECL_ALLOW, DECL_INFO_ONLY, DECL_REQUEST, DECL_DUID,
- DECL_ADDRESS,
- DECL_RANGE, DECL_ADDRESSPOOL,
- IFPARAM_SLA_ID, IFPARAM_SLA_LEN, IFPARAM_IFID, IFPARAM_IFID_RAND,
-- DHCPOPT_RAPID_COMMIT, DHCPOPT_AUTHINFO,
-+ DHCPOPT_RAPID_COMMIT, DHCPOPT_CLIENT_ID, DHCPOPT_AUTHINFO,
- DHCPOPT_DNS, DHCPOPT_DNSNAME,
- DHCPOPT_IA_PD, DHCPOPT_IA_NA, DHCPOPT_NTP,
- DHCPOPT_REFRESHTIME,
-diff --git a/dhcp6c.c b/dhcp6c.c
-index 849835e..875a147 100644
---- a/dhcp6c.c
-+++ b/dhcp6c.c
-@@ -433,6 +433,11 @@ client6_start(ifp)
- }
- dhcp6_reset_timer(ev);
-
-+ if (!ifp->duid.duid_id && duidcpy(&ifp->duid, &client_duid)) {
-+ debug_printf(LOG_ERR, FNAME, "failed to copy client DUID");
-+ return (-1);
-+ }
-+
- return (0);
- }
-
-@@ -1249,7 +1254,7 @@ client6_send(ev)
- }
-
- /* client ID */
-- if (duidcpy(&optinfo.clientID, &client_duid)) {
-+ if (duidcpy(&optinfo.clientID, &ifp->duid)) {
- debug_printf(LOG_ERR, FNAME, "failed to copy client ID");
- goto end;
- }
-@@ -1533,7 +1538,7 @@ client6_recvadvert(ifp, dh6, len, optinfo)
- debug_printf(LOG_INFO, FNAME, "no client ID option");
- return (-1);
- }
-- if (duidcmp(&optinfo->clientID, &client_duid)) {
-+ if (duidcmp(&optinfo->clientID, &ifp->duid)) {
- debug_printf(LOG_INFO, FNAME, "client DUID mismatch");
- return (-1);
- }
-@@ -1805,7 +1810,7 @@ client6_recvreply(ifp, dh6, len, optinfo)
- debug_printf(LOG_INFO, FNAME, "no client ID option");
- return (-1);
- }
-- if (duidcmp(&optinfo->clientID, &client_duid)) {
-+ if (duidcmp(&optinfo->clientID, &ifp->duid)) {
- debug_printf(LOG_INFO, FNAME, "client DUID mismatch");
- return (-1);
- }
-diff --git a/dhcp6c.conf.5 b/dhcp6c.conf.5
-index 5693fb8..589510a 100644
---- a/dhcp6c.conf.5
-+++ b/dhcp6c.conf.5
-@@ -139,6 +139,12 @@ An
- statement for
- .Ar authname
- must be provided.
-+.It Ic client-id Ar ID
-+means the client's DHCP unique identifier
-+.Pq DUID .
-+.Ar ID
-+is a colon-separated hexadecimal sequence where each separated part
-+must be composed of two hexadecimal values.
- .El
- .\"
- .Sh Interface statement
---
-2.20.1
-
diff --git a/scripts/package-build/wide-dhcpv6/patches/0024-bind-to-single-socket.patch b/scripts/package-build/wide-dhcpv6/patches/0024-bind-to-single-socket.patch
deleted file mode 100644
index b5751325..00000000
--- a/scripts/package-build/wide-dhcpv6/patches/0024-bind-to-single-socket.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-diff --git a/dhcp6c.c b/dhcp6c.c
-index 1caaaa5..04ce9c5 100644
---- a/dhcp6c.c
-+++ b/dhcp6c.c
-@@ -217,6 +217,12 @@ main(argc, argv)
- argv[0]);
- exit(1);
- }
-+
-+ if (setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, argv[0], strlen(argv[0])) != 0) {
-+ debug_printf(LOG_ERR, FNAME, "failed to bind %s", argv[0]);
-+ exit(1);
-+ }
-+
- argv++;
- }
-
diff --git a/scripts/package-build/wide-dhcpv6/patches/0025-option-to-prevent-ia-release.patch b/scripts/package-build/wide-dhcpv6/patches/0025-option-to-prevent-ia-release.patch
deleted file mode 100644
index 32c15814..00000000
--- a/scripts/package-build/wide-dhcpv6/patches/0025-option-to-prevent-ia-release.patch
+++ /dev/null
@@ -1,155 +0,0 @@
-From: 1vivy <1vivy@tutanota.com>
-Date: Sat, 22 Jul 2023 13:07:10 -0600
-Subject: wide-dhcpv6: T5387: Add a no release option '-n'.
-
-This prevents a release signal from being sent to the ISP causing a new PD or address to be allocated.
-
-Co-authored-by: MrLenin <909621+MrLenin@users.noreply.github.com>
-Co-authored-by: marjohn56 <martin@queens-park.com>
---- wide-dhcpv6.orig/common.h
-+++ wide-dhcpv6/common.h
-@@ -120,6 +120,7 @@ sysdep_sa_len (const struct sockaddr *sa
- extern int foreground;
- extern int debug_thresh;
- extern char *device;
-+extern int opt_norelease;
-
- /* search option for dhcp6_find_listval() */
- #define MATCHLIST_PREFIXLEN 0x1
---- wide-dhcpv6.orig/dhcp6c.8
-+++ wide-dhcpv6/dhcp6c.8
-@@ -88,6 +88,10 @@ is terminated. (suits for a use in shel
- Since the configuration is internally generated, you cannot provide a configuration in this mode. If you want to have different actions for the stateless DHCPv6 information, you should write an appropriate configuration and invoke
- .Nm
- without this option.
-+.It Fl n
-+Prevent Release message from being sent to DHCPv6 server when
-+.Nm
-+stops. This is useful for preventing a new address from being configured by the DHCPv6 server when restarting the DHCPv6 client.
- .It Fl p Ar pid-file
- Use
- .Ar pid-file
-@@ -109,18 +113,22 @@ or
- .Fl i
- option is specified.
- .Pp
--Upon receipt of the
--.Dv SIGHUP
-+Upon receipt of a
-+.Dv SIGHUP ,
-+.Dv SIGTERM ,
- or
--.Dv SIGTERM
--signals,
--.Nm
--will remove all stateful resources from the system.
--In the former case the daemon will then reinvoke itself,
--while it will stop running in the latter case.
--In either case,
-+.Dv SIGUSR1
-+signal,
- .Nm
--will send DHCPv6 Release messages to release resources assigned from servers.
-+will remove all stateful resources from the system. After that,
-+.Dv SIGHUP
-+reinitializes the daemon, and
-+.Dv SIGTERM
-+stops the daemon. In both cases, DHCPv6 Release message will be sent to release resources assigned from servers.
-+.Dv SIGUSR1
-+stops the daemon as
-+.Dv SIGTERM
-+does though DHCPv6 Release message will not be sent.
- .\"
- .Sh FILES
- .Bl -tag -width /etc/wide-dhcpv6/dhcp6c.conf -compact
---- wide-dhcpv6.orig/dhcp6c.c
-+++ wide-dhcpv6/dhcp6c.c
-@@ -84,6 +84,7 @@ static int exit_ok = 0;
- static sig_atomic_t sig_flags = 0;
- #define SIGF_TERM 0x1
- #define SIGF_HUP 0x2
-+#define SIGF_USR1 0x4
-
- const dhcp6_mode_t dhcp6_mode = DHCP6_MODE_CLIENT;
-
-@@ -108,6 +109,8 @@ static int ctldigestlen;
-
- static int infreq_mode = 0;
-
-+int opt_norelease;
-+
- static inline int get_val32 __P((char **, int *, u_int32_t *));
- static inline int get_ifname __P((char **, int *, char *, int));
-
-@@ -170,7 +173,7 @@ main(argc, argv)
- else
- progname++;
-
-- while ((ch = getopt(argc, argv, "c:dDfik:p:P:")) != -1) {
-+ while ((ch = getopt(argc, argv, "c:dDfik:np:P:")) != -1) {
- switch (ch) {
- case 'c':
- conffile = optarg;
-@@ -190,6 +193,9 @@ main(argc, argv)
- case 'k':
- ctlkeyfile = optarg;
- break;
-+ case 'n':
-+ opt_norelease = 1;
-+ break;
- case 'p':
- pid_file = optarg;
- break;
-@@ -395,6 +401,11 @@ client6_init()
- strerror(errno));
- exit(1);
- }
-+ if (signal(SIGUSR1, client6_signal) == SIG_ERR) {
-+ debug_printf(LOG_WARNING, FNAME, "failed to set signal: %s",
-+ strerror(errno));
-+ exit(1);
-+ }
- }
-
- int
-@@ -525,6 +536,13 @@ process_signals()
- free_resources(NULL);
- client6_startall(1);
- }
-+ if ((sig_flags & SIGF_USR1)) {
-+ debug_printf(LOG_INFO, FNAME, "exit without release");
-+ exit_ok = 1;
-+ opt_norelease = 1;
-+ free_resources(NULL);
-+ check_exit();
-+ }
-
- sig_flags = 0;
- }
-@@ -1171,6 +1189,9 @@ client6_signal(sig)
- case SIGHUP:
- sig_flags |= SIGF_HUP;
- break;
-+ case SIGUSR1:
-+ sig_flags |= SIGF_USR1;
-+ break;
- }
- }
-
---- wide-dhcpv6.orig/dhcp6c_ia.c
-+++ wide-dhcpv6/dhcp6c_ia.c
-@@ -420,7 +420,13 @@ release_all_ia(ifp)
- for (ia = TAILQ_FIRST(&iac->iadata); ia; ia = ia_next) {
- ia_next = TAILQ_NEXT(ia, link);
-
-- (void)release_ia(ia);
-+ if (opt_norelease == 0) {
-+ debug_printf(LOG_INFO, FNAME, "Start address "
-+ "release");
-+ (void)release_ia(ia);
-+ } else
-+ debug_printf(LOG_INFO, FNAME, "Bypassing address "
-+ "release because of -n flag");
-
- /*
- * The client MUST stop using all of the addresses
diff --git a/packages/wide-dhcpv6/patches/0023-dhcpc6-support-per-interface-client-DUIDs.patch b/scripts/package-build/wide-dhcpv6/patches/wide-dhcpv6/0023-dhcpc6-support-per-interface-client-DUIDs.patch
index c1e71f0c..c1e71f0c 100644
--- a/packages/wide-dhcpv6/patches/0023-dhcpc6-support-per-interface-client-DUIDs.patch
+++ b/scripts/package-build/wide-dhcpv6/patches/wide-dhcpv6/0023-dhcpc6-support-per-interface-client-DUIDs.patch
diff --git a/packages/wide-dhcpv6/patches/0024-bind-to-single-socket.patch b/scripts/package-build/wide-dhcpv6/patches/wide-dhcpv6/0024-bind-to-single-socket.patch
index b5751325..b5751325 100644
--- a/packages/wide-dhcpv6/patches/0024-bind-to-single-socket.patch
+++ b/scripts/package-build/wide-dhcpv6/patches/wide-dhcpv6/0024-bind-to-single-socket.patch
diff --git a/packages/wide-dhcpv6/patches/0025-option-to-prevent-ia-release.patch b/scripts/package-build/wide-dhcpv6/patches/wide-dhcpv6/0025-option-to-prevent-ia-release.patch
index 32c15814..32c15814 100644
--- a/packages/wide-dhcpv6/patches/0025-option-to-prevent-ia-release.patch
+++ b/scripts/package-build/wide-dhcpv6/patches/wide-dhcpv6/0025-option-to-prevent-ia-release.patch
diff --git a/scripts/package-build/xen-guest-agent/.gitignore b/scripts/package-build/xen-guest-agent/.gitignore
new file mode 100644
index 00000000..d34885ab
--- /dev/null
+++ b/scripts/package-build/xen-guest-agent/.gitignore
@@ -0,0 +1 @@
+/xen-guest-agent/
diff --git a/scripts/package-build/xen-guest-agent/build.py b/scripts/package-build/xen-guest-agent/build.py
new file mode 120000
index 00000000..3c76af73
--- /dev/null
+++ b/scripts/package-build/xen-guest-agent/build.py
@@ -0,0 +1 @@
+../build.py
\ No newline at end of file
diff --git a/scripts/package-build/xen-guest-agent/package.toml b/scripts/package-build/xen-guest-agent/package.toml
new file mode 100644
index 00000000..213425df
--- /dev/null
+++ b/scripts/package-build/xen-guest-agent/package.toml
@@ -0,0 +1,34 @@
+[[packages]]
+name = "xen-guest-agent"
+commit_id = "0.4.0"
+scm_url = "https://gitlab.com/xen-project/xen-guest-agent"
+
+
+build_cmd = """
+# changelog
+cat <<EOF > debian/changelog
+xen-guest-agent (0.4.0) UNRELEASED; urgency=medium
+
+ * Upstream package
+
+ -- VyOS Maintainers <maintainers@vyos.io> Thu, 26 Sep 2024 12:35:47 +0000
+
+EOF
+
+# Adjust debian/rules to package the locally built binary from target/release
+sed -i 's|../xen-guest-agent-$(UPSTREAM_VERSION)-linux-$(DEB_TARGET_GNU_CPU)|target/release/xen-guest-agent|' debian/rules
+
+sudo apt-get -y install --no-install-recommends libclang-dev libxen-dev
+# Install rust
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | bash -s -- -y --default-toolchain stable --profile default --no-modify-path
+
+# Set PATH for Cargo
+export PATH="$HOME/.cargo/bin:$PATH"
+rustup update
+cd xen-guest-agent
+cargo update
+
+# Build deb
+cargo build -F static --profile release
+dpkg-buildpackage -b -us -uc
+"""
diff --git a/scripts/utils/merge-flavors b/scripts/utils/merge-flavors
new file mode 100755
index 00000000..79f8180e
--- /dev/null
+++ b/scripts/utils/merge-flavors
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2024 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Purpose: merges multiple flavor files into one
+
+import sys
+
+import tomli
+import tomli_w
+
+def load_flavor(file_path):
+ with open(file_path, 'rb') as f:
+ flavor_def = tomli.load(f)
+
+ return flavor_def
+
+# XXX: at the moment, this script is only used
+# to produce a meta-flavor for collecting packages
+# used in multiple flavors,
+# so it ignores all other flavor fields for now
+def merge_flavors(l, r):
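+    # Merges r into l in place; package lists may gain duplicates, which the caller removes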
+ if 'packages' in r:
+ l['packages'] += r['packages']
+
+ for arch in r.get('architectures', []):
+ if arch not in l['architectures']:
+ l['architectures'][arch] = {}
+
+ if 'packages' not in l['architectures'][arch]:
+ l['architectures'][arch]['packages'] = []
+
+ if 'packages' in r['architectures'][arch]:
+ l['architectures'][arch]['packages'] += \
+ r['architectures'][arch]['packages']
+
+ return l
+
+if __name__ == '__main__':
+ if len(sys.argv) < 3:
+ print("Please specify a base flavor and a list of flavor files to merge!")
+ sys.exit(1)
+
+ base_flavor = load_flavor(sys.argv[1])
+
+ if 'architectures' not in base_flavor:
+ base_flavor['architectures'] = {}
+
+ if 'packages' not in base_flavor:
+ base_flavor['packages'] = []
+
+ flavor_files = sys.argv[2:]
+ flavor_defs = map(load_flavor, flavor_files)
+
+ for fd in flavor_defs:
+ merge_flavors(base_flavor, fd)
+
+ base_flavor['packages'] = list(set(base_flavor['packages']))
+ for arch in base_flavor.get('architectures'):
+ if 'packages' in base_flavor['architectures'][arch]:
+ base_flavor['architectures'][arch]['packages'] = \
+ list(set(base_flavor['architectures'][arch]['packages']))
+
+ print(tomli_w.dumps(base_flavor))
diff --git a/tools/cloud-init/AWS/config.boot.default b/tools/cloud-init/AWS/config.boot.default
index f115c442..92c3310b 100644
--- a/tools/cloud-init/AWS/config.boot.default
+++ b/tools/cloud-init/AWS/config.boot.default
@@ -10,7 +10,7 @@ system {
}
}
syslog {
- global {
+ local {
facility all {
level info
}
diff --git a/tools/container/config.boot.default b/tools/container/config.boot.default
index f115c442..92c3310b 100644
--- a/tools/container/config.boot.default
+++ b/tools/container/config.boot.default
@@ -10,7 +10,7 @@ system {
}
}
syslog {
- global {
+ local {
facility all {
level info
}
diff --git a/vars/README.md b/vars/README.md
deleted file mode 100644
index ca2f821f..00000000
--- a/vars/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Jenkins Build Library
-
-Instead of writing the same code over and over again, place the common
-parts into a Jenkins Library which is then consumed by every individual
-build Job. This not only makes it less complex, it also increases
-maintainability by several magnitudes.
diff --git a/vars/buildPackage.groovy b/vars/buildPackage.groovy
deleted file mode 100644
index 531e51d9..00000000
--- a/vars/buildPackage.groovy
+++ /dev/null
@@ -1,267 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2020-2021 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easily export built images to the "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call(description=null, pkgList=null, buildCmd=null, buildArm=false, changesPattern="**", buildLabel="ec2_amd64") {
- // - description: Arbitrary text to print on Jenkins Job Description
- // instead of package name
- // - pkgList: Multiple packages can be built at once in a single Pipeline run
- // - buildCmd: replace default build command "dpkg-buildpackage -uc -us -tc -b"
- // with this custom version
- // - buildArm: package will also be built for the arm64 platform
- // - changesPattern: package will only be built if a changed file matching this
- // pattern is found
- // - buildLabel: used to describe where the job should run. amd64 inside the
- // string will be replaced with arm64 when those builds are enabled.
- // Example: ec2_amd64 -> ec2_arm64 or foo_amd64 -> foo_arm64
-
- setDescription(description)
-
- pipeline {
- agent none
- options {
- disableConcurrentBuilds()
- timeout(time: 240, unit: 'MINUTES')
- timestamps()
- buildDiscarder(logRotator(numToKeepStr: '10'))
- }
- stages {
- stage('Define Agent') {
- agent {
- label "${buildLabel}"
- }
- when {
- anyOf {
- changeset "${changesPattern}"
- triggeredBy cause: "UserIdCause"
- }
- }
- steps {
- script {
- // create container name on demand
- def branchName = getGitBranchName()
- // Adjust PR target branch name so we can re-map it to the proper Docker image.
- if (isPullRequest())
- branchName = env.CHANGE_TARGET.toLowerCase()
- if (branchName.equals('master'))
- branchName = 'current'
-
- env.DOCKER_IMAGE = 'vyos/vyos-build:' + branchName
-
- // Get the current UID and GID from the jenkins agent to allow use of the same UID inside Docker
- env.USR_ID = sh(returnStdout: true, script: 'id -u').toString().trim()
- env.GRP_ID = sh(returnStdout: true, script: 'id -g').toString().trim()
- env.DOCKER_ARGS = '--sysctl net.ipv6.conf.lo.disable_ipv6=0 -e GOSU_UID=' + env.USR_ID + ' -e GOSU_GID=' + env.GRP_ID
- }
- }
- }
- stage('Build Code') {
- when {
- anyOf {
- changeset pattern: changesPattern, caseSensitive: true
- triggeredBy cause: "UserIdCause"
- }
- }
- parallel {
- stage('amd64') {
- agent {
- docker {
- label "${buildLabel}"
- args "${env.DOCKER_ARGS}"
- image "${env.DOCKER_IMAGE}"
- alwaysPull true
- reuseNode true
- }
- }
- steps {
- script {
- cloneAndBuild(description, 'amd64', pkgList, buildCmd)
- stash includes: '**/*.deb', name: 'binary-amd64', allowEmpty: true
- try {
- stash includes: '**/*.dsc', name: 'source-dsc'
- stash includes: '**/*.tar.*z', name: 'source-tar'
- } catch (e) {
- print "Stashing failed, ignoring - no source packages"
- currentBuild.result = 'SUCCESS'
- }
- }
- }
- post {
- cleanup {
- deleteDir()
- }
- }
- }
- stage('arm64') {
- agent {
- docker {
- label "${buildLabel.replace('amd64', 'arm64')}"
- args "${env.DOCKER_ARGS}"
- image "${env.DOCKER_IMAGE}-arm64"
- alwaysPull true
- reuseNode true
- }
- }
- when {
- equals expected: true, actual: buildArm
- }
- steps {
- script {
- cloneAndBuild(description, 'arm64', pkgList, buildCmd)
- stash includes: '**/*arm64.deb', name: 'binary-arm64', allowEmpty: true
- }
- }
- post {
- cleanup {
- deleteDir()
- }
- }
- }
- }
- }
- stage("Finalize") {
- when {
- anyOf {
- changeset pattern: changesPattern, caseSensitive: true
- triggeredBy cause: "UserIdCause"
- }
- }
- agent {
- label "${buildLabel}"
- }
- steps {
- script {
- // Unpack files for amd64, sources and arm64 if packages got built
- try {
- unstash 'binary-amd64'
- unstash 'binary-arm64'
- } catch (e) {
- print "Unstash failed, ignoring - could be because there exists no arm64 build"
- currentBuild.result = 'SUCCESS'
- }
- try {
- unstash 'source-dsc'
- unstash 'source-tar'
- } catch (e) {
- print "Unstash failed, ignoring - no source packages"
- currentBuild.result = 'SUCCESS'
- }
- if (isCustomBuild()) {
- echo "Build not started from official Git repository! Artifacts are not uploaded to external repository"
- return
- }
-
- echo "Uploading Artifacts to external repository"
- copyArtifacts fingerprintArtifacts: true, projectName: '${JOB_NAME}', selector: specific('${BUILD_NUMBER}')
-
- // build up some fancy groovy variables so we do not need to write/copy
- // every option over and over again!
- def RELEASE = getGitBranchName()
- if (getGitBranchName() == "master")
- RELEASE = 'current'
-
- def VYOS_REPO_PATH = '/home/sentrium/web/dev.packages.vyos.net/public_html/repositories/' + RELEASE
- if (getGitBranchName() == "crux")
- VYOS_REPO_PATH += '/vyos'
-
- def SSH_OPTS = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR'
- def SSH_REMOTE = env.DEV_PACKAGES_VYOS_NET_HOST // defined as global variable
- def SSH_DIR = '~/VyOS/' + RELEASE
-
- sshagent(['SSH-dev.packages.vyos.net']) {
-
- sh(script: "ssh ${SSH_OPTS} ${SSH_REMOTE} -t \"bash --login -c 'mkdir -p ${SSH_DIR}'\"")
-
- // Removal of source and binary packages must happen BEFORE adding new ones, else the "reprepro [remove/removesrc]" command may remove the corresponding [source/binary] package as well (behavior depends on package links).
- // To avoid this behavior (bug?), do not merge the remove/add sequences for sources and binaries as was done before
- files = findFiles(glob: '**/*.dsc')
- if (files) {
- echo "Remove deprecated source package(s) from the repository..."
- files.each { FILE ->
- def PACKAGE = sh(returnStdout: true, script: "cat ${FILE} | grep Source ").trim().tokenize(' ').last()
- sh(script: "ssh ${SSH_OPTS} ${SSH_REMOTE} -t \"uncron-add 'reprepro -v -b ${VYOS_REPO_PATH} removesrc ${RELEASE} ${PACKAGE}'\"")
- }
- }
- files = findFiles(glob: '**/*-build-deps_*.deb')
- if (files) {
- echo "Remove Debian build dependency files from the workspace..."
- files.each { FILE ->
- sh(script: "rm -f ${FILE}")
- }
- }
-
- files = findFiles(glob: '**/*.deb')
- if (files) {
- echo "Remove deprecated binary package(s) from the repository..."
- files.each { FILE ->
- // NOTE: Groovy is a pain in the ass and " quotes differ from ', so all shell code must use " in the beginning
- def PACKAGE = sh(returnStdout: true, script: "dpkg-deb -f ${FILE} Package").trim()
- def PACKAGE_ARCH = sh(returnStdout: true, script: "dpkg-deb -f ${FILE} Architecture").trim()
- def ARCH = ''
- if (PACKAGE_ARCH != 'all')
- ARCH = '-A ' + PACKAGE_ARCH
- sh(script: "ssh ${SSH_OPTS} ${SSH_REMOTE} -t \"uncron-add 'reprepro -v -b ${VYOS_REPO_PATH} ${ARCH} remove ${RELEASE} ${PACKAGE}'\"")
- }
- }
-
- files = findFiles(glob: '**/*.tar.*z')
- if (files) {
- echo "Uploading tarball package(s) to the repository..."
- files.each { FILE ->
- sh(script: "scp ${SSH_OPTS} ${FILE} ${SSH_REMOTE}:${SSH_DIR}")
- }
- }
-
- files = findFiles(glob: '**/*.dsc')
- if (files) {
- echo "Uploading *.dsc package(s) to the repository..."
- files.each { FILE ->
- def PACKAGE = sh(returnStdout: true, script: "cat ${FILE} | grep Source ").trim().tokenize(' ').last()
- sh(script: "scp ${SSH_OPTS} ${FILE} ${SSH_REMOTE}:${SSH_DIR}")
- def FILENAME = FILE.toString().tokenize('/').last()
- sh(script: "ssh ${SSH_OPTS} ${SSH_REMOTE} -t \"uncron-add 'reprepro -v -b ${VYOS_REPO_PATH} includedsc ${RELEASE} ${SSH_DIR}/${FILENAME}'\"")
- }
- }
-
- files = findFiles(glob: '**/*.deb')
- if (files) {
- echo "Uploading binary package(s) to the repository ..."
- files.each { FILE ->
- // NOTE: Groovy is a pain in the ass and " quotes differ from ', so all shell code must use " in the beginning
- def PACKAGE = sh(returnStdout: true, script: "dpkg-deb -f ${FILE} Package").trim()
- def PACKAGE_ARCH = sh(returnStdout: true, script: "dpkg-deb -f ${FILE} Architecture").trim()
- def ARCH = ''
- if (PACKAGE_ARCH != 'all')
- ARCH = '-A ' + PACKAGE_ARCH
- sh(script: "scp ${SSH_OPTS} ${FILE} ${SSH_REMOTE}:${SSH_DIR}")
- // Packages like FRR produce their binary in a nested path e.g. packages/frr/frr-rpki-rtrlib-dbgsym_7.5_arm64.deb,
- // thus we will only extract the filename portion from FILE as the binary is scp'ed to SSH_DIR without any subpath.
- def FILENAME = FILE.toString().tokenize('/').last()
- sh(script: "ssh ${SSH_OPTS} ${SSH_REMOTE} -t \"uncron-add 'reprepro -v -b ${VYOS_REPO_PATH} ${ARCH} includedeb ${RELEASE} ${SSH_DIR}/${FILENAME}'\"")
- }
- sh(script: "ssh ${SSH_OPTS} ${SSH_REMOTE} -t \"uncron-add 'reprepro -v -b ${VYOS_REPO_PATH} deleteunreferenced'\"")
- }
- }
- }
- }
- post {
- cleanup {
- deleteDir()
- }
- }
- }
- }
- }
-}
diff --git a/vars/cloneAndBuild.groovy b/vars/cloneAndBuild.groovy
deleted file mode 100644
index f4114a68..00000000
--- a/vars/cloneAndBuild.groovy
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2021 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easily export built images to the "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call(description, architecture, pkgList, buildCmd) {
- // the package build must be done in a subdirectory. Without one, the Debian build system
- // is unable to generate the *.deb files in the source's parent directory, which
- // will cause a "Permission denied" error.
- dir ("build-${architecture}") {
- // cleanup
- deleteDir()
-
- // checkout git repository which hold 'Jenkinsfile'
- checkout scm
-
- // Display Git commit Id used with the Jenkinsfile on the Job "Build History" pane
- def commitId = sh(returnStdout: true, script: 'git rev-parse --short=11 HEAD').trim()
- currentBuild.description = sprintf('Git SHA1: %s', commitId[-11..-1])
-
- if (pkgList) {
- // Fetch individual package source code, but only if a URL is defined, this will
- // let us reuse this script for packages like vyos-1x which ship a Jenkinsfile in
- // their repositories root folder.
- pkgList.each { pkg ->
- if (pkg.scmUrl && pkg.scmCommit) {
- dir(getJenkinsfilePath() + pkg.name) {
- checkout([$class: 'GitSCM',
- doGenerateSubmoduleConfigurations: false,
- extensions: [[$class: 'CleanCheckout']],
- branches: [[name: pkg.scmCommit]],
- userRemoteConfigs: [[url: pkg.scmUrl]]])
- }
- }
- }
- }
-
- // compile the source(s) ...
- if (pkgList) {
- pkgList.each { pkg ->
- dir(getJenkinsfilePath() + pkg.name) {
- sh pkg.buildCmd
- }
- }
- } else if (buildCmd) {
- sh buildCmd
- } else {
- // build dependency package and install it
- sh """
- if [ -f debian/control ]; then
- sudo mk-build-deps --install --tool "apt-get --yes --no-install-recommends"
- sudo dpkg -i *build-deps*.deb
- fi
- """
- try {
- sh 'dpkg-buildpackage -uc -us -tc -F'
- } catch (e) {
- print "Source packages build failed, ignoring - building binaries only"
- currentBuild.result = 'SUCCESS'
- sh 'dpkg-buildpackage -uc -us -tc -b'
- }
- }
- }
- if (architecture == 'amd64') {
- archiveArtifacts artifacts: "**/*.deb", fingerprint: true
- try {
- archiveArtifacts artifacts: "**/*.dsc", fingerprint: true
- archiveArtifacts artifacts: "**/*.tar.*z", fingerprint: true
- } catch (e) {
- print "Archiving failed, ignoring - no source packages"
- currentBuild.result = 'SUCCESS'
- }
- } else {
- archiveArtifacts artifacts: "**/*_${architecture}.deb", fingerprint: true
- }
-}
diff --git a/vars/getChangeSetPath.groovy b/vars/getChangeSetPath.groovy
deleted file mode 100644
index 195f999a..00000000
--- a/vars/getChangeSetPath.groovy
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2020 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easily export built images to the "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call() {
- tmp = getJenkinsfilePath()
- if (tmp)
- tmp = "**/" + tmp + "*"
- else
- tmp = "**/*"
- echo tmp
- return tmp
-}
-
diff --git a/vars/getGitBranchName.groovy b/vars/getGitBranchName.groovy
deleted file mode 100644
index 133734da..00000000
--- a/vars/getGitBranchName.groovy
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2020 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// in order to easily export built images to the "external" world
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call() {
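- // scm.branches[0].name is e.g. '*/current'; the last path component
- // ('current') is the plain branch name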
- def branch = scm.branches[0].name
- return branch.split('/')[-1]
-}
-
diff --git a/vars/getGitRepoName.groovy b/vars/getGitRepoName.groovy
deleted file mode 100644
index 058e4ff5..00000000
--- a/vars/getGitRepoName.groovy
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2020 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call() {
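- // e.g. 'git@github.com:vyos/vyos-1x.git' yields 'vyos-1x.git'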
- return getGitRepoURL().split('/').last()
-}
-
diff --git a/vars/getGitRepoURL.groovy b/vars/getGitRepoURL.groovy
deleted file mode 100644
index d36b48e6..00000000
--- a/vars/getGitRepoURL.groovy
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2020 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call() {
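- // URL of the first configured remote, e.g. 'git@github.com:vyos/vyos-build.git'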
- return scm.userRemoteConfigs[0].url
-}
-
diff --git a/vars/getJenkinsfilePath.groovy b/vars/getJenkinsfilePath.groovy
deleted file mode 100644
index a6690f5e..00000000
--- a/vars/getJenkinsfilePath.groovy
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2020 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call() {
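- // scriptPath is e.g. 'packages/frr/Jenkinsfile'; strip the file name and
- // keep the directory prefix ('packages/frr/'), or '' for a root Jenkinsfile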
- def tmp = currentBuild.rawBuild.parent.definition.scriptPath.replace('Jenkinsfile', '')
- return tmp ?: ''
-}
-
diff --git a/vars/isCustomBuild.groovy b/vars/isCustomBuild.groovy
deleted file mode 100644
index c60ed883..00000000
--- a/vars/isCustomBuild.groovy
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2020 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call() {
- // Returns true if this is a custom build launched from any project fork
- // or from a pull request; returns false if the build was started from
- // git@github.com:vyos/<reponame>, where <reponame> is e.g. vyos-1x.git.
- // Official URLs within the vyos GitHub organisation:
- def gitURI = 'git@github.com:vyos/' + getGitRepoName()
- def httpURI = 'https://github.com/vyos/' + getGitRepoName()
-
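- // pull request builds may check out from the official repository but must
- // still be treated as untrusted, hence the isPullRequest() check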
- return !((getGitRepoURL() == gitURI) || (getGitRepoURL() == httpURI)) || isPullRequest()
-}
diff --git a/vars/isPullRequest.groovy b/vars/isPullRequest.groovy
deleted file mode 100644
index 813341bc..00000000
--- a/vars/isPullRequest.groovy
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (C) 2020 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call() {
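- // Jenkins sets env.CHANGE_ID (the pull request number) only for PR builds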
- return env.CHANGE_ID ? true : false
-}
diff --git a/vars/setDescription.groovy b/vars/setDescription.groovy
deleted file mode 100644
index 683dc20c..00000000
--- a/vars/setDescription.groovy
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env groovy
-// Copyright (C) 2020 VyOS maintainers and contributors
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 or later as
-// published by the Free Software Foundation.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-def call(text = null) {
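- // render an HTML job description, visually flagging builds that were not
- // started from the official repository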
- def item = Jenkins.instance.getItemByFullName(env.JOB_NAME)
-
- // build up the main description text
- def description = ""
- if (text) {
- description += "<h2>VyOS package build: " + text + "</h2>"
- } else {
- description += "<h2>VyOS package build: " + getGitRepoName().replace('.git', '') + "</h2>"
- }
-
- if (isCustomBuild()) {
- description += "<p style='border: 3px dashed red; width: 50%;'>"
- description += "<b>Build not started from official Git repository!</b><br>"
- description += "<br>"
- description += "Repository: <font face = 'courier'>" + getGitRepoURL() + "</font><br>"
- description += "Branch: <font face = 'courier'>" + getGitBranchName() + "</font><br>"
- description += "</p>"
- } else {
- description += "Sources taken from Git branch: <font face = 'courier'>" + getGitBranchName() + "</font><br>"
- }
-
- item.setDescription(description)
- item.save()
-}
-