Diffstat (limited to 'src')
-rwxr-xr-x  src/conf_mode/protocols_failover.py        121
-rwxr-xr-x  src/conf_mode/service_webproxy.py          100
-rwxr-xr-x  src/helpers/vyos-failover.py               184
-rwxr-xr-x  src/op_mode/webproxy_update_blacklist.sh    27
4 files changed, 408 insertions, 24 deletions
diff --git a/src/conf_mode/protocols_failover.py b/src/conf_mode/protocols_failover.py
new file mode 100755
index 000000000..048ba7a89
--- /dev/null
+++ b/src/conf_mode/protocols_failover.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+
+from pathlib import Path
+
+from vyos.config import Config
+from vyos.configdict import dict_merge
+from vyos.template import render
+from vyos.util import call
+from vyos.xml import defaults
+from vyos import ConfigError
+from vyos import airbag
+
+airbag.enable()
+
+
+service_name = 'vyos-failover'
+service_conf = Path(f'/run/{service_name}.conf')
+systemd_service = '/etc/systemd/system/vyos-failover.service'
+rt_proto_failover = '/etc/iproute2/rt_protos.d/failover.conf'
+
+
+def get_config(config=None):
+    if config:
+        conf = config
+    else:
+        conf = Config()
+
+    base = ['protocols', 'failover']
+    failover = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+
+    # Set default values only if routes are configured
+    if failover.get('route'):
+        for route, route_config in failover.get('route').items():
+            for next_hop, next_hop_config in route_config.get('next_hop').items():
+                default_values = defaults(base + ['route'])
+                failover['route'][route]['next_hop'][next_hop] = dict_merge(
+                    default_values['next_hop'], failover['route'][route]['next_hop'][next_hop])
+
+    return failover
+
+def verify(failover):
+    # bail out early - looks like removal from running config
+    if not failover:
+        return None
+
+    if 'route' not in failover:
+        raise ConfigError(f'Failover "route" is mandatory!')
+
+    for route, route_config in failover['route'].items():
+        if not route_config.get('next_hop'):
+            raise ConfigError(f'Next-hop for "{route}" is mandatory!')
+
+        for next_hop, next_hop_config in route_config.get('next_hop').items():
+            if 'interface' not in next_hop_config:
+                raise ConfigError(f'Interface for route "{route}" next-hop "{next_hop}" is mandatory!')
+
+            if not next_hop_config.get('check'):
+                raise ConfigError(f'Check target for next-hop "{next_hop}" is mandatory!')
+
+            if 'target' not in next_hop_config['check']:
+                raise ConfigError(f'Check target for next-hop "{next_hop}" is mandatory!')
+
+            check_type = next_hop_config['check']['type']
+            if check_type == 'tcp' and 'port' not in next_hop_config['check']:
+                raise ConfigError(f'Check port for next-hop "{next_hop}" and type TCP is mandatory!')
+
+    return None
+
+def generate(failover):
+    if not failover:
+        service_conf.unlink(missing_ok=True)
+        return None
+
+    # Register our own rt_proto 'failover'
+    # This helps to identify all routes installed with 'proto failover'
+    with open(rt_proto_failover, 'w') as f:
+        f.write('111 failover\n')
+
+    # Write configuration file
+    conf_json = json.dumps(failover, indent=4)
+    service_conf.write_text(conf_json)
+    render(systemd_service, 'protocols/systemd_vyos_failover_service.j2', failover)
+
+    return None
+
+def apply(failover):
+    if not failover:
+        call(f'systemctl stop {service_name}.service')
+        call('ip route flush protocol failover')
+    else:
+        call('systemctl daemon-reload')
+        call(f'systemctl restart {service_name}.service')
+        call('ip route flush protocol failover')
+
+    return None
+
+if __name__ == '__main__':
+    try:
+        c = get_config()
+        verify(c)
+        generate(c)
+        apply(c)
+    except ConfigError as e:
+        print(e)
+        exit(1)
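For reference, generate() above writes the gathered config dict as JSON to /run/vyos-failover.conf, which the vyos-failover.py helper (further below) re-reads in its polling loop. A minimal sketch of that file for one route with a single next-hop; the prefix, gateway and values are illustrative, only the key names are taken from the code above:

    {
        "route": {
            "203.0.113.0/24": {
                "next_hop": {
                    "192.0.2.1": {
                        "interface": "eth1",
                        "metric": "1",
                        "check": {
                            "target": "192.0.2.1",
                            "type": "icmp",
                            "timeout": "10"
                        }
                    }
                }
            }
        }
    }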
diff --git a/src/conf_mode/service_webproxy.py b/src/conf_mode/service_webproxy.py
index 32af31bde..41a1deaa3 100755
--- a/src/conf_mode/service_webproxy.py
+++ b/src/conf_mode/service_webproxy.py
@@ -28,8 +28,10 @@ from vyos.util import dict_search
from vyos.util import write_file
from vyos.validate import is_addr_assigned
from vyos.xml import defaults
+from vyos.base import Warning
from vyos import ConfigError
from vyos import airbag
+
airbag.enable()
squid_config_file = '/etc/squid/squid.conf'
@@ -37,24 +39,57 @@ squidguard_config_file = '/etc/squidguard/squidGuard.conf'
squidguard_db_dir = '/opt/vyatta/etc/config/url-filtering/squidguard/db'
user_group = 'proxy'
-def generate_sg_localdb(category, list_type, role, proxy):
+
+def check_blacklist_categorydb(config_section):
+    if 'block_category' in config_section:
+        for category in config_section['block_category']:
+            check_categorydb(category)
+    if 'allow_category' in config_section:
+        for category in config_section['allow_category']:
+            check_categorydb(category)
+
+
+def check_categorydb(category: str):
+    """
+    Check if the category's db exists
+    :param category:
+    :type str:
+    """
+    path_to_cat: str = f'{squidguard_db_dir}/{category}'
+    if not os.path.exists(f'{path_to_cat}/domains.db') \
+            and not os.path.exists(f'{path_to_cat}/urls.db') \
+            and not os.path.exists(f'{path_to_cat}/expressions.db'):
+        Warning(f'DB of category {category} does not exist.\n '
+                f'Use [update webproxy blacklists] '
+                f'or delete undefined category!')
+
+
+def generate_sg_rule_localdb(category, list_type, role, proxy):
+    if not category or not list_type or not role:
+        return None
+
    cat_ = category.replace('-', '_')
-    if isinstance(dict_search(f'url_filtering.squidguard.{cat_}', proxy),
-                  list):
+    if role == 'default':
+        path_to_cat = f'{cat_}'
+    else:
+        path_to_cat = f'rule.{role}.{cat_}'
+    if isinstance(
+            dict_search(f'url_filtering.squidguard.{path_to_cat}', proxy),
+            list):
        # local block databases must be generated "on-the-fly"
        tmp = {
-            'squidguard_db_dir' : squidguard_db_dir,
-            'category' : f'{category}-default',
-            'list_type' : list_type,
-            'rule' : role
+            'squidguard_db_dir': squidguard_db_dir,
+            'category': f'{category}-{role}',
+            'list_type': list_type,
+            'rule': role
        }
        sg_tmp_file = '/tmp/sg.conf'
-        db_file = f'{category}-default/{list_type}'
-        domains = '\n'.join(dict_search(f'url_filtering.squidguard.{cat_}', proxy))
-
+        db_file = f'{category}-{role}/{list_type}'
+        domains = '\n'.join(
+            dict_search(f'url_filtering.squidguard.{path_to_cat}', proxy))
        # local file
-        write_file(f'{squidguard_db_dir}/{category}-default/local', '',
+        write_file(f'{squidguard_db_dir}/{category}-{role}/local', '',
                   user=user_group, group=user_group)
        # database input file
        write_file(f'{squidguard_db_dir}/{db_file}', domains,
@@ -64,17 +99,18 @@ def generate_sg_localdb(category, list_type, role, proxy):
        render(sg_tmp_file, 'squid/sg_acl.conf.j2', tmp,
               user=user_group, group=user_group)
-        call(f'su - {user_group} -c "squidGuard -d -c {sg_tmp_file} -C {db_file}"')
+        call(
+            f'su - {user_group} -c "squidGuard -d -c {sg_tmp_file} -C {db_file}"')
        if os.path.exists(sg_tmp_file):
            os.unlink(sg_tmp_file)
-
    else:
        # if category is not part of our configuration, clean out the
        # squidguard lists
-        tmp = f'{squidguard_db_dir}/{category}-default'
+        tmp = f'{squidguard_db_dir}/{category}-{role}'
        if os.path.exists(tmp):
-            rmtree(f'{squidguard_db_dir}/{category}-default')
+            rmtree(f'{squidguard_db_dir}/{category}-{role}')
+

def get_config(config=None):
    if config:
@@ -85,7 +121,8 @@ def get_config(config=None):
    if not conf.exists(base):
        return None

-    proxy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
+    proxy = conf.get_config_dict(base, key_mangling=('-', '_'),
+                                 get_first_key=True)
    # We have gathered the dict representation of the CLI, but there are default
    # options which we need to update into the dictionary retrieved.
    default_values = defaults(base)
@@ -110,10 +147,11 @@ def get_config(config=None):
        default_values = defaults(base + ['cache-peer'])
        for peer in proxy['cache_peer']:
            proxy['cache_peer'][peer] = dict_merge(default_values,
-                proxy['cache_peer'][peer])
+                                                   proxy['cache_peer'][peer])

    return proxy
+

def verify(proxy):
    if not proxy:
        return None
@@ -170,17 +208,30 @@ def generate(proxy):
    render(squidguard_config_file, 'squid/squidGuard.conf.j2', proxy)

    cat_dict = {
-        'local-block' : 'domains',
-        'local-block-keyword' : 'expressions',
-        'local-block-url' : 'urls',
-        'local-ok' : 'domains',
-        'local-ok-url' : 'urls'
+        'local-block': 'domains',
+        'local-block-keyword': 'expressions',
+        'local-block-url': 'urls',
+        'local-ok': 'domains',
+        'local-ok-url': 'urls'
    }
-    for category, list_type in cat_dict.items():
-        generate_sg_localdb(category, list_type, 'default', proxy)
+    if dict_search(f'url_filtering.squidguard', proxy) is not None:
+        squidgard_config_section = proxy['url_filtering']['squidguard']
+
+        for category, list_type in cat_dict.items():
+            generate_sg_rule_localdb(category, list_type, 'default', proxy)
+        check_blacklist_categorydb(squidgard_config_section)
+
+        if 'rule' in squidgard_config_section:
+            for rule in squidgard_config_section['rule']:
+                rule_config_section = squidgard_config_section['rule'][
+                    rule]
+                for category, list_type in cat_dict.items():
+                    generate_sg_rule_localdb(category, list_type, rule, proxy)
+                check_blacklist_categorydb(rule_config_section)

    return None
+

def apply(proxy):
    if not proxy:
        # proxy is removed in the commit
@@ -198,6 +249,7 @@ def apply(proxy):
    call('systemctl restart squid.service')
    return None
+

if __name__ == '__main__':
    try:
        c = get_config()
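The role argument is what ties the new per-rule handling together: for the default policy generate_sg_rule_localdb() searches the same dict path as before, while for a named rule it descends into rule.<name>. A rough illustration for the local-block category, using a hypothetical rule name rule10 that is not part of this commit:

    # role == 'default' -> dict_search('url_filtering.squidguard.local_block', proxy)
    #                      db compiled under {squidguard_db_dir}/local-block-default/
    # role == 'rule10'  -> dict_search('url_filtering.squidguard.rule.rule10.local_block', proxy)
    #                      db compiled under {squidguard_db_dir}/local-block-rule10/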
diff --git a/src/helpers/vyos-failover.py b/src/helpers/vyos-failover.py
new file mode 100755
index 000000000..1ac193423
--- /dev/null
+++ b/src/helpers/vyos-failover.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 VyOS maintainers and contributors
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 or later as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import argparse
+import json
+import subprocess
+import socket
+import time
+
+from vyos.util import rc_cmd
+from pathlib import Path
+from systemd import journal
+
+
+my_name = Path(__file__).stem
+
+
+def get_best_route_options(route, debug=False):
+    """
+    Return the current best route (gateway, interface, metric)
+
+    % get_best_route_options('203.0.113.1')
+    ('192.168.0.1', 'eth1', 1)
+
+    % get_best_route_options('203.0.113.254')
+    (None, None, None)
+    """
+    rc, data = rc_cmd(f'ip --detail --json route show protocol failover {route}')
+    if rc == 0:
+        data = json.loads(data)
+        if len(data) == 0:
+            print(f'\nRoute {route} for protocol failover was not found')
+            return None, None, None
+        # Start from a fake metric of 999
+        # Search for the route with the lowest metric
+        best_metric = 999
+        for entry in data:
+            if debug: print('\n', entry)
+            metric = entry.get('metric')
+            gateway = entry.get('gateway')
+            iface = entry.get('dev')
+            if metric < best_metric:
+                best_metric = metric
+                best_gateway = gateway
+                best_interface = iface
+        if debug:
+            print(f'### Best_route exists: {route}, best_gateway: {best_gateway}, '
+                  f'best_metric: {best_metric}, best_iface: {best_interface}')
+        return best_gateway, best_interface, best_metric
+
+def is_port_open(ip, port):
+    """
+    Check the connection to a remote host and port
+    Return True if the host is alive
+
+    % is_port_open('example.com', 8080)
+    True
+    """
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
+    s.settimeout(2)
+    try:
+        s.connect((ip, int(port)))
+        s.shutdown(socket.SHUT_RDWR)
+        return True
+    except:
+        return False
+    finally:
+        s.close()
+
+def is_target_alive(target=None, iface='', proto='icmp', port=None, debug=False):
+    """
+    Check host availability via ICMP, ARP or TCP
+    Return True if the target check is successful
+
+    % is_target_alive('192.0.2.1', 'eth1', proto='arp')
+    True
+    """
+    if iface != '':
+        iface = f'-I {iface}'
+    if proto == 'icmp':
+        command = f'/usr/bin/ping -q {target} {iface} -n -c 2 -W 1'
+        rc, response = rc_cmd(command)
+        if debug: print(f' [ CHECK-TARGET ]: [{command}] -- return-code [RC: {rc}]')
+        if rc == 0:
+            return True
+    elif proto == 'arp':
+        command = f'/usr/bin/arping -b -c 2 -f -w 1 -i 1 {iface} {target}'
+        rc, response = rc_cmd(command)
+        if debug: print(f' [ CHECK-TARGET ]: [{command}] -- return-code [RC: {rc}]')
+        if rc == 0:
+            return True
+    elif proto == 'tcp' and port is not None:
+        return True if is_port_open(target, port) else False
+    else:
+        return False
+
+
+if __name__ == '__main__':
+    # Parse command arguments and get config
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-c',
+                        '--config',
+                        action='store',
+                        help='Path to protocols failover configuration',
+                        required=True,
+                        type=Path)
+
+    args = parser.parse_args()
+    try:
+        config_path = Path(args.config)
+        config = json.loads(config_path.read_text())
+    except Exception as err:
+        print(
+            f'Configuration file "{config_path}" does not exist or is malformed: {err}'
+        )
+        exit(1)
+
+    # For useful debug output on the console, set debug = True and run:
+    # sudo systemctl stop vyos-failover.service
+    # sudo /usr/libexec/vyos/vyos-failover.py --config /run/vyos-failover.conf
+    debug = False
+
+    while(True):
+
+        for route, route_config in config.get('route').items():
+
+            exists_route = exists_gateway, exists_iface, exists_metric = get_best_route_options(route, debug=debug)
+
+            for next_hop, nexthop_config in route_config.get('next_hop').items():
+                conf_iface = nexthop_config.get('interface')
+                conf_metric = int(nexthop_config.get('metric'))
+                port = nexthop_config.get('check').get('port')
+                port_opt = f'port {port}' if port else ''
+                proto = nexthop_config.get('check').get('type')
+                target = nexthop_config.get('check').get('target')
+                timeout = nexthop_config.get('check').get('timeout')
+
+                # Best route not found in the current routing table
+                if exists_route == (None, None, None):
+                    if debug: print(f" [NEW_ROUTE_DETECTED] route: [{route}]")
+                    # Add the route if the check-target is alive
+                    if is_target_alive(target, conf_iface, proto, port, debug=debug):
+                        if debug: print(f' [ ADD ] -- ip route add {route} via {next_hop} dev {conf_iface} '
+                                        f'metric {conf_metric} proto failover\n###')
+                        rc, command = rc_cmd(f'ip route add {route} via {next_hop} dev {conf_iface} '
+                                             f'metric {conf_metric} proto failover')
+                        # If something went wrong and the gateway was not added
+                        # Example: Error: Next-hop has invalid gateway.
+                        if rc != 0:
+                            if debug: print(f'{command} -- return-code [RC: {rc}] {next_hop} dev {conf_iface}')
+                        else:
+                            journal.send(f'ip route add {route} via {next_hop} dev {conf_iface} '
+                                         f'metric {conf_metric} proto failover', SYSLOG_IDENTIFIER=my_name)
+                    else:
+                        if debug: print(f' [ TARGET_FAIL ] target check failed for [{target}], do nothing')
+                        journal.send(f'Check failed for route {route} target {target} proto {proto} '
+                                     f'{port_opt}', SYSLOG_IDENTIFIER=my_name)
+
+                # The route was added earlier, check whether the target is still alive
+                # Delete the route on a failed check, but only if it exists in the routing table
+                if not is_target_alive(target, conf_iface, proto, port, debug=debug) and \
+                        exists_route != (None, None, None):
+                    if debug:
+                        print(f'Next_hop {next_hop} failed, target does not respond')
+                        print(f' [ DEL ] -- ip route del {route} via {next_hop} dev {conf_iface} '
+                              f'metric {conf_metric} proto failover [DELETE]')
+                    rc_cmd(f'ip route del {route} via {next_hop} dev {conf_iface} metric {conf_metric} proto failover')
+                    journal.send(f'ip route del {route} via {next_hop} dev {conf_iface} '
+                                 f'metric {conf_metric} proto failover', SYSLOG_IDENTIFIER=my_name)
+
+        time.sleep(int(timeout))
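get_best_route_options() above relies on the JSON output of iproute2. A trimmed, illustrative sketch of what 'ip --detail --json route show protocol failover 203.0.113.0/24' may return once a failover route has been installed; the function only looks at the gateway, dev and metric keys:

    [
        {
            "dst": "203.0.113.0/24",
            "gateway": "192.0.2.1",
            "dev": "eth1",
            "protocol": "failover",
            "metric": 1,
            "flags": []
        }
    ]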
diff --git a/src/op_mode/webproxy_update_blacklist.sh b/src/op_mode/webproxy_update_blacklist.sh
index d5f301b75..4fb9a54c6 100755
--- a/src/op_mode/webproxy_update_blacklist.sh
+++ b/src/op_mode/webproxy_update_blacklist.sh
@@ -18,6 +18,23 @@ blacklist_url='ftp://ftp.univ-tlse1.fr/pub/reseau/cache/squidguard_contrib/black
data_dir="/opt/vyatta/etc/config/url-filtering"
archive="${data_dir}/squidguard/archive"
db_dir="${data_dir}/squidguard/db"
+conf_file="/etc/squidguard/squidGuard.conf"
+tmp_conf_file="/tmp/sg_update_db.conf"
+
+# $1 - category
+# $2 - type
+# $3 - list
+create_sg_db ()
+{
+    FILE=$db_dir/$1/$2
+    if test -f "$FILE"; then
+        rm -f ${tmp_conf_file}
+        printf "dbhome $db_dir\ndest $1 {\n $3 $1/$2\n}\nacl {\n default {\n pass any\n }\n}" >> ${tmp_conf_file}
+        /usr/bin/squidGuard -b -c ${tmp_conf_file} -C $FILE
+        rm -f ${tmp_conf_file}
+    fi
+
+}
while [ $# -gt 0 ]
do
@@ -88,6 +105,16 @@ if [[ -n $update ]] && [[ $update -eq "yes" ]]; then
    # fix permissions
    chown -R proxy:proxy ${db_dir}
+
+    # create db
+    category_list=(`find $db_dir -type d -exec basename {} \; `)
+    for category in ${category_list[@]}
+    do
+        create_sg_db $category "domains" "domainlist"
+        create_sg_db $category "urls" "urllist"
+        create_sg_db $category "expressions" "expressionlist"
+    done
+    chown -R proxy:proxy ${db_dir}
    chmod 755 ${db_dir}
    logger --priority WARNING "webproxy blacklist entries updated (${count_before}/${count_after})"
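For a hypothetical category directory named "ads" containing a domains file, the printf in create_sg_db above would produce roughly the following temporary /tmp/sg_update_db.conf before squidGuard -b -C compiles it into ads/domains.db (the category name is only an example):

    dbhome /opt/vyatta/etc/config/url-filtering/squidguard/db
    dest ads {
     domainlist ads/domains
    }
    acl {
     default {
     pass any
     }
    }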