Diffstat (limited to 'src/services')
39 files changed, 4461 insertions, 0 deletions
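The README.graphql added below documents the API with GraphQL playground mutations and a curl equivalent. For reference, the same first mutation can also be sent from Python; the sketch below simply mirrors the curl example from the README (the host address is a placeholder, certificate verification is disabled as with 'curl -k', and the third-party 'requests' package is assumed):

import json
import requests

# GraphQL endpoint served by the VyOS http-api; replace with the router address.
url = 'https://192.168.100.168/graphql'

# Same request as the first playground example in README.graphql below.
query = """
mutation {
  CreateInterfaceEthernet (data: {interface: "eth1",
                                  address: "192.168.0.1/24",
                                  description: "BOB"}) {
    success
    errors
    data {
      address
    }
  }
}
"""

# verify=False corresponds to 'curl -k' for a self-signed certificate.
resp = requests.post(url, json={'query': query}, verify=False)
print(json.dumps(resp.json(), indent=2))

The JSON response carries the same success/errors/data fields shown in the playground examples in the README.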
diff --git a/src/services/api/graphql/README.graphql b/src/services/api/graphql/README.graphql new file mode 100644 index 0000000..1133d79 --- /dev/null +++ b/src/services/api/graphql/README.graphql @@ -0,0 +1,218 @@ + +The following examples are in the form as entered in the GraphQL +'playground', which is found at: + +https://{{ host_address }}/graphql + +Example using GraphQL mutations to configure a DHCP server: + +All examples assume that the http-api is running: + +'set service https api' + +One can configure an address on an interface, and configure the DHCP server +to run with that address as default router by requesting these 'mutations' +in the GraphQL playground: + +mutation { + CreateInterfaceEthernet (data: {interface: "eth1", + address: "192.168.0.1/24", + description: "BOB"}) { + success + errors + data { + address + } + } +} + +mutation { + CreateDhcpServer(data: {sharedNetworkName: "BOB", + subnet: "192.168.0.0/24", + defaultRouter: "192.168.0.1", + nameServer: "192.168.0.1", + domainName: "vyos.net", + lease: 86400, + range: 0, + start: "192.168.0.9", + stop: "192.168.0.254", + dnsForwardingAllowFrom: "192.168.0.0/24", + dnsForwardingCacheSize: 0, + dnsForwardingListenAddress: "192.168.0.1"}) { + success + errors + data { + defaultRouter + } + } +} + +To save the configuration, use the following mutation: + +mutation { + SaveConfigFile(data: {fileName: "/config/config.boot"}) { + success + errors + data { + fileName + } + } +} + +N.B. fileName can be empty (fileName: "") or data can be empty (data: {}) to +save to /config/config.boot; to save to an alternative path, specify +fileName. + +Similarly, using an analogous 'endpoint' (meaning the form of the request +and resolver; the actual enpoint for all GraphQL requests is +https://hostname/graphql), one can load an arbitrary config file from a +path. + +mutation { + LoadConfigFile(data: {fileName: "/home/vyos/config.boot"}) { + success + errors + data { + fileName + } + } +} + +Op-mode 'show' commands may be requested by path, e.g.: + +query { + Show (data: {path: ["interfaces", "ethernet", "detail"]}) { + success + errors + data { + result + } + } +} + +N.B. to see the output the 'data' field 'result' must be present in the +request. + +Mutations to manipulate firewall address groups: + +mutation { + CreateFirewallAddressGroup (data: {name: "ADDR-GRP", address: "10.0.0.1"}) { + success + errors + } +} + +mutation { + UpdateFirewallAddressGroupMembers (data: {name: "ADDR-GRP", + address: ["10.0.0.1-10.0.0.8", "192.168.0.1"]}) { + success + errors + } +} + +mutation { + RemoveFirewallAddressGroupMembers (data: {name: "ADDR-GRP", + address: "192.168.0.1"}) { + success + errors + } +} + +N.B. The schema for the above specify that 'address' be of the form 'list of +strings' (SDL type [String!]! for UpdateFirewallAddressGroupMembers, where +the ! indicates that the input is required; SDL type [String] in +CreateFirewallAddressGroup, since a group may be created without any +addresses). 
However, notice that a single string may be passed without being +a member of a list, in which case the specification allows for 'input +coercion': + +http://spec.graphql.org/October2021/#sec-Scalars.Input-Coercion + +Similarly, IPv6 versions of the above: + +CreateFirewallAddressIpv6Group +UpdateFirewallAddressIpv6GroupMembers +RemoveFirewallAddressIpv6GroupMembers + + +Instead of using the GraphQL playground, an equivalent curl command to the +first example above would be: + +curl -k 'https://192.168.100.168/graphql' -H 'Content-Type: application/json' --data-binary '{"query": "mutation {createInterfaceEthernet (data: {interface: \"eth1\", address: \"192.168.0.1/24\", description: \"BOB\"}) {success errors data {address}}}"}' + +Note that the 'mutation' term is prefaced by 'query' in the curl command. + +Curl equivalents may be read from within the GraphQL playground at the 'copy +curl' button. + +What's here: + +services +├── api +│ └── graphql +│ ├── bindings.py +│ ├── graphql +│ │ ├── directives.py +│ │ ├── __init__.py +│ │ ├── mutations.py +│ │ └── schema +│ │ ├── config_file.graphql +│ │ ├── dhcp_server.graphql +│ │ ├── firewall_group.graphql +│ │ ├── interface_ethernet.graphql +│ │ ├── schema.graphql +│ │ ├── show_config.graphql +│ │ └── show.graphql +│ ├── README.graphql +│ ├── recipes +│ │ ├── __init__.py +│ │ ├── remove_firewall_address_group_members.py +│ │ ├── session.py +│ │ └── templates +│ │ ├── create_dhcp_server.tmpl +│ │ ├── create_firewall_address_group.tmpl +│ │ ├── create_interface_ethernet.tmpl +│ │ ├── remove_firewall_address_group_members.tmpl +│ │ └── update_firewall_address_group_members.tmpl +│ └── state.py +├── vyos-configd +├── vyos-hostsd +└── vyos-http-api-server + +The GraphQL library that we are using, Ariadne, advertises itself as a +'schema-first' implementation: define the schema; define resolvers +(handlers) for declared Query and Mutation types (Subscription types are not +currently used). + +In the current approach to a high-level API, we consider the +Jinja2-templated collection of configuration mode 'set'/'delete' commands as +the Ur-data; the GraphQL schema is produced from those files, located in +'api/graphql/recipes/templates'. + +Resolvers for the schema Mutation fields are dynamically generated using a +'directive' added to the respective schema field. The directive, +'@configure', is handled by the class 'ConfigureDirective' in +'api/graphql/graphql/directives.py', which calls the +'make_configure_resolver' function in 'api/graphql/graphql/mutations.py'; +the produced resolver calls the appropriate wrapper in +'api/graphql/recipes', with base class doing the (overridable) configuration +steps of calling all defined 'set'/'delete' commands. + +Integrating the above with vyos-http-api-server is 4 lines of code. + +What needs to be done: + +• automate generation of schema and wrappers from templated configuration +commands + +• investigate whether the subclassing provided by the named wrappers in +'api/graphql/recipes' is sufficient for use cases which need to modify data + +• encapsulate the manipulation of 'canonical names' which transforms the +prefixed camel-case schema names to various snake-case file/function names + +• consider mechanism for migration of templates: offline vs. on-the-fly + +• define the naming convention for those schema fields that refer to +configuration mode parameters: e.g. 
how much of the path is needed as prefix +to uniquely define the term diff --git a/src/services/api/graphql/__init__.py b/src/services/api/graphql/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/src/services/api/graphql/__init__.py diff --git a/src/services/api/graphql/bindings.py b/src/services/api/graphql/bindings.py new file mode 100644 index 0000000..ef49664 --- /dev/null +++ b/src/services/api/graphql/bindings.py @@ -0,0 +1,36 @@ +# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +import vyos.defaults +from . graphql.queries import query +from . graphql.mutations import mutation +from . graphql.directives import directives_dict +from . graphql.errors import op_mode_error +from . graphql.auth_token_mutation import auth_token_mutation +from . libs.token_auth import init_secret +from . import state +from ariadne import make_executable_schema, load_schema_from_path, snake_case_fallback_resolvers + +def generate_schema(): + api_schema_dir = vyos.defaults.directories['api_schema'] + + if state.settings['app'].state.vyos_auth_type == 'token': + init_secret() + + type_defs = load_schema_from_path(api_schema_dir) + + schema = make_executable_schema(type_defs, query, op_mode_error, mutation, auth_token_mutation, snake_case_fallback_resolvers, directives=directives_dict) + + return schema diff --git a/src/services/api/graphql/generate/composite_function.py b/src/services/api/graphql/generate/composite_function.py new file mode 100644 index 0000000..d6626fd --- /dev/null +++ b/src/services/api/graphql/generate/composite_function.py @@ -0,0 +1,7 @@ +# typing information for composite functions: those that invoke several +# elementary requests, and return the result as a single dict +def system_status(): + pass + +queries = {'system_status': system_status} +mutations = {} diff --git a/src/services/api/graphql/generate/config_session_function.py b/src/services/api/graphql/generate/config_session_function.py new file mode 100644 index 0000000..4ebb47a --- /dev/null +++ b/src/services/api/graphql/generate/config_session_function.py @@ -0,0 +1,30 @@ +# typing information for native configsession functions; used to generate +# schema definition files +import typing + +def show_config(path: list[str], configFormat: typing.Optional[str]): + pass + +def show(path: list[str]): + pass + +def show_user_info(user: str): + pass + +queries = {'show_config': show_config, + 'show': show, + 'show_user_info': show_user_info} + +def save_config_file(fileName: typing.Optional[str]): + pass +def load_config_file(fileName: str): + pass +def add_system_image(location: str): + pass +def delete_system_image(name: str): + pass + +mutations = {'save_config_file': save_config_file, + 'load_config_file': load_config_file, + 'add_system_image': add_system_image, + 'delete_system_image': delete_system_image} diff --git 
a/src/services/api/graphql/generate/generate_schema.py b/src/services/api/graphql/generate/generate_schema.py new file mode 100644 index 0000000..dd5e7ea --- /dev/null +++ b/src/services/api/graphql/generate/generate_schema.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2023 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +# + +from schema_from_op_mode import generate_op_mode_definitions +from schema_from_config_session import generate_config_session_definitions +from schema_from_composite import generate_composite_definitions + +if __name__ == '__main__': + generate_op_mode_definitions() + generate_config_session_definitions() + generate_composite_definitions() diff --git a/src/services/api/graphql/generate/schema_from_composite.py b/src/services/api/graphql/generate/schema_from_composite.py new file mode 100644 index 0000000..06e7403 --- /dev/null +++ b/src/services/api/graphql/generate/schema_from_composite.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022-2023 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +# +# A utility to generate GraphQL schema defintions from typing information of +# composite functions comprising several requests. + +import os +import sys +from inspect import signature +from jinja2 import Template + +from vyos.defaults import directories +if __package__ is None or __package__ == '': + sys.path.append(os.path.join(directories['services'], 'api')) + from graphql.libs.op_mode import snake_to_pascal_case, map_type_name + from composite_function import queries, mutations +else: + from .. libs.op_mode import snake_to_pascal_case, map_type_name + from . composite_function import queries, mutations + +SCHEMA_PATH = directories['api_schema'] +CLIENT_OP_PATH = directories['api_client_op'] + +schema_data: dict = {'schema_name': '', + 'schema_fields': []} + +query_template = """ +input {{ schema_name }}Input { + key: String + {%- for field_entry in schema_fields %} + {{ field_entry }} + {%- endfor %} +} + +type {{ schema_name }} { + result: Generic +} + +type {{ schema_name }}Result { + data: {{ schema_name }} + success: Boolean! 
+ errors: [String] +} + +extend type Query { + {{ schema_name }}(data: {{ schema_name }}Input) : {{ schema_name }}Result @compositequery +} +""" + +mutation_template = """ +input {{ schema_name }}Input { + key: String + {%- for field_entry in schema_fields %} + {{ field_entry }} + {%- endfor %} +} + +type {{ schema_name }} { + result: Generic +} + +type {{ schema_name }}Result { + data: {{ schema_name }} + success: Boolean! + errors: [String] +} + +extend type Mutation { + {{ schema_name }}(data: {{ schema_name }}Input) : {{ schema_name }}Result @compositemutation +} +""" + +op_query_template = """ +query {{ op_name }} ({{ op_sig }}) { + {{ op_name }} (data: { {{ op_arg }} }) { + success + errors + data { + result + } + } +} +""" + +op_mutation_template = """ +mutation {{ op_name }} ({{ op_sig }}) { + {{ op_name }} (data: { {{ op_arg }} }) { + success + errors + data { + result + } + } +} +""" + +def create_schema(func_name: str, func: callable, template: str) -> str: + sig = signature(func) + + field_dict = {} + for k in sig.parameters: + field_dict[sig.parameters[k].name] = map_type_name(sig.parameters[k].annotation) + + schema_fields = [] + for k,v in field_dict.items(): + schema_fields.append(k+': '+v) + + schema_data['schema_name'] = snake_to_pascal_case(func_name) + schema_data['schema_fields'] = schema_fields + + j2_template = Template(template) + res = j2_template.render(schema_data) + + return res + +def create_client_op(func_name: str, func: callable, template: str) -> str: + sig = signature(func) + + field_dict = {} + for k in sig.parameters: + field_dict[sig.parameters[k].name] = map_type_name(sig.parameters[k].annotation) + + op_sig = ['$key: String'] + op_arg = ['key: $key'] + for k,v in field_dict.items(): + op_sig.append('$'+k+': '+v) + op_arg.append(k+': $'+k) + + op_data = {} + op_data['op_name'] = snake_to_pascal_case(func_name) + op_data['op_sig'] = ', '.join(op_sig) + op_data['op_arg'] = ', '.join(op_arg) + + j2_template = Template(template) + + res = j2_template.render(op_data) + + return res + +def generate_composite_definitions(): + schema = [] + client_op = [] + for name,func in queries.items(): + res = create_schema(name, func, query_template) + schema.append(res) + res = create_client_op(name, func, op_query_template) + client_op.append(res) + + for name,func in mutations.items(): + res = create_schema(name, func, mutation_template) + schema.append(res) + res = create_client_op(name, func, op_mutation_template) + client_op.append(res) + + out = '\n'.join(schema) + with open(f'{SCHEMA_PATH}/composite.graphql', 'w') as f: + f.write(out) + + out = '\n'.join(client_op) + with open(f'{CLIENT_OP_PATH}/composite.graphql', 'w') as f: + f.write(out) + +if __name__ == '__main__': + generate_composite_definitions() diff --git a/src/services/api/graphql/generate/schema_from_config_session.py b/src/services/api/graphql/generate/schema_from_config_session.py new file mode 100644 index 0000000..1d5ff1e --- /dev/null +++ b/src/services/api/graphql/generate/schema_from_config_session.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022-2023 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +# +# A utility to generate GraphQL schema defintions from typing information of +# (wrappers of) native configsession functions. + +import os +import sys +from inspect import signature +from jinja2 import Template + +from vyos.defaults import directories +if __package__ is None or __package__ == '': + sys.path.append(os.path.join(directories['services'], 'api')) + from graphql.libs.op_mode import snake_to_pascal_case, map_type_name + from config_session_function import queries, mutations +else: + from .. libs.op_mode import snake_to_pascal_case, map_type_name + from . config_session_function import queries, mutations + +SCHEMA_PATH = directories['api_schema'] +CLIENT_OP_PATH = directories['api_client_op'] + +schema_data: dict = {'schema_name': '', + 'schema_fields': []} + +query_template = """ +input {{ schema_name }}Input { + key: String + {%- for field_entry in schema_fields %} + {{ field_entry }} + {%- endfor %} +} + +type {{ schema_name }} { + result: Generic +} + +type {{ schema_name }}Result { + data: {{ schema_name }} + success: Boolean! + errors: [String] +} + +extend type Query { + {{ schema_name }}(data: {{ schema_name }}Input) : {{ schema_name }}Result @configsessionquery +} +""" + +mutation_template = """ +input {{ schema_name }}Input { + key: String + {%- for field_entry in schema_fields %} + {{ field_entry }} + {%- endfor %} +} + +type {{ schema_name }} { + result: Generic +} + +type {{ schema_name }}Result { + data: {{ schema_name }} + success: Boolean! + errors: [String] +} + +extend type Mutation { + {{ schema_name }}(data: {{ schema_name }}Input) : {{ schema_name }}Result @configsessionmutation +} +""" + +op_query_template = """ +query {{ op_name }} ({{ op_sig }}) { + {{ op_name }} (data: { {{ op_arg }} }) { + success + errors + data { + result + } + } +} +""" + +op_mutation_template = """ +mutation {{ op_name }} ({{ op_sig }}) { + {{ op_name }} (data: { {{ op_arg }} }) { + success + errors + data { + result + } + } +} +""" + +def create_schema(func_name: str, func: callable, template: str) -> str: + sig = signature(func) + + field_dict = {} + for k in sig.parameters: + field_dict[sig.parameters[k].name] = map_type_name(sig.parameters[k].annotation) + + schema_fields = [] + for k,v in field_dict.items(): + schema_fields.append(k+': '+v) + + schema_data['schema_name'] = snake_to_pascal_case(func_name) + schema_data['schema_fields'] = schema_fields + + j2_template = Template(template) + res = j2_template.render(schema_data) + + return res + +def create_client_op(func_name: str, func: callable, template: str) -> str: + sig = signature(func) + + field_dict = {} + for k in sig.parameters: + field_dict[sig.parameters[k].name] = map_type_name(sig.parameters[k].annotation) + + op_sig = ['$key: String'] + op_arg = ['key: $key'] + for k,v in field_dict.items(): + op_sig.append('$'+k+': '+v) + op_arg.append(k+': $'+k) + + op_data = {} + op_data['op_name'] = snake_to_pascal_case(func_name) + op_data['op_sig'] = ', '.join(op_sig) + op_data['op_arg'] = ', '.join(op_arg) + + j2_template = Template(template) + + res = j2_template.render(op_data) + + return res + +def generate_config_session_definitions(): + schema = [] + client_op = [] + for name,func in queries.items(): + res = create_schema(name, func, query_template) + schema.append(res) + res = create_client_op(name, func, 
op_query_template) + client_op.append(res) + + for name,func in mutations.items(): + res = create_schema(name, func, mutation_template) + schema.append(res) + res = create_client_op(name, func, op_mutation_template) + client_op.append(res) + + out = '\n'.join(schema) + with open(f'{SCHEMA_PATH}/configsession.graphql', 'w') as f: + f.write(out) + + out = '\n'.join(client_op) + with open(f'{CLIENT_OP_PATH}/configsession.graphql', 'w') as f: + f.write(out) + +if __name__ == '__main__': + generate_config_session_definitions() diff --git a/src/services/api/graphql/generate/schema_from_op_mode.py b/src/services/api/graphql/generate/schema_from_op_mode.py new file mode 100644 index 0000000..ab7cb69 --- /dev/null +++ b/src/services/api/graphql/generate/schema_from_op_mode.py @@ -0,0 +1,301 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022-2023 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +# +# A utility to generate GraphQL schema defintions from standardized op-mode +# scripts. + +import os +import sys +import json +from inspect import signature, getmembers, isfunction, isclass, getmro +from jinja2 import Template + +from vyos.defaults import directories +from vyos.opmode import _is_op_mode_function_name as is_op_mode_function_name +from vyos.opmode import _get_literal_values as get_literal_values +from vyos.utils.system import load_as_module +if __package__ is None or __package__ == '': + sys.path.append(os.path.join(directories['services'], 'api')) + from graphql.libs.op_mode import is_show_function_name + from graphql.libs.op_mode import snake_to_pascal_case, map_type_name +else: + from .. libs.op_mode import is_show_function_name + from .. libs.op_mode import snake_to_pascal_case, map_type_name + +OP_MODE_PATH = directories['op_mode'] +SCHEMA_PATH = directories['api_schema'] +CLIENT_OP_PATH = directories['api_client_op'] +DATA_DIR = directories['data'] + + +op_mode_include_file = os.path.join(DATA_DIR, 'op-mode-standardized.json') +op_mode_error_schema = 'op_mode_error.graphql' + +schema_data: dict = {'schema_name': '', + 'schema_fields': []} + +query_template = """ +input {{ schema_name }}Input { + key: String + {%- for field_entry in schema_fields %} + {{ field_entry }} + {%- endfor %} +} + +type {{ schema_name }} { + result: Generic +} + +type {{ schema_name }}Result { + data: {{ schema_name }} + op_mode_error: OpModeError + success: Boolean! + errors: [String] +} + +extend type Query { + {{ schema_name }}(data: {{ schema_name }}Input) : {{ schema_name }}Result @genopquery +} +""" + +mutation_template = """ +input {{ schema_name }}Input { + key: String + {%- for field_entry in schema_fields %} + {{ field_entry }} + {%- endfor %} +} + +type {{ schema_name }} { + result: Generic +} + +type {{ schema_name }}Result { + data: {{ schema_name }} + op_mode_error: OpModeError + success: Boolean! 
+ errors: [String] +} + +extend type Mutation { + {{ schema_name }}(data: {{ schema_name }}Input) : {{ schema_name }}Result @genopmutation +} +""" + +enum_template = """ +enum {{ enum_name }} { + {%- for field_entry in enum_fields %} + {{ field_entry }} + {%- endfor %} +} +""" + +error_template = """ +interface OpModeError { + name: String! + message: String! + vyos_code: Int! +} +{% for name in error_names %} +type {{ name }} implements OpModeError { + name: String! + message: String! + vyos_code: Int! +} +{%- endfor %} +""" + +op_query_template = """ +query {{ op_name }} ({{ op_sig }}) { + {{ op_name }} (data: { {{ op_arg }} }) { + success + errors + op_mode_error { + name + message + vyos_code + } + data { + result + } + } +} +""" + +op_mutation_template = """ +mutation {{ op_name }} ({{ op_sig }}) { + {{ op_name }} (data: { {{ op_arg }} }) { + success + errors + op_mode_error { + name + message + vyos_code + } + data { + result + } + } +} +""" + +def create_schema(func_name: str, base_name: str, func: callable, + enums: dict) -> str: + sig = signature(func) + + for k in sig.parameters: + t = get_literal_values(sig.parameters[k].annotation) + if t: + enums[t] = snake_to_pascal_case(sig.parameters[k].name + '_' + base_name) + + field_dict = {} + for k in sig.parameters: + field_dict[sig.parameters[k].name] = map_type_name(sig.parameters[k].annotation, enums) + + # It is assumed that if one is generating a schema for a 'show_*' + # function, that 'get_raw_data' is present and 'raw' is desired. + if 'raw' in list(field_dict): + del field_dict['raw'] + + schema_fields = [] + for k,v in field_dict.items(): + schema_fields.append(k+': '+v) + + schema_data['schema_name'] = snake_to_pascal_case(func_name + '_' + base_name) + schema_data['schema_fields'] = schema_fields + + if is_show_function_name(func_name): + j2_template = Template(query_template) + else: + j2_template = Template(mutation_template) + + res = j2_template.render(schema_data) + + return res + +def create_client_op(func_name: str, base_name: str, func: callable, + enums: dict) -> str: + sig = signature(func) + + for k in sig.parameters: + t = get_literal_values(sig.parameters[k].annotation) + if t: + enums[t] = snake_to_pascal_case(sig.parameters[k].name + '_' + base_name) + + field_dict = {} + for k in sig.parameters: + field_dict[sig.parameters[k].name] = map_type_name(sig.parameters[k].annotation, enums) + + # It is assumed that if one is generating a schema for a 'show_*' + # function, that 'get_raw_data' is present and 'raw' is desired. 
+ if 'raw' in list(field_dict): + del field_dict['raw'] + + op_sig = ['$key: String'] + op_arg = ['key: $key'] + for k,v in field_dict.items(): + op_sig.append('$'+k+': '+v) + op_arg.append(k+': $'+k) + + op_data = {} + op_data['op_name'] = snake_to_pascal_case(func_name + '_' + base_name) + op_data['op_sig'] = ', '.join(op_sig) + op_data['op_arg'] = ', '.join(op_arg) + + if is_show_function_name(func_name): + j2_template = Template(op_query_template) + else: + j2_template = Template(op_mutation_template) + + res = j2_template.render(op_data) + + return res + +def create_enums(enums: dict) -> str: + enum_data = [] + for k, v in enums.items(): + enum = {'enum_name': v, 'enum_fields': list(k)} + enum_data.append(enum) + + out = '' + j2_template = Template(enum_template) + for el in enum_data: + out += j2_template.render(el) + out += '\n' + + return out + +def create_error_schema(): + from vyos import opmode + + e = Exception + err_types = getmembers(opmode, isclass) + err_types = [k for k in err_types if issubclass(k[1], e)] + # drop base class, to be replaced by interface type. Find the class + # programmatically, in case the base class name changes. + for i in range(len(err_types)): + if err_types[i][1] in getmro(err_types[i-1][1]): + del err_types[i] + break + err_names = [k[0] for k in err_types] + error_data = {'error_names': err_names} + j2_template = Template(error_template) + res = j2_template.render(error_data) + + return res + +def generate_op_mode_definitions(): + os.makedirs(CLIENT_OP_PATH, exist_ok=True) + + out = create_error_schema() + with open(f'{SCHEMA_PATH}/{op_mode_error_schema}', 'w') as f: + f.write(out) + + with open(op_mode_include_file) as f: + op_mode_files = json.load(f) + + for file in op_mode_files: + basename = os.path.splitext(file)[0].replace('-', '_') + module = load_as_module(basename, os.path.join(OP_MODE_PATH, file)) + + funcs = getmembers(module, isfunction) + funcs = list(filter(lambda ft: is_op_mode_function_name(ft[0]), funcs)) + + funcs_dict = {} + for (name, thunk) in funcs: + funcs_dict[name] = thunk + + schema = [] + client_op = [] + enums = {} # gather enums from function Literal type args + for name,func in funcs_dict.items(): + res = create_schema(name, basename, func, enums) + schema.append(res) + res = create_client_op(name, basename, func, enums) + client_op.append(res) + + out = create_enums(enums) + out += '\n'.join(schema) + with open(f'{SCHEMA_PATH}/{basename}.graphql', 'w') as f: + f.write(out) + + out = '\n'.join(client_op) + with open(f'{CLIENT_OP_PATH}/{basename}.graphql', 'w') as f: + f.write(out) + +if __name__ == '__main__': + generate_op_mode_definitions() diff --git a/src/services/api/graphql/graphql/__init__.py b/src/services/api/graphql/graphql/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/src/services/api/graphql/graphql/__init__.py diff --git a/src/services/api/graphql/graphql/auth_token_mutation.py b/src/services/api/graphql/graphql/auth_token_mutation.py new file mode 100644 index 0000000..a53fa4d --- /dev/null +++ b/src/services/api/graphql/graphql/auth_token_mutation.py @@ -0,0 +1,61 @@ +# Copyright 2022-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +import datetime +from typing import Any +from typing import Dict +from ariadne import ObjectType +from graphql import GraphQLResolveInfo + +from .. libs.token_auth import generate_token +from .. session.session import get_user_info +from .. import state + +auth_token_mutation = ObjectType("Mutation") + +@auth_token_mutation.field('AuthToken') +def auth_token_resolver(obj: Any, info: GraphQLResolveInfo, data: Dict): + # non-nullable fields + user = data['username'] + passwd = data['password'] + + secret = state.settings['secret'] + exp_interval = int(state.settings['app'].state.vyos_token_exp) + expiration = (datetime.datetime.now(tz=datetime.timezone.utc) + + datetime.timedelta(seconds=exp_interval)) + + res = generate_token(user, passwd, secret, expiration) + try: + res |= get_user_info(user) + except ValueError: + # non-existent user already caught + pass + if 'token' in res: + data['result'] = res + return { + "success": True, + "data": data + } + + if 'errors' in res: + return { + "success": False, + "errors": res['errors'] + } + + return { + "success": False, + "errors": ['token generation failed'] + } diff --git a/src/services/api/graphql/graphql/client_op/auth_token.graphql b/src/services/api/graphql/graphql/client_op/auth_token.graphql new file mode 100644 index 0000000..5ea2ecc --- /dev/null +++ b/src/services/api/graphql/graphql/client_op/auth_token.graphql @@ -0,0 +1,10 @@ + +mutation AuthToken ($username: String!, $password: String!) { + AuthToken (data: { username: $username, password: $password }) { + success + errors + data { + result + } + } +} diff --git a/src/services/api/graphql/graphql/directives.py b/src/services/api/graphql/graphql/directives.py new file mode 100644 index 0000000..3927aee --- /dev/null +++ b/src/services/api/graphql/graphql/directives.py @@ -0,0 +1,87 @@ +# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from ariadne import SchemaDirectiveVisitor +from . queries import * +from . 
mutations import * + +def non(arg): + pass + +class VyosDirective(SchemaDirectiveVisitor): + def visit_field_definition(self, field, object_type, make_resolver=non): + name = f'{field.type}' + # field.type contains the return value of the mutation; trim value + # to produce canonical name + name = name.replace('Result', '', 1) + + func = make_resolver(name) + field.resolve = func + return field + +class ConfigSessionQueryDirective(VyosDirective): + """ + Class providing implementation of 'configsessionquery' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_config_session_query_resolver) + +class ConfigSessionMutationDirective(VyosDirective): + """ + Class providing implementation of 'configsessionmutation' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_config_session_mutation_resolver) + +class GenOpQueryDirective(VyosDirective): + """ + Class providing implementation of 'genopquery' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_gen_op_query_resolver) + +class GenOpMutationDirective(VyosDirective): + """ + Class providing implementation of 'genopmutation' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_gen_op_mutation_resolver) + +class CompositeQueryDirective(VyosDirective): + """ + Class providing implementation of 'system_status' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_composite_query_resolver) + +class CompositeMutationDirective(VyosDirective): + """ + Class providing implementation of 'system_status' directive in schema. + """ + def visit_field_definition(self, field, object_type): + super().visit_field_definition(field, object_type, + make_resolver=make_composite_mutation_resolver) + +directives_dict = {"configsessionquery": ConfigSessionQueryDirective, + "configsessionmutation": ConfigSessionMutationDirective, + "genopquery": GenOpQueryDirective, + "genopmutation": GenOpMutationDirective, + "compositequery": CompositeQueryDirective, + "compositemutation": CompositeMutationDirective} diff --git a/src/services/api/graphql/graphql/errors.py b/src/services/api/graphql/graphql/errors.py new file mode 100644 index 0000000..1066300 --- /dev/null +++ b/src/services/api/graphql/graphql/errors.py @@ -0,0 +1,8 @@ + +from ariadne import InterfaceType + +op_mode_error = InterfaceType("OpModeError") + +@op_mode_error.type_resolver +def resolve_op_mode_error(obj, *_): + return obj['name'] diff --git a/src/services/api/graphql/graphql/mutations.py b/src/services/api/graphql/graphql/mutations.py new file mode 100644 index 0000000..d115a8e --- /dev/null +++ b/src/services/api/graphql/graphql/mutations.py @@ -0,0 +1,139 @@ +# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from importlib import import_module +from ariadne import ObjectType, convert_camel_case_to_snake +from makefun import with_signature + +# used below by func_sig +from typing import Any, Dict, Optional # pylint: disable=W0611 +from graphql import GraphQLResolveInfo # pylint: disable=W0611 + +from .. import state +from .. libs import key_auth +from api.graphql.session.session import Session +from api.graphql.session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code +from vyos.opmode import Error as OpModeError + +mutation = ObjectType("Mutation") + +def make_mutation_resolver(mutation_name, class_name, session_func): + """Dynamically generate a resolver for the mutation named in the + schema by 'mutation_name'. + + Dynamic generation is provided using the package 'makefun' (via the + decorator 'with_signature'), which provides signature-preserving + function wrappers; it provides several improvements over, say, + functools.wraps. + + :raise Exception: + raising ConfigErrors, or internal errors + """ + + func_base_name = convert_camel_case_to_snake(class_name) + resolver_name = f'resolve_{func_base_name}' + func_sig = '(obj: Any, info: GraphQLResolveInfo, data: Optional[Dict]=None)' + + @mutation.field(mutation_name) + @with_signature(func_sig, func_name=resolver_name) + async def func_impl(*args, **kwargs): + try: + auth_type = state.settings['app'].state.vyos_auth_type + + if auth_type == 'key': + data = kwargs['data'] + key = data['key'] + + auth = key_auth.auth_required(key) + if auth is None: + return { + "success": False, + "errors": ['invalid API key'] + } + + # We are finished with the 'key' entry, and may remove so as to + # pass the rest of data (if any) to function. + del data['key'] + + elif auth_type == 'token': + data = kwargs['data'] + if data is None: + data = {} + info = kwargs['info'] + user = info.context.get('user') + if user is None: + error = info.context.get('error') + if error is not None: + return { + "success": False, + "errors": [error] + } + return { + "success": False, + "errors": ['not authenticated'] + } + else: + # AtrributeError will have already been raised if no + # vyos_auth_type; validation and defaultValue ensure it is + # one of the previous cases, so this is never reached. 
+ pass + + session = state.settings['app'].state.vyos_session + + # one may override the session functions with a local subclass + try: + mod = import_module(f'api.graphql.session.override.{func_base_name}') + klass = getattr(mod, class_name) + except ImportError: + # otherwise, dynamically generate subclass to invoke subclass + # name based functions + klass = type(class_name, (Session,), {}) + k = klass(session, data) + method = getattr(k, session_func) + result = method() + data['result'] = result + + return { + "success": True, + "data": data + } + except OpModeError as e: + typename = type(e).__name__ + msg = str(e) + return { + "success": False, + "errore": ['op_mode_error'], + "op_mode_error": {"name": f"{typename}", + "message": msg if msg else op_mode_err_msg.get(typename, "Unknown"), + "vyos_code": op_mode_err_code.get(typename, 9999)} + } + except Exception as error: + return { + "success": False, + "errors": [repr(error)] + } + + return func_impl + +def make_config_session_mutation_resolver(mutation_name): + return make_mutation_resolver(mutation_name, mutation_name, + convert_camel_case_to_snake(mutation_name)) + +def make_gen_op_mutation_resolver(mutation_name): + return make_mutation_resolver(mutation_name, mutation_name, 'gen_op_mutation') + +def make_composite_mutation_resolver(mutation_name): + return make_mutation_resolver(mutation_name, mutation_name, + convert_camel_case_to_snake(mutation_name)) diff --git a/src/services/api/graphql/graphql/queries.py b/src/services/api/graphql/graphql/queries.py new file mode 100644 index 0000000..7170982 --- /dev/null +++ b/src/services/api/graphql/graphql/queries.py @@ -0,0 +1,139 @@ +# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from importlib import import_module +from ariadne import ObjectType, convert_camel_case_to_snake +from makefun import with_signature + +# used below by func_sig +from typing import Any, Dict, Optional # pylint: disable=W0611 +from graphql import GraphQLResolveInfo # pylint: disable=W0611 + +from .. import state +from .. libs import key_auth +from api.graphql.session.session import Session +from api.graphql.session.errors.op_mode_errors import op_mode_err_msg, op_mode_err_code +from vyos.opmode import Error as OpModeError + +query = ObjectType("Query") + +def make_query_resolver(query_name, class_name, session_func): + """Dynamically generate a resolver for the query named in the + schema by 'query_name'. + + Dynamic generation is provided using the package 'makefun' (via the + decorator 'with_signature'), which provides signature-preserving + function wrappers; it provides several improvements over, say, + functools.wraps. 
+ + :raise Exception: + raising ConfigErrors, or internal errors + """ + + func_base_name = convert_camel_case_to_snake(class_name) + resolver_name = f'resolve_{func_base_name}' + func_sig = '(obj: Any, info: GraphQLResolveInfo, data: Optional[Dict]=None)' + + @query.field(query_name) + @with_signature(func_sig, func_name=resolver_name) + async def func_impl(*args, **kwargs): + try: + auth_type = state.settings['app'].state.vyos_auth_type + + if auth_type == 'key': + data = kwargs['data'] + key = data['key'] + + auth = key_auth.auth_required(key) + if auth is None: + return { + "success": False, + "errors": ['invalid API key'] + } + + # We are finished with the 'key' entry, and may remove so as to + # pass the rest of data (if any) to function. + del data['key'] + + elif auth_type == 'token': + data = kwargs['data'] + if data is None: + data = {} + info = kwargs['info'] + user = info.context.get('user') + if user is None: + error = info.context.get('error') + if error is not None: + return { + "success": False, + "errors": [error] + } + return { + "success": False, + "errors": ['not authenticated'] + } + else: + # AtrributeError will have already been raised if no + # vyos_auth_type; validation and defaultValue ensure it is + # one of the previous cases, so this is never reached. + pass + + session = state.settings['app'].state.vyos_session + + # one may override the session functions with a local subclass + try: + mod = import_module(f'api.graphql.session.override.{func_base_name}') + klass = getattr(mod, class_name) + except ImportError: + # otherwise, dynamically generate subclass to invoke subclass + # name based functions + klass = type(class_name, (Session,), {}) + k = klass(session, data) + method = getattr(k, session_func) + result = method() + data['result'] = result + + return { + "success": True, + "data": data + } + except OpModeError as e: + typename = type(e).__name__ + msg = str(e) + return { + "success": False, + "errors": ['op_mode_error'], + "op_mode_error": {"name": f"{typename}", + "message": msg if msg else op_mode_err_msg.get(typename, "Unknown"), + "vyos_code": op_mode_err_code.get(typename, 9999)} + } + except Exception as error: + return { + "success": False, + "errors": [repr(error)] + } + + return func_impl + +def make_config_session_query_resolver(query_name): + return make_query_resolver(query_name, query_name, + convert_camel_case_to_snake(query_name)) + +def make_gen_op_query_resolver(query_name): + return make_query_resolver(query_name, query_name, 'gen_op_query') + +def make_composite_query_resolver(query_name): + return make_query_resolver(query_name, query_name, + convert_camel_case_to_snake(query_name)) diff --git a/src/services/api/graphql/graphql/schema/auth_token.graphql b/src/services/api/graphql/graphql/schema/auth_token.graphql new file mode 100644 index 0000000..af53a29 --- /dev/null +++ b/src/services/api/graphql/graphql/schema/auth_token.graphql @@ -0,0 +1,19 @@ + +input AuthTokenInput { + username: String! + password: String! +} + +type AuthToken { + result: Generic +} + +type AuthTokenResult { + data: AuthToken + success: Boolean! 
+ errors: [String] +} + +extend type Mutation { + AuthToken(data: AuthTokenInput) : AuthTokenResult +} diff --git a/src/services/api/graphql/graphql/schema/schema.graphql b/src/services/api/graphql/graphql/schema/schema.graphql new file mode 100644 index 0000000..62b0d30 --- /dev/null +++ b/src/services/api/graphql/graphql/schema/schema.graphql @@ -0,0 +1,16 @@ +schema { + query: Query + mutation: Mutation +} + +directive @compositequery on FIELD_DEFINITION +directive @compositemutation on FIELD_DEFINITION +directive @configsessionquery on FIELD_DEFINITION +directive @configsessionmutation on FIELD_DEFINITION +directive @genopquery on FIELD_DEFINITION +directive @genopmutation on FIELD_DEFINITION + +scalar Generic + +type Query +type Mutation diff --git a/src/services/api/graphql/libs/key_auth.py b/src/services/api/graphql/libs/key_auth.py new file mode 100644 index 0000000..2db0f7d --- /dev/null +++ b/src/services/api/graphql/libs/key_auth.py @@ -0,0 +1,18 @@ + +from .. import state + +def check_auth(key_list, key): + if not key_list: + return None + key_id = None + for k in key_list: + if k['key'] == key: + key_id = k['id'] + return key_id + +def auth_required(key): + api_keys = None + api_keys = state.settings['app'].state.vyos_keys + key_id = check_auth(api_keys, key) + state.settings['app'].state.vyos_id = key_id + return key_id diff --git a/src/services/api/graphql/libs/op_mode.py b/src/services/api/graphql/libs/op_mode.py new file mode 100644 index 0000000..86e38ea --- /dev/null +++ b/src/services/api/graphql/libs/op_mode.py @@ -0,0 +1,103 @@ +# Copyright 2022-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
+ +import os +import re +import typing + +from typing import Union +from typing import Optional +from humps import decamelize + +from vyos.defaults import directories +from vyos.utils.system import load_as_module +from vyos.opmode import _normalize_field_names +from vyos.opmode import _is_literal_type, _get_literal_values + +def load_op_mode_as_module(name: str): + path = os.path.join(directories['op_mode'], name) + name = os.path.splitext(name)[0].replace('-', '_') + return load_as_module(name, path) + +def is_show_function_name(name): + if re.match(r"^show", name): + return True + return False + +def _nth_split(delim: str, n: int, s: str): + groups = s.split(delim) + l = len(groups) + if n > l-1 or n < 1: + return (s, '') + return (delim.join(groups[:n]), delim.join(groups[n:])) + +def _nth_rsplit(delim: str, n: int, s: str): + groups = s.split(delim) + l = len(groups) + if n > l-1 or n < 1: + return (s, '') + return (delim.join(groups[:l-n]), delim.join(groups[l-n:])) + +# Since we have mangled possible hyphens in the file name while constructing +# the snake case of the query/mutation name, we will need to recover the +# file name by searching with mangling: +def _filter_on_mangled(test): + def func(elem): + mangle = os.path.splitext(elem)[0].replace('-', '_') + return test == mangle + return func + +# Find longest name in concatenated string that matches the basename of an +# op-mode script. Should one prefer to concatenate in the reverse order +# (script_name + '_' + function_name), use _nth_rsplit. +def split_compound_op_mode_name(name: str, files: list): + for i in range(1, name.count('_') + 1): + pair = _nth_split('_', i, name) + f = list(filter(_filter_on_mangled(pair[1]), files)) + if f: + pair = (pair[0], f[0]) + return pair + return (name, '') + +def snake_to_pascal_case(name: str) -> str: + res = ''.join(map(str.title, name.split('_'))) + return res + +def map_type_name(type_name: type, enums: Optional[dict] = None, optional: bool = False) -> str: + if type_name == str: + return 'String!' if not optional else 'String = null' + if type_name == int: + return 'Int!' if not optional else 'Int = null' + if type_name == bool: + return 'Boolean = false' + if typing.get_origin(type_name) == list: + if not optional: + return f'[{map_type_name(typing.get_args(type_name)[0], enums=enums)}]!' + return f'[{map_type_name(typing.get_args(type_name)[0], enums=enums)}]' + if _is_literal_type(type_name): + mapped = enums.get(_get_literal_values(type_name), '') + if not mapped: + raise ValueError(typing.get_args(type_name)) + return f'{mapped}!' if not optional else mapped + # typing.Optional is typing.Union[_, NoneType] + if (typing.get_origin(type_name) is typing.Union and + typing.get_args(type_name)[1] == type(None)): + return f'{map_type_name(typing.get_args(type_name)[0], enums=enums, optional=True)}' + + # scalar 'Generic' is defined in schema.graphql + return 'Generic' + +def normalize_output(result: Union[dict, list]) -> Union[dict, list]: + return _normalize_field_names(decamelize(result)) diff --git a/src/services/api/graphql/libs/token_auth.py b/src/services/api/graphql/libs/token_auth.py new file mode 100644 index 0000000..8585485 --- /dev/null +++ b/src/services/api/graphql/libs/token_auth.py @@ -0,0 +1,70 @@ +import jwt +import uuid +import pam +from secrets import token_hex + +from .. 
import state + +def _check_passwd_pam(username: str, passwd: str) -> bool: + if pam.authenticate(username, passwd): + return True + return False + +def init_secret(): + length = int(state.settings['app'].state.vyos_secret_len) + secret = token_hex(length) + state.settings['secret'] = secret + +def generate_token(user: str, passwd: str, secret: str, exp: int) -> dict: + if user is None or passwd is None: + return {} + if _check_passwd_pam(user, passwd): + app = state.settings['app'] + try: + users = app.state.vyos_token_users + except AttributeError: + app.state.vyos_token_users = {} + users = app.state.vyos_token_users + user_id = uuid.uuid1().hex + payload_data = {'iss': user, 'sub': user_id, 'exp': exp} + secret = state.settings.get('secret') + if secret is None: + return {"errors": ['missing secret']} + token = jwt.encode(payload=payload_data, key=secret, algorithm="HS256") + + users |= {user_id: user} + return {'token': token} + else: + return {"errors": ['failed pam authentication']} + +def get_user_context(request): + context = {} + context['request'] = request + context['user'] = None + if 'Authorization' in request.headers: + auth = request.headers['Authorization'] + scheme, token = auth.split() + if scheme.lower() != 'bearer': + return context + + try: + secret = state.settings.get('secret') + payload = jwt.decode(token, secret, algorithms=["HS256"]) + user_id: str = payload.get('sub') + if user_id is None: + return context + except jwt.exceptions.ExpiredSignatureError: + context['error'] = 'expired token' + return context + except jwt.PyJWTError: + return context + try: + users = state.settings['app'].state.vyos_token_users + except AttributeError: + return context + + user = users.get(user_id) + if user is not None: + context['user'] = user + + return context diff --git a/src/services/api/graphql/session/__init__.py b/src/services/api/graphql/session/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/src/services/api/graphql/session/__init__.py diff --git a/src/services/api/graphql/session/composite/system_status.py b/src/services/api/graphql/session/composite/system_status.py new file mode 100644 index 0000000..516a4ef --- /dev/null +++ b/src/services/api/graphql/session/composite/system_status.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2022-2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +from api.graphql.libs.op_mode import load_op_mode_as_module + +def get_system_version() -> dict: + show_version = load_op_mode_as_module('version.py') + return show_version.show(raw=True, funny=False) + +def get_system_uptime() -> dict: + show_uptime = load_op_mode_as_module('uptime.py') + return show_uptime._get_raw_data() + +def get_system_ram_usage() -> dict: + show_ram = load_op_mode_as_module('memory.py') + return show_ram.show(raw=True) diff --git a/src/services/api/graphql/session/errors/op_mode_errors.py b/src/services/api/graphql/session/errors/op_mode_errors.py new file mode 100644 index 0000000..8007672 --- /dev/null +++ b/src/services/api/graphql/session/errors/op_mode_errors.py @@ -0,0 +1,19 @@ +op_mode_err_msg = { + "UnconfiguredSubsystem": "subsystem is not configured or not running", + "UnconfiguredObject": "object does not exist in the system configuration", + "DataUnavailable": "data currently unavailable", + "PermissionDenied": "client does not have permission", + "InsufficientResources": "insufficient system resources", + "IncorrectValue": "argument value is incorrect", + "UnsupportedOperation": "operation is not supported (yet)", +} + +op_mode_err_code = { + "UnconfiguredSubsystem": 2000, + "UnconfiguredObject": 2003, + "DataUnavailable": 2001, + "InsufficientResources": 2002, + "PermissionDenied": 1003, + "IncorrectValue": 1002, + "UnsupportedOperation": 1004, +} diff --git a/src/services/api/graphql/session/override/remove_firewall_address_group_members.py b/src/services/api/graphql/session/override/remove_firewall_address_group_members.py new file mode 100644 index 0000000..b91932e --- /dev/null +++ b/src/services/api/graphql/session/override/remove_firewall_address_group_members.py @@ -0,0 +1,35 @@ +# Copyright 2021 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +from . 
session import Session + +class RemoveFirewallAddressGroupMembers(Session): + def __init__(self, session, data): + super().__init__(session, data) + + # Define any custom processing of parameters here by overriding + # configure: + # + # def configure(self): + # self._data = transform_data(self._data) + # super().configure() + # self.clean_up() + + def configure(self): + super().configure() + + group_name = self._data['name'] + path = ['firewall', 'group', 'address-group', group_name] + self.delete_path_if_childless(path) diff --git a/src/services/api/graphql/session/session.py b/src/services/api/graphql/session/session.py new file mode 100644 index 0000000..6ae44b9 --- /dev/null +++ b/src/services/api/graphql/session/session.py @@ -0,0 +1,211 @@ +# Copyright 2021-2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +import os +import json + +from ariadne import convert_camel_case_to_snake + +from vyos.config import Config +from vyos.configtree import ConfigTree +from vyos.defaults import directories +from vyos.opmode import Error as OpModeError + +from api.graphql.libs.op_mode import load_op_mode_as_module, split_compound_op_mode_name +from api.graphql.libs.op_mode import normalize_output + +op_mode_include_file = os.path.join(directories['data'], 'op-mode-standardized.json') + +def get_config_dict(path=[], effective=False, key_mangling=None, + get_first_key=False, no_multi_convert=False, + no_tag_node_value_mangle=False): + config = Config() + return config.get_config_dict(path=path, effective=effective, + key_mangling=key_mangling, + get_first_key=get_first_key, + no_multi_convert=no_multi_convert, + no_tag_node_value_mangle=no_tag_node_value_mangle) + +def get_user_info(user): + user_info = {} + info = get_config_dict(['system', 'login', 'user', user], + get_first_key=True) + if not info: + raise ValueError("No such user") + + user_info['user'] = user + user_info['full_name'] = info.get('full-name', '') + + return user_info + +class Session: + """ + Wrapper for calling configsession functions based on GraphQL requests. + Non-nullable fields in the respective schema allow avoiding a key check + in 'data'. 
+ """ + def __init__(self, session, data): + self._session = session + self._data = data + self._name = convert_camel_case_to_snake(type(self).__name__) + + try: + with open(op_mode_include_file) as f: + self._op_mode_list = json.loads(f.read()) + except Exception: + self._op_mode_list = None + + def show_config(self): + session = self._session + data = self._data + out = '' + + try: + out = session.show_config(data['path']) + if data.get('config_format', '') == 'json': + config_tree = ConfigTree(out) + out = json.loads(config_tree.to_json()) + except Exception as error: + raise error + + return out + + def save_config_file(self): + session = self._session + data = self._data + if 'file_name' not in data or not data['file_name']: + data['file_name'] = '/config/config.boot' + + try: + session.save_config(data['file_name']) + except Exception as error: + raise error + + def load_config_file(self): + session = self._session + data = self._data + + try: + session.load_config(data['file_name']) + session.commit() + except Exception as error: + raise error + + def show(self): + session = self._session + data = self._data + out = '' + + try: + out = session.show(data['path']) + except Exception as error: + raise error + + return out + + def add_system_image(self): + session = self._session + data = self._data + + try: + res = session.install_image(data['location']) + except Exception as error: + raise error + + return res + + def delete_system_image(self): + session = self._session + data = self._data + + try: + res = session.remove_image(data['name']) + except Exception as error: + raise error + + return res + + def show_user_info(self): + session = self._session + data = self._data + + user_info = {} + user = data['user'] + try: + user_info = get_user_info(user) + except Exception as error: + raise error + + return user_info + + def system_status(self): + import api.graphql.session.composite.system_status as system_status + + session = self._session + data = self._data + + status = {} + status['host_name'] = session.show(['host', 'name']).strip() + status['version'] = system_status.get_system_version() + status['uptime'] = system_status.get_system_uptime() + status['ram'] = system_status.get_system_ram_usage() + + return status + + def gen_op_query(self): + session = self._session + data = self._data + name = self._name + op_mode_list = self._op_mode_list + + # handle the case that the op-mode file contains underscores: + if op_mode_list is None: + raise FileNotFoundError(f"No op-mode file list at '{op_mode_include_file}'") + (func_name, scriptname) = split_compound_op_mode_name(name, op_mode_list) + if scriptname == '': + raise FileNotFoundError(f"No op-mode file named in string '{name}'") + + mod = load_op_mode_as_module(f'{scriptname}') + func = getattr(mod, func_name) + try: + res = func(True, **data) + except OpModeError as e: + raise e + + res = normalize_output(res) + + return res + + def gen_op_mutation(self): + session = self._session + data = self._data + name = self._name + op_mode_list = self._op_mode_list + + # handle the case that the op-mode file name contains underscores: + if op_mode_list is None: + raise FileNotFoundError(f"No op-mode file list at '{op_mode_include_file}'") + (func_name, scriptname) = split_compound_op_mode_name(name, op_mode_list) + if scriptname == '': + raise FileNotFoundError(f"No op-mode file named in string '{name}'") + + mod = load_op_mode_as_module(f'{scriptname}') + func = getattr(mod, func_name) + try: + res = func(**data) + except OpModeError as e: 
+ raise e + + return res diff --git a/src/services/api/graphql/session/templates/create_dhcp_server.tmpl b/src/services/api/graphql/session/templates/create_dhcp_server.tmpl new file mode 100644 index 0000000..70de431 --- /dev/null +++ b/src/services/api/graphql/session/templates/create_dhcp_server.tmpl @@ -0,0 +1,9 @@ +set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} default-router {{ default_router }} +set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} name-server {{ name_server }} +set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} domain-name {{ domain_name }} +set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} lease {{ lease }} +set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} range {{ range }} start {{ start }} +set service dhcp-server shared-network-name {{ shared_network_name }} subnet {{ subnet }} range {{ range }} stop {{ stop }} +set service dns forwarding allow-from {{ dns_forwarding_allow_from }} +set service dns forwarding cache-size {{ dns_forwarding_cache_size }} +set service dns forwarding listen-address {{ dns_forwarding_listen_address }} diff --git a/src/services/api/graphql/session/templates/create_firewall_address_group.tmpl b/src/services/api/graphql/session/templates/create_firewall_address_group.tmpl new file mode 100644 index 0000000..a890d00 --- /dev/null +++ b/src/services/api/graphql/session/templates/create_firewall_address_group.tmpl @@ -0,0 +1,4 @@ +set firewall group address-group {{ name }} +{% for add in address %} +set firewall group address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/session/templates/create_firewall_address_ipv_6_group.tmpl b/src/services/api/graphql/session/templates/create_firewall_address_ipv_6_group.tmpl new file mode 100644 index 0000000..e9b6607 --- /dev/null +++ b/src/services/api/graphql/session/templates/create_firewall_address_ipv_6_group.tmpl @@ -0,0 +1,4 @@ +set firewall group ipv6-address-group {{ name }} +{% for add in address %} +set firewall group ipv6-address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/session/templates/create_interface_ethernet.tmpl b/src/services/api/graphql/session/templates/create_interface_ethernet.tmpl new file mode 100644 index 0000000..d9d7ed6 --- /dev/null +++ b/src/services/api/graphql/session/templates/create_interface_ethernet.tmpl @@ -0,0 +1,5 @@ +{% if replace %} +delete interfaces ethernet {{ interface }} address +{% endif %} +set interfaces ethernet {{ interface }} address {{ address }} +set interfaces ethernet {{ interface }} description {{ description }} diff --git a/src/services/api/graphql/session/templates/remove_firewall_address_group_members.tmpl b/src/services/api/graphql/session/templates/remove_firewall_address_group_members.tmpl new file mode 100644 index 0000000..458f3e5 --- /dev/null +++ b/src/services/api/graphql/session/templates/remove_firewall_address_group_members.tmpl @@ -0,0 +1,3 @@ +{% for add in address %} +delete firewall group address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/session/templates/remove_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/session/templates/remove_firewall_address_ipv_6_group_members.tmpl new file mode 100644 index 0000000..0efa0b2 --- /dev/null +++ 
b/src/services/api/graphql/session/templates/remove_firewall_address_ipv_6_group_members.tmpl @@ -0,0 +1,3 @@ +{% for add in address %} +delete firewall group ipv6-address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/session/templates/update_firewall_address_group_members.tmpl b/src/services/api/graphql/session/templates/update_firewall_address_group_members.tmpl new file mode 100644 index 0000000..f56c612 --- /dev/null +++ b/src/services/api/graphql/session/templates/update_firewall_address_group_members.tmpl @@ -0,0 +1,3 @@ +{% for add in address %} +set firewall group address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/session/templates/update_firewall_address_ipv_6_group_members.tmpl b/src/services/api/graphql/session/templates/update_firewall_address_ipv_6_group_members.tmpl new file mode 100644 index 0000000..f98a551 --- /dev/null +++ b/src/services/api/graphql/session/templates/update_firewall_address_ipv_6_group_members.tmpl @@ -0,0 +1,3 @@ +{% for add in address %} +set firewall group ipv6-address-group {{ name }} address {{ add }} +{% endfor %} diff --git a/src/services/api/graphql/state.py b/src/services/api/graphql/state.py new file mode 100644 index 0000000..63db9f4 --- /dev/null +++ b/src/services/api/graphql/state.py @@ -0,0 +1,4 @@ + +def init(): + global settings + settings = {} diff --git a/src/services/vyos-configd b/src/services/vyos-configd new file mode 100644 index 0000000..cb23642 --- /dev/null +++ b/src/services/vyos-configd @@ -0,0 +1,340 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2020-2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
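The Jinja2 session templates above expand their snake_case variables into
configuration mode 'set'/'delete' commands. A minimal sketch of that
expansion for create_firewall_address_group.tmpl, not part of this commit
and assuming plain jinja2 with made-up input data:

from jinja2 import Template

# body of create_firewall_address_group.tmpl, inlined for illustration
tmpl = Template(
    "set firewall group address-group {{ name }}\n"
    "{% for add in address %}"
    "set firewall group address-group {{ name }} address {{ add }}\n"
    "{% endfor %}"
)

data = {'name': 'ADDR-GRP', 'address': ['10.0.0.1', '192.168.0.1']}
commands = [c for c in tmpl.render(data).splitlines() if c]
# commands ==
# ['set firewall group address-group ADDR-GRP',
#  'set firewall group address-group ADDR-GRP address 10.0.0.1',
#  'set firewall group address-group ADDR-GRP address 192.168.0.1']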
+ +# pylint: disable=redefined-outer-name + +import os +import sys +import grp +import re +import json +import typing +import logging +import signal +import traceback +import importlib.util +import io +from contextlib import redirect_stdout + +import zmq + +from vyos.defaults import directories +from vyos.utils.boot import boot_configuration_complete +from vyos.configsource import ConfigSourceString +from vyos.configsource import ConfigSourceError +from vyos.configdiff import get_commit_scripts +from vyos.config import Config +from vyos import ConfigError + +CFG_GROUP = 'vyattacfg' + +script_stdout_log = '/tmp/vyos-configd-script-stdout' + +debug = True + +logger = logging.getLogger(__name__) +logs_handler = logging.StreamHandler() +logger.addHandler(logs_handler) + +if debug: + logger.setLevel(logging.DEBUG) +else: + logger.setLevel(logging.INFO) + +SOCKET_PATH = 'ipc:///run/vyos-configd.sock' +MAX_MSG_SIZE = 65535 + +# Response error codes +R_SUCCESS = 1 +R_ERROR_COMMIT = 2 +R_ERROR_DAEMON = 4 +R_PASS = 8 + +vyos_conf_scripts_dir = directories['conf_mode'] +configd_include_file = os.path.join(directories['data'], 'configd-include.json') +configd_env_set_file = os.path.join(directories['data'], 'vyos-configd-env-set') +configd_env_unset_file = os.path.join(directories['data'], 'vyos-configd-env-unset') +# sourced on entering config session +configd_env_file = '/etc/default/vyos-configd-env' + +def key_name_from_file_name(f): + return os.path.splitext(f)[0] + +def module_name_from_key(k): + return k.replace('-', '_') + +def path_from_file_name(f): + return os.path.join(vyos_conf_scripts_dir, f) + + +# opt-in to be run by daemon +with open(configd_include_file) as f: + try: + include = json.load(f) + except OSError as e: + logger.critical(f'configd include file error: {e}') + sys.exit(1) + except json.JSONDecodeError as e: + logger.critical(f'JSON load error: {e}') + sys.exit(1) + + +# import conf_mode scripts +(_, _, filenames) = next(iter(os.walk(vyos_conf_scripts_dir))) +filenames.sort() + +load_filenames = [f for f in filenames if f in include] +imports = [key_name_from_file_name(f) for f in load_filenames] +module_names = [module_name_from_key(k) for k in imports] +paths = [path_from_file_name(f) for f in load_filenames] +to_load = list(zip(module_names, paths)) + +modules = [] + +for x in to_load: + spec = importlib.util.spec_from_file_location(x[0], x[1]) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + modules.append(module) + +conf_mode_scripts = dict(zip(imports, modules)) + +exclude_set = {key_name_from_file_name(f) for f in filenames if f not in include} +include_set = {key_name_from_file_name(f) for f in filenames if f in include} + + +def write_stdout_log(file_name, msg): + if boot_configuration_complete(): + return + with open(file_name, 'a') as f: + f.write(msg) + + +def run_script(script_name, config, args) -> tuple[int, str]: + # pylint: disable=broad-exception-caught + + script = conf_mode_scripts[script_name] + script.argv = args + config.set_level([]) + try: + c = script.get_config(config) + script.verify(c) + script.generate(c) + script.apply(c) + except ConfigError as e: + logger.error(e) + return R_ERROR_COMMIT, str(e) + except Exception: + tb = traceback.format_exc() + logger.error(tb) + return R_ERROR_COMMIT, tb + + return R_SUCCESS, '' + + +def initialization(socket): + # pylint: disable=broad-exception-caught,too-many-locals + + # Reset config strings: + active_string = '' + session_string = '' + # check first for resent init 
msg, in case of client timeout + while True: + msg = socket.recv().decode('utf-8', 'ignore') + try: + message = json.loads(msg) + if message['type'] == 'init': + resp = 'init' + socket.send(resp.encode()) + except Exception: + break + + # zmq synchronous for ipc from single client: + active_string = msg + resp = 'active' + socket.send(resp.encode()) + session_string = socket.recv().decode('utf-8', 'ignore') + resp = 'session' + socket.send(resp.encode()) + pid_string = socket.recv().decode('utf-8', 'ignore') + resp = 'pid' + socket.send(resp.encode()) + sudo_user_string = socket.recv().decode('utf-8', 'ignore') + resp = 'sudo_user' + socket.send(resp.encode()) + temp_config_dir_string = socket.recv().decode('utf-8', 'ignore') + resp = 'temp_config_dir' + socket.send(resp.encode()) + changes_only_dir_string = socket.recv().decode('utf-8', 'ignore') + resp = 'changes_only_dir' + socket.send(resp.encode()) + + logger.debug(f'config session pid is {pid_string}') + logger.debug(f'config session sudo_user is {sudo_user_string}') + + os.environ['SUDO_USER'] = sudo_user_string + if temp_config_dir_string: + os.environ['VYATTA_TEMP_CONFIG_DIR'] = temp_config_dir_string + if changes_only_dir_string: + os.environ['VYATTA_CHANGES_ONLY_DIR'] = changes_only_dir_string + + try: + configsource = ConfigSourceString(running_config_text=active_string, + session_config_text=session_string) + except ConfigSourceError as e: + logger.debug(e) + return None + + config = Config(config_source=configsource) + dependent_func: dict[str, list[typing.Callable]] = {} + setattr(config, 'dependent_func', dependent_func) + + commit_scripts = get_commit_scripts(config) + logger.debug(f'commit_scripts: {commit_scripts}') + + scripts_called = [] + setattr(config, 'scripts_called', scripts_called) + + return config + + +def process_node_data(config, data, _last: bool = False) -> tuple[int, str]: + if not config: + out = 'Empty config' + logger.critical(out) + return R_ERROR_DAEMON, out + + script_name = None + os.environ['VYOS_TAGNODE_VALUE'] = '' + args = [] + config.dependency_list.clear() + + res = re.match(r'^(VYOS_TAGNODE_VALUE=[^/]+)?.*\/([^/]+).py(.*)', data) + if res.group(1): + env = res.group(1).split('=') + os.environ[env[0]] = env[1] + if res.group(2): + script_name = res.group(2) + if not script_name: + out = 'Missing script_name' + logger.critical(out) + return R_ERROR_DAEMON, out + if res.group(3): + args = res.group(3).split() + args.insert(0, f'{script_name}.py') + + tag_value = os.getenv('VYOS_TAGNODE_VALUE', '') + tag_ext = f'_{tag_value}' if tag_value else '' + script_record = f'{script_name}{tag_ext}' + scripts_called = getattr(config, 'scripts_called', []) + scripts_called.append(script_record) + + if script_name not in include_set: + return R_PASS, '' + + with redirect_stdout(io.StringIO()) as o: + result, err_out = run_script(script_name, config, args) + amb_out = o.getvalue() + o.close() + + out = amb_out + err_out + + return result, out + + +def send_result(sock, err, msg): + msg_size = min(MAX_MSG_SIZE, len(msg)) if msg else 0 + + err_rep = err.to_bytes(1, byteorder=sys.byteorder) + logger.debug(f'Sending reply: {err}') + sock.send(err_rep) + + # size req from vyshim client + size_req = sock.recv().decode() + logger.debug(f'Received request: {size_req}') + msg_size_rep = hex(msg_size).encode() + sock.send(msg_size_rep) + logger.debug(f'Sending reply: {msg_size}') + + if msg_size > 0: + # send req is sent from vyshim client only if msg_size > 0 + send_req = sock.recv().decode() + 
logger.debug(f'Received request: {send_req}') + sock.send(msg.encode()) + logger.debug('Sending reply with output') + + write_stdout_log(script_stdout_log, msg) + + +def remove_if_file(f: str): + try: + os.remove(f) + except FileNotFoundError: + pass + + +def shutdown(): + remove_if_file(configd_env_file) + os.symlink(configd_env_unset_file, configd_env_file) + sys.exit(0) + + +if __name__ == '__main__': + context = zmq.Context() + socket = context.socket(zmq.REP) + + # Set the right permissions on the socket, then change it back + o_mask = os.umask(0) + socket.bind(SOCKET_PATH) + os.umask(o_mask) + + cfg_group = grp.getgrnam(CFG_GROUP) + os.setgid(cfg_group.gr_gid) + + os.environ['VYOS_CONFIGD'] = 't' + + def sig_handler(signum, frame): + # pylint: disable=unused-argument + shutdown() + + signal.signal(signal.SIGTERM, sig_handler) + signal.signal(signal.SIGINT, sig_handler) + + # Define the vyshim environment variable + remove_if_file(configd_env_file) + os.symlink(configd_env_set_file, configd_env_file) + + config = None + + while True: + # Wait for next request from client + msg = socket.recv().decode() + logger.debug(f'Received message: {msg}') + message = json.loads(msg) + + if message['type'] == 'init': + resp = 'init' + socket.send(resp.encode()) + config = initialization(socket) + elif message['type'] == 'node': + res, out = process_node_data(config, message['data'], message['last']) + send_result(socket, res, out) + + if message['last'] and config: + scripts_called = getattr(config, 'scripts_called', []) + logger.debug(f'scripts_called: {scripts_called}') + else: + logger.critical(f'Unexpected message: {message}') diff --git a/src/services/vyos-conntrack-logger b/src/services/vyos-conntrack-logger new file mode 100644 index 0000000..9c31b46 --- /dev/null +++ b/src/services/vyos-conntrack-logger @@ -0,0 +1,458 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
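A minimal sketch of the client side of the vyos-configd exchange implemented
by initialization(), send_result() and the main loop above; it is not part of
this commit, and the config text, pid and conf_mode script path are made up:

import json
import sys
import zmq

context = zmq.Context()
sock = context.socket(zmq.REQ)
sock.connect('ipc:///run/vyos-configd.sock')

# handshake: 'init', then active config, session config, pid, sudo_user,
# temp_config_dir and changes_only_dir, each acknowledged by the daemon
sock.send(json.dumps({'type': 'init'}).encode())
assert sock.recv().decode() == 'init'
for value in ('<active config text>', '<session config text>',
              '12345', 'vyos', '', ''):
    sock.send(value.encode())
    sock.recv()    # 'active', 'session', 'pid', 'sudo_user', ...

# ask the daemon to run one conf_mode script (hypothetical path)
node = {'type': 'node', 'last': True,
        'data': '/path/to/conf_mode/example.py'}
sock.send(json.dumps(node).encode())
status = int.from_bytes(sock.recv(), byteorder=sys.byteorder)  # R_SUCCESS, R_PASS, ...

sock.send(b'size')                          # ask for the length of any script output
out_size = int(sock.recv().decode(), 16)    # daemon replies with a hex-encoded size
if out_size:
    sock.send(b'send')                      # ask for the output itself
    print(sock.recv().decode())

The strict send/receive alternation matters: the daemon's REP socket answers
every frame, returning the hex-encoded size of any script output before the
output itself.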
+ +import argparse +import grp +import logging +import multiprocessing +import os +import queue +import signal +import socket +import threading +from datetime import timedelta +from pathlib import Path +from time import sleep +from typing import Dict, AnyStr + +from pyroute2 import conntrack +from pyroute2.netlink import nfnetlink +from pyroute2.netlink.nfnetlink import NFNL_SUBSYS_CTNETLINK +from pyroute2.netlink.nfnetlink.nfctsocket import nfct_msg, \ + IPCTNL_MSG_CT_DELETE, IPCTNL_MSG_CT_NEW, IPS_SEEN_REPLY, \ + IPS_OFFLOAD, IPS_ASSURED + +from vyos.utils.file import read_json + + +shutdown_event = multiprocessing.Event() + +logging.basicConfig(level=logging.INFO, format='%(message)s') +logger = logging.getLogger(__name__) + + +class DebugFormatter(logging.Formatter): + def format(self, record): + self._style._fmt = '[%(asctime)s] %(levelname)s: %(message)s' + return super().format(record) + + +def set_log_level(level: str) -> None: + if level == 'debug': + logger.setLevel(logging.DEBUG) + logger.parent.handlers[0].setFormatter(DebugFormatter()) + else: + logger.setLevel(logging.INFO) + + +EVENT_NAME_TO_GROUP = { + 'new': nfnetlink.NFNLGRP_CONNTRACK_NEW, + 'update': nfnetlink.NFNLGRP_CONNTRACK_UPDATE, + 'destroy': nfnetlink.NFNLGRP_CONNTRACK_DESTROY +} + +# https://github.com/torvalds/linux/blob/1dfe225e9af5bd3399a1dbc6a4df6a6041ff9c23/include/uapi/linux/netfilter/nf_conntrack_tcp.h#L9 +TCP_CONNTRACK_SYN_SENT = 1 +TCP_CONNTRACK_SYN_RECV = 2 +TCP_CONNTRACK_ESTABLISHED = 3 +TCP_CONNTRACK_FIN_WAIT = 4 +TCP_CONNTRACK_CLOSE_WAIT = 5 +TCP_CONNTRACK_LAST_ACK = 6 +TCP_CONNTRACK_TIME_WAIT = 7 +TCP_CONNTRACK_CLOSE = 8 +TCP_CONNTRACK_LISTEN = 9 +TCP_CONNTRACK_MAX = 10 +TCP_CONNTRACK_IGNORE = 11 +TCP_CONNTRACK_RETRANS = 12 +TCP_CONNTRACK_UNACK = 13 +TCP_CONNTRACK_TIMEOUT_MAX = 14 + +TCP_CONNTRACK_TO_NAME = { + TCP_CONNTRACK_SYN_SENT: "SYN_SENT", + TCP_CONNTRACK_SYN_RECV: "SYN_RECV", + TCP_CONNTRACK_ESTABLISHED: "ESTABLISHED", + TCP_CONNTRACK_FIN_WAIT: "FIN_WAIT", + TCP_CONNTRACK_CLOSE_WAIT: "CLOSE_WAIT", + TCP_CONNTRACK_LAST_ACK: "LAST_ACK", + TCP_CONNTRACK_TIME_WAIT: "TIME_WAIT", + TCP_CONNTRACK_CLOSE: "CLOSE", + TCP_CONNTRACK_LISTEN: "LISTEN", + TCP_CONNTRACK_MAX: "MAX", + TCP_CONNTRACK_IGNORE: "IGNORE", + TCP_CONNTRACK_RETRANS: "RETRANS", + TCP_CONNTRACK_UNACK: "UNACK", + TCP_CONNTRACK_TIMEOUT_MAX: "TIMEOUT_MAX", +} + +# https://github.com/torvalds/linux/blob/1dfe225e9af5bd3399a1dbc6a4df6a6041ff9c23/include/uapi/linux/netfilter/nf_conntrack_sctp.h#L8 +SCTP_CONNTRACK_CLOSED = 1 +SCTP_CONNTRACK_COOKIE_WAIT = 2 +SCTP_CONNTRACK_COOKIE_ECHOED = 3 +SCTP_CONNTRACK_ESTABLISHED = 4 +SCTP_CONNTRACK_SHUTDOWN_SENT = 5 +SCTP_CONNTRACK_SHUTDOWN_RECD = 6 +SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7 +SCTP_CONNTRACK_HEARTBEAT_SENT = 8 +SCTP_CONNTRACK_HEARTBEAT_ACKED = 9 # no longer used +SCTP_CONNTRACK_MAX = 10 + +SCTP_CONNTRACK_TO_NAME = { + SCTP_CONNTRACK_CLOSED: 'CLOSED', + SCTP_CONNTRACK_COOKIE_WAIT: 'COOKIE_WAIT', + SCTP_CONNTRACK_COOKIE_ECHOED: 'COOKIE_ECHOED', + SCTP_CONNTRACK_ESTABLISHED: 'ESTABLISHED', + SCTP_CONNTRACK_SHUTDOWN_SENT: 'SHUTDOWN_SENT', + SCTP_CONNTRACK_SHUTDOWN_RECD: 'SHUTDOWN_RECD', + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT: 'SHUTDOWN_ACK_SENT', + SCTP_CONNTRACK_HEARTBEAT_SENT: 'HEARTBEAT_SENT', + SCTP_CONNTRACK_HEARTBEAT_ACKED: 'HEARTBEAT_ACKED', + SCTP_CONNTRACK_MAX: 'MAX', +} + +PROTO_CONNTRACK_TO_NAME = { + 'TCP': TCP_CONNTRACK_TO_NAME, + 'SCTP': SCTP_CONNTRACK_TO_NAME +} + +SUPPORTED_PROTO_TO_NAME = { + socket.IPPROTO_ICMP: 'icmp', + socket.IPPROTO_TCP: 'tcp', + socket.IPPROTO_UDP: 'udp', +} + 
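A minimal sketch, not part of this commit, of how the lookup tables above
resolve a protocol number and a conntrack TCP state to the names used later
when formatting log lines (the sample values are made up):

import socket

proto_num = socket.IPPROTO_TCP
state = TCP_CONNTRACK_ESTABLISHED

proto_name = SUPPORTED_PROTO_TO_NAME.get(proto_num, 'other')    # 'tcp'
state_name = PROTO_CONNTRACK_TO_NAME.get(proto_name.upper(), {}).get(state, 'unknown')
# state_name == 'ESTABLISHED'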
+PROTO_TO_NAME = { + socket.IPPROTO_ICMPV6: 'icmpv6', + socket.IPPROTO_SCTP: 'sctp', + socket.IPPROTO_GRE: 'gre', +} + +PROTO_TO_NAME.update(SUPPORTED_PROTO_TO_NAME) + + +def sig_handler(signum, frame): + process_name = multiprocessing.current_process().name + logger.debug(f'[{process_name}]: {"Shutdown" if signum == signal.SIGTERM else "Reload"} signal received...') + shutdown_event.set() + + +def format_flow_data(data: Dict) -> AnyStr: + """ + Formats the flow event data into a string suitable for logging. + """ + key_format = { + 'SRC_PORT': 'sport', + 'DST_PORT': 'dport' + } + message = f"src={data['ADDR'].get('SRC')} dst={data['ADDR'].get('DST')}" + + for key in ['SRC_PORT', 'DST_PORT', 'TYPE', 'CODE', 'ID']: + tmp = data['PROTO'].get(key) + if tmp is not None: + key = key_format.get(key, key) + message += f" {key.lower()}={tmp}" + + if 'COUNTERS' in data: + for key in ['PACKETS', 'BYTES']: + tmp = data['COUNTERS'].get(key) + if tmp is not None: + message += f" {key.lower()}={tmp}" + + return message + + +def format_event_message(event: Dict) -> AnyStr: + """ + Formats the internal parsed event data into a string suitable for logging. + """ + event_type = f"[{event['COMMON']['EVENT_TYPE'].upper()}]" + message = f"{event_type:<{9}} {event['COMMON']['ID']} " \ + f"{event['ORIG']['PROTO'].get('NAME'):<{8}} " \ + f"{event['ORIG']['PROTO'].get('NUMBER')} " + + tmp = event['COMMON']['TIME_OUT'] + if tmp is not None: message += f"{tmp} " + + if proto_info := event['COMMON'].get('PROTO_INFO'): + message += f"{proto_info.get('STATE_NAME')} " + + for key in ['ORIG', 'REPLY']: + message += f"{format_flow_data(event[key])} " + if key == 'ORIG' and not (event['COMMON']['STATUS'] & IPS_SEEN_REPLY): + message += f"[UNREPLIED] " + + tmp = event['COMMON']['MARK'] + if tmp is not None: message += f"mark={tmp} " + + if event['COMMON']['STATUS'] & IPS_OFFLOAD: message += f" [OFFLOAD] " + elif event['COMMON']['STATUS'] & IPS_ASSURED: message += f" [ASSURED] " + + if tmp := event['COMMON']['PORTID']: message += f"portid={tmp} " + if tstamp := event['COMMON'].get('TIMESTAMP'): + message += f"start={tstamp['START']} stop={tstamp['STOP']} " + delta_ns = tstamp['STOP'] - tstamp['START'] + delta_s = delta_ns // 1e9 + remaining_ns = delta_ns % 1e9 + delta = timedelta(seconds=delta_s, microseconds=remaining_ns / 1000) + message += f"delta={delta.total_seconds()} " + + return message + + +def parse_event_type(header: Dict) -> AnyStr: + """ + Extract event type from nfct_msg. new, update, destroy + """ + event_type = 'unknown' + if header['type'] == IPCTNL_MSG_CT_DELETE | (NFNL_SUBSYS_CTNETLINK << 8): + event_type = 'destroy' + elif header['type'] == IPCTNL_MSG_CT_NEW | (NFNL_SUBSYS_CTNETLINK << 8): + event_type = 'update' + if header['flags']: + event_type = 'new' + return event_type + + +def parse_proto(cta: nfct_msg.cta_tuple) -> Dict: + """ + Extract proto info from nfct_msg. 
src/dst port, code, type, id + """ + data = dict() + + cta_proto = cta.get_attr('CTA_TUPLE_PROTO') + proto_num = cta_proto.get_attr('CTA_PROTO_NUM') + + data['NUMBER'] = proto_num + data['NAME'] = PROTO_TO_NAME.get(proto_num, 'unknown') + + if proto_num in (socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6): + pref = 'CTA_PROTO_ICMP' + if proto_num == socket.IPPROTO_ICMPV6: pref += 'V6' + keys = ['TYPE', 'CODE', 'ID'] + else: + pref = 'CTA_PROTO' + keys = ['SRC_PORT', 'DST_PORT'] + + for key in keys: + data[key] = cta_proto.get_attr(f'{pref}_{key}') + + return data + + +def parse_proto_info(cta: nfct_msg.cta_protoinfo) -> Dict: + """ + Extract proto state and state name from nfct_msg + """ + data = dict() + if not cta: + return data + + for proto in ['TCP', 'SCTP']: + if proto_info := cta.get_attr(f'CTA_PROTOINFO_{proto}'): + data['STATE'] = proto_info.get_attr(f'CTA_PROTOINFO_{proto}_STATE') + data['STATE_NAME'] = PROTO_CONNTRACK_TO_NAME.get(proto, {}).get(data['STATE'], 'unknown') + return data + + +def parse_timestamp(cta: nfct_msg.cta_timestamp) -> Dict: + """ + Extract timestamp from nfct_msg + """ + data = dict() + if not cta: + return data + data['START'] = cta.get_attr('CTA_TIMESTAMP_START') + data['STOP'] = cta.get_attr('CTA_TIMESTAMP_STOP') + + return data + + +def parse_ip_addr(family: int, cta: nfct_msg.cta_tuple) -> Dict: + """ + Extract ip adr from nfct_msg + """ + data = dict() + cta_ip = cta.get_attr('CTA_TUPLE_IP') + + if family == socket.AF_INET: + pref = 'CTA_IP_V4' + elif family == socket.AF_INET6: + pref = 'CTA_IP_V6' + else: + logger.error(f'Undefined INET: {family}') + raise NotImplementedError(family) + + for direct in ['SRC', 'DST']: + data[direct] = cta_ip.get_attr(f'{pref}_{direct}') + + return data + + +def parse_counters(cta: nfct_msg.cta_counters) -> Dict: + """ + Extract counters from nfct_msg + """ + data = dict() + if not cta: + return data + + for key in ['PACKETS', 'BYTES']: + tmp = cta.get_attr(f'CTA_COUNTERS_{key}') + if tmp is None: + tmp = cta.get_attr(f'CTA_COUNTERS32_{key}') + data['key'] = tmp + + return data + + +def is_need_to_log(event_type: AnyStr, proto_num: int, conf_event: Dict): + """ + Filter message by event type and protocols + """ + conf = conf_event.get(event_type) + if conf == {} or conf.get(SUPPORTED_PROTO_TO_NAME.get(proto_num, 'other')) is not None: + return True + return False + + +def parse_conntrack_event(msg: nfct_msg, conf_event: Dict) -> Dict: + """ + Convert nfct_msg to internal data dict. 
+ """ + data = dict() + event_type = parse_event_type(msg['header']) + proto_num = msg.get_nested('CTA_TUPLE_ORIG', 'CTA_TUPLE_PROTO', 'CTA_PROTO_NUM') + + if not is_need_to_log(event_type, proto_num, conf_event): + return data + + data = { + 'COMMON': { + 'ID': msg.get_attr('CTA_ID'), + 'EVENT_TYPE': event_type, + 'TIME_OUT': msg.get_attr('CTA_TIMEOUT'), + 'MARK': msg.get_attr('CTA_MARK'), + 'PORTID': msg['header'].get('pid'), + 'PROTO_INFO': parse_proto_info(msg.get_attr('CTA_PROTOINFO')), + 'STATUS': msg.get_attr('CTA_STATUS'), + 'TIMESTAMP': parse_timestamp(msg.get_attr('CTA_TIMESTAMP')) + }, + 'ORIG': {}, + 'REPLY': {}, + } + + for direct in ['ORIG', 'REPLY']: + data[direct]['ADDR'] = parse_ip_addr(msg['nfgen_family'], msg.get_attr(f'CTA_TUPLE_{direct}')) + data[direct]['PROTO'] = parse_proto(msg.get_attr(f'CTA_TUPLE_{direct}')) + data[direct]['COUNTERS'] = parse_counters(msg.get_attr(f'CTA_COUNTERS_{direct}')) + + return data + + +def worker(ct: conntrack.Conntrack, shutdown_event: multiprocessing.Event, conf_event: Dict): + """ + Main function of parser worker process + """ + process_name = multiprocessing.current_process().name + logger.debug(f'[{process_name}] started') + timeout = 0.1 + while not shutdown_event.is_set(): + if not ct.buffer_queue.empty(): + try: + for msg in ct.get(): + parsed_event = parse_conntrack_event(msg, conf_event) + if parsed_event: + message = format_event_message(parsed_event) + if logger.level == logging.DEBUG: + logger.debug(f"[{process_name}]: {message} raw: {msg}") + else: + logger.info(message) + except queue.Full: + logger.error("Conntrack message queue if full.") + except Exception as e: + logger.error(f"Error in queue: {e.__class__} {e}") + else: + sleep(timeout) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-c', + '--config', + action='store', + help='Path to vyos-conntrack-logger configuration', + required=True, + type=Path) + + args = parser.parse_args() + try: + config = read_json(args.config) + except Exception as err: + logger.error(f'Configuration file "{args.config}" does not exist or malformed: {err}') + exit(1) + + set_log_level(config.get('log_level', 'info')) + + signal.signal(signal.SIGHUP, sig_handler) + signal.signal(signal.SIGTERM, sig_handler) + + if 'event' in config: + event_groups = list(config.get('event').keys()) + else: + logger.error(f'Configuration is wrong. 
Event filter is empty.') + exit(1) + + conf_event = config['event'] + qsize = config.get('queue_size') + ct = conntrack.Conntrack(async_qsize=int(qsize) if qsize else None) + ct.buffer_queue = multiprocessing.Queue(ct.async_qsize) + ct.bind(async_cache=True) + + for name in event_groups: + if group := EVENT_NAME_TO_GROUP.get(name): + ct.add_membership(group) + else: + logger.error(f'Unexpected event group {name}') + processes = list() + try: + for _ in range(multiprocessing.cpu_count()): + p = multiprocessing.Process(target=worker, args=(ct, + shutdown_event, + conf_event)) + processes.append(p) + p.start() + logger.info('Conntrack socket bound and listening for messages.') + + while not shutdown_event.is_set(): + if not ct.pthread.is_alive(): + if ct.buffer_queue.qsize()/ct.async_qsize < 0.9: + if not shutdown_event.is_set(): + logger.debug('Restart listener thread') + # restart listener thread after queue overloaded when queue size low than 90% + ct.pthread = threading.Thread( + name="Netlink async cache", target=ct.async_recv + ) + ct.pthread.daemon = True + ct.pthread.start() + else: + sleep(0.1) + finally: + for p in processes: + p.join() + if not p.is_alive(): + logger.debug(f"[{p.name}]: finished") + ct.close() + logging.info("Conntrack socket closed.") + exit() diff --git a/src/services/vyos-hostsd b/src/services/vyos-hostsd new file mode 100644 index 0000000..1ba9047 --- /dev/null +++ b/src/services/vyos-hostsd @@ -0,0 +1,651 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2019-2023 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +######### +# USAGE # +######### +# This daemon listens on its socket for JSON messages. +# The received message format is: +# +# { 'type': '<message type>', +# 'op': '<message operation>', +# 'data': <data list or dict> +# } +# +# For supported message types, see below. +# 'op' can be 'add', delete', 'get', 'set' or 'apply'. +# Different message types support different sets of operations and different +# data formats. +# +# Changes to configuration made via add or delete don't take effect immediately, +# they are remembered in a state variable and saved to disk to a state file. +# State is remembered across daemon restarts but not across system reboots +# as it's saved in a temporary filesystem (/run). +# +# 'apply' is a special operation that applies the configuration from the cached +# state, rendering all config files and reloading relevant daemons (currently +# just pdns-recursor via rec-control). +# +# note: 'add' operation also acts as 'update' as it uses dict.update, if the +# 'data' dict item value is a dict. If it is a list, it uses list.append. +# +### tags +# Tags can be arbitrary, but they are generally in this format: +# 'static', 'system', 'dhcp(v6)-<intf>' or 'dhcp-server-<client ip>' +# They are used to distinguish entries created by different scripts so they can +# be removed and recreated without having to track what needs to be changed. 
+# They are also used as a way to control which tags settings (e.g. nameservers) +# get added to various config files via name_server_tags_(recursor|system) +# +### name_server_tags_(recursor|system) +# A list of tags whose nameservers and search domains is used to generate +# /etc/resolv.conf and pdns-recursor config. +# system list is used to generate resolv.conf. +# recursor list is used to generate pdns-rec forward-zones. +# When generating each file, the order of nameservers is as per the order of +# name_server_tags (the order in which tags were added), then the order in +# which the name servers for each tag were added. +# +#### Message types +# +### name_servers +# +# { 'type': 'name_servers', +# 'op': 'add', +# 'data': { +# '<str tag>': ['<str nameserver>', ...], +# ... +# } +# } +# +# { 'type': 'name_servers', +# 'op': 'delete', +# 'data': ['<str tag>', ...] +# } +# +# { 'type': 'name_servers', +# 'op': 'get', +# 'tag_regex': '<str regex>' +# } +# response: +# { 'data': { +# '<str tag>': ['<str nameserver>', ...], +# ... +# } +# } +# +### name_server_tags +# +# { 'type': 'name_server_tags', +# 'op': 'add', +# 'data': ['<str tag>', ...] +# } +# +# { 'type': 'name_server_tags', +# 'op': 'delete', +# 'data': ['<str tag>', ...] +# } +# +# { 'type': 'name_server_tags', +# 'op': 'get', +# } +# response: +# { 'data': ['<str tag>', ...] } +# +### forward_zones +## Additional zones added to pdns-recursor forward-zones-file. +## If recursion_desired is true, '+' will be prepended to the zone line. +## If addnta is true, a NTA (Negative Trust Anchor) will be added via +## lua-config-file. +# +# { 'type': 'forward_zones', +# 'op': 'add', +# 'data': { +# '<str zone>': { +# 'server': ['<str nameserver>', ...], +# 'addnta': <bool>, +# 'recursion_desired': <bool> +# } +# ... +# } +# } +# +# { 'type': 'forward_zones', +# 'op': 'delete', +# 'data': ['<str zone>', ...] +# } +# +# { 'type': 'forward_zones', +# 'op': 'get', +# } +# response: +# { 'data': { +# '<str zone>': { ... }, +# ... +# } +# } +# +# +### authoritative_zones +## Additional zones hosted authoritatively by pdns-recursor. +## We add NTAs for these zones but do not do much else here. +# +# { 'type': 'authoritative_zones', +# 'op': 'add', +# 'data': ['<str zone>', ...] +# } +# +# { 'type': 'authoritative_zones', +# 'op': 'delete', +# 'data': ['<str zone>', ...] +# } +# +# { 'type': 'authoritative_zones', +# 'op': 'get', +# } +# response: +# { 'data': ['<str zone>', ...] } +# +# +### search_domains +# +# { 'type': 'search_domains', +# 'op': 'add', +# 'data': { +# '<str tag>': ['<str domain>', ...], +# ... +# } +# } +# +# { 'type': 'search_domains', +# 'op': 'delete', +# 'data': ['<str tag>', ...] +# } +# +# { 'type': 'search_domains', +# 'op': 'get', +# } +# response: +# { 'data': { +# '<str tag>': ['<str domain>', ...], +# ... +# } +# } +# +### hosts +# +# { 'type': 'hosts', +# 'op': 'add', +# 'data': { +# '<str tag>': { +# '<str host>': { +# 'address': '<str address>', +# 'aliases': ['<str alias>, ...] +# }, +# ... +# }, +# ... +# } +# } +# +# { 'type': 'hosts', +# 'op': 'delete', +# 'data': ['<str tag>', ...] +# } +# +# { 'type': 'hosts', +# 'op': 'get' +# 'tag_regex': '<str regex>' +# } +# response: +# { 'data': { +# '<str tag>': { +# '<str host>': { +# 'address': '<str address>', +# 'aliases': ['<str alias>, ...] +# }, +# ... +# }, +# ... 
+# } +# } +### host_name +# +# { 'type': 'host_name', +# 'op': 'set', +# 'data': { +# 'host_name': '<str hostname>' +# 'domain_name': '<str domainname>' +# } +# } + +import os +import sys +import time +import json +import signal +import traceback +import re +import logging +import zmq + +from voluptuous import Schema, MultipleInvalid, Required, Any +from collections import OrderedDict +from vyos.utils.file import makedir +from vyos.utils.permission import chown +from vyos.utils.permission import chmod_755 +from vyos.utils.process import popen +from vyos.utils.process import process_named_running +from vyos.template import render + +debug = True + +# Configure logging +logger = logging.getLogger(__name__) +# set stream as output +logs_handler = logging.StreamHandler() +logger.addHandler(logs_handler) + +if debug: + logger.setLevel(logging.DEBUG) +else: + logger.setLevel(logging.INFO) + +RUN_DIR = "/run/vyos-hostsd" +STATE_FILE = os.path.join(RUN_DIR, "vyos-hostsd.state") +SOCKET_PATH = "ipc://" + os.path.join(RUN_DIR, 'vyos-hostsd.sock') + +RESOLV_CONF_FILE = '/etc/resolv.conf' +HOSTS_FILE = '/etc/hosts' + +PDNS_REC_USER_GROUP = 'pdns' +PDNS_REC_RUN_DIR = '/run/pdns-recursor' +PDNS_REC_LUA_CONF_FILE = f'{PDNS_REC_RUN_DIR}/recursor.vyos-hostsd.conf.lua' +PDNS_REC_ZONES_FILE = f'{PDNS_REC_RUN_DIR}/recursor.forward-zones.conf' + +STATE = { + "name_servers": {}, + "name_server_tags_recursor": [], + "name_server_tags_system": [], + "forward_zones": {}, + "authoritative_zones": [], + "hosts": {}, + "host_name": "vyos", + "domain_name": "", + "search_domains": {}, + "changes": 0 + } + +# the base schema that every received message must be in +base_schema = Schema({ + Required('op'): Any('add', 'delete', 'set', 'get', 'apply'), + 'type': Any('name_servers', + 'name_server_tags_recursor', 'name_server_tags_system', + 'forward_zones', 'authoritative_zones', 'search_domains', + 'hosts', 'host_name'), + 'data': Any(list, dict), + 'tag': str, + 'tag_regex': str + }) + +# more specific schemas +op_schema = Schema({ + 'op': str, + }, required=True) + +op_type_schema = op_schema.extend({ + 'type': str, + }, required=True) + +host_name_add_schema = op_type_schema.extend({ + 'data': { + 'host_name': str, + 'domain_name': Any(str, None) + } + }, required=True) + +data_dict_list_schema = op_type_schema.extend({ + 'data': { + str: [str] + } + }, required=True) + +data_list_schema = op_type_schema.extend({ + 'data': [str] + }, required=True) + +tag_regex_schema = op_type_schema.extend({ + 'tag_regex': str + }, required=True) + +forward_zone_add_schema = op_type_schema.extend({ + 'data': { + str: { + 'name_server': [str], + 'addnta': Any({}, None), + 'recursion_desired': Any({}, None), + } + } + }, required=False) + +hosts_add_schema = op_type_schema.extend({ + 'data': { + str: { + str: { + 'address': [str], + 'aliases': [str] + } + } + } + }, required=True) + + +# op and type to schema mapping +msg_schema_map = { + 'name_servers': { + 'add': data_dict_list_schema, + 'delete': data_list_schema, + 'get': tag_regex_schema + }, + 'name_server_tags_recursor': { + 'add': data_list_schema, + 'delete': data_list_schema, + 'get': op_type_schema + }, + 'name_server_tags_system': { + 'add': data_list_schema, + 'delete': data_list_schema, + 'get': op_type_schema + }, + 'forward_zones': { + 'add': forward_zone_add_schema, + 'delete': data_list_schema, + 'get': op_type_schema + }, + 'authoritative_zones': { + 'add': data_list_schema, + 'delete': data_list_schema, + 'get': op_type_schema + }, + 'search_domains': { + 'add': 
data_dict_list_schema, + 'delete': data_list_schema, + 'get': tag_regex_schema + }, + 'hosts': { + 'add': hosts_add_schema, + 'delete': data_list_schema, + 'get': tag_regex_schema + }, + 'host_name': { + 'set': host_name_add_schema + }, + None: { + 'apply': op_schema + } + } + +def validate_schema(data): + base_schema(data) + + try: + schema = msg_schema_map[data['type'] if 'type' in data else None][data['op']] + schema(data) + except KeyError: + raise ValueError(( + 'Invalid or unknown combination: ' + f'op: "{data["op"]}", type: "{data["type"]}"')) + + +def pdns_rec_control(command): + if not process_named_running('pdns_recursor'): + logger.info(f'pdns_recursor not running, not sending "{command}"') + return + + logger.info(f'Running "rec_control {command}"') + (ret,ret_code) = popen(( + f"rec_control --socket-dir={PDNS_REC_RUN_DIR} {command}")) + if ret_code > 0: + logger.exception(( + f'"rec_control {command}" failed with exit status {ret_code}, ' + f'output: "{ret}"')) + +def make_resolv_conf(state): + logger.info(f"Writing {RESOLV_CONF_FILE}") + render(RESOLV_CONF_FILE, 'vyos-hostsd/resolv.conf.j2', state, + user='root', group='root') + +def make_hosts(state): + logger.info(f"Writing {HOSTS_FILE}") + render(HOSTS_FILE, 'vyos-hostsd/hosts.j2', state, + user='root', group='root') + +def make_pdns_rec_conf(state): + logger.info(f"Writing {PDNS_REC_LUA_CONF_FILE}") + + # on boot, /run/pdns-recursor does not exist, so create it + makedir(PDNS_REC_RUN_DIR, user=PDNS_REC_USER_GROUP, group=PDNS_REC_USER_GROUP) + chmod_755(PDNS_REC_RUN_DIR) + + render(PDNS_REC_LUA_CONF_FILE, + 'dns-forwarding/recursor.vyos-hostsd.conf.lua.j2', + state, user=PDNS_REC_USER_GROUP, group=PDNS_REC_USER_GROUP) + + logger.info(f"Writing {PDNS_REC_ZONES_FILE}") + render(PDNS_REC_ZONES_FILE, + 'dns-forwarding/recursor.forward-zones.conf.j2', + state, user=PDNS_REC_USER_GROUP, group=PDNS_REC_USER_GROUP) + +def set_host_name(state, data): + if data['host_name']: + state['host_name'] = data['host_name'] + if 'domain_name' in data: + state['domain_name'] = data['domain_name'] + +def add_items_to_dict(_dict, items): + """ + Dedupes and preserves sort order. + """ + assert isinstance(_dict, dict) + assert isinstance(items, dict) + + if not items: + return + + _dict.update(items) + +def add_items_to_dict_as_keys(_dict, items): + """ + Added item values are converted to OrderedDict with the value as keys + and null values. This is to emulate a list but with inherent deduplication. + Dedupes and preserves sort order. + """ + assert isinstance(_dict, dict) + assert isinstance(items, dict) + + if not items: + return + + for item, item_val in items.items(): + if item not in _dict: + _dict[item] = OrderedDict({}) + _dict[item].update(OrderedDict.fromkeys(item_val)) + +def add_items_to_list(_list, items): + """ + Dedupes and preserves sort order. + """ + assert isinstance(_list, list) + assert isinstance(items, list) + + if not items: + return + + for item in items: + if item not in _list: + _list.append(item) + +def delete_items_from_dict(_dict, items): + """ + items is a list of keys to delete. + Doesn't error if the key doesn't exist. + """ + assert isinstance(_dict, dict) + assert isinstance(items, list) + + for item in items: + if item in _dict: + del _dict[item] + +def delete_items_from_list(_list, items): + """ + items is a list of items to remove. + Doesn't error if the key doesn't exist. 
+ """ + assert isinstance(_list, list) + assert isinstance(items, list) + + for item in items: + if item in _list: + _list.remove(item) + +def get_items_from_dict_regex(_dict, item_regex_string): + """ + Returns the items whose keys match item_regex_string. + """ + assert isinstance(_dict, dict) + assert isinstance(item_regex_string, str) + + tmp = {} + regex = re.compile(item_regex_string) + for item in _dict: + if regex.match(item): + tmp[item] = _dict[item] + return tmp + +def get_option(msg, key): + if key in msg: + return msg[key] + else: + raise ValueError("Missing required option \"{0}\"".format(key)) + +def handle_message(msg): + result = None + op = get_option(msg, 'op') + + if op in ['add', 'delete', 'set']: + STATE['changes'] += 1 + + if op == 'delete': + _type = get_option(msg, 'type') + data = get_option(msg, 'data') + if _type in ['name_servers', 'forward_zones', 'search_domains', 'hosts']: + delete_items_from_dict(STATE[_type], data) + elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'authoritative_zones']: + delete_items_from_list(STATE[_type], data) + else: + raise ValueError(f'Operation "{op}" unknown data type "{_type}"') + elif op == 'add': + _type = get_option(msg, 'type') + data = get_option(msg, 'data') + if _type in ['name_servers', 'search_domains']: + add_items_to_dict_as_keys(STATE[_type], data) + elif _type in ['forward_zones', 'hosts']: + add_items_to_dict(STATE[_type], data) + # maybe we need to rec_control clear-nta each domain that was removed here? + elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'authoritative_zones']: + add_items_to_list(STATE[_type], data) + else: + raise ValueError(f'Operation "{op}" unknown data type "{_type}"') + elif op == 'set': + _type = get_option(msg, 'type') + data = get_option(msg, 'data') + if _type == 'host_name': + set_host_name(STATE, data) + else: + raise ValueError(f'Operation "{op}" unknown data type "{_type}"') + elif op == 'get': + _type = get_option(msg, 'type') + if _type in ['name_servers', 'search_domains', 'hosts']: + tag_regex = get_option(msg, 'tag_regex') + result = get_items_from_dict_regex(STATE[_type], tag_regex) + elif _type in ['name_server_tags_recursor', 'name_server_tags_system', 'forward_zones', 'authoritative_zones']: + result = STATE[_type] + else: + raise ValueError(f'Operation "{op}" unknown data type "{_type}"') + elif op == 'apply': + logger.info(f"Applying {STATE['changes']} changes") + make_resolv_conf(STATE) + make_hosts(STATE) + make_pdns_rec_conf(STATE) + pdns_rec_control('reload-lua-config') + pdns_rec_control('reload-zones') + logger.info("Success") + result = {'message': f'Applied {STATE["changes"]} changes'} + STATE['changes'] = 0 + + else: + raise ValueError(f"Unknown operation {op}") + + logger.debug(f"Saving state to {STATE_FILE}") + with open(STATE_FILE, 'w') as f: + json.dump(STATE, f) + + return result + +if __name__ == '__main__': + # Create a directory for state checkpoints + os.makedirs(RUN_DIR, exist_ok=True) + if os.path.exists(STATE_FILE): + with open(STATE_FILE, 'r') as f: + try: + STATE = json.load(f) + except: + logger.exception(traceback.format_exc()) + logger.exception("Failed to load the state file, using default") + + context = zmq.Context() + socket = context.socket(zmq.REP) + + # Set the right permissions on the socket, then change it back + o_mask = os.umask(0o000) + socket.bind(SOCKET_PATH) + os.umask(o_mask) + + while True: + # Wait for next request from client + msg_json = socket.recv().decode() + 
logger.debug(f"Request data: {msg_json}") + + try: + msg = json.loads(msg_json) + validate_schema(msg) + + resp = {} + resp['data'] = handle_message(msg) + except ValueError as e: + resp['error'] = str(e) + except MultipleInvalid as e: + # raised by schema + resp['error'] = f'Invalid message: {str(e)}' + logger.exception(resp['error']) + except: + logger.exception(traceback.format_exc()) + resp['error'] = "Internal error" + + # Send reply back to client + socket.send(json.dumps(resp).encode()) + logger.debug(f"Sent response: {resp}") diff --git a/src/services/vyos-http-api-server b/src/services/vyos-http-api-server new file mode 100644 index 0000000..9110041 --- /dev/null +++ b/src/services/vyos-http-api-server @@ -0,0 +1,1036 @@ +#!/usr/share/vyos-http-api-tools/bin/python3 +# +# Copyright (C) 2019-2024 VyOS maintainers and contributors +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 or later as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os +import sys +import grp +import copy +import json +import logging +import signal +import traceback +import threading +from enum import Enum + +from time import sleep +from typing import List, Union, Callable, Dict, Self + +from fastapi import FastAPI, Depends, Request, Response, HTTPException +from fastapi import BackgroundTasks +from fastapi.responses import HTMLResponse +from fastapi.exceptions import RequestValidationError +from fastapi.routing import APIRoute +from pydantic import BaseModel, StrictStr, validator, model_validator +from starlette.middleware.cors import CORSMiddleware +from starlette.datastructures import FormData +from starlette.formparsers import FormParser, MultiPartParser +from multipart.multipart import parse_options_header +from uvicorn import Config as UvicornConfig +from uvicorn import Server as UvicornServer + +from ariadne.asgi import GraphQL + +from vyos.config import Config +from vyos.configtree import ConfigTree +from vyos.configdiff import get_config_diff +from vyos.configsession import ConfigSession +from vyos.configsession import ConfigSessionError +from vyos.defaults import api_config_state + +import api.graphql.state + +CFG_GROUP = 'vyattacfg' + +debug = True + +logger = logging.getLogger(__name__) +logs_handler = logging.StreamHandler() +logger.addHandler(logs_handler) + +if debug: + logger.setLevel(logging.DEBUG) +else: + logger.setLevel(logging.INFO) + +# Giant lock! 
+lock = threading.Lock() + +def load_server_config(): + with open(api_config_state) as f: + config = json.load(f) + return config + +def check_auth(key_list, key): + key_id = None + for k in key_list: + if k['key'] == key: + key_id = k['id'] + return key_id + +def error(code, msg): + resp = {"success": False, "error": msg, "data": None} + resp = json.dumps(resp) + return HTMLResponse(resp, status_code=code) + +def success(data): + resp = {"success": True, "data": data, "error": None} + resp = json.dumps(resp) + return HTMLResponse(resp) + +# Pydantic models for validation +# Pydantic will cast when possible, so use StrictStr +# validators added as needed for additional constraints +# schema_extra adds anotations to OpenAPI, to add examples + +class ApiModel(BaseModel): + key: StrictStr + +class BasePathModel(BaseModel): + op: StrictStr + path: List[StrictStr] + + @validator("path") + def check_non_empty(cls, path): + if not len(path) > 0: + raise ValueError('path must be non-empty') + return path + +class BaseConfigureModel(BasePathModel): + value: StrictStr = None + +class ConfigureModel(ApiModel, BaseConfigureModel): + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "set | delete | comment", + "path": ['config', 'mode', 'path'], + } + } + +class ConfigureListModel(ApiModel): + commands: List[BaseConfigureModel] + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "commands": "list of commands", + } + } + +class BaseConfigSectionModel(BasePathModel): + section: Dict + +class ConfigSectionModel(ApiModel, BaseConfigSectionModel): + pass + +class ConfigSectionListModel(ApiModel): + commands: List[BaseConfigSectionModel] + +class BaseConfigSectionTreeModel(BaseModel): + op: StrictStr + mask: Dict + config: Dict + +class ConfigSectionTreeModel(ApiModel, BaseConfigSectionTreeModel): + pass + +class RetrieveModel(ApiModel): + op: StrictStr + path: List[StrictStr] + configFormat: StrictStr = None + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "returnValue | returnValues | exists | showConfig", + "path": ['config', 'mode', 'path'], + "configFormat": "json (default) | json_ast | raw", + + } + } + +class ConfigFileModel(ApiModel): + op: StrictStr + file: StrictStr = None + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "save | load", + "file": "filename", + } + } + + +class ImageOp(str, Enum): + add = "add" + delete = "delete" + show = "show" + set_default = "set_default" + + +class ImageModel(ApiModel): + op: ImageOp + url: StrictStr = None + name: StrictStr = None + + @model_validator(mode='after') + def check_data(self) -> Self: + if self.op == 'add': + if not self.url: + raise ValueError("Missing required field \"url\"") + elif self.op in ['delete', 'set_default']: + if not self.name: + raise ValueError("Missing required field \"name\"") + + return self + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "add | delete | show | set_default", + "url": "imagelocation", + "name": "imagename", + } + } + +class ImportPkiModel(ApiModel): + op: StrictStr + path: List[StrictStr] + passphrase: StrictStr = None + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "import_pki", + "path": ["op", "mode", "path"], + "passphrase": "passphrase", + } + } + + +class ContainerImageModel(ApiModel): + op: StrictStr + name: StrictStr = None + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "add | delete | show", + "name": 
"imagename", + } + } + +class GenerateModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "generate", + "path": ["op", "mode", "path"], + } + } + +class ShowModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "show", + "path": ["op", "mode", "path"], + } + } + +class RebootModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "reboot", + "path": ["op", "mode", "path"], + } + } + +class ResetModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "reset", + "path": ["op", "mode", "path"], + } + } + +class PoweroffModel(ApiModel): + op: StrictStr + path: List[StrictStr] + + class Config: + schema_extra = { + "example": { + "key": "id_key", + "op": "poweroff", + "path": ["op", "mode", "path"], + } + } + + +class Success(BaseModel): + success: bool + data: Union[str, bool, Dict] + error: str + +class Error(BaseModel): + success: bool = False + data: Union[str, bool, Dict] + error: str + +responses = { + 200: {'model': Success}, + 400: {'model': Error}, + 422: {'model': Error, 'description': 'Validation Error'}, + 500: {'model': Error} +} + +def auth_required(data: ApiModel): + key = data.key + api_keys = app.state.vyos_keys + key_id = check_auth(api_keys, key) + if not key_id: + raise HTTPException(status_code=401, detail="Valid API key is required") + app.state.vyos_id = key_id + +# override Request and APIRoute classes in order to convert form request to json; +# do all explicit validation here, for backwards compatability of error messages; +# the explicit validation may be dropped, if desired, in favor of native +# validation by FastAPI/Pydantic, as is used for application/json requests +class MultipartRequest(Request): + _form_err = () + @property + def form_err(self): + return self._form_err + + @form_err.setter + def form_err(self, val): + if not self._form_err: + self._form_err = val + + @property + def orig_headers(self): + self._orig_headers = super().headers + return self._orig_headers + + @property + def headers(self): + self._headers = super().headers.mutablecopy() + self._headers['content-type'] = 'application/json' + return self._headers + + async def form(self) -> FormData: + if self._form is None: + assert ( + parse_options_header is not None + ), "The `python-multipart` library must be installed to use form parsing." 
+ content_type_header = self.orig_headers.get("Content-Type") + content_type, options = parse_options_header(content_type_header) + if content_type == b"multipart/form-data": + multipart_parser = MultiPartParser(self.orig_headers, self.stream()) + self._form = await multipart_parser.parse() + elif content_type == b"application/x-www-form-urlencoded": + form_parser = FormParser(self.orig_headers, self.stream()) + self._form = await form_parser.parse() + else: + self._form = FormData() + return self._form + + async def body(self) -> bytes: + if not hasattr(self, "_body"): + forms = {} + merge = {} + body = await super().body() + self._body = body + + form_data = await self.form() + if form_data: + endpoint = self.url.path + logger.debug("processing form data") + for k, v in form_data.multi_items(): + forms[k] = v + + if 'data' not in forms: + self.form_err = (422, "Non-empty data field is required") + return self._body + else: + try: + tmp = json.loads(forms['data']) + except json.JSONDecodeError as e: + self.form_err = (400, f'Failed to parse JSON: {e}') + return self._body + if isinstance(tmp, list): + merge['commands'] = tmp + else: + merge = tmp + + if 'commands' in merge: + cmds = merge['commands'] + else: + cmds = copy.deepcopy(merge) + cmds = [cmds] + + for c in cmds: + if not isinstance(c, dict): + self.form_err = (400, + f"Malformed command '{c}': any command must be JSON of dict") + return self._body + if 'op' not in c: + self.form_err = (400, + f"Malformed command '{c}': missing 'op' field") + if endpoint not in ('/config-file', '/container-image', + '/image', '/configure-section'): + if 'path' not in c: + self.form_err = (400, + f"Malformed command '{c}': missing 'path' field") + elif not isinstance(c['path'], list): + self.form_err = (400, + f"Malformed command '{c}': 'path' field must be a list") + elif not all(isinstance(el, str) for el in c['path']): + self.form_err = (400, + f"Malformed command '{0}': 'path' field must be a list of strings") + if endpoint in ('/configure'): + if not c['path']: + self.form_err = (400, + f"Malformed command '{c}': 'path' list must be non-empty") + if 'value' in c and not isinstance(c['value'], str): + self.form_err = (400, + f"Malformed command '{c}': 'value' field must be a string") + if endpoint in ('/configure-section'): + if 'section' not in c and 'config' not in c: + self.form_err = (400, + f"Malformed command '{c}': missing 'section' or 'config' field") + + if 'key' not in forms and 'key' not in merge: + self.form_err = (401, "Valid API key is required") + if 'key' in forms and 'key' not in merge: + merge['key'] = forms['key'] + + new_body = json.dumps(merge) + new_body = new_body.encode() + self._body = new_body + + return self._body + +class MultipartRoute(APIRoute): + def get_route_handler(self) -> Callable: + original_route_handler = super().get_route_handler() + + async def custom_route_handler(request: Request) -> Response: + request = MultipartRequest(request.scope, request.receive) + try: + response: Response = await original_route_handler(request) + except HTTPException as e: + return error(e.status_code, e.detail) + except Exception as e: + form_err = request.form_err + if form_err: + return error(*form_err) + raise e + + return response + + return custom_route_handler + +app = FastAPI(debug=True, + title="VyOS API", + version="0.1.0", + responses={**responses}, + dependencies=[Depends(auth_required)]) + +app.router.route_class = MultipartRoute + +@app.exception_handler(RequestValidationError) +async def 
+    return error(400, str(exc.errors()[0]))
+
+self_ref_msg = "Requested HTTP API server configuration change; commit will be called in the background"
+
+def call_commit(s: ConfigSession):
+    try:
+        s.commit()
+    except ConfigSessionError as e:
+        s.discard()
+        if app.state.vyos_debug:
+            logger.warning(f"ConfigSessionError:\n {traceback.format_exc()}")
+        else:
+            logger.warning(f"ConfigSessionError: {e}")
+
+def _configure_op(data: Union[ConfigureModel, ConfigureListModel,
+                              ConfigSectionModel, ConfigSectionListModel,
+                              ConfigSectionTreeModel],
+                  request: Request, background_tasks: BackgroundTasks):
+    session = app.state.vyos_session
+    env = session.get_session_env()
+
+    endpoint = request.url.path
+
+    # Allow users to pass just one command
+    if not isinstance(data, (ConfigureListModel, ConfigSectionListModel)):
+        data = [data]
+    else:
+        data = data.commands
+
+    # We don't want multiple people/apps to be able to commit at once,
+    # or modify the shared session while someone else is doing the same,
+    # so the lock is really global
+    lock.acquire()
+
+    config = Config(session_env=env)
+
+    status = 200
+    msg = None
+    error_msg = None
+    try:
+        for c in data:
+            op = c.op
+            if not isinstance(c, BaseConfigSectionTreeModel):
+                path = c.path
+
+            if isinstance(c, BaseConfigureModel):
+                if c.value:
+                    value = c.value
+                else:
+                    value = ""
+                # For vyos.configsession calls that have no separate value arguments,
+                # and for type checking too
+                cfg_path = " ".join(path + [value]).strip()
+
+            elif isinstance(c, BaseConfigSectionModel):
+                section = c.section
+
+            elif isinstance(c, BaseConfigSectionTreeModel):
+                mask = c.mask
+                config = c.config
+
+            if isinstance(c, BaseConfigureModel):
+                if op == 'set':
+                    session.set(path, value=value)
+                elif op == 'delete':
+                    if app.state.vyos_strict and not config.exists(cfg_path):
+                        raise ConfigSessionError(f"Cannot delete [{cfg_path}]: path/value does not exist")
+                    session.delete(path, value=value)
+                elif op == 'comment':
+                    session.comment(path, value=value)
+                else:
+                    raise ConfigSessionError(f"'{op}' is not a valid operation")
+
+            elif isinstance(c, BaseConfigSectionModel):
+                if op == 'set':
+                    session.set_section(path, section)
+                elif op == 'load':
+                    session.load_section(path, section)
+                else:
+                    raise ConfigSessionError(f"'{op}' is not a valid operation")
+
+            elif isinstance(c, BaseConfigSectionTreeModel):
+                if op == 'set':
+                    session.set_section_tree(config)
+                elif op == 'load':
+                    session.load_section_tree(mask, config)
+                else:
+                    raise ConfigSessionError(f"'{op}' is not a valid operation")
+        # end for
+        config = Config(session_env=env)
+        d = get_config_diff(config)
+
+        if d.is_node_changed(['service', 'https']):
+            background_tasks.add_task(call_commit, session)
+            msg = self_ref_msg
+        else:
+            # capture non-fatal warnings
+            out = session.commit()
+            msg = out if out else msg
+
+        logger.info(f"Configuration modified via HTTP API using key '{app.state.vyos_id}'")
+    except ConfigSessionError as e:
+        session.discard()
+        status = 400
+        if app.state.vyos_debug:
+            logger.critical(f"ConfigSessionError:\n {traceback.format_exc()}")
+        error_msg = str(e)
+    except Exception as e:
+        session.discard()
+        logger.critical(traceback.format_exc())
+        status = 500
+
+        # Don't give the details away to the outer world
+        error_msg = "An internal error occurred. Check the logs for details."
+    finally:
+        lock.release()
+
+    if status != 200:
+        return error(status, error_msg)
+
+    return success(msg)
+
+def create_path_import_pki_no_prompt(path):
+    correct_paths = ['ca', 'certificate', 'key-pair']
+    if path[1] not in correct_paths:
+        return False
+    path[1] = '--' + path[1].replace('-', '')
+    path[3] = '--key-filename'
+    return path[1:]
+
+@app.post('/configure')
+def configure_op(data: Union[ConfigureModel,
+                             ConfigureListModel],
+                 request: Request, background_tasks: BackgroundTasks):
+    return _configure_op(data, request, background_tasks)
+
+@app.post('/configure-section')
+def configure_section_op(data: Union[ConfigSectionModel,
+                                     ConfigSectionListModel,
+                                     ConfigSectionTreeModel],
+                         request: Request, background_tasks: BackgroundTasks):
+    return _configure_op(data, request, background_tasks)
+
+@app.post("/retrieve")
+async def retrieve_op(data: RetrieveModel):
+    session = app.state.vyos_session
+    env = session.get_session_env()
+    config = Config(session_env=env)
+
+    op = data.op
+    path = " ".join(data.path)
+
+    try:
+        if op == 'returnValue':
+            res = config.return_value(path)
+        elif op == 'returnValues':
+            res = config.return_values(path)
+        elif op == 'exists':
+            res = config.exists(path)
+        elif op == 'showConfig':
+            config_format = 'json'
+            if data.configFormat:
+                config_format = data.configFormat
+
+            res = session.show_config(path=data.path)
+            if config_format == 'json':
+                config_tree = ConfigTree(res)
+                res = json.loads(config_tree.to_json())
+            elif config_format == 'json_ast':
+                config_tree = ConfigTree(res)
+                res = json.loads(config_tree.to_json_ast())
+            elif config_format == 'raw':
+                pass
+            else:
+                return error(400, f"'{config_format}' is not a valid config format")
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+
+    return success(res)
+
+@app.post('/config-file')
+def config_file_op(data: ConfigFileModel, background_tasks: BackgroundTasks):
+    session = app.state.vyos_session
+    env = session.get_session_env()
+    op = data.op
+    msg = None
+
+    try:
+        if op == 'save':
+            if data.file:
+                path = data.file
+            else:
+                path = '/config/config.boot'
+            msg = session.save_config(path)
+        elif op == 'load':
+            if data.file:
+                path = data.file
+            else:
+                return error(400, "Missing required field \"file\"")
+
+            session.migrate_and_load_config(path)
+
+            config = Config(session_env=env)
+            d = get_config_diff(config)
+
+            if d.is_node_changed(['service', 'https']):
+                background_tasks.add_task(call_commit, session)
+                msg = self_ref_msg
+            else:
+                session.commit()
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+
+    return success(msg)
+
+@app.post('/image')
+def image_op(data: ImageModel):
+    session = app.state.vyos_session
+
+    op = data.op
+
+    try:
+        if op == 'add':
+            res = session.install_image(data.url)
+        elif op == 'delete':
+            res = session.remove_image(data.name)
+        elif op == 'show':
+            res = session.show(["system", "image"])
+        elif op == 'set_default':
+            res = session.set_default_image(data.name)
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+
+    return success(res)
+
+@app.post('/container-image')
+def container_image_op(data: ContainerImageModel):
+    session = app.state.vyos_session
+
+    op = data.op
+
+    try:
+        if op == 'add':
+            if data.name:
+                name = data.name
+            else:
+                return error(400, "Missing required field \"name\"")
+            res = session.add_container_image(name)
+        elif op == 'delete':
+            if data.name:
+                name = data.name
+            else:
+                return error(400, "Missing required field \"name\"")
+            res = session.delete_container_image(name)
+        elif op == 'show':
+            res = session.show_container_image()
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+
+    return success(res)
+
+@app.post('/generate')
+def generate_op(data: GenerateModel):
+    session = app.state.vyos_session
+
+    op = data.op
+    path = data.path
+
+    try:
+        if op == 'generate':
+            res = session.generate(path)
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+
+    return success(res)
+
+@app.post('/show')
+def show_op(data: ShowModel):
+    session = app.state.vyos_session
+
+    op = data.op
+    path = data.path
+
+    try:
+        if op == 'show':
+            res = session.show(path)
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+
+    return success(res)
+
+@app.post('/reboot')
+def reboot_op(data: RebootModel):
+    session = app.state.vyos_session
+
+    op = data.op
+    path = data.path
+
+    try:
+        if op == 'reboot':
+            res = session.reboot(path)
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+
+    return success(res)
+
+@app.post('/reset')
+def reset_op(data: ResetModel):
+    session = app.state.vyos_session
+
+    op = data.op
+    path = data.path
+
+    try:
+        if op == 'reset':
+            res = session.reset(path)
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+
+    return success(res)
+
+@app.post('/import-pki')
+def import_pki(data: ImportPkiModel):
+    session = app.state.vyos_session
+
+    op = data.op
+    path = data.path
+
+    lock.acquire()
+
+    try:
+        if op == 'import-pki':
+            # need to get rid of interactive mode for private key
+            if len(path) == 5 and path[3] in ['key-file', 'private-key']:
+                path_no_prompt = create_path_import_pki_no_prompt(path)
+                if not path_no_prompt:
+                    return error(400, f"Invalid command: {' '.join(path)}")
+                if data.passphrase:
+                    path_no_prompt += ['--passphrase', data.passphrase]
+                res = session.import_pki_no_prompt(path_no_prompt)
+            else:
+                res = session.import_pki(path)
+            if not res[0].isdigit():
+                return error(400, res)
+            # commit changes
+            session.commit()
+            res = res.split('. ')[0]
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+    finally:
+        lock.release()
+
+    return success(res)
+
+@app.post('/poweroff')
+def poweroff_op(data: PoweroffModel):
+    session = app.state.vyos_session
+
+    op = data.op
+    path = data.path
+
+    try:
+        if op == 'poweroff':
+            res = session.poweroff(path)
+        else:
+            return error(400, f"'{op}' is not a valid operation")
+    except ConfigSessionError as e:
+        return error(400, str(e))
+    except Exception as e:
+        logger.critical(traceback.format_exc())
+        return error(500, "An internal error occurred. Check the logs for details.")
+
+    return success(res)
+
+
+###
+# GraphQL integration
+###
+
+def graphql_init(app: FastAPI = app):
+    from api.graphql.libs.token_auth import get_user_context
+    api.graphql.state.init()
+    api.graphql.state.settings['app'] = app
+
+    # import after initialization of state
+    from api.graphql.bindings import generate_schema
+    schema = generate_schema()
+
+    in_spec = app.state.vyos_introspection
+
+    if app.state.vyos_origins:
+        origins = app.state.vyos_origins
+        app.add_route('/graphql', CORSMiddleware(GraphQL(schema,
+                                                         context_value=get_user_context,
+                                                         debug=True,
+                                                         introspection=in_spec),
+                                                 allow_origins=origins,
+                                                 allow_methods=("GET", "POST", "OPTIONS"),
+                                                 allow_headers=("Authorization",)))
+    else:
+        app.add_route('/graphql', GraphQL(schema,
+                                          context_value=get_user_context,
+                                          debug=True,
+                                          introspection=in_spec))
+###
+# Modify uvicorn to allow reloading server within the configsession
+###
+
+server = None
+shutdown = False
+
+class ApiServerConfig(UvicornConfig):
+    pass
+
+class ApiServer(UvicornServer):
+    def install_signal_handlers(self):
+        pass
+
+def reload_handler(signum, frame):
+    global server
+    logger.debug('Reload signal received...')
+    if server is not None:
+        server.handle_exit(signum, frame)
+        server = None
+        logger.info('Server stopping for reload...')
+    else:
+        logger.warning('Reload called for non-running server...')
+
+def shutdown_handler(signum, frame):
+    global shutdown
+    logger.debug('Shutdown signal received...')
+    server.handle_exit(signum, frame)
+    logger.info('Server shutdown...')
+    shutdown = True
+
+def flatten_keys(d: dict) -> list[dict]:
+    keys_list = []
+    for el in list(d['keys'].get('id', {})):
+        key = d['keys']['id'][el].get('key', '')
+        if key:
+            keys_list.append({'id': el, 'key': key})
+    return keys_list
+
+def initialization(session: ConfigSession, app: FastAPI = app):
+    global server
+    try:
+        server_config = load_server_config()
+    except Exception as e:
+        logger.critical(f'Failed to load the HTTP API server config: {e}')
+        sys.exit(1)
+
+    app.state.vyos_session = session
+    app.state.vyos_keys = []
+
+    if 'keys' in server_config:
+        app.state.vyos_keys = flatten_keys(server_config)
+
+    app.state.vyos_debug = bool('debug' in server_config)
+    app.state.vyos_strict = bool('strict' in server_config)
+    app.state.vyos_origins = server_config.get('cors', {}).get('allow_origin', [])
+    if 'graphql' in server_config:
+        app.state.vyos_graphql = True
+        if isinstance(server_config['graphql'], dict):
+            if 'introspection' in server_config['graphql']:
+                app.state.vyos_introspection = True
+            else:
+                app.state.vyos_introspection = False
+            # default values if not set explicitly
+            app.state.vyos_auth_type = server_config['graphql']['authentication']['type']
+            app.state.vyos_token_exp = server_config['graphql']['authentication']['expiration']
+            app.state.vyos_secret_len = server_config['graphql']['authentication']['secret_length']
+    else:
+        app.state.vyos_graphql = False
+
+    if app.state.vyos_graphql:
+        graphql_init(app)
+
+    config = ApiServerConfig(app, uds="/run/api.sock", proxy_headers=True)
+    server = ApiServer(config)
+
+def run_server():
+    try:
+        server.run()
+    except OSError as e:
+        logger.critical(e)
+        sys.exit(1)
+
+if __name__ == '__main__':
+    # systemd's user and group options don't work, do it by hand here,
+    # else no one else will be able to commit
+    cfg_group = grp.getgrnam(CFG_GROUP)
+    os.setgid(cfg_group.gr_gid)
+
+    # Need to set file permissions to 775 too so that every vyattacfg group member
+    # has write access to the running config
+    os.umask(0o002)
+
+    signal.signal(signal.SIGHUP, reload_handler)
+    signal.signal(signal.SIGTERM, shutdown_handler)
+
+    config_session = ConfigSession(os.getpid())
+
+    while True:
+        logger.debug('Enter main loop...')
+        if shutdown:
+            break
+        if server is None:
+            initialization(config_session)
+        server.run()
+        sleep(1)
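
Usage note: the MultipartRequest/MultipartRoute classes above convert
multipart or urlencoded form data into the JSON body expected by the
endpoint models, so the REST endpoints can be exercised with curl's -F
form fields as well as with an application/json body. A minimal sketch of
equivalent client calls (the address 192.168.0.1 and the key MY-API-KEY
are placeholders, not values taken from this file):

curl -k https://192.168.0.1/retrieve \
     -F key='MY-API-KEY' \
     -F data='{"op": "showConfig", "path": []}'

curl -k https://192.168.0.1/configure \
     -F key='MY-API-KEY' \
     -F data='{"op": "set", "path": ["interfaces", "ethernet", "eth1", "address"], "value": "192.168.0.1/24"}'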