Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/config/cc_resizefs_vyos.py  356
-rw-r--r--  cloudinit/config/cc_vyos.py           624
-rw-r--r--  cloudinit/config/cc_vyos_userdata.py  216
3 files changed, 1196 insertions, 0 deletions
diff --git a/cloudinit/config/cc_resizefs_vyos.py b/cloudinit/config/cc_resizefs_vyos.py
new file mode 100644
index 00000000..b54f2e27
--- /dev/null
+++ b/cloudinit/config/cc_resizefs_vyos.py
@@ -0,0 +1,356 @@
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Resizefs_vyos: cloud-config module which resizes filesystems"""
+
+import errno
+import os
+import stat
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
+from cloudinit.settings import PER_ALWAYS
+
+NOBLOCK = "noblock"
+RESIZEFS_LIST_DEFAULT = ['/']
+
+frequency = PER_ALWAYS
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_resizefs_vyos",
+ "name": "Resizefs_vyos",
+ "title": "Resize filesystems",
+ "description": dedent(
+ """\
+ Resize filesystems to use all available space on the partition. This
+ module is useful along with ``cc_growpart`` and will ensure that if a
+ partition has been resized the filesystem will be resized
+ along with it. By default, ``cc_resizefs_vyos`` will resize the root
+ partition and will block the boot process while the resize command is
+ running. Optionally, the resize operation can be performed in the
+ background while cloud-init continues running modules. This can be
+ enabled by setting ``resizefs_enabled`` to ``noblock``. This module can
+ be disabled altogether by setting ``resizefs_enabled`` to ``false``."""
+ ),
+ "distros": distros,
+ "examples": [
+ "resizefs_enabled: false # disable filesystems resize operation",
+ "resizefs_list: [\"/\", \"/dev/vda1\"]"],
+ "frequency": PER_ALWAYS,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "resizefs_enabled": {
+ "enum": [True, False, NOBLOCK],
+ "description": dedent(
+ """\
+ Whether to resize the filesystems. Default: 'true'"""
+ ),
+ },
+ "resizefs_list": {
+ "type": "array",
+ "items": {"type": "string"},
+ "additionalItems": False, # Reject items non-string
+ "description": dedent(
+ """\
+ List of filesystems (mount points or devices) that should be
+ resized. Default: '/'"""
+ )
+ }
+ },
+}
+
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
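+
+# Illustrative cloud-config usage for this module (the values below are examples only):
+#
+#   resizefs_enabled: noblock
+#   resizefs_list: ["/", "/dev/vda1"]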
+
+
+def _resize_btrfs(mount_point, devpth):
+ # If "/" is ro resize will fail. However it should be allowed since resize
+ # makes everything bigger and subvolumes that are not ro will benefit.
+ # Use a subvolume that is not ro to trick the resize operation to do the
+ # "right" thing. The use of ".snapshot" is specific to "snapper" a generic
+ # solution would be walk the subvolumes and find a rw mounted subvolume.
+ if not util.mount_is_read_write(mount_point) and os.path.isdir(
+ "%s/.snapshots" % mount_point
+ ):
+ return (
+ "btrfs",
+ "filesystem",
+ "resize",
+ "max",
+ "%s/.snapshots" % mount_point,
+ )
+ else:
+ return ("btrfs", "filesystem", "resize", "max", mount_point)
+
+
+def _resize_ext(mount_point, devpth):
+ return ("resize2fs", devpth)
+
+
+def _resize_xfs(mount_point, devpth):
+ return ("xfs_growfs", mount_point)
+
+
+def _resize_ufs(mount_point, devpth):
+ return ("growfs", "-y", mount_point)
+
+
+def _resize_zfs(mount_point, devpth):
+ return ("zpool", "online", "-e", mount_point, devpth)
+
+
+def _resize_hammer2(mount_point, devpth):
+ return ("hammer2", "growfs", mount_point)
+
+
+def _can_skip_resize_ufs(mount_point, devpth):
+ # possible error cases on the code-path to growfs -N following:
+ # https://github.com/freebsd/freebsd/blob/HEAD/sbin/growfs/growfs.c
+ # This is the "good" error:
+ skip_start = "growfs: requested size"
+ skip_contain = "is not larger than the current filesystem size"
+ # growfs exits with 1 for almost all cases up to this one.
+ # This means we can't just use rcs=[0, 1] as subp parameter:
+ try:
+ subp.subp(["growfs", "-N", devpth])
+ except subp.ProcessExecutionError as e:
+ if e.stderr.startswith(skip_start) and skip_contain in e.stderr:
+ # This FS is already at the desired size
+ return True
+ else:
+ raise e
+ return False
+
+
+# Do not use a dictionary as these commands should be able to be used
+# for multiple filesystem types if possible, e.g. one command for
+# ext2, ext3 and ext4.
+RESIZE_FS_PREFIXES_CMDS = [
+ ("btrfs", _resize_btrfs),
+ ("ext", _resize_ext),
+ ("xfs", _resize_xfs),
+ ("ufs", _resize_ufs),
+ ("zfs", _resize_zfs),
+ ("hammer2", _resize_hammer2),
+]
+
+RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs}
+
+
+def can_skip_resize(fs_type, resize_what, devpth):
+ fstype_lc = fs_type.lower()
+ for i, func in RESIZE_FS_PRECHECK_CMDS.items():
+ if fstype_lc.startswith(i):
+ return func(resize_what, devpth)
+ return False
+
+
+def maybe_get_writable_device_path(devpath, info, log):
+ """Return updated devpath if the devpath is a writable block device.
+
+ @param devpath: Requested path to the root device we want to resize.
+ @param info: String representing information about the requested device.
+ @param log: Logger to which logs will be added upon error.
+
+ @returns devpath or updated devpath per kernel commandline if the device
+ path is a writable block device, returns None otherwise.
+ """
+ container = util.is_container()
+
+ # Ensure the path is a block device.
+ if (
+ devpath == "/dev/root"
+ and not os.path.exists(devpath)
+ and not container
+ ):
+ devpath = util.rootdev_from_cmdline(util.get_cmdline())
+ if devpath is None:
+ log.warning("Unable to find device '/dev/root'")
+ return None
+ log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
+
+ if devpath == "overlayroot":
+ log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
+ return None
+
+ # FreeBSD zpool can also just use gpt/<label>
+ # with that in mind we can not do an os.stat on "gpt/whatever"
+ # therefore return the devpath already here.
+ if devpath.startswith("gpt/"):
+ log.debug("We have a gpt label - just go ahead")
+ return devpath
+ # Alternatively, our device could simply be a name as returned by gpart,
+ # such as da0p3
+ if not devpath.startswith("/dev/") and not os.path.exists(devpath):
+ fulldevpath = "/dev/" + devpath.lstrip("/")
+ log.debug(
+ "'%s' doesn't appear to be a valid device path. Trying '%s'",
+ devpath,
+ fulldevpath,
+ )
+ devpath = fulldevpath
+
+ try:
+ statret = os.stat(devpath)
+ except OSError as exc:
+ if container and exc.errno == errno.ENOENT:
+ log.debug(
+ "Device '%s' did not exist in container. cannot resize: %s",
+ devpath,
+ info,
+ )
+ elif exc.errno == errno.ENOENT:
+ log.warning(
+ "Device '%s' did not exist. cannot resize: %s", devpath, info
+ )
+ else:
+ raise exc
+ return None
+
+ if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
+ if container:
+ log.debug(
+ "device '%s' not a block device in container."
+ " cannot resize: %s" % (devpath, info)
+ )
+ else:
+ log.warning(
+ "device '%s' not a block device. cannot resize: %s"
+ % (devpath, info)
+ )
+ return None
+ return devpath # The writable block devpath
+
+
+def resize_fs(resize_what, log, resize_enabled):
+ result = util.get_mount_info(resize_what, log)
+ if not result:
+ log.warning("Could not determine filesystem type of %s", resize_what)
+ return
+
+ (devpth, fs_type, mount_point) = result
+
+ # if we have a zfs then our device path at this point
+ # is the zfs label. For example: vmzroot/ROOT/freebsd
+ # we will have to get the zpool name out of this
+ # and set the resize_what variable to the zpool
+ # so the _resize_zfs function gets the right attribute.
+ if fs_type == "zfs":
+ zpool = devpth.split("/")[0]
+ devpth = util.get_device_info_from_zpool(zpool)
+ if not devpth:
+ return # could not find device from zpool
+ resize_what = zpool
+
+ info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
+ log.debug("resize_info: %s" % info)
+
+ devpth = maybe_get_writable_device_path(devpth, info, log)
+ if not devpth:
+ return # devpath was not a writable block device
+
+ resizer = None
+ if can_skip_resize(fs_type, resize_what, devpth):
+ log.debug(
+ "Skip resize filesystem type %s for %s", fs_type, resize_what
+ )
+ return
+
+ fstype_lc = fs_type.lower()
+ for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
+ if fstype_lc.startswith(pfix):
+ resizer = root_cmd
+ break
+
+ if not resizer:
+ log.warning(
+ "Not resizing unknown filesystem type %s for %s",
+ fs_type,
+ resize_what,
+ )
+ return
+
+ resize_cmd = resizer(resize_what, devpth)
+ log.debug(
+ "Resizing %s (%s) using %s", resize_what, fs_type, " ".join(resize_cmd)
+ )
+
+ if resize_enabled == NOBLOCK:
+ # Fork to a child that will run
+ # the resize command
+ util.fork_cb(
+ util.log_time,
+ logfunc=log.debug,
+ msg="backgrounded Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
+ else:
+ util.log_time(
+ logfunc=log.debug,
+ msg="Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
+
+ action = "Resized"
+ if resize_enabled == NOBLOCK:
+ action = "Resizing (via forking)"
+ log.debug(
+ "%s filesystem on %s (type=%s, val=%s)", action, resize_what,
+ fs_type, resize_enabled
+ )
+
+
+def handle(name, cfg, _cloud, log, args):
+ if len(args) != 0:
+ resize_enabled = args[0]
+ else:
+ resize_enabled = util.get_cfg_option_str(cfg, "resizefs_enabled", True)
+
+ # Warn about the old-style configuration
+ resize_rootfs_option = util.get_cfg_option_str(cfg, "resize_rootfs")
+ if resize_rootfs_option:
+ log.warning("""The resize_rootfs option is deprecated, please use
+ resizefs_enabled instead!""")
+ resize_enabled = resize_rootfs_option
+
+ validate_cloudconfig_schema(cfg, schema)
+ if not util.translate_bool(resize_enabled, addons=[NOBLOCK]):
+ log.debug("Skipping module named %s, resizing disabled", name)
+ return
+
+ # Get list of partitions to resize
+ resize_list = util.get_cfg_option_list(cfg, "resizefs_list",
+ RESIZEFS_LIST_DEFAULT)
+ log.debug("Filesystems to resize: %s", resize_list)
+
+ # Resize all filesystems from resize_list
+ for resize_what in resize_list:
+ resize_fs(resize_what, log, resize_enabled)
+
+
+def do_resize(resize_cmd, log):
+ try:
+ subp.subp(resize_cmd)
+ except subp.ProcessExecutionError:
+ util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
+ raise
+ # TODO(harlowja): Should we add a fsck check after this to make
+ # sure we didn't corrupt anything?
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_vyos.py b/cloudinit/config/cc_vyos.py
new file mode 100644
index 00000000..a7f75316
--- /dev/null
+++ b/cloudinit/config/cc_vyos.py
@@ -0,0 +1,624 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2019 Sentrium S.L.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Kim Hagen <kim@sentrium.io>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import ipaddress
+from pathlib import Path
+from subprocess import run, DEVNULL
+from uuid import uuid4
+from cloudinit import log as logging
+from cloudinit.ssh_util import AuthKeyLineParser
+from cloudinit.distros import ug_util
+from cloudinit.settings import PER_INSTANCE
+from cloudinit.sources import INSTANCE_JSON_FILE
+from cloudinit.stages import Init
+from cloudinit.util import load_file, load_json, get_hostname_fqdn
+from cloudinit.sources.DataSourceOVF import get_properties as ovf_get_properties
+try:
+ from vyos.configtree import ConfigTree
+except ImportError as err:
+ print(f'The module cannot be imported: {err}')
+
+# configure logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+frequency = PER_INSTANCE
+
+
+class VyosError(Exception):
+ """Raised when the distro runs into an exception when setting vyos config.
+ This may happen when the ssh pub key format is wrong.
+ """
+ pass
+
+
+# configure user account with password
+def set_pass_login(config, user, password):
+ # check if a password string is a hash or a plaintext password
+ # the regex is taken from the Cloud-init documentation, so we can trust it for this purpose
+ encrypted_pass = re.match(r'\$(1|2a|2y|5|6)(\$.+){2}', password)
+ if encrypted_pass:
+ logger.debug("Configuring encrypted password for: {}".format(user))
+ config.set(['system', 'login', 'user', user, 'authentication', 'encrypted-password'], value=password, replace=True)
+ else:
+ logger.debug("Configuring plaintext password password for: {}".format(user))
+ config.set(['system', 'login', 'user', user, 'authentication', 'plaintext-password'], value=password, replace=True)
+
+ config.set_tag(['system', 'login', 'user'])
+
+ # Return True if credentials added
+ return True
+
+
+# configure user account with ssh key
+def set_ssh_login(config, user, key_string):
+ ssh_parser = AuthKeyLineParser()
+ key_parsed = ssh_parser.parse(key_string)
+ logger.debug("Parsed SSH public key: type: {}, base64: \"{}\", comment: {}, options: {}".format(key_parsed.keytype, key_parsed.base64, key_parsed.comment, key_parsed.options))
+
+ if key_parsed.keytype not in ['ssh-dss', 'ssh-rsa', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', 'ssh-ed25519', 'ecdsa-sha2-nistp521']:
+ logger.error("Key type {} not supported.".format(key_parsed.keytype))
+ return False
+
+ if not key_parsed.base64:
+ logger.error("Key base64 not defined, wrong ssh key format.")
+ return False
+
+ if not key_parsed.comment:
+ key_parsed.comment = "cloud-init-{}".format(uuid4())
+
+ config.set(['system', 'login', 'user', user, 'authentication', 'public-keys', key_parsed.comment, 'key'], value=key_parsed.base64, replace=True)
+ config.set(['system', 'login', 'user', user, 'authentication', 'public-keys', key_parsed.comment, 'type'], value=key_parsed.keytype, replace=True)
+ if key_parsed.options:
+ config.set(['system', 'login', 'user', user, 'authentication', 'public-keys', key_parsed.comment, 'options'], value=key_parsed.options, replace=True)
+ config.set_tag(['system', 'login', 'user'])
+ config.set_tag(['system', 'login', 'user', user, 'authentication', 'public-keys'])
+ logger.debug("Configured SSH public key for user: {}".format(user))
+
+ # Return True if credentials added
+ return True
+
+
+# filter hostname to be sure that it can be applied
+# NOTE: at this point we cannot reject a prohibited hostname, as it is too late.
+# Therefore, we only pass through what is allowed, cutting everything else
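+# e.g. hostname_filter('my_router!.example.com') would return 'myrouter.example.com'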
+def hostname_filter(hostname):
+ # define regex for allowed characters and the resulting hostname
+ regex_characters = re.compile(r'[a-z0-9.-]', re.IGNORECASE)
+ regex_hostname = re.compile(r'[a-z0-9](([a-z0-9-]\.|[a-z0-9-])*[a-z0-9])?', re.IGNORECASE)
+ # filter characters
+ filtered_characters = ''.join(regex_characters.findall(hostname))
+ # check that the hostname starts and ends with allowed characters, cut unsupported ones, and limit to 64 characters total
+ filtered_hostname = regex_hostname.search(filtered_characters).group()[:64]
+
+ if hostname != filtered_hostname:
+ logger.warning("Hostname/domain was filtered: {} -> {}".format(hostname, filtered_hostname))
+ # return safe to apply host-name value
+ return filtered_hostname
+
+
+# configure system parameters from OVF template
+def set_config_ovf(config, ovf_environment):
+ logger.debug("Applying configuration from an OVF template")
+
+ # Check for 'null' values and replace them with None;
+ # this makes the rest of the code easier
+ for (ovf_property_key, ovf_property_value) in ovf_environment.items():
+ if ovf_property_value == 'null':
+ ovf_environment[ovf_property_key] = None
+
+ # get all variables required for configuration
+ ip_address = ovf_environment['ip0']
+ ip_mask = ovf_environment['netmask0']
+ gateway = ovf_environment['gateway']
+ dns_string = ovf_environment['DNS']
+ ntp_string = ovf_environment['NTP']
+ api_key = ovf_environment['APIKEY']
+ api_port = ovf_environment['APIPORT']
+ api_debug = ovf_environment['APIDEBUG']
+
+ # Configure an interface and default route
+ if ip_address and ip_mask and gateway:
+ ip_address_cidr = ipaddress.ip_interface("{}/{}".format(ip_address, ip_mask.replace('/', ''))).with_prefixlen
+ logger.debug("Configuring the IP address on the eth0 interface: {}".format(ip_address_cidr))
+ set_ipaddress(config, 'eth0', ip_address_cidr)
+
+ logger.debug("Configuring default route via: {}".format(gateway))
+ config.set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'], value=gateway, replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'])
+ else:
+ logger.debug("Configuring a DHCP client on the eth0 interface (fallback from OVF)")
+ set_ipaddress(config, 'eth0', 'dhcp')
+
+ # Configure DNS servers
+ if dns_string:
+ dns_list = list(dns_string.replace(' ', '').split(','))
+ for server in dns_list:
+ logger.debug("Configuring DNS server: {}".format(server))
+ config.set(['system', 'name-server'], value=server, replace=False)
+
+ # Configure NTP servers
+ if ntp_string:
+ ntp_list = list(ntp_string.replace(' ', '').split(','))
+ config.delete(['system', 'ntp'])
+ for server in ntp_list:
+ logger.debug("Configuring NTP server: {}".format(server))
+ config.set(['system', 'ntp', 'server'], value=server, replace=False)
+ config.set_tag(['system', 'ntp', 'server'])
+
+ # Configure API
+ if api_key:
+ logger.debug("Configuring HTTP API key: {}".format(api_key))
+ config.set(['service', 'https', 'api', 'keys', 'id', 'cloud-init', 'key'], value=api_key, replace=True)
+ config.set_tag(['service', 'https', 'api', 'keys', 'id'])
+ if api_key and api_port:
+ logger.debug("Configuring HTTP API port: {}".format(api_port))
+ config.set(['service', 'https', 'listen-address', '0.0.0.0', 'listen-port'], value=api_port, replace=True)
+ config.set_tag(['service', 'https', 'listen-address'])
+ if api_key and api_debug != 'False':
+ logger.debug("Enabling HTTP API debug")
+ config.set(['service', 'https', 'api', 'debug'], replace=True)
+
+
+# get an IP address type
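+# e.g. 'dhcp' -> 'dhcp', '192.0.2.1/24' -> 'ipv4', '2001:db8::1/64' -> 'ipv6'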
+def get_ip_type(address):
+ addr_type = None
+ if address in ['dhcp', 'dhcpv6']:
+ addr_type = address
+ else:
+ try:
+ ip_version = ipaddress.ip_interface(address).version
+ if ip_version == 4:
+ addr_type = 'ipv4'
+ if ip_version == 6:
+ addr_type = 'ipv6'
+ except Exception as err:
+ logger.error("Unable to detect IP address type: {}".format(err))
+ logger.debug("IP address {} have type: {}".format(address, addr_type))
+ return addr_type
+
+
+# configure IP address for interface
+def set_ipaddress(config, iface, address):
+ # detect an IP address type
+ addr_type = get_ip_type(address)
+ if not addr_type:
+ logger.error("Unable to configure the IP address: {}".format(address))
+ return
+
+ # check a current configuration of an interface
+ if config.exists(['interfaces', 'ethernet', iface, 'address']):
+ current_addresses = config.return_values(['interfaces', 'ethernet', iface, 'address'])
+ logger.debug("IP address for interface {} already configured: {}".format(iface, current_addresses))
+ # check if currently configured addresses can be used with new one
+ incompatible_addresses = []
+ for current_address in current_addresses:
+ # dhcp cannot be used with static IP address at the same time
+ if ((addr_type == 'dhcp' and get_ip_type(current_address) == 'ipv4') or
+ (addr_type == 'ipv4' and get_ip_type(current_address) == 'dhcp') or
+ (addr_type == 'dhcpv6' and get_ip_type(current_address) == 'ipv6') or
+ (addr_type == 'ipv6' and get_ip_type(current_address) == 'dhcpv6')):
+ incompatible_addresses.append(current_address)
+ # inform about error and skip configuration
+ if incompatible_addresses:
+ logger.error("IP address {} cannot be configured, because it conflicts with already exists: {}".format(address, incompatible_addresses))
+ return
+
+ # configure address
+ logger.debug("Configuring IP address {} on interface {}".format(address, iface))
+ config.set(['interfaces', 'ethernet', iface, 'address'], value=address, replace=False)
+ config.set_tag(['interfaces', 'ethernet'])
+
+
+# configure interface from networking config version 1
+def set_config_interfaces_v1(config, iface_config):
+ logger.debug("Configuring network using Cloud-init networking config version 1")
+ # configure physical interfaces
+ if iface_config['type'] == 'physical':
+ iface_name = iface_config['name']
+
+ # configure MAC
+ if 'mac_address' in iface_config:
+ logger.debug("Setting MAC for {}: {}".format(iface_name, iface_config['mac_address']))
+ config.set(['interfaces', 'ethernet', iface_name, 'hw-id'], value=iface_config['mac_address'], replace=True)
+ config.set_tag(['interfaces', 'ethernet'])
+
+ # configure MTU
+ if 'mtu' in iface_config:
+ logger.debug("Setting MTU for {}: {}".format(iface_name, iface_config['mtu']))
+ config.set(['interfaces', 'ethernet', iface_name, 'mtu'], value=iface_config['mtu'], replace=True)
+ config.set_tag(['interfaces', 'ethernet'])
+
+ # configure subnets
+ if 'subnets' in iface_config:
+ for subnet in iface_config['subnets']:
+ # configure DHCP client
+ if subnet['type'] in ['dhcp', 'dhcp4', 'dhcp6']:
+ if subnet['type'] == 'dhcp6':
+ set_ipaddress(config, iface_name, 'dhcpv6')
+ else:
+ set_ipaddress(config, iface_name, 'dhcp')
+
+ continue
+
+ # configure static options
+ if subnet['type'] in ['static', 'static6']:
+ # configure IP address
+ try:
+ ip_interface = ipaddress.ip_interface(subnet['address'])
+ ip_version = ip_interface.version
+ ip_address = ip_interface.ip.compressed
+ ip_static_addr = ''
+ # format IPv4
+ if ip_version == 4 and ip_address != '0.0.0.0':
+ if '/' in subnet['address']:
+ ip_static_addr = ip_interface.compressed
+ else:
+ ip_static_addr = ipaddress.IPv4Interface('{}/{}'.format(ip_address, subnet['netmask'])).compressed
+ # format IPv6
+ if ip_version == 6:
+ ip_static_addr = ip_interface.compressed
+ # apply to the configuration
+ if ip_static_addr:
+ set_ipaddress(config, iface_name, ip_static_addr)
+ except Exception as err:
+ logger.error("Impossible to configure static IP address: {}".format(err))
+
+ # configure gateway
+ if 'gateway' in subnet and subnet['gateway'] != '0.0.0.0':
+ logger.debug("Configuring gateway for {}: {}".format(iface_name, subnet['gateway']))
+ config.set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'], value=subnet['gateway'], replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'])
+
+ # configure routes
+ if 'routes' in subnet:
+ for item in subnet['routes']:
+ try:
+ ip_network = ipaddress.ip_network('{}/{}'.format(item['network'], item['netmask']))
+ if ip_network.version == 4:
+ logger.debug("Configuring IPv4 route on {}: {} via {}".format(iface_name, ip_network.compressed, item['gateway']))
+ config.set(['protocols', 'static', 'route', ip_network.compressed, 'next-hop'], value=item['gateway'], replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', ip_network.compressed, 'next-hop'])
+ if ip_network.version == 6:
+ logger.debug("Configuring IPv6 route on {}: {} via {}".format(iface_name, ip_network.compressed, item['gateway']))
+ config.set(['protocols', 'static', 'route6', ip_network.compressed, 'next-hop'], value=item['gateway'], replace=True)
+ config.set_tag(['protocols', 'static', 'route6'])
+ config.set_tag(['protocols', 'static', 'route6', ip_network.compressed, 'next-hop'])
+ except Exception as err:
+ logger.error("Impossible to detect IP protocol version: {}".format(err))
+
+ # configure nameservers
+ if 'dns_nameservers' in subnet:
+ for item in subnet['dns_nameservers']:
+ logger.debug("Configuring DNS nameserver for {}: {}".format(iface_name, item))
+ config.set(['system', 'name-server'], value=item, replace=False)
+
+ if 'dns_search' in subnet:
+ for item in subnet['dns_search']:
+ logger.debug("Configuring DNS search domain for {}: {}".format(iface_name, item))
+ config.set(['system', 'domain-search', 'domain'], value=item, replace=False)
+
+ # configure nameservers
+ if iface_config['type'] == 'nameserver':
+ for item in iface_config['address']:
+ logger.debug("Configuring DNS nameserver: {}".format(item))
+ config.set(['system', 'name-server'], value=item, replace=False)
+
+ if 'search' in iface_config:
+ for item in iface_config['search']:
+ logger.debug("Configuring DNS search domain: {}".format(item))
+ config.set(['system', 'domain-search', 'domain'], value=item, replace=False)
+
+ # configure routes
+ if iface_config['type'] == 'route':
+ try:
+ ip_network = ipaddress.ip_network(iface_config['destination'])
+ if ip_network.version == 4:
+ logger.debug("Configuring IPv4 route: {} via {}".format(ip_network.compressed, iface_config['gateway']))
+ config.set(['protocols', 'static', 'route', ip_network.compressed, 'next-hop'], value=iface_config['gateway'], replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', ip_network.compressed, 'next-hop'])
+ if 'metric' in iface_config:
+ config.set(['protocols', 'static', 'route', ip_network.compressed, 'next-hop', iface_config['gateway'], 'distance'], value=iface_config['metric'], replace=True)
+ if ip_network.version == 6:
+ logger.debug("Configuring IPv6 route: {} via {}".format(ip_network.compressed, iface_config['gateway']))
+ config.set(['protocols', 'static', 'route6', ip_network.compressed, 'next-hop'], value=iface_config['gateway'], replace=True)
+ config.set_tag(['protocols', 'static', 'route6'])
+ config.set_tag(['protocols', 'static', 'route6', ip_network.compressed, 'next-hop'])
+ if 'metric' in iface_config:
+ config.set(['protocols', 'static', 'route6', ip_network.compressed, 'next-hop', iface_config['gateway'], 'distance'], value=iface_config['metric'], replace=True)
+ except Exception as err:
+ logger.error("Impossible to detect IP protocol version: {}".format(err))
+
+
+# configure interface from networking config version 2
+def set_config_interfaces_v2(config, iface_name, iface_config):
+ logger.debug("Configuring network using Cloud-init networking config version 2")
+
+ # configure MAC
+ if 'match' in iface_config and 'macaddress' in iface_config['match']:
+ logger.debug("Setting MAC for {}: {}".format(iface_name, iface_config['match']['macaddress']))
+ config.set(['interfaces', 'ethernet', iface_name, 'hw-id'], value=iface_config['match']['macaddress'], replace=True)
+ config.set_tag(['interfaces', 'ethernet'])
+
+ # configure DHCP client
+ if 'dhcp4' in iface_config:
+ if iface_config['dhcp4'] is True:
+ set_ipaddress(config, iface_name, 'dhcp')
+ if 'dhcp6' in iface_config:
+ if iface_config['dhcp6'] is True:
+ set_ipaddress(config, iface_name, 'dhcpv6')
+
+ # configure static addresses
+ if 'addresses' in iface_config:
+ for item in iface_config['addresses']:
+ set_ipaddress(config, iface_name, item)
+
+ # configure gateways
+ if 'gateway4' in iface_config:
+ logger.debug("Configuring IPv4 gateway for {}: {}".format(iface_name, iface_config['gateway4']))
+ config.set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'], value=iface_config['gateway4'], replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'])
+ if 'gateway6' in iface_config:
+ logger.debug("Configuring IPv6 gateway for {}: {}".format(iface_name, iface_config['gateway6']))
+ config.set(['protocols', 'static', 'route6', '::/0', 'next-hop'], value=iface_config['gateway6'], replace=True)
+ config.set_tag(['protocols', 'static', 'route6'])
+ config.set_tag(['protocols', 'static', 'route6', '::/0', 'next-hop'])
+
+ # configure MTU
+ if 'mtu' in iface_config:
+ logger.debug("Setting MTU for {}: {}".format(iface_name, iface_config['mtu']))
+ config.set(['interfaces', 'ethernet', iface_name, 'mtu'], value=iface_config['mtu'], replace=True)
+ config.set_tag(['interfaces', 'ethernet'])
+
+ # configure routes
+ if 'routes' in iface_config:
+ for item in iface_config['routes']:
+ try:
+ if ipaddress.ip_network(item['to']).version == 4:
+ logger.debug("Configuring IPv4 route on {}: {} via {}".format(iface_name, item['to'], item['via']))
+ config.set(['protocols', 'static', 'route', item['to'], 'next-hop'], value=item['via'], replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', item['to'], 'next-hop'])
+ if ipaddress.ip_network(item['to']).version == 6:
+ logger.debug("Configuring IPv6 route on {}: {} via {}".format(iface_name, item['to'], item['via']))
+ config.set(['protocols', 'static', 'route6', item['to'], 'next-hop'], value=item['via'], replace=True)
+ config.set_tag(['protocols', 'static', 'route6'])
+ config.set_tag(['protocols', 'static', 'route6', item['to'], 'next-hop'])
+ except Exception as err:
+ logger.error("Impossible to detect IP protocol version: {}".format(err))
+
+ # configure nameservers
+ if 'nameservers' in iface_config:
+ if 'search' in iface_config['nameservers']:
+ for item in iface_config['nameservers']['search']:
+ logger.debug("Configuring DNS search domain for {}: {}".format(iface_name, item))
+ config.set(['system', 'domain-search', 'domain'], value=item, replace=False)
+ if 'addresses' in iface_config['nameservers']:
+ for item in iface_config['nameservers']['addresses']:
+ logger.debug("Configuring DNS nameserver for {}: {}".format(iface_name, item))
+ config.set(['system', 'name-server'], value=item, replace=False)
+
+
+# configure DHCP client for eth0 interface (fallback)
+def set_config_dhcp(config):
+ logger.debug("Configuring DHCPv4 on eth0 interface (fallback)")
+ set_ipaddress(config, 'eth0', 'dhcp')
+
+
+# configure SSH server service
+def set_config_ssh(config):
+ logger.debug("Configuring SSH service")
+ config.set(['service', 'ssh'], replace=True)
+ config.set(['service', 'ssh', 'port'], value='22', replace=True)
+ config.set(['service', 'ssh', 'client-keepalive-interval'], value='180', replace=True)
+
+
+# configure hostname
+def set_config_hostname(config, hostname, fqdn):
+ if hostname:
+ logger.debug("Configuring hostname to: {}".format(hostname_filter(hostname)))
+ config.set(['system', 'host-name'], value=hostname_filter(hostname), replace=True)
+ if fqdn:
+ try:
+ domain_name = fqdn.partition("{}.".format(hostname))[2]
+ if domain_name:
+ logger.debug("Configuring domain-name to: {}".format(hostname_filter(domain_name)))
+ config.set(['system', 'domain-name'], value=hostname_filter(domain_name), replace=True)
+ except Exception as err:
+ logger.error("Failed to configure domain-name: {}".format(err))
+
+
+# cleanup network interface config file added by cloud-init
+def network_cleanup():
+ logger.debug("Cleaning up network configuration applied by Cloud-Init")
+ net_config_file = Path('/etc/network/interfaces.d/50-cloud-init')
+ if net_config_file.exists():
+ logger.debug(f"Configuration file {net_config_file} was found")
+ try:
+ # get a list of interfaces that need to be deconfigured
+ configured_ifaces = run(
+ ['ifquery', '-l', '-X', 'lo', '-i', net_config_file],
+ capture_output=True).stdout.decode().splitlines()
+ if configured_ifaces:
+ for iface in configured_ifaces:
+ logger.debug(f"Deconfiguring interface: {iface}")
+ run(['ifdown', iface], stdout=DEVNULL)
+ # delete the file
+ net_config_file.unlink()
+ logger.debug(f"Configuration file {net_config_file} was removed")
+ except Exception as err:
+ logger.error(f"Failed to cleanup network configuration: {err}")
+
+
+# main config handler
+def handle(name, cfg, cloud, log, _args):
+ logger.debug("Cloud-init config: {}".format(cfg))
+ # fetch all required data from Cloud-init
+ # Datasource name
+ dsname = cloud.datasource.dsname
+ logger.debug("Datasource: {}".format(dsname))
+ # Metadata (datasource specific)
+ metadata_ds = cloud.datasource.metadata
+ logger.debug("Meta-Data ds: {}".format(metadata_ds))
+ # Metadata in stable v1 format (the same structure for all datasources)
+ instance_data_json = load_json(load_file("{}/{}".format(cloud.datasource.paths.run_dir, INSTANCE_JSON_FILE)))
+ metadata_v1 = instance_data_json.get('v1')
+ logger.debug("Meta-Data v1: {}".format(metadata_v1))
+ # User-Data
+ userdata = cloud.datasource.userdata
+ logger.debug("User-Data: {}".format(userdata))
+ # Vendor-Data
+ vendordata = cloud.datasource.vendordata
+ logger.debug("Vendor-Data: {}".format(vendordata))
+ # Network-config
+ netcfg = cloud.datasource.network_config
+ if netcfg:
+ netcfg_src = dsname
+ else:
+ init_stage = Init()
+ (netcfg, netcfg_src) = init_stage._find_networking_config()
+ logger.debug("Network-config: {}".format(netcfg))
+ logger.debug("Network-config source: {}".format(netcfg_src))
+ # Hostname with FQDN (if it exists)
+ (hostname, fqdn) = get_hostname_fqdn(cfg, cloud, metadata_only=True)
+ logger.debug("Hostname: {}, FQDN: {}".format(hostname, fqdn))
+ # Get users list
+ (users, _) = ug_util.normalize_users_groups(cfg, cloud.distro)
+ logger.debug("Users: {}".format(users))
+ (default_user, default_user_config) = ug_util.extract_default(users)
+ logger.debug("Default user: {}".format(default_user))
+ # Get OVF properties
+ if 'OVF' in dsname:
+ ovf_environment = ovf_get_properties(cloud.datasource.environment)
+ logger.debug("OVF environment: {}".format(ovf_environment))
+
+ # VyOS configuration file selection
+ cfg_file_name = '/opt/vyatta/etc/config/config.boot'
+ bak_file_name = '/opt/vyatta/etc/config.boot.default'
+
+ # open configuration file
+ if not Path(cfg_file_name).exists():
+ file_name = bak_file_name
+ else:
+ file_name = cfg_file_name
+
+ logger.debug("Using configuration file: {}".format(file_name))
+ with open(file_name, 'r') as f:
+ config_file = f.read()
+ config = ConfigTree(config_file)
+
+ # Initialization of variables
+ DEFAULT_VYOS_USER = 'vyos'
+ DEFAULT_VYOS_PASSWORD = 'vyos'
+ logins_configured = False
+ network_configured = False
+
+ # configure system logins
+ # Prepare SSH public keys for the default user, to be sure that global keys are applied to the default account (if it exists)
+ # If the ssh key is left empty on an OVA deploy, the OVF datastore passes an empty string which generates an invalid key error.
+ # Set the ssh_keys variable from metadata_v1['public_ssh_keys'], filtered for empty strings.
+ ssh_keys = [key for key in metadata_v1['public_ssh_keys'] if key]
+ # append SSH keys from cloud-config
+ ssh_keys.extend(cfg.get('ssh_authorized_keys', []))
+ # Configure authentication for default user account
+ if default_user:
+ # key-based
+ for ssh_key in ssh_keys:
+ if set_ssh_login(config, default_user, ssh_key):
+ logins_configured = True
+ # password-based
+ password = cfg.get('password')
+ if password:
+ if set_pass_login(config, default_user, password):
+ logins_configured = True
+
+ # Configure all users accounts
+ for user, user_cfg in users.items():
+ # Configure password-based authentication
+ password = user_cfg.get('passwd')
+ if password and password != '':
+ if set_pass_login(config, user, password):
+ logins_configured = True
+
+ # Configure key-based authentication
+ for ssh_key in user_cfg.get('ssh_authorized_keys', []):
+ if set_ssh_login(config, user, ssh_key):
+ logins_configured = True
+
+ # Create a fallback user if no others were configured
+ if not logins_configured:
+ logger.debug("Adding fallback user: {}".format(DEFAULT_VYOS_USER))
+ set_pass_login(config, DEFAULT_VYOS_USER, DEFAULT_VYOS_PASSWORD)
+
+ # apply settings from OVF template
+ if 'OVF' in dsname:
+ set_config_ovf(config, ovf_environment)
+ # An empty hostname option may be interpreted as the 'null' string by some hypervisors;
+ # we need to replace it with None to process it properly later
+ if hostname and hostname == 'null':
+ hostname = None
+ network_configured = True
+
+ # process networking configuration data
+ if netcfg and network_configured is False:
+ # check which version of the config we have
+ # version 1
+ if netcfg['version'] == 1:
+ for interface_config in netcfg['config']:
+ set_config_interfaces_v1(config, interface_config)
+ network_configured = True
+
+ # version 2
+ if netcfg['version'] == 2:
+ if 'ethernets' in netcfg:
+ for interface_name, interface_config in netcfg['ethernets'].items():
+ set_config_interfaces_v2(config, interface_name, interface_config)
+ network_configured = True
+
+ # enable DHCPv4 on eth0 if network still not configured
+ if network_configured is False:
+ set_config_dhcp(config)
+
+ # enable SSH service
+ set_config_ssh(config)
+ # configure hostname and domain
+ if hostname:
+ set_config_hostname(config, hostname, fqdn)
+ else:
+ set_config_hostname(config, 'vyos', None)
+
+ # save a new configuration file
+ try:
+ with open(cfg_file_name, 'w') as f:
+ f.write(config.to_string())
+ logger.debug("Configuration file saved: {}".format(cfg_file_name))
+ except Exception as e:
+ logger.error("Failed to write configs into file {}: {}".format(cfg_file_name, e))
+
+ # since we already have a config file, it is time to clean up what Cloud-init may have left behind
+ network_cleanup()
diff --git a/cloudinit/config/cc_vyos_userdata.py b/cloudinit/config/cc_vyos_userdata.py
new file mode 100644
index 00000000..5ad27b31
--- /dev/null
+++ b/cloudinit/config/cc_vyos_userdata.py
@@ -0,0 +1,216 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2020 Sentrium S.L.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+from pathlib import Path
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+try:
+ from vyos.configtree import ConfigTree
+except ImportError as err:
+ print(f'The module cannot be imported: {err}')
+
+# configure logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+frequency = PER_INSTANCE
+# path to templates directory, required for analyzing nodes
+TEMPLATES_DIR = '/opt/vyatta/share/vyatta-cfg/templates/'
+# VyOS configuration files
+CFG_FILE_MAIN = '/opt/vyatta/etc/config/config.boot'
+CFG_FILE_DEFAULT = '/opt/vyatta/etc/config.boot.default'
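+
+# Illustrative cloud-config snippet consumed by this module
+# (the command strings below are examples only):
+#
+#   vyos_config_commands:
+#     - "set system host-name 'vyos'"
+#     - "delete service https"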
+
+
+# get list of all tag nodes
+def get_tag_nodes():
+ try:
+ logger.debug("Searching for tag nodes in configuration templates")
+ tag_nodes = []
+ # search for node.tag directories
+ node_tag_dirs = Path(TEMPLATES_DIR).rglob('node.tag')
+ # add each found directory to tag nodes list
+ for node_tag in node_tag_dirs:
+ current_node_path = node_tag.relative_to(TEMPLATES_DIR).parent.parts
+ tag_nodes.append(current_node_path)
+ logger.debug("Tag nodes: {}".format(tag_nodes))
+ return tag_nodes
+ except Exception as err:
+ logger.error("Failed to find tag nodes: {}".format(err))
+
+
+# get list of all multi nodes
+def get_multi_nodes():
+ try:
+ logger.debug("Searching for multi nodes in configuration templates")
+ multi_nodes = []
+ # search for node.def files
+ node_def_files = Path(TEMPLATES_DIR).rglob('node.def')
+ # prepare filter to match multi node files
+ regex_filter = re.compile(r'^multi:.*$', re.MULTILINE)
+ # add each node.def with multi mark to list
+ for node_def_file in node_def_files:
+ file_content = node_def_file.read_text()
+ if regex_filter.search(file_content):
+ current_multi_path = node_def_file.relative_to(
+ TEMPLATES_DIR).parent.parts
+ multi_nodes.append(current_multi_path)
+ logger.debug("Multi nodes: {}".format(multi_nodes))
+ return multi_nodes
+ except Exception as err:
+ logger.error("Failed to find multi nodes: {}".format(err))
+
+
+# check if a node is inside a list of nodes
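+# ('node.tag' entries in the list act as wildcards, e.g. the path
+# ('interfaces', 'ethernet', 'eth0') matches ('interfaces', 'ethernet', 'node.tag'))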
+def inside_nodes_list(node_path, nodes_list):
+ match = False
+ # compare with all items in list
+ for list_item in nodes_list:
+ # continue only if lengths are equal
+ if len(list_item) == len(node_path):
+ # match parts of nodes paths one by one
+ for element_id in list(range(len(node_path))):
+ # break if items do not match
+ if not (node_path[element_id] == list_item[element_id] or
+ list_item[element_id] == 'node.tag'):
+ break
+ # match as tag node only if both nodes have the same length
+ elif ((node_path[element_id] == list_item[element_id] or
+ list_item[element_id] == 'node.tag') and
+ element_id == len(node_path) - 1):
+ match = True
+ # break if we have a match
+ if match is True:
+ break
+ return match
+
+
+# convert string to command (action + path + value)
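+# e.g. "set system host-name 'vyos'" is parsed into
+# {'cmd_action': 'set', 'cmd_path': ['system', 'host-name'], 'cmd_value': 'vyos'}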
+def string_to_command(stringcmd):
+ # regex to split string to action + path + value
+ regex_filter = re.compile(
+ r'^(?P<cmd_action>set|delete) (?P<cmd_path>[^\']+)( \'(?P<cmd_value>.*)\')*$'
+ )
+ if regex_filter.search(stringcmd):
+ # command structure
+ command = {
+ 'cmd_action':
+ regex_filter.search(stringcmd).group('cmd_action'),
+ 'cmd_path':
+ regex_filter.search(stringcmd).group('cmd_path').split(),
+ 'cmd_value':
+ regex_filter.search(stringcmd).group('cmd_value')
+ }
+ return command
+ else:
+ return None
+
+
+# helper: mark nodes as tag in config, if this is necessary
+def mark_tag(config, node_path, tag_nodes):
+ current_node_path = []
+ # check and mark each element in command path if necessary
+ for current_node in node_path:
+ current_node_path.append(current_node)
+ if inside_nodes_list(current_node_path, tag_nodes):
+ logger.debug(
+ "Marking node as tag: \"{}\"".format(current_node_path))
+ config.set_tag(current_node_path)
+
+
+# apply "set" command
+def apply_command_set(config, tag_nodes, multi_nodes, command):
+ # if a node is a multi node, add the value instead of replacing it
+ replace_option = not inside_nodes_list(command['cmd_path'], multi_nodes)
+ if not replace_option:
+ logger.debug("{} is a multi node, adding value".format(
+ command['cmd_path']))
+
+ config.set(command['cmd_path'],
+ command['cmd_value'],
+ replace=replace_option)
+
+ # mark configured nodes as tag, if this is necessary
+ mark_tag(config, command['cmd_path'], tag_nodes)
+
+
+# apply "delete" command
+def apply_command_delete(config, command):
+ # delete a value
+ if command['cmd_value']:
+ config.delete_value(command['cmd_path'], command['cmd_value'])
+ # otherwise delete path
+ else:
+ config.delete(command['cmd_path'])
+
+
+# apply command
+def apply_commands(config, commands_list):
+ # get all tag and multi nodes
+ tag_nodes = get_tag_nodes()
+ multi_nodes = get_multi_nodes()
+
+ # iterate over configuration commands
+ for command_line in commands_list:
+ # convert the command into a format applicable to the configuration
+ command = string_to_command(command_line)
+ # if conversion is successful, apply the command
+ if command:
+ logger.debug("Configuring command: \"{}\"".format(command_line))
+ try:
+ if command['cmd_action'] == 'set':
+ apply_command_set(config, tag_nodes, multi_nodes, command)
+ if command['cmd_action'] == 'delete':
+ apply_command_delete(config, command)
+ except Exception as err:
+ logger.error("Unable to configure command: {}".format(err))
+
+
+# main config handler
+def handle(name, cfg, cloud, log, _args):
+ # Get commands list to configure
+ commands_list = cfg.get('vyos_config_commands', [])
+ logger.debug("Commands to configure: {}".format(commands_list))
+
+ if commands_list:
+ # open configuration file
+ if Path(CFG_FILE_MAIN).exists():
+ config_file_path = CFG_FILE_MAIN
+ else:
+ config_file_path = CFG_FILE_DEFAULT
+
+ logger.debug("Using configuration file: {}".format(config_file_path))
+ with open(config_file_path, 'r') as f:
+ config_file = f.read()
+ # load a file content into a config object
+ config = ConfigTree(config_file)
+
+ # Add configuration from the vyos_config_commands cloud-config section
+ try:
+ apply_commands(config, commands_list)
+ except Exception as err:
+ logger.error(
+ "Failed to apply configuration commands: {}".format(err))
+
+ # save a new configuration file
+ try:
+ with open(config_file_path, 'w') as f:
+ f.write(config.to_string())
+ logger.debug(
+ "Configuration file saved: {}".format(config_file_path))
+ except Exception as err:
+ logger.error("Failed to write config into the file {}: {}".format(
+ config_file_path, err))