summaryrefslogtreecommitdiff
path: root/cloudinit/config
diff options
context:
space:
mode:
Diffstat (limited to 'cloudinit/config')
-rw-r--r--cloudinit/config/cc_disk_setup.py2
-rw-r--r--cloudinit/config/cc_growpart.py15
-rw-r--r--cloudinit/config/cc_lxd.py12
-rw-r--r--cloudinit/config/cc_mounts.py8
-rw-r--r--cloudinit/config/cc_ntp.py8
-rw-r--r--cloudinit/config/cc_power_state_change.py58
-rw-r--r--cloudinit/config/cc_refresh_rmc_and_interface.py159
-rw-r--r--cloudinit/config/cc_reset_rmc.py143
-rw-r--r--cloudinit/config/cc_resizefs.py68
-rw-r--r--cloudinit/config/cc_resizefs_vyos.py68
-rw-r--r--cloudinit/config/cc_resolv_conf.py2
-rwxr-xr-xcloudinit/config/cc_ssh.py31
-rw-r--r--cloudinit/config/cc_users_groups.py16
-rw-r--r--cloudinit/config/schema.py41
-rw-r--r--cloudinit/config/tests/test_mounts.py33
-rw-r--r--cloudinit/config/tests/test_ssh.py68
16 files changed, 531 insertions, 201 deletions
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index a7bdc703..d1200694 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -35,7 +35,7 @@ either a size or a list containing a size and the numerical value for a
partition type. The size for partitions is specified in **percentage** of disk
space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space).
The ``overwrite`` option controls whether this module tries to be safe about
-writing partition talbes or not. If ``overwrite: false`` is set, the device
+writing partition tables or not. If ``overwrite: false`` is set, the device
will be checked for a partition table and for a file system and if either is
found, the operation will be skipped. If ``overwrite: true`` is set, no checks
will be performed.
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 237c3d02..9f338ad1 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -16,12 +16,13 @@ This is useful for cloud instances with a larger amount of disk space available
than the pristine image uses, as it allows the instance to automatically make
use of the extra space.
-The devices run growpart on are specified as a list under the ``devices`` key.
-Each entry in the devices list can be either the path to the device's
-mountpoint in the filesystem or a path to the block device in ``/dev``.
+The devices on which to run growpart are specified as a list under the
+``devices`` key. Each entry in the devices list can be either the path to the
+device's mountpoint in the filesystem or a path to the block device in
+``/dev``.
The utility to use for resizing can be selected using the ``mode`` config key.
-If ``mode`` key is set to ``auto``, then any available utility (either
+If the ``mode`` key is set to ``auto``, then any available utility (either
``growpart`` or BSD ``gpart``) will be used. If neither utility is available,
no error will be raised. If ``mode`` is set to ``growpart``, then the
``growpart`` utility will be used. If this utility is not available on the
@@ -34,7 +35,7 @@ where one tool is able to function and the other is not. The default
configuration for both should work for most cloud instances. To explicitly
prevent ``cloud-initramfs-tools`` from running ``growroot``, the file
``/etc/growroot-disabled`` can be created. By default, both ``growroot`` and
-``cc_growpart`` will check for the existance of this file and will not run if
+``cc_growpart`` will check for the existence of this file and will not run if
it is present. However, this file can be ignored for ``cc_growpart`` by setting
``ignore_growroot_disabled`` to ``true``. For more information on
``cloud-initramfs-tools`` see: https://launchpad.net/cloud-initramfs-tools
@@ -196,10 +197,6 @@ class ResizeGpart(object):
util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
raise ResizeFailedException(e) from e
- # Since growing the FS requires a reboot, make sure we reboot
- # first when this module has finished.
- open('/var/run/reboot-required', 'a').close()
-
return (before, get_size(partdev))
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 7129c9c6..486037d9 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -283,14 +283,18 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
fail_assume_enoent = "failed. Assuming it did not exist."
succeeded = "succeeded."
if create:
- msg = "Deletion of lxd network '%s' %s"
+ msg = "Detach of lxd network '%s' from profile '%s' %s"
try:
- _lxc(["network", "delete", net_name])
- LOG.debug(msg, net_name, succeeded)
+ _lxc(["network", "detach-profile", net_name, profile])
+ LOG.debug(msg, net_name, profile, succeeded)
except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
- LOG.debug(msg, net_name, fail_assume_enoent)
+ LOG.debug(msg, net_name, profile, fail_assume_enoent)
+ else:
+ msg = "Deletion of lxd network '%s' %s"
+ _lxc(["network", "delete", net_name])
+ LOG.debug(msg, net_name, succeeded)
if attach:
msg = "Removal of device '%s' from profile '%s' %s"
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 54f2f878..c22d1698 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -255,8 +255,9 @@ def create_swapfile(fname: str, size: str) -> None:
try:
subp.subp(cmd, capture=True)
except subp.ProcessExecutionError as e:
- LOG.warning(errmsg, fname, size, method, e)
+ LOG.info(errmsg, fname, size, method, e)
util.del_file(fname)
+ raise
swap_dir = os.path.dirname(fname)
util.ensure_dir(swap_dir)
@@ -269,9 +270,8 @@ def create_swapfile(fname: str, size: str) -> None:
else:
try:
create_swap(fname, size, "fallocate")
- except subp.ProcessExecutionError as e:
- LOG.warning(errmsg, fname, size, "dd", e)
- LOG.warning("Will attempt with dd.")
+ except subp.ProcessExecutionError:
+ LOG.info("fallocate swap creation failed, will attempt with dd")
create_swap(fname, size, "dd")
if os.path.exists(fname):
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 3d7279d6..e183993f 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -80,6 +80,14 @@ DISTRO_CLIENT_CONFIG = {
'confpath': '/etc/chrony/chrony.conf',
},
},
+ 'rhel': {
+ 'ntp': {
+ 'service_name': 'ntpd',
+ },
+ 'chrony': {
+ 'service_name': 'chronyd',
+ },
+ },
'opensuse': {
'chrony': {
'service_name': 'chronyd',
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 6fcb8a7d..5780a7e9 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -22,7 +22,7 @@ The ``delay`` key specifies a duration to be added onto any shutdown command
used. Therefore, if a 5 minute delay and a 120 second shutdown are specified,
the maximum amount of time between cloud-init starting and the system shutting
down is 7 minutes, and the minimum amount of time is 5 minutes. The ``delay``
-key must have an argument in either the form ``+5`` for 5 minutes or ``now``
+key must have an argument in either the form ``'+5'`` for 5 minutes or ``now``
for immediate shutdown.
Optionally, a command can be run to determine whether or not
@@ -117,7 +117,7 @@ def check_condition(cond, log=None):
def handle(_name, cfg, cloud, log, _args):
try:
- (args, timeout, condition) = load_power_state(cfg, cloud.distro.name)
+ (args, timeout, condition) = load_power_state(cfg, cloud.distro)
if args is None:
log.debug("no power_state provided. doing nothing")
return
@@ -144,19 +144,7 @@ def handle(_name, cfg, cloud, log, _args):
condition, execmd, [args, devnull_fp])
-def convert_delay(delay, fmt=None, scale=None):
- if not fmt:
- fmt = "+%s"
- if not scale:
- scale = 1
-
- if delay != "now":
- delay = fmt % int(int(delay) * int(scale))
-
- return delay
-
-
-def load_power_state(cfg, distro_name):
+def load_power_state(cfg, distro):
# returns a tuple of shutdown_command, timeout
# shutdown_command is None if no config found
pstate = cfg.get('power_state')
@@ -167,44 +155,16 @@ def load_power_state(cfg, distro_name):
if not isinstance(pstate, dict):
raise TypeError("power_state is not a dict.")
- opt_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
-
+ modes_ok = ['halt', 'poweroff', 'reboot']
mode = pstate.get("mode")
- if mode not in opt_map:
+ if mode not in distro.shutdown_options_map:
raise TypeError(
"power_state[mode] required, must be one of: %s. found: '%s'." %
- (','.join(opt_map.keys()), mode))
-
- delay = pstate.get("delay", "now")
- message = pstate.get("message")
- scale = 1
- fmt = "+%s"
- command = ["shutdown", opt_map[mode]]
-
- if distro_name == 'alpine':
- # Convert integer 30 or string '30' to '1800' (seconds) as Alpine's
- # halt/poweroff/reboot commands take seconds rather than minutes.
- scale = 60
- # No "+" in front of delay value as not supported by Alpine's commands.
- fmt = "%s"
- if delay == "now":
- # Alpine's commands do not understand "now".
- delay = "0"
- command = [mode, "-d"]
- # Alpine's commands don't support a message.
- message = None
-
- try:
- delay = convert_delay(delay, fmt=fmt, scale=scale)
- except ValueError as e:
- raise TypeError(
- "power_state[delay] must be 'now' or '+m' (minutes)."
- " found '%s'." % delay
- ) from e
+ (','.join(modes_ok), mode))
- args = command + [delay]
- if message:
- args.append(message)
+ args = distro.shutdown_command(mode=mode,
+ delay=pstate.get("delay", "now"),
+ message=pstate.get("message"))
try:
timeout = float(pstate.get('timeout', 30.0))
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
new file mode 100644
index 00000000..146758ad
--- /dev/null
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -0,0 +1,159 @@
+# (c) Copyright IBM Corp. 2020 All Rights Reserved
+#
+# Author: Aman Kumar Sinha <amansi26@in.ibm.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""
+Refresh IPv6 interface and RMC
+------------------------------
+**Summary:** Ensure Network Manager is not managing IPv6 interface
+
+This module is IBM PowerVM Hypervisor specific
+
+Reliable Scalable Cluster Technology (RSCT) is a set of software components
+that together provide a comprehensive clustering environment (RAS features)
+for IBM PowerVM based virtual machines. RSCT includes the Resource
+Monitoring and Control (RMC) subsystem. RMC is a generalized framework used
+for managing, monitoring, and manipulating resources. RMC runs as a daemon
+process on individual machines and needs creation of unique node id and
+restarts during VM boot.
+For more details, refer to
+https://www.ibm.com/support/knowledgecenter/en/SGVKBA_3.2/admin/bl503_ovrv.htm
+
+This module handles
+- Refreshing RMC
+- Disabling NetworkManager from handling IPv6 interface, as IPv6 interface
+ is used for communication between RMC daemon and PowerVM hypervisor.
+
+**Internal name:** ``cc_refresh_rmc_and_interface``
+
+**Module frequency:** per always
+
+**Supported distros:** RHEL
+
+"""
+
+from cloudinit import log as logging
+from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
+from cloudinit import subp
+from cloudinit import netinfo
+
+import errno
+
+frequency = PER_ALWAYS
+
+LOG = logging.getLogger(__name__)
+# Ensure that /opt/rsct/bin has been added to standard PATH of the
+# distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl .
+RMCCTRL = 'rmcctrl'
+
+
+def handle(name, _cfg, _cloud, _log, _args):
+ if not subp.which(RMCCTRL):
+ LOG.debug("No '%s' in path, disabled", RMCCTRL)
+ return
+
+ LOG.debug(
+ 'Making the IPv6 up explicitly. '
+ 'Ensuring IPv6 interface is not being handled by NetworkManager '
+ 'and it is restarted to re-establish the communication with '
+ 'the hypervisor')
+
+ ifaces = find_ipv6_ifaces()
+
+ # Setting NM_CONTROLLED=no for IPv6 interface
+ # making it down and up
+
+ if len(ifaces) == 0:
+ LOG.debug("Did not find any interfaces with ipv6 addresses.")
+ else:
+ for iface in ifaces:
+ refresh_ipv6(iface)
+ disable_ipv6(sysconfig_path(iface))
+ restart_network_manager()
+
+
+def find_ipv6_ifaces():
+ info = netinfo.netdev_info()
+ ifaces = []
+ for iface, data in info.items():
+ if iface == "lo":
+ LOG.debug('Skipping localhost interface')
+ if len(data.get("ipv4", [])) != 0:
+ # skip this interface, as it has ipv4 addrs
+ continue
+ ifaces.append(iface)
+ return ifaces
+
+
+def refresh_ipv6(interface):
+ # IPv6 interface is explicitly brought up, subsequent to which the
+ # RMC services are restarted to re-establish the communication with
+ # the hypervisor.
+ subp.subp(['ip', 'link', 'set', interface, 'down'])
+ subp.subp(['ip', 'link', 'set', interface, 'up'])
+
+
+def sysconfig_path(iface):
+ return '/etc/sysconfig/network-scripts/ifcfg-' + iface
+
+
+def restart_network_manager():
+ subp.subp(['systemctl', 'restart', 'NetworkManager'])
+
+
+def disable_ipv6(iface_file):
+    # Ensure that the communication between the hypervisor and VM is not
+    # interrupted due to NetworkManager. For this purpose, as part of
+    # this function, NM_CONTROLLED is explicitly set to no for the IPv6
+    # interface and NetworkManager is restarted.
+ try:
+ contents = util.load_file(iface_file)
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ LOG.debug("IPv6 interface file %s does not exist\n",
+ iface_file)
+ else:
+ raise e
+
+ if 'IPV6INIT' not in contents:
+ LOG.debug("Interface file %s did not have IPV6INIT", iface_file)
+ return
+
+ LOG.debug("Editing interface file %s ", iface_file)
+
+ # Dropping any NM_CONTROLLED or IPV6 lines from IPv6 interface file.
+ lines = contents.splitlines()
+ lines = [line for line in lines if not search(line)]
+ lines.append("NM_CONTROLLED=no")
+
+ with open(iface_file, "w") as fp:
+ fp.write("\n".join(lines) + "\n")
+
+
+def search(contents):
+ # Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file.
+ return(
+ contents.startswith("IPV6ADDR") or
+ contents.startswith("IPADDR6") or
+ contents.startswith("IPV6INIT") or
+ contents.startswith("NM_CONTROLLED"))
+
+
+def refresh_rmc():
+ # To make a healthy connection between RMC daemon and hypervisor we
+ # refresh RMC. With refreshing RMC we are ensuring that making IPv6
+ # down and up shouldn't impact communication between RMC daemon and
+ # hypervisor.
+ # -z : stop Resource Monitoring & Control subsystem and all resource
+ # managers, but the command does not return control to the user
+ # until the subsystem and all resource managers are stopped.
+ # -s : start Resource Monitoring & Control subsystem.
+ try:
+ subp.subp([RMCCTRL, '-z'])
+ subp.subp([RMCCTRL, '-s'])
+ except Exception:
+ util.logexc(LOG, 'Failed to refresh the RMC subsystem.')
+ raise
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
new file mode 100644
index 00000000..1cd72774
--- /dev/null
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -0,0 +1,143 @@
+# (c) Copyright IBM Corp. 2020 All Rights Reserved
+#
+# Author: Aman Kumar Sinha <amansi26@in.ibm.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+"""
+Reset RMC
+------------
+**Summary:** reset rsct node id
+
+Reset RMC module is IBM PowerVM Hypervisor specific
+
+Reliable Scalable Cluster Technology (RSCT) is a set of software components,
+that together provide a comprehensive clustering environment (RAS features)
+for IBM PowerVM based virtual machines. RSCT includes the Resource monitoring
+and control (RMC) subsystem. RMC is a generalized framework used for managing,
+monitoring, and manipulating resources. RMC runs as a daemon process on
+individual machines and needs creation of unique node id and restarts
+during VM boot.
+For more details, refer to
+https://www.ibm.com/support/knowledgecenter/en/SGVKBA_3.2/admin/bl503_ovrv.htm
+
+This module handles
+- creation of a unique RSCT node id for every instance/virtual machine,
+  and ensures that, once set, it isn't changed subsequently by cloud-init.
+  In order to do so, it restarts the RSCT service.
+
+Prerequisite of using this module is to install RSCT packages.
+
+**Internal name:** ``cc_reset_rmc``
+
+**Module frequency:** per instance
+
+**Supported distros:** rhel, sles and ubuntu
+
+"""
+import os
+
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+from cloudinit import subp
+
+frequency = PER_INSTANCE
+
+# RMCCTRL is expected to be in system PATH (/opt/rsct/bin)
+# The symlink for RMCCTRL and RECFGCT are
+# /usr/sbin/rsct/bin/rmcctrl and
+# /usr/sbin/rsct/install/bin/recfgct respectively.
+RSCT_PATH = '/opt/rsct/install/bin'
+RMCCTRL = 'rmcctrl'
+RECFGCT = 'recfgct'
+
+LOG = logging.getLogger(__name__)
+
+NODE_ID_FILE = '/etc/ct_node_id'
+
+
+def handle(name, _cfg, cloud, _log, _args):
+ # Ensuring node id has to be generated only once during first boot
+ if cloud.datasource.platform_type == 'none':
+ LOG.debug('Skipping creation of new ct_node_id node')
+ return
+
+ if not os.path.isdir(RSCT_PATH):
+ LOG.debug("module disabled, RSCT_PATH not present")
+ return
+
+ orig_path = os.environ.get('PATH')
+ try:
+ add_path(orig_path)
+ reset_rmc()
+ finally:
+ if orig_path:
+ os.environ['PATH'] = orig_path
+ else:
+ del os.environ['PATH']
+
+
+def reconfigure_rsct_subsystems():
+ # Reconfigure the RSCT subsystems, which includes removing all RSCT data
+ # under the /var/ct directory, generating a new node ID, and making it
+ # appear as if the RSCT components were just installed
+ try:
+ out = subp.subp([RECFGCT])[0]
+ LOG.debug(out.strip())
+ return out
+ except subp.ProcessExecutionError:
+ util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.')
+ raise
+
+
+def get_node_id():
+ try:
+ fp = util.load_file(NODE_ID_FILE)
+ node_id = fp.split('\n')[0]
+ return node_id
+ except Exception:
+ util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE)
+ raise
+
+
+def add_path(orig_path):
+    # Add RSCT_PATH to the standard PATH environment variable
+    # so that cloud-init automatically finds and
+    # runs RECFGCT to create a new node_id.
+ suff = ":" + orig_path if orig_path else ""
+ os.environ['PATH'] = RSCT_PATH + suff
+ return os.environ['PATH']
+
+
+def rmcctrl():
+ # Stop the RMC subsystem and all resource managers so that we can make
+ # some changes to it
+ try:
+ return subp.subp([RMCCTRL, '-z'])
+ except Exception:
+ util.logexc(LOG, 'Failed to stop the RMC subsystem.')
+ raise
+
+
+def reset_rmc():
+ LOG.debug('Attempting to reset RMC.')
+
+ node_id_before = get_node_id()
+ LOG.debug('Node ID at beginning of module: %s', node_id_before)
+
+ # Stop the RMC subsystem and all resource managers so that we can make
+ # some changes to it
+ rmcctrl()
+ reconfigure_rsct_subsystems()
+
+ node_id_after = get_node_id()
+ LOG.debug('Node ID at end of module: %s', node_id_after)
+
+ # Check if new node ID is generated or not
+ # by comparing old and new node ID
+ if node_id_after == node_id_before:
+ msg = 'New node ID did not get generated.'
+ LOG.error(msg)
+ raise Exception(msg)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 978d2ee0..9afbb847 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -9,10 +9,7 @@
"""Resizefs: cloud-config module which resizes the filesystem"""
import errno
-import getopt
import os
-import re
-import shlex
import stat
from textwrap import dedent
@@ -88,56 +85,23 @@ def _resize_zfs(mount_point, devpth):
return ('zpool', 'online', '-e', mount_point, devpth)
-def _get_dumpfs_output(mount_point):
- return subp.subp(['dumpfs', '-m', mount_point])[0]
-
-
-def _get_gpart_output(part):
- return subp.subp(['gpart', 'show', part])[0]
-
-
def _can_skip_resize_ufs(mount_point, devpth):
- # extract the current fs sector size
- """
- # dumpfs -m /
- # newfs command for / (/dev/label/rootfs)
- newfs -L rootf -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384
- -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf
- """
- cur_fs_sz = None
- frag_sz = None
- dumpfs_res = _get_dumpfs_output(mount_point)
- for line in dumpfs_res.splitlines():
- if not line.startswith('#'):
- newfs_cmd = shlex.split(line)
- opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:L:'
- optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value)
- for o, a in optlist:
- if o == "-s":
- cur_fs_sz = int(a)
- if o == "-f":
- frag_sz = int(a)
- # check the current partition size
- # Example output from `gpart show /dev/da0`:
- # => 40 62914480 da0 GPT (30G)
- # 40 1024 1 freebsd-boot (512K)
- # 1064 58719232 2 freebsd-ufs (28G)
- # 58720296 3145728 3 freebsd-swap (1.5G)
- # 61866024 1048496 - free - (512M)
- expect_sz = None
- m = re.search('^(/dev/.+)p([0-9])$', devpth)
- gpart_res = _get_gpart_output(m.group(1))
- for line in gpart_res.splitlines():
- if re.search(r"freebsd-ufs", line):
- fields = line.split()
- expect_sz = int(fields[1])
- # Normalize the gpart sector size,
- # because the size is not exactly the same as fs size.
- normal_expect_sz = (expect_sz - expect_sz % (frag_sz / 512))
- if normal_expect_sz == cur_fs_sz:
- return True
- else:
- return False
+ # possible errors cases on the code-path to growfs -N following:
+ # https://github.com/freebsd/freebsd/blob/HEAD/sbin/growfs/growfs.c
+ # This is the "good" error:
+ skip_start = "growfs: requested size"
+ skip_contain = "is not larger than the current filesystem size"
+ # growfs exits with 1 for almost all cases up to this one.
+ # This means we can't just use rcs=[0, 1] as subp parameter:
+ try:
+ subp.subp(['growfs', '-N', devpth])
+ except subp.ProcessExecutionError as e:
+ if e.stderr.startswith(skip_start) and skip_contain in e.stderr:
+ # This FS is already at the desired size
+ return True
+ else:
+ raise e
+ return False
# Do not use a dictionary as these commands should be able to be used
diff --git a/cloudinit/config/cc_resizefs_vyos.py b/cloudinit/config/cc_resizefs_vyos.py
index f5555afc..f8eb84fe 100644
--- a/cloudinit/config/cc_resizefs_vyos.py
+++ b/cloudinit/config/cc_resizefs_vyos.py
@@ -9,10 +9,7 @@
"""Resizefs: cloud-config module which resizes the filesystem"""
import errno
-import getopt
import os
-import re
-import shlex
import stat
from textwrap import dedent
@@ -101,56 +98,23 @@ def _resize_zfs(mount_point, devpth):
return ('zpool', 'online', '-e', mount_point, devpth)
-def _get_dumpfs_output(mount_point):
- return subp.subp(['dumpfs', '-m', mount_point])[0]
-
-
-def _get_gpart_output(part):
- return subp.subp(['gpart', 'show', part])[0]
-
-
def _can_skip_resize_ufs(mount_point, devpth):
- # extract the current fs sector size
- """
- # dumpfs -m /
- # newfs command for / (/dev/label/rootfs)
- newfs -L rootf -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384
- -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf
- """
- cur_fs_sz = None
- frag_sz = None
- dumpfs_res = _get_dumpfs_output(mount_point)
- for line in dumpfs_res.splitlines():
- if not line.startswith('#'):
- newfs_cmd = shlex.split(line)
- opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:L:'
- optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value)
- for o, a in optlist:
- if o == "-s":
- cur_fs_sz = int(a)
- if o == "-f":
- frag_sz = int(a)
- # check the current partition size
- # Example output from `gpart show /dev/da0`:
- # => 40 62914480 da0 GPT (30G)
- # 40 1024 1 freebsd-boot (512K)
- # 1064 58719232 2 freebsd-ufs (28G)
- # 58720296 3145728 3 freebsd-swap (1.5G)
- # 61866024 1048496 - free - (512M)
- expect_sz = None
- m = re.search('^(/dev/.+)p([0-9])$', devpth)
- gpart_res = _get_gpart_output(m.group(1))
- for line in gpart_res.splitlines():
- if re.search(r"freebsd-ufs", line):
- fields = line.split()
- expect_sz = int(fields[1])
- # Normalize the gpart sector size,
- # because the size is not exactly the same as fs size.
- normal_expect_sz = (expect_sz - expect_sz % (frag_sz / 512))
- if normal_expect_sz == cur_fs_sz:
- return True
- else:
- return False
+ # possible errors cases on the code-path to growfs -N following:
+ # https://github.com/freebsd/freebsd/blob/HEAD/sbin/growfs/growfs.c
+ # This is the "good" error:
+ skip_start = "growfs: requested size"
+ skip_contain = "is not larger than the current filesystem size"
+ # growfs exits with 1 for almost all cases up to this one.
+ # This means we can't just use rcs=[0, 1] as subp parameter:
+ try:
+ subp.subp(['growfs', '-N', devpth])
+ except subp.ProcessExecutionError as e:
+ if e.stderr.startswith(skip_start) and skip_contain in e.stderr:
+ # This FS is already at the desired size
+ return True
+ else:
+ raise e
+ return False
# Do not use a dictionary as these commands should be able to be used
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 519e66eb..7beb11ca 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -14,7 +14,7 @@ Resolv Conf
This module is intended to manage resolv.conf in environments where early
configuration of resolv.conf is necessary for further bootstrapping and/or
where configuration management such as puppet or chef own dns configuration.
-As Debian/Ubuntu will, by default, utilize resovlconf, and similarly RedHat
+As Debian/Ubuntu will, by default, utilize resolvconf, and similarly RedHat
will use sysconfig, this module is likely to be of little use unless those
are configured correctly.
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 9b2a333a..05a16dbc 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -83,8 +83,9 @@ enabled by default.
Host keys can be added using the ``ssh_keys`` configuration key. The argument
to this config key should be a dictionary entries for the public and private
keys of each desired key type. Entries in the ``ssh_keys`` config dict should
-have keys in the format ``<key type>_private`` and ``<key type>_public``,
-e.g. ``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported
+have keys in the format ``<key type>_private``, ``<key type>_public``, and,
+optionally, ``<key type>_certificate``, e.g. ``rsa_private: <key>``,
+``rsa_public: <key>``, and ``rsa_certificate: <key>``. See below for supported
key types. Not all key types have to be specified, ones left unspecified will
not be used. If this config option is used, then no keys will be generated.
@@ -94,7 +95,8 @@ not be used. If this config option is used, then no keys will be generated.
secure
.. note::
- to specify multiline private host keys, use yaml multiline syntax
+ to specify multiline private host keys and certificates, use yaml
+ multiline syntax
If no host keys are specified using ``ssh_keys``, then keys will be generated
using ``ssh-keygen``. By default one public/private pair of each supported
@@ -128,12 +130,17 @@ config flags are:
...
-----END RSA PRIVATE KEY-----
rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+ rsa_certificate: |
+ ssh-rsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
dsa_private: |
-----BEGIN DSA PRIVATE KEY-----
MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
...
-----END DSA PRIVATE KEY-----
dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+ dsa_certificate: |
+ ssh-dsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
+
ssh_genkeytypes: <key type>
disable_root: <true/false>
disable_root_opts: <disable root options string>
@@ -169,6 +176,8 @@ for k in GENERATE_KEY_NAMES:
CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
CONFIG_KEY_TO_FILE.update(
{"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
+ CONFIG_KEY_TO_FILE.update(
+ {"%s_certificate" % k: (KEY_FILE_TPL % k + "-cert.pub", 0o600)})
PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
@@ -186,12 +195,18 @@ def handle(_name, cfg, cloud, log, _args):
util.logexc(log, "Failed deleting key file %s", f)
if "ssh_keys" in cfg:
- # if there are keys in cloud-config, use them
+ # if there are keys and/or certificates in cloud-config, use them
for (key, val) in cfg["ssh_keys"].items():
- if key in CONFIG_KEY_TO_FILE:
- tgt_fn = CONFIG_KEY_TO_FILE[key][0]
- tgt_perms = CONFIG_KEY_TO_FILE[key][1]
- util.write_file(tgt_fn, val, tgt_perms)
+ # skip entry if unrecognized
+ if key not in CONFIG_KEY_TO_FILE:
+ continue
+ tgt_fn = CONFIG_KEY_TO_FILE[key][0]
+ tgt_perms = CONFIG_KEY_TO_FILE[key][1]
+ util.write_file(tgt_fn, val, tgt_perms)
+ # set server to present the most recently identified certificate
+ if '_certificate' in key:
+ cert_config = {'HostCertificate': tgt_fn}
+ ssh_util.update_ssh_config(cert_config)
for (priv, pub) in PRIV_TO_PUB.items():
if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index 426498a3..ac4a4410 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -26,13 +26,14 @@ entry of the ``users`` list. Each entry in the ``users`` list, other than a
config keys for an entry in ``users`` are as follows:
- ``name``: The user's login name
- - ``expiredate``: Optional. Date on which the user's login will be
+ - ``expiredate``: Optional. Date on which the user's account will be
disabled. Default: none
- ``gecos``: Optional. Comment about the user, usually a comma-separated
string of real name and contact information. Default: none
- ``groups``: Optional. Additional groups to add the user to. Default: none
- ``homedir``: Optional. Home dir for user. Default is ``/home/<username>``
- - ``inactive``: Optional. Mark user inactive. Default: false
+ - ``inactive``: Optional. Number of days after a password expires until
+ the account is permanently disabled. Default: none
- ``lock_passwd``: Optional. Disable password login. Default: true
- ``no_create_home``: Optional. Do not create home directory. Default:
false
@@ -80,10 +81,9 @@ config keys for an entry in ``users`` are as follows:
.. note::
Most of these configuration options will not be honored if the user
- already exists. Following options are the exceptions and they are
- applicable on already-existing users:
- - 'plain_text_passwd', 'hashed_passwd', 'lock_passwd', 'sudo',
- 'ssh_authorized_keys', 'ssh_redirect_user'.
+ already exists. The following options are the exceptions; they are applied
+ to already-existing users: ``plain_text_passwd``, ``hashed_passwd``,
+ ``lock_passwd``, ``sudo``, ``ssh_authorized_keys``, ``ssh_redirect_user``.
**Internal name:** ``cc_users_groups``
@@ -103,11 +103,11 @@ config keys for an entry in ``users`` are as follows:
- name: <some_restricted_user>
sudo: false
- name: <username>
- expiredate: <date>
+ expiredate: '<date>'
gecos: <comment>
groups: <additional groups>
homedir: <home directory>
- inactive: <true/false>
+ inactive: '<number of days>'
lock_passwd: <true/false>
no_create_home: <true/false>
no_log_init: <true/false>
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 8a966aee..456bab2c 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""schema.py: Set of module functions for processing cloud-config schema."""
+from cloudinit.cmd.devel import read_cfg_paths
from cloudinit import importer
from cloudinit.util import find_modules, load_file
@@ -173,7 +174,8 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
def validate_cloudconfig_file(config_path, schema, annotate=False):
"""Validate cloudconfig file adheres to a specific jsonschema.
- @param config_path: Path to the yaml cloud-config file to parse.
+ @param config_path: Path to the yaml cloud-config file to parse, or None
+ to default to system userdata from Paths object.
@param schema: Dict describing a valid jsonschema to validate against.
@param annotate: Boolean set True to print original config file with error
annotations on the offending lines.
@@ -181,9 +183,24 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
@raises SchemaValidationError containing any of schema_errors encountered.
@raises RuntimeError when config_path does not exist.
"""
- if not os.path.exists(config_path):
- raise RuntimeError('Configfile {0} does not exist'.format(config_path))
- content = load_file(config_path, decode=False)
+ if config_path is None:
+ # Use system's raw userdata path
+ if os.getuid() != 0:
+ raise RuntimeError(
+ "Unable to read system userdata as non-root user."
+ " Try using sudo"
+ )
+ paths = read_cfg_paths()
+ user_data_file = paths.get_ipath_cur("userdata_raw")
+ content = load_file(user_data_file, decode=False)
+ else:
+ if not os.path.exists(config_path):
+ raise RuntimeError(
+ 'Configfile {0} does not exist'.format(
+ config_path
+ )
+ )
+ content = load_file(config_path, decode=False)
if not content.startswith(CLOUD_CONFIG_HEADER):
errors = (
('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
@@ -425,6 +442,8 @@ def get_parser(parser=None):
description='Validate cloud-config files or document schema')
parser.add_argument('-c', '--config-file',
help='Path of the cloud-config yaml file to validate')
+ parser.add_argument('--system', action='store_true', default=False,
+ help='Validate the system cloud-config userdata')
parser.add_argument('-d', '--docs', nargs='+',
help=('Print schema module docs. Choices: all or'
' space-delimited cc_names.'))
@@ -435,11 +454,11 @@ def get_parser(parser=None):
def handle_schema_args(name, args):
"""Handle provided schema args and perform the appropriate actions."""
- exclusive_args = [args.config_file, args.docs]
- if not any(exclusive_args) or all(exclusive_args):
- error('Expected either --config-file argument or --docs')
+ exclusive_args = [args.config_file, args.docs, args.system]
+ if len([arg for arg in exclusive_args if arg]) != 1:
+ error('Expected one of --config-file, --system or --docs arguments')
full_schema = get_schema()
- if args.config_file:
+ if args.config_file or args.system:
try:
validate_cloudconfig_file(
args.config_file, full_schema, args.annotate)
@@ -449,7 +468,11 @@ def handle_schema_args(name, args):
except RuntimeError as e:
error(str(e))
else:
- print("Valid cloud-config file {0}".format(args.config_file))
+ if args.config_file is None:
+ cfg_name = "system userdata"
+ else:
+ cfg_name = args.config_file
+ print("Valid cloud-config:", cfg_name)
elif args.docs:
schema_ids = [subschema['id'] for subschema in full_schema['allOf']]
schema_ids += ['all']
diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py
index 764a33e3..56510fd6 100644
--- a/cloudinit/config/tests/test_mounts.py
+++ b/cloudinit/config/tests/test_mounts.py
@@ -4,6 +4,7 @@ from unittest import mock
import pytest
from cloudinit.config.cc_mounts import create_swapfile
+from cloudinit.subp import ProcessExecutionError
M_PATH = 'cloudinit.config.cc_mounts.'
@@ -26,3 +27,35 @@ class TestCreateSwapfile:
create_swapfile(fname, '')
assert mock.call(['mkswap', fname]) in m_subp.call_args_list
+
+ @mock.patch(M_PATH + "util.get_mount_info")
+ @mock.patch(M_PATH + "subp.subp")
+ def test_fallback_from_fallocate_to_dd(
+ self, m_subp, m_get_mount_info, caplog, tmpdir
+ ):
+ swap_file = tmpdir.join("swap-file")
+ fname = str(swap_file)
+
+ def subp_side_effect(cmd, *args, **kwargs):
+ # Mock fallocate failing, to initiate fallback
+ if cmd[0] == "fallocate":
+ raise ProcessExecutionError()
+
+ m_subp.side_effect = subp_side_effect
+ # Use ext4 so both fallocate and dd are valid swap creation methods
+ m_get_mount_info.return_value = (mock.ANY, "ext4")
+
+ create_swapfile(fname, "")
+
+ cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list]
+ assert "fallocate" in cmds, "fallocate was not called"
+ assert "dd" in cmds, "fallocate failure did not fallback to dd"
+
+ assert cmds.index("dd") > cmds.index(
+ "fallocate"
+ ), "dd ran before fallocate"
+
+ assert mock.call(["mkswap", fname]) in m_subp.call_args_list
+
+ msg = "fallocate swap creation failed, will attempt with dd"
+ assert msg in caplog.text
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
index 0c554414..87ccdb60 100644
--- a/cloudinit/config/tests/test_ssh.py
+++ b/cloudinit/config/tests/test_ssh.py
@@ -10,6 +10,8 @@ import logging
LOG = logging.getLogger(__name__)
MODPATH = "cloudinit.config.cc_ssh."
+KEY_NAMES_NO_DSA = [name for name in cc_ssh.GENERATE_KEY_NAMES
+ if name not in 'dsa']
@mock.patch(MODPATH + "ssh_util.setup_user_keys")
@@ -25,7 +27,7 @@ class TestHandleSsh(CiTestCase):
}
self.test_hostkey_files = []
hostkey_tmpdir = self.tmp_dir()
- for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']:
+ for key_type in cc_ssh.GENERATE_KEY_NAMES:
key_data = self.test_hostkeys[key_type]
filename = 'ssh_host_%s_key.pub' % key_type
filepath = os.path.join(hostkey_tmpdir, filename)
@@ -223,7 +225,7 @@ class TestHandleSsh(CiTestCase):
cfg = {}
expected_call = [self.test_hostkeys[key_type] for key_type
- in ['ecdsa', 'ed25519', 'rsa']]
+ in KEY_NAMES_NO_DSA]
cc_ssh.handle("name", cfg, cloud, LOG, None)
self.assertEqual([mock.call(expected_call)],
cloud.datasource.publish_host_keys.call_args_list)
@@ -252,7 +254,7 @@ class TestHandleSsh(CiTestCase):
cfg = {'ssh_publish_hostkeys': {'enabled': True}}
expected_call = [self.test_hostkeys[key_type] for key_type
- in ['ecdsa', 'ed25519', 'rsa']]
+ in KEY_NAMES_NO_DSA]
cc_ssh.handle("name", cfg, cloud, LOG, None)
self.assertEqual([mock.call(expected_call)],
cloud.datasource.publish_host_keys.call_args_list)
@@ -339,7 +341,65 @@ class TestHandleSsh(CiTestCase):
cfg = {'ssh_publish_hostkeys': {'enabled': True,
'blacklist': []}}
expected_call = [self.test_hostkeys[key_type] for key_type
- in ['dsa', 'ecdsa', 'ed25519', 'rsa']]
+ in cc_ssh.GENERATE_KEY_NAMES]
cc_ssh.handle("name", cfg, cloud, LOG, None)
self.assertEqual([mock.call(expected_call)],
cloud.datasource.publish_host_keys.call_args_list)
+
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "util.write_file")
+ def test_handle_ssh_keys_in_cfg(self, m_write_file, m_nug, m_setup_keys):
+ """Test handle with ssh keys and certificate."""
+ # Populate a config dictionary to pass to handle() as well
+ # as the expected file-writing calls.
+ cfg = {"ssh_keys": {}}
+
+ expected_calls = []
+ for key_type in cc_ssh.GENERATE_KEY_NAMES:
+ private_name = "{}_private".format(key_type)
+ public_name = "{}_public".format(key_type)
+ cert_name = "{}_certificate".format(key_type)
+
+ # Actual key contents don't have to be realistic
+ private_value = "{}_PRIVATE_KEY".format(key_type)
+ public_value = "{}_PUBLIC_KEY".format(key_type)
+ cert_value = "{}_CERT_KEY".format(key_type)
+
+ cfg["ssh_keys"][private_name] = private_value
+ cfg["ssh_keys"][public_name] = public_value
+ cfg["ssh_keys"][cert_name] = cert_value
+
+ expected_calls.extend([
+ mock.call(
+ '/etc/ssh/ssh_host_{}_key'.format(key_type),
+ private_value,
+ 384
+ ),
+ mock.call(
+ '/etc/ssh/ssh_host_{}_key.pub'.format(key_type),
+ public_value,
+ 384
+ ),
+ mock.call(
+ '/etc/ssh/ssh_host_{}_key-cert.pub'.format(key_type),
+ cert_value,
+ 384
+ ),
+ mock.call(
+ '/etc/ssh/sshd_config',
+ ('HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub'
+ '\n'.format(key_type)),
+ preserve_mode=True
+ )
+ ])
+
+ # Run the handler.
+ m_nug.return_value = ([], {})
+ with mock.patch(MODPATH + 'ssh_util.parse_ssh_config',
+ return_value=[]):
+ cc_ssh.handle("name", cfg, self.tmp_cloud(distro='ubuntu'),
+ LOG, None)
+
+ # Check that all expected output has been done.
+ for call_ in expected_calls:
+ self.assertIn(call_, m_write_file.call_args_list)