Diffstat (limited to 'cloudinit/config')
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 18
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py | 4
-rwxr-xr-x  cloudinit/config/cc_byobu.py | 6
-rw-r--r--  cloudinit/config/cc_chef.py | 27
-rw-r--r--  cloudinit/config/cc_debug.py | 6
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 5
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 2
-rw-r--r--  cloudinit/config/cc_final_message.py | 2
-rw-r--r--  cloudinit/config/cc_growpart.py | 31
-rw-r--r--  cloudinit/config/cc_keys_to_console.py | 12
-rw-r--r--  cloudinit/config/cc_landscape.py | 3
-rw-r--r--  cloudinit/config/cc_lxd.py | 19
-rw-r--r--  cloudinit/config/cc_mcollective.py | 10
-rw-r--r--  cloudinit/config/cc_mounts.py | 86
-rw-r--r--  cloudinit/config/cc_ntp.py | 20
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py | 7
-rw-r--r--  cloudinit/config/cc_phone_home.py | 12
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 22
-rw-r--r--  cloudinit/config/cc_puppet.py | 47
-rw-r--r--  cloudinit/config/cc_resizefs.py | 23
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 4
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 6
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 7
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 11
-rw-r--r--  cloudinit/config/cc_salt_minion.py | 13
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py | 4
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py | 7
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py | 9
-rw-r--r--  cloudinit/config/cc_scripts_user.py | 4
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py | 4
-rw-r--r--  cloudinit/config/cc_seed_random.py | 5
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 12
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py | 86
-rw-r--r--  cloudinit/config/cc_snap_config.py | 184
-rw-r--r--  cloudinit/config/cc_snappy.py | 321
-rwxr-xr-x  cloudinit/config/cc_ssh.py | 153
-rwxr-xr-x  cloudinit/config/cc_ssh_authkey_fingerprints.py | 14
-rwxr-xr-x  cloudinit/config/cc_ssh_import_id.py | 8
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py | 223
-rw-r--r--  cloudinit/config/cc_ubuntu_drivers.py | 160
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py | 8
-rw-r--r--  cloudinit/config/cc_users_groups.py | 6
-rw-r--r--  cloudinit/config/cc_vyos.py | 141
-rw-r--r--  cloudinit/config/cc_write_files.py | 3
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 22
-rw-r--r--  cloudinit/config/cc_zypper_add_repo.py | 3
-rw-r--r--  cloudinit/config/schema.py | 2
-rw-r--r--  cloudinit/config/tests/test_apt_pipelining.py | 28
-rw-r--r--  cloudinit/config/tests/test_set_passwords.py | 88
-rw-r--r--  cloudinit/config/tests/test_snap.py | 2
-rw-r--r--  cloudinit/config/tests/test_ssh.py | 202
-rw-r--r--  cloudinit/config/tests/test_ubuntu_advantage.py | 347
-rw-r--r--  cloudinit/config/tests/test_ubuntu_drivers.py | 237
-rw-r--r--  cloudinit/config/tests/test_users_groups.py | 28
54 files changed, 1590 insertions, 1124 deletions
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index e18944ec..c44dec45 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -127,7 +127,7 @@ to ``^[\\w-]+:\\w``
Source list entries can be specified as a dictionary under the ``sources``
config key, with key in the dict representing a different source file. The key
-The key of each source entry will be used as an id that can be referenced in
+of each source entry will be used as an id that can be referenced in
other config entries, as well as the filename for the source's configuration
under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``,
it will be appended. If there is no configuration for a key in ``sources``, no
@@ -253,7 +253,7 @@ def get_default_mirrors(arch=None, target=None):
architecture, for more see:
https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
if arch is None:
- arch = util.get_architecture(target)
+ arch = util.get_dpkg_architecture(target)
if arch in PRIMARY_ARCHES:
return PRIMARY_ARCH_MIRRORS.copy()
if arch in PORTS_ARCHES:
@@ -303,13 +303,13 @@ def apply_apt(cfg, cloud, target):
LOG.debug("handling apt config: %s", cfg)
release = util.lsb_release(target=target)['codename']
- arch = util.get_architecture(target)
+ arch = util.get_dpkg_architecture(target)
mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
LOG.debug("Apt Mirror info: %s", mirrors)
if util.is_false(cfg.get('preserve_sources_list', False)):
generate_sources_list(cfg, release, mirrors, cloud)
- rename_apt_lists(mirrors, target)
+ rename_apt_lists(mirrors, target, arch)
try:
apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
@@ -332,6 +332,8 @@ def apply_apt(cfg, cloud, target):
def debconf_set_selections(selections, target=None):
+ if not selections.endswith(b'\n'):
+ selections += b'\n'
util.subp(['debconf-set-selections'], data=selections, target=target,
capture=True)
@@ -374,7 +376,7 @@ def apply_debconf_selections(cfg, target=None):
selections = '\n'.join(
[selsets[key] for key in sorted(selsets.keys())])
- debconf_set_selections(selections.encode() + b"\n", target=target)
+ debconf_set_selections(selections.encode(), target=target)
# get a complete list of packages listed in input
pkgs_cfgd = set()
@@ -425,9 +427,9 @@ def mirrorurl_to_apt_fileprefix(mirror):
return string
-def rename_apt_lists(new_mirrors, target=None):
+def rename_apt_lists(new_mirrors, target, arch):
"""rename_apt_lists - rename apt lists to preserve old cache data"""
- default_mirrors = get_default_mirrors(util.get_architecture(target))
+ default_mirrors = get_default_mirrors(arch)
pre = util.target_path(target, APT_LISTS)
for (name, omirror) in default_mirrors.items():
@@ -894,7 +896,7 @@ def find_apt_mirror_info(cfg, cloud, arch=None):
"""
if arch is None:
- arch = util.get_architecture()
+ arch = util.get_dpkg_architecture()
LOG.debug("got arch for mirror selection: %s", arch)
pmirror = get_mirror(cfg, "primary", arch, cloud)
LOG.debug("got primary mirror: %s", pmirror)
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index cdf28cd9..225d0905 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -49,7 +49,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
def handle(_name, cfg, _cloud, log, _args):
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
+ apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os')
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false":
@@ -59,7 +59,7 @@ def handle(_name, cfg, _cloud, log, _args):
elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
else:
- log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
+ log.warning("Invalid option for apt_pipelining: %s", apt_pipe_value)
def write_apt_snippet(setting, log, f_name):
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 8570da15..0b4352c8 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -60,7 +60,7 @@ def handle(name, cfg, cloud, log, args):
valid = ("enable-user", "enable-system", "enable",
"disable-user", "disable-system", "disable")
if value not in valid:
- log.warn("Unknown value %s for byobu_by_default", value)
+ log.warning("Unknown value %s for byobu_by_default", value)
mod_user = value.endswith("-user")
mod_sys = value.endswith("-system")
@@ -80,8 +80,8 @@ def handle(name, cfg, cloud, log, args):
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
if not user:
- log.warn(("No default byobu user provided, "
- "can not launch %s for the default user"), bl_inst)
+ log.warning(("No default byobu user provided, "
+ "can not launch %s for the default user"), bl_inst)
else:
shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
shcmd += " || X=$(($X+1)); "
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 46abedd1..01d61fa1 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -51,6 +51,7 @@ file).
chef:
client_key:
+ encrypted_data_bag_secret:
environment:
file_backup_path:
file_cache_path:
@@ -78,8 +79,6 @@ from cloudinit import templater
from cloudinit import url_helper
from cloudinit import util
-import six
-
RUBY_VERSION_DEFAULT = "1.8"
CHEF_DIRS = tuple([
@@ -114,6 +113,7 @@ CHEF_RB_TPL_DEFAULTS = {
'file_backup_path': "/var/backups/chef",
'pid_file': "/var/run/chef/client.pid",
'show_time': True,
+ 'encrypted_data_bag_secret': None,
}
CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
CHEF_RB_TPL_PATH_KEYS = frozenset([
@@ -124,6 +124,7 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([
'json_attribs',
'file_cache_path',
'pid_file',
+ 'encrypted_data_bag_secret',
])
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
@@ -193,7 +194,7 @@ def handle(name, cfg, cloud, log, _args):
# If there isn't a chef key in the configuration don't do anything
if 'chef' not in cfg:
log.debug(("Skipping module named %s,"
- " no 'chef' key in configuration"), name)
+ " no 'chef' key in configuration"), name)
return
chef_cfg = cfg['chef']
@@ -212,9 +213,9 @@ def handle(name, cfg, cloud, log, _args):
if vcert != "system":
util.write_file(vkey_path, vcert)
elif not os.path.isfile(vkey_path):
- log.warn("chef validation_cert provided as 'system', but "
- "validation_key path '%s' does not exist.",
- vkey_path)
+ log.warning("chef validation_cert provided as 'system', but "
+ "validation_key path '%s' does not exist.",
+ vkey_path)
# Create the chef config from template
template_fn = cloud.get_template_filename('chef_client.rb')
@@ -231,8 +232,8 @@ def handle(name, cfg, cloud, log, _args):
util.ensure_dirs(param_paths)
templater.render_to_file(template_fn, CHEF_RB_PATH, params)
else:
- log.warn("No template found, not rendering to %s",
- CHEF_RB_PATH)
+ log.warning("No template found, not rendering to %s",
+ CHEF_RB_PATH)
# Set the firstboot json
fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
@@ -270,12 +271,12 @@ def run_chef(chef_cfg, log):
cmd_args = chef_cfg['exec_arguments']
if isinstance(cmd_args, (list, tuple)):
cmd.extend(cmd_args)
- elif isinstance(cmd_args, six.string_types):
+ elif isinstance(cmd_args, str):
cmd.append(cmd_args)
else:
- log.warn("Unknown type %s provided for chef"
- " 'exec_arguments' expected list, tuple,"
- " or string", type(cmd_args))
+ log.warning("Unknown type %s provided for chef"
+ " 'exec_arguments' expected list, tuple,"
+ " or string", type(cmd_args))
cmd.extend(CHEF_EXEC_DEF_ARGS)
else:
cmd.extend(CHEF_EXEC_DEF_ARGS)
@@ -331,7 +332,7 @@ def install_chef(cloud, chef_cfg, log):
retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
omnibus_version=omnibus_version)
else:
- log.warn("Unknown chef install type '%s'", install_type)
+ log.warning("Unknown chef install type '%s'", install_type)
run = False
return run
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index 0a039eb3..4d5a6aa2 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -28,11 +28,11 @@ location that this cloud-init has been configured with when running.
"""
import copy
-
-from six import StringIO
+from io import StringIO
from cloudinit import type_utils
from cloudinit import util
+from cloudinit import safeyaml
SKIP_KEYS = frozenset(['log_cfgs'])
@@ -49,7 +49,7 @@ def _make_header(text):
def _dumps(obj):
- text = util.yaml_dumps(obj, explicit_start=False, explicit_end=False)
+ text = safeyaml.dumps(obj, explicit_start=False, explicit_end=False)
return text.rstrip()
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 29e192e8..0796cb7b 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -825,6 +825,7 @@ def lookup_force_flag(fs):
'btrfs': '-f',
'xfs': '-f',
'reiserfs': '-f',
+ 'swap': '-f',
}
if 'ext' in fs.lower():
@@ -982,7 +983,9 @@ def mkfs(fs_cfg):
# File systems that support the -F flag
if overwrite or device_type(device) == "disk":
- fs_cmd.append(lookup_force_flag(fs_type))
+ force_flag = lookup_force_flag(fs_type)
+ if force_flag:
+ fs_cmd.append(force_flag)
# Add the extends FS options
if fs_opts:
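Editor's note: the ``cc_disk_setup`` hunks above add a force flag for swap and stop appending an empty flag when a filesystem type has none. A minimal sketch of the lookup-plus-guard pattern, assuming the same flag table as the hunk and ``-F`` for ext filesystems:

FORCE_FLAGS = {
    'btrfs': '-f',
    'xfs': '-f',
    'reiserfs': '-f',
    'swap': '-f',
}

def lookup_force_flag(fs: str) -> str:
    """Return the force flag for a filesystem type, or '' when none applies."""
    if 'ext' in fs.lower():
        return '-F'
    return FORCE_FLAGS.get(fs.lower(), '')

fs_cmd = ['mkswap', '/dev/vdb1']
force_flag = lookup_force_flag('swap')
if force_flag:  # guard from the hunk: never append an empty argument
    fs_cmd.append(force_flag)
assert fs_cmd == ['mkswap', '/dev/vdb1', '-f']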
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index eb9fbe66..b342e04d 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -69,6 +69,6 @@ def handle(name, _cfg, cloud, log, args):
util.subp(cmd)
except Exception as e:
# TODO(harlowja), use log exception from utils??
- log.warn("Emission of upstart event %s failed due to: %s", n, e)
+ log.warning("Emission of upstart event %s failed due to: %s", n, e)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index c61f03d4..fd141541 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -83,6 +83,6 @@ def handle(_name, cfg, cloud, log, args):
util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
if cloud.datasource.is_disconnected:
- log.warn("Used fallback datasource")
+ log.warning("Used fallback datasource")
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index bafca9d8..1b512a06 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -22,11 +22,11 @@ mountpoint in the filesystem or a path to the block device in ``/dev``.
The utility to use for resizing can be selected using the ``mode`` config key.
If ``mode`` key is set to ``auto``, then any available utility (either
-``growpart`` or ``gpart``) will be used. If neither utility is available, no
-error will be raised. If ``mode`` is set to ``growpart``, then the ``growpart``
-utility will be used. If this utility is not available on the system, this will
-result in an error. If ``mode`` is set to ``off`` or ``false``, then
-``cc_growpart`` will take no action.
+``growpart`` or BSD ``gpart``) will be used. If neither utility is available,
+no error will be raised. If ``mode`` is set to ``growpart``, then the
+``growpart`` utility will be used. If this utility is not available on the
+system, this will result in an error. If ``mode`` is set to ``off`` or
+``false``, then ``cc_growpart`` will take no action.
There is some functionality overlap between this module and the ``growroot``
functionality of ``cloud-initramfs-tools``. However, there are some situations
@@ -132,7 +132,7 @@ class ResizeGrowPart(object):
try:
(out, _err) = util.subp(["growpart", "--help"], env=myenv)
- if re.search(r"--update\s+", out, re.DOTALL):
+ if re.search(r"--update\s+", out):
return True
except util.ProcessExecutionError:
@@ -161,9 +161,17 @@ class ResizeGrowPart(object):
class ResizeGpart(object):
def available(self):
- if not util.which('gpart'):
- return False
- return True
+ myenv = os.environ.copy()
+ myenv['LANG'] = 'C'
+
+ try:
+ (_out, err) = util.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
+ if re.search(r"gpart recover ", err):
+ return True
+
+ except util.ProcessExecutionError:
+ pass
+ return False
def resize(self, diskdev, partnum, partdev):
"""
@@ -215,7 +223,8 @@ def device_part_info(devpath):
# FreeBSD doesn't know of sysfs so just get everything we need from
# the device, like /dev/vtbd0p2.
if util.is_FreeBSD():
- m = re.search('^(/dev/.+)p([0-9])$', devpath)
+ freebsd_part = "/dev/" + util.find_freebsd_part(devpath)
+ m = re.search('^(/dev/.+)p([0-9])$', freebsd_part)
return (m.group(1), m.group(2))
if not os.path.exists(syspath):
@@ -320,7 +329,7 @@ def handle(_name, cfg, _cloud, log, _args):
mycfg = cfg.get('growpart')
if not isinstance(mycfg, dict):
- log.warn("'growpart' in config was not a dict")
+ log.warning("'growpart' in config was not a dict")
return
mode = mycfg.get('mode', "auto")
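Editor's note: the reworked ``ResizeGpart.available()`` above no longer treats any ``gpart`` binary on ``$PATH`` as usable; it runs ``gpart help`` and looks for the ``recover`` subcommand in the usage text printed to stderr. A rough standalone equivalent using only the standard library (the function name is illustrative):

import os
import re
import subprocess

def bsd_gpart_available() -> bool:
    # Probe `gpart help` with a C locale and accept exit codes 0 or 1,
    # mirroring the rcs=[0, 1] call in the hunk above.
    env = os.environ.copy()
    env['LANG'] = 'C'
    try:
        proc = subprocess.run(['gpart', 'help'], env=env,
                              capture_output=True, text=True)
    except OSError:  # binary not present at all
        return False
    if proc.returncode not in (0, 1):
        return False
    return bool(re.search(r'gpart recover ', proc.stderr))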
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index aff4010e..3d2ded3d 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -9,10 +9,10 @@
"""
Keys to Console
---------------
-**Summary:** control which ssh keys may be written to console
+**Summary:** control which SSH keys may be written to console
-For security reasons it may be desirable not to write ssh fingerprints and keys
-to the console. To avoid the fingerprint of types of ssh keys being written to
+For security reasons it may be desirable not to write SSH fingerprints and keys
+to the console. To avoid the fingerprint of types of SSH keys being written to
console the ``ssh_fp_console_blacklist`` config key can be used. By default all
types of keys will have their fingerprints written to console. To avoid keys
of a key type being written to console the ``ssh_key_console_blacklist`` config
@@ -52,8 +52,8 @@ def _get_helper_tool_path(distro):
def handle(name, cfg, cloud, log, _args):
helper_path = _get_helper_tool_path(cloud.distro)
if not os.path.exists(helper_path):
- log.warn(("Unable to activate module %s,"
- " helper tool not found at %s"), name, helper_path)
+ log.warning(("Unable to activate module %s,"
+ " helper tool not found at %s"), name, helper_path)
return
fp_blacklist = util.get_cfg_option_list(cfg,
@@ -68,7 +68,7 @@ def handle(name, cfg, cloud, log, _args):
util.multi_log("%s\n" % (stdout.strip()),
stderr=False, console=True)
except Exception:
- log.warn("Writing keys to the system console failed!")
+ log.warning("Writing keys to the system console failed!")
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index eaf1e940..a9c04d86 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -56,8 +56,7 @@ The following default client config is provided, but can be overridden::
"""
import os
-
-from six import BytesIO
+from io import BytesIO
from configobj import ConfigObj
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 24a8ebea..151a9844 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -66,21 +66,21 @@ def handle(name, cfg, cloud, log, args):
name)
return
if not isinstance(lxd_cfg, dict):
- log.warn("lxd config must be a dictionary. found a '%s'",
- type(lxd_cfg))
+ log.warning("lxd config must be a dictionary. found a '%s'",
+ type(lxd_cfg))
return
# Grab the configuration
init_cfg = lxd_cfg.get('init')
if not isinstance(init_cfg, dict):
- log.warn("lxd/init config must be a dictionary. found a '%s'",
- type(init_cfg))
+ log.warning("lxd/init config must be a dictionary. found a '%s'",
+ type(init_cfg))
init_cfg = {}
bridge_cfg = lxd_cfg.get('bridge', {})
if not isinstance(bridge_cfg, dict):
- log.warn("lxd/bridge config must be a dictionary. found a '%s'",
- type(bridge_cfg))
+ log.warning("lxd/bridge config must be a dictionary. found a '%s'",
+ type(bridge_cfg))
bridge_cfg = {}
# Install the needed packages
@@ -89,13 +89,13 @@ def handle(name, cfg, cloud, log, args):
packages.append('lxd')
if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
- packages.append('zfs')
+ packages.append('zfsutils-linux')
if len(packages):
try:
cloud.distro.install_packages(packages)
except util.ProcessExecutionError as exc:
- log.warn("failed to install packages %s: %s", packages, exc)
+ log.warning("failed to install packages %s: %s", packages, exc)
return
# Set up lxd if init config is given
@@ -152,7 +152,7 @@ def handle(name, cfg, cloud, log, args):
if cmd_attach:
log.debug("Setting up default lxd bridge: %s" %
- " ".join(cmd_create))
+ " ".join(cmd_attach))
_lxc(cmd_attach)
elif bridge_cfg:
@@ -301,5 +301,4 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
raise e
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
-
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index d5f63f5f..351183f1 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -49,9 +49,7 @@ private certificates for mcollective. Their values will be written to
"""
import errno
-
-import six
-from six import BytesIO
+import io
# Used since this can maintain comments
# and doesn't need a top level section
@@ -73,7 +71,7 @@ def configure(config, server_cfg=SERVER_CFG,
# original file in order to be able to mix the rest up.
try:
old_contents = util.load_file(server_cfg, quiet=False, decode=False)
- mcollective_config = ConfigObj(BytesIO(old_contents))
+ mcollective_config = ConfigObj(io.BytesIO(old_contents))
except IOError as e:
if e.errno != errno.ENOENT:
raise
@@ -93,7 +91,7 @@ def configure(config, server_cfg=SERVER_CFG,
'plugin.ssl_server_private'] = pricert_file
mcollective_config['securityprovider'] = 'ssl'
else:
- if isinstance(cfg, six.string_types):
+ if isinstance(cfg, str):
# Just set it in the 'main' section
mcollective_config[cfg_name] = cfg
elif isinstance(cfg, (dict)):
@@ -119,7 +117,7 @@ def configure(config, server_cfg=SERVER_CFG,
raise
# Now we got the whole (new) file, write to disk...
- contents = BytesIO()
+ contents = io.BytesIO()
mcollective_config.write(contents)
util.write_file(server_cfg, contents.getvalue(), mode=0o644)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 339baba9..4ae3f1fc 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -25,7 +25,7 @@ mountpoint (i.e. ``[ sda1 ]`` or ``[ sda1, null ]``).
The ``mount_default_fields`` config key allows default options to be specified
for the values in a ``mounts`` entry that are not specified, aside from the
-``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 7
+``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 6
values. It defaults to::
mount_default_fields: [none, none, "auto", "defaults,nobootwait", "0", "2"]
@@ -223,13 +223,58 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
return size
+def create_swapfile(fname, size):
+ """Size is in MiB."""
+
+ errmsg = "Failed to create swapfile '%s' of size %dMB via %s: %s"
+
+ def create_swap(fname, size, method):
+ LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
+ fname, fstype, method)
+
+ if method == "fallocate":
+ cmd = ['fallocate', '-l', '%dM' % size, fname]
+ elif method == "dd":
+ cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
+ 'count=%d' % size]
+
+ try:
+ util.subp(cmd, capture=True)
+ except util.ProcessExecutionError as e:
+ LOG.warning(errmsg, fname, size, method, e)
+ util.del_file(fname)
+
+ swap_dir = os.path.dirname(fname)
+ util.ensure_dir(swap_dir)
+
+ fstype = util.get_mount_info(swap_dir)[1]
+
+ if fstype in ("xfs", "btrfs"):
+ create_swap(fname, size, "dd")
+ else:
+ try:
+ create_swap(fname, size, "fallocate")
+ except util.ProcessExecutionError as e:
+ LOG.warning(errmsg, fname, size, "dd", e)
+ LOG.warning("Will attempt with dd.")
+ create_swap(fname, size, "dd")
+
+ util.chmod(fname, 0o600)
+ try:
+ util.subp(['mkswap', fname])
+ except util.ProcessExecutionError:
+ util.del_file(fname)
+ raise
+
+
def setup_swapfile(fname, size=None, maxsize=None):
"""
fname: full path string of filename to setup
size: the size to create. set to "auto" for recommended
maxsize: the maximum size
"""
- tdir = os.path.dirname(fname)
+ swap_dir = os.path.dirname(fname)
+ mibsize = str(int(size / (2 ** 20)))
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
@@ -237,28 +282,16 @@ def setup_swapfile(fname, size=None, maxsize=None):
LOG.debug("Not creating swap: failed to read meminfo")
return
- util.ensure_dir(tdir)
- size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
+ util.ensure_dir(swap_dir)
+ size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
memsize=memsize)
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
- mbsize = str(int(size / (2 ** 20)))
- msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
- try:
- util.ensure_dir(tdir)
- util.log_time(LOG.debug, msg, func=util.subp,
- args=[['sh', '-c',
- ('rm -f "$1" && umask 0066 && '
- '{ fallocate -l "${2}M" "$1" || '
- ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
- 'setup_swap', fname, mbsize]])
-
- except Exception as e:
- raise IOError("Failed %s: %s" % (msg, e))
+ util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile,
+ args=[fname, mibsize])
return fname
@@ -347,8 +380,8 @@ def handle(_name, cfg, cloud, log, _args):
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
- log.warn("Mount option %s not a list, got a %s instead",
- (i + 1), type_utils.obj_name(cfgmnt[i]))
+ log.warning("Mount option %s not a list, got a %s instead",
+ (i + 1), type_utils.obj_name(cfgmnt[i]))
continue
start = str(cfgmnt[i][0])
@@ -439,6 +472,7 @@ def handle(_name, cfg, cloud, log, _args):
cc_lines = []
needswap = False
+ need_mount_all = False
dirs = []
for line in actlist:
# write 'comment' in the fs_mntops, entry, claiming this
@@ -449,11 +483,18 @@ def handle(_name, cfg, cloud, log, _args):
dirs.append(line[1])
cc_lines.append('\t'.join(line))
+ mount_points = [v['mountpoint'] for k, v in util.mounts().items()
+ if 'mountpoint' in v]
for d in dirs:
try:
util.ensure_dir(d)
except Exception:
util.logexc(log, "Failed to make '%s' config-mount", d)
+ # dirs is list of directories on which a volume should be mounted.
+ # If any of them does not already show up in the list of current
+ # mount points, we will definitely need to do mount -a.
+ if not need_mount_all and d not in mount_points:
+ need_mount_all = True
sadds = [WS.sub(" ", n) for n in cc_lines]
sdrops = [WS.sub(" ", n) for n in fstab_removed]
@@ -473,6 +514,9 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("No changes to /etc/fstab made.")
else:
log.debug("Changes to fstab: %s", sops)
+ need_mount_all = True
+
+ if need_mount_all:
activate_cmds.append(["mount", "-a"])
if uses_systemd:
activate_cmds.append(["systemctl", "daemon-reload"])
@@ -484,7 +528,7 @@ def handle(_name, cfg, cloud, log, _args):
util.subp(cmd)
log.debug(fmt, "PASS")
except util.ProcessExecutionError:
- log.warn(fmt, "FAIL")
+ log.warning(fmt, "FAIL")
util.logexc(log, fmt, "FAIL")
# vi: ts=4 expandtab
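Editor's note: the new ``create_swapfile`` helper replaces the old shell one-liner. It uses ``dd`` directly on xfs and btrfs and otherwise tries ``fallocate`` first, falling back to ``dd`` on failure, before running ``mkswap``. A condensed sketch of that decision, without the module's logging and cleanup:

import os
import subprocess

def make_swapfile(path, size_mib, fstype):
    # Condensed version of the decision in create_swapfile above.
    dd_cmd = ['dd', 'if=/dev/zero', 'of=%s' % path, 'bs=1M',
              'count=%d' % size_mib]
    if fstype in ('xfs', 'btrfs'):
        # On these filesystems the module goes straight to dd.
        subprocess.check_call(dd_cmd)
    else:
        try:
            # Preferred fast path; fall back to dd if fallocate fails.
            subprocess.check_call(['fallocate', '-l', '%dM' % size_mib, path])
        except subprocess.CalledProcessError:
            subprocess.check_call(dd_cmd)
    os.chmod(path, 0o600)
    subprocess.check_call(['mkswap', path])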
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 9e074bda..5498bbaa 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -6,19 +6,17 @@
"""NTP: enable and configure ntp"""
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+import copy
+import os
+from textwrap import dedent
+
from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
from cloudinit import temp_utils
from cloudinit import templater
from cloudinit import type_utils
from cloudinit import util
-
-import copy
-import os
-import six
-from textwrap import dedent
+from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -460,7 +458,7 @@ def supplemental_schema_validation(ntp_config):
for key, value in sorted(ntp_config.items()):
keypath = 'ntp:config:' + key
if key == 'confpath':
- if not all([value, isinstance(value, six.string_types)]):
+ if not all([value, isinstance(value, str)]):
errors.append(
'Expected a config file path {keypath}.'
' Found ({value})'.format(keypath=keypath, value=value))
@@ -472,11 +470,11 @@ def supplemental_schema_validation(ntp_config):
elif key in ('template', 'template_name'):
if value is None: # Either template or template_name can be none
continue
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
errors.append(
'Expected a string type for {keypath}.'
' Found ({value})'.format(keypath=keypath, value=value))
- elif not isinstance(value, six.string_types):
+ elif not isinstance(value, str):
errors.append(
'Expected a string type for {keypath}.'
' Found ({value})'.format(keypath=keypath, value=value))
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 17b91011..86afffef 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -108,7 +108,8 @@ def handle(_name, cfg, cloud, log, _args):
reboot_fn_exists = os.path.isfile(REBOOT_FILE)
if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
try:
- log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE)
+ log.warning("Rebooting after upgrade or install per "
+ "%s", REBOOT_FILE)
# Flush the above warning + anything else out...
logging.flushLoggers(log)
_fire_reboot(log)
@@ -117,8 +118,8 @@ def handle(_name, cfg, cloud, log, _args):
errors.append(e)
if len(errors):
- log.warn("%s failed with exceptions, re-raising the last one",
- len(errors))
+ log.warning("%s failed with exceptions, re-raising the last one",
+ len(errors))
raise errors[-1]
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 3be0d1c1..b8e27090 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -79,8 +79,8 @@ def handle(name, cfg, cloud, log, args):
ph_cfg = cfg['phone_home']
if 'url' not in ph_cfg:
- log.warn(("Skipping module named %s, "
- "no 'url' found in 'phone_home' configuration"), name)
+ log.warning(("Skipping module named %s, "
+ "no 'url' found in 'phone_home' configuration"), name)
return
url = ph_cfg['url']
@@ -91,7 +91,7 @@ def handle(name, cfg, cloud, log, args):
except Exception:
tries = 10
util.logexc(log, "Configuration entry 'tries' is not an integer, "
- "using %s instead", tries)
+ "using %s instead", tries)
if post_list == "all":
post_list = POST_LIST_ALL
@@ -112,7 +112,7 @@ def handle(name, cfg, cloud, log, args):
all_keys[n] = util.load_file(path)
except Exception:
util.logexc(log, "%s: failed to open, can not phone home that "
- "data!", path)
+ "data!", path)
submit_keys = {}
for k in post_list:
@@ -120,8 +120,8 @@ def handle(name, cfg, cloud, log, args):
submit_keys[k] = all_keys[k]
else:
submit_keys[k] = None
- log.warn(("Requested key %s from 'post'"
- " configuration list not available"), k)
+ log.warning(("Requested key %s from 'post'"
+ " configuration list not available"), k)
# Get them read to be posted
real_submit_keys = {}
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 50b37470..3e81a3c7 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -49,16 +49,15 @@ key returns 0.
condition: <true/false/command>
"""
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
import errno
import os
import re
-import six
import subprocess
import time
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+
frequency = PER_INSTANCE
EXIT_FAIL = 254
@@ -103,24 +102,23 @@ def check_condition(cond, log=None):
return False
else:
if log:
- log.warn(pre + "unexpected exit %s. " % ret +
- "do not apply change.")
+ log.warning(pre + "unexpected exit %s. " % ret +
+ "do not apply change.")
return False
except Exception as e:
if log:
- log.warn(pre + "Unexpected error: %s" % e)
+ log.warning(pre + "Unexpected error: %s" % e)
return False
def handle(_name, cfg, _cloud, log, _args):
-
try:
(args, timeout, condition) = load_power_state(cfg)
if args is None:
log.debug("no power_state provided. doing nothing")
return
except Exception as e:
- log.warn("%s Not performing power state change!" % str(e))
+ log.warning("%s Not performing power state change!" % str(e))
return
if condition is False:
@@ -131,7 +129,7 @@ def handle(_name, cfg, _cloud, log, _args):
cmdline = givecmdline(mypid)
if not cmdline:
- log.warn("power_state: failed to get cmdline of current process")
+ log.warning("power_state: failed to get cmdline of current process")
return
devnull_fp = open(os.devnull, "w")
@@ -184,7 +182,7 @@ def load_power_state(cfg):
pstate['timeout'])
condition = pstate.get("condition", True)
- if not isinstance(condition, six.string_types + (list, bool)):
+ if not isinstance(condition, (str, list, bool)):
raise TypeError("condition type %s invalid. must be list, bool, str")
return (args, timeout, condition)
@@ -214,7 +212,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
def fatal(msg):
if log:
- log.warn(msg)
+ log.warning(msg)
doexit(EXIT_FAIL)
known_errnos = (errno.ENOENT, errno.ESRCH)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 4190a20b..c01f5b8f 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -24,9 +24,10 @@ module will attempt to start puppet even if no installation was performed.
The module also provides keys for configuring the new puppet 4 paths and
installing the puppet package from the puppetlabs repositories:
https://docs.puppet.com/puppet/4.2/reference/whered_it_go.html
-The keys are ``package_name``, ``conf_file`` and ``ssl_dir``. If unset, their
-values will default to ones that work with puppet 3.x and with distributions
-that ship modified puppet 4.x that uses the old paths.
+The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
+``csr_attributes_path``. If unset, their values will default to
+ones that work with puppet 3.x and with distributions that ship modified
+puppet 4.x that uses the old paths.
Puppet configuration can be specified under the ``conf`` key. The
configuration is specified as a dictionary containing high-level ``<section>``
@@ -40,6 +41,10 @@ If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
instead will be used as the puppermaster certificate. It should be specified
in pem format as a multi-line string (using the ``|`` yaml notation).
+Additionally it's possible to create a csr_attributes.yaml for
+CSR attributes and certificate extension requests.
+See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
+
**Internal name:** ``cc_puppet``
**Module frequency:** per instance
@@ -53,6 +58,7 @@ in pem format as a multi-line string (using the ``|`` yaml notation).
version: <version>
conf_file: '/etc/puppet/puppet.conf'
ssl_dir: '/var/lib/puppet/ssl'
+ csr_attributes_path: '/etc/puppet/csr_attributes.yaml'
package_name: 'puppet'
conf:
agent:
@@ -62,28 +68,38 @@ in pem format as a multi-line string (using the ``|`` yaml notation).
-------BEGIN CERTIFICATE-------
<cert data>
-------END CERTIFICATE-------
+ csr_attributes:
+ custom_attributes:
+ 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
+ extension_requests:
+ pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
+ pp_image_name: my_ami_image
+ pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
"""
-from six import StringIO
-
import os
import socket
+import yaml
+from io import StringIO
from cloudinit import helpers
from cloudinit import util
PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
+PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml'
PUPPET_PACKAGE_NAME = 'puppet'
class PuppetConstants(object):
- def __init__(self, puppet_conf_file, puppet_ssl_dir, log):
+ def __init__(self, puppet_conf_file, puppet_ssl_dir,
+ csr_attributes_path, log):
self.conf_path = puppet_conf_file
self.ssl_dir = puppet_ssl_dir
self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs")
self.ssl_cert_path = os.path.join(self.ssl_cert_dir, "ca.pem")
+ self.csr_attributes_path = csr_attributes_path
def _autostart_puppet(log):
@@ -98,8 +114,8 @@ def _autostart_puppet(log):
elif os.path.exists('/sbin/chkconfig'):
util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
- log.warn(("Sorry we do not know how to enable"
- " puppet services on this system"))
+ log.warning(("Sorry we do not know how to enable"
+ " puppet services on this system"))
def handle(name, cfg, cloud, log, _args):
@@ -118,11 +134,13 @@ def handle(name, cfg, cloud, log, _args):
conf_file = util.get_cfg_option_str(
puppet_cfg, 'conf_file', PUPPET_CONF_PATH)
ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR)
+ csr_attributes_path = util.get_cfg_option_str(
+ puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH)
- p_constants = PuppetConstants(conf_file, ssl_dir, log)
+ p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
if not install and version:
- log.warn(("Puppet install set false but version supplied,"
- " doing nothing."))
+ log.warning(("Puppet install set false but version supplied,"
+ " doing nothing."))
elif install:
log.debug(("Attempting to install puppet %s,"),
version if version else 'latest')
@@ -141,7 +159,7 @@ def handle(name, cfg, cloud, log, _args):
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
# Move to puppet_config.read_file when dropping py2.7
- puppet_config.readfp( # pylint: disable=W1505
+ puppet_config.readfp( # pylint: disable=W1505
StringIO(cleaned_contents),
filename=p_constants.conf_path)
for (cfg_name, cfg) in puppet_cfg['conf'].items():
@@ -176,6 +194,11 @@ def handle(name, cfg, cloud, log, _args):
% (p_constants.conf_path))
util.write_file(p_constants.conf_path, puppet_config.stringify())
+ if 'csr_attributes' in puppet_cfg:
+ util.write_file(p_constants.csr_attributes_path,
+ yaml.dump(puppet_cfg['csr_attributes'],
+ default_flow_style=False))
+
# Set it up so it autostarts
_autostart_puppet(log)
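Editor's note: the ``csr_attributes`` support added above simply serializes the user-supplied dictionary to the configured ``csr_attributes_path``. A small sketch of the resulting YAML, using the example values from the module documentation:

import yaml

csr_attributes = {
    'custom_attributes': {
        '1.2.840.113549.1.9.7': '342thbjkt82094y0uthhor289jnqthpc2290',
    },
    'extension_requests': {
        'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E',
        'pp_image_name': 'my_ami_image',
        'pp_preshared_key': '342thbjkt82094y0uthhor289jnqthpc2290',
    },
}

# The module writes this with default_flow_style=False, producing block-style
# YAML suitable for /etc/puppet/csr_attributes.yaml:
print(yaml.dump(csr_attributes, default_flow_style=False))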
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 076b9d5a..01dfc125 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -8,7 +8,6 @@
"""Resizefs: cloud-config module which resizes the filesystem"""
-
import errno
import getopt
import os
@@ -81,7 +80,7 @@ def _resize_xfs(mount_point, devpth):
def _resize_ufs(mount_point, devpth):
- return ('growfs', '-y', devpth)
+ return ('growfs', '-y', mount_point)
def _resize_zfs(mount_point, devpth):
@@ -101,7 +100,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
"""
# dumpfs -m /
# newfs command for / (/dev/label/rootfs)
- newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384
+ newfs -L rootf -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384
-h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf
"""
cur_fs_sz = None
@@ -110,7 +109,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
for line in dumpfs_res.splitlines():
if not line.startswith('#'):
newfs_cmd = shlex.split(line)
- opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:'
+ opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:L:'
optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value)
for o, a in optlist:
if o == "-s":
@@ -183,7 +182,7 @@ def maybe_get_writable_device_path(devpath, info, log):
not container):
devpath = util.rootdev_from_cmdline(util.get_cmdline())
if devpath is None:
- log.warn("Unable to find device '/dev/root'")
+ log.warning("Unable to find device '/dev/root'")
return None
log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
@@ -212,8 +211,8 @@ def maybe_get_writable_device_path(devpath, info, log):
log.debug("Device '%s' did not exist in container. "
"cannot resize: %s", devpath, info)
elif exc.errno == errno.ENOENT:
- log.warn("Device '%s' did not exist. cannot resize: %s",
- devpath, info)
+ log.warning("Device '%s' did not exist. cannot resize: %s",
+ devpath, info)
else:
raise exc
return None
@@ -223,8 +222,8 @@ def maybe_get_writable_device_path(devpath, info, log):
log.debug("device '%s' not a block device in container."
" cannot resize: %s" % (devpath, info))
else:
- log.warn("device '%s' not a block device. cannot resize: %s" %
- (devpath, info))
+ log.warning("device '%s' not a block device. cannot resize: %s" %
+ (devpath, info))
return None
return devpath # The writable block devpath
@@ -243,7 +242,7 @@ def handle(name, cfg, _cloud, log, args):
resize_what = "/"
result = util.get_mount_info(resize_what, log)
if not result:
- log.warn("Could not determine filesystem type of %s", resize_what)
+ log.warning("Could not determine filesystem type of %s", resize_what)
return
(devpth, fs_type, mount_point) = result
@@ -280,8 +279,8 @@ def handle(name, cfg, _cloud, log, args):
break
if not resizer:
- log.warn("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_what)
+ log.warning("Not resizing unknown filesystem type %s for %s",
+ fs_type, resize_what)
return
resize_cmd = resizer(resize_what, devpth)
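Editor's note: the ``_can_skip_resize_ufs`` fix above extends the ``getopt`` option string with ``L:`` so that a ``newfs`` line carrying a filesystem label can still be parsed for its ``-s`` size. A short sketch of that parsing step, using the example line from the docstring:

import getopt
import shlex

newfs_line = ('newfs -L rootf -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 '
              '-f 4096 -g 16384 -h 64 -i 8192 -j -k 6408 -m 8 '
              '-o time -s 58719232 /dev/label/rootf')

newfs_cmd = shlex.split(newfs_line)
# '-L' (with an argument) is now part of the accepted options, so getopt no
# longer raises GetoptError on labelled filesystems.
optlist, _args = getopt.getopt(newfs_cmd[1:], 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:L:')
size = next(int(a) for o, a in optlist if o == '-s')
assert size == 58719232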
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 9812562a..69f4768a 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -102,11 +102,11 @@ def handle(name, cfg, cloud, log, _args):
return
if "resolv_conf" not in cfg:
- log.warn("manage_resolv_conf True but no parameters provided!")
+ log.warning("manage_resolv_conf True but no parameters provided!")
template_fn = cloud.get_template_filename('resolv.conf')
if not template_fn:
- log.warn("No template found, not rendering /etc/resolv.conf")
+ log.warning("No template found, not rendering /etc/resolv.conf")
return
generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index edee01e5..28c79b83 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -249,14 +249,14 @@ class SubscriptionManager(object):
except util.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
- if line is not '':
+ if line != '':
self.log_warn(line)
else:
self.log_warn("Setting the service level failed with: "
"{0}".format(e.stderr.strip()))
return False
for line in return_out.split("\n"):
- if line is not "":
+ if line != "":
self.log.debug(line)
return True
@@ -268,7 +268,7 @@ class SubscriptionManager(object):
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
for line in return_out.split("\n"):
- if line is not "":
+ if line != "":
self.log.debug(line)
return True
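Editor's note: the ``is not ''`` to ``!= ''`` changes above swap identity tests for value comparisons; the old form only appeared to work because of string interning and triggers a ``SyntaxWarning`` on Python 3.8+. A one-line illustration with a made-up stdout value:

stdout = "Service level set to: SELF-SUPPORT\n"
for line in stdout.split("\n"):
    # Old form: `if line is not ''` -- an identity check that relied on an
    # implementation detail; `!=` compares values and is the intended test.
    if line != '':
        print(line)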
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 4e34c7e9..a5aca038 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -50,13 +50,12 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
#
import os
+from urllib.parse import parse_qs
from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
-from six.moves.urllib_parse import parse_qs
-
frequency = PER_INSTANCE
MY_NAME = "cc_rightscale_userdata"
@@ -111,8 +110,8 @@ def handle(name, _cfg, cloud, log, _args):
log.debug("%s urls were skipped or failed", skipped)
if captured_excps:
- log.warn("%s failed with exceptions, re-raising the last one",
- len(captured_excps))
+ log.warning("%s failed with exceptions, re-raising the last one",
+ len(captured_excps))
raise captured_excps[-1]
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 27d2366c..5df0137d 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -180,7 +180,6 @@ config entries. Legacy to new mappings are as follows:
import os
import re
-import six
from cloudinit import log as logging
from cloudinit import util
@@ -203,7 +202,7 @@ LOG = logging.getLogger(__name__)
COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
HOST_PORT_RE = re.compile(
r'^(?P<proto>[@]{0,2})'
- r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
+ r'(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
r'([:](?P<port>[0-9]+))?$')
@@ -233,9 +232,9 @@ def load_config(cfg):
fillup = (
(KEYNAME_CONFIGS, [], list),
- (KEYNAME_DIR, DEF_DIR, six.string_types),
- (KEYNAME_FILENAME, DEF_FILENAME, six.string_types),
- (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)),
+ (KEYNAME_DIR, DEF_DIR, str),
+ (KEYNAME_FILENAME, DEF_FILENAME, str),
+ (KEYNAME_RELOAD, DEF_RELOAD, (str, list)),
(KEYNAME_REMOTES, DEF_REMOTES, dict))
for key, default, vtypes in fillup:
@@ -432,7 +431,7 @@ def handle(name, cfg, cloud, log, _args):
systemd=cloud.distro.uses_systemd()),
except util.ProcessExecutionError as e:
restarted = False
- log.warn("Failed to reload syslog", e)
+ log.warning("Failed to reload syslog", e)
if restarted:
# This only needs to run if we *actually* restarted
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index d6a21d72..5dd8de37 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -45,7 +45,9 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
import os
-from cloudinit import util
+from cloudinit import safeyaml, util
+from cloudinit.distros import rhel_util
+
# Note: see https://docs.saltstack.com/en/latest/topics/installation/
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
@@ -59,7 +61,7 @@ class SaltConstants(object):
# constants tailored for FreeBSD
if util.is_FreeBSD():
- self.pkg_name = 'py27-salt'
+ self.pkg_name = 'py36-salt'
self.srv_name = 'salt_minion'
self.conf_dir = '/usr/local/etc/salt'
# constants for any other OS
@@ -97,13 +99,13 @@ def handle(name, cfg, cloud, log, _args):
if 'conf' in s_cfg:
# Add all sections from the conf object to minion config file
minion_config = os.path.join(const.conf_dir, 'minion')
- minion_data = util.yaml_dumps(s_cfg.get('conf'))
+ minion_data = safeyaml.dumps(s_cfg.get('conf'))
util.write_file(minion_config, minion_data)
if 'grains' in s_cfg:
# add grains to /etc/salt/grains
grains_config = os.path.join(const.conf_dir, 'grains')
- grains_data = util.yaml_dumps(s_cfg.get('grains'))
+ grains_data = safeyaml.dumps(s_cfg.get('grains'))
util.write_file(grains_config, grains_data)
# ... copy the key pair if specified
@@ -123,7 +125,8 @@ def handle(name, cfg, cloud, log, _args):
# we need to have the salt minion service enabled in rc in order to be
# able to start the service. this does only apply on FreeBSD servers.
if cloud.distro.osfamily == 'freebsd':
- cloud.distro.updatercconf('salt_minion_enable', 'YES')
+ rhel_util.update_sysconfig_file(
+ '/etc/rc.conf', {'salt_minion_enable': 'YES'})
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index b03255c7..588e1b03 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -40,8 +40,8 @@ def handle(name, _cfg, cloud, log, _args):
try:
util.runparts(runparts_path)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index baee5cc4..75549b52 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -15,6 +15,9 @@ Any scripts in the ``scripts/per-instance`` directory on the datasource will
be run when a new instance is first booted. Scripts will be run in alphabetical
order. This module does not accept any config keys.
+Some cloud platforms change instance-id if a significant change was made to
+the system. As a result per-instance scripts will run again.
+
**Internal name:** ``cc_scripts_per_instance``
**Module frequency:** per instance
@@ -40,8 +43,8 @@ def handle(name, _cfg, cloud, log, _args):
try:
util.runparts(runparts_path)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index 4943e9aa..259bdfab 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -12,8 +12,9 @@ Scripts Per Once
**Summary:** run one time scripts
Any scripts in the ``scripts/per-once`` directory on the datasource will be run
-only once. Scripts will be run in alphabetical order. This module does not
-accept any config keys.
+only once. Changes to the instance will not force a re-run. The only way to
+re-run these scripts is to run the clean subcommand and reboot. Scripts will
+be run in alphabetical order. This module does not accept any config keys.
**Internal name:** ``cc_scripts_per_once``
@@ -40,8 +41,8 @@ def handle(name, _cfg, cloud, log, _args):
try:
util.runparts(runparts_path)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index 6c66481e..d940dbd6 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -44,8 +44,8 @@ def handle(name, _cfg, cloud, log, _args):
try:
util.runparts(runparts_path)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index 0292eafb..faac9242 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -48,8 +48,8 @@ def handle(name, cfg, cloud, log, _args):
try:
util.runparts(runparts_path, exe_prefix=prefix)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 65f6e777..b65f3ed9 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -61,8 +61,7 @@ used::
import base64
import os
-
-from six import BytesIO
+from io import BytesIO
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
@@ -131,7 +130,7 @@ def handle(name, cfg, cloud, log, _args):
env['RANDOM_SEED_FILE'] = seed_path
handle_random_seed_command(command=command, required=req, env=env)
except ValueError as e:
- log.warn("handling random command [%s] failed: %s", command, e)
+ log.warning("handling random command [%s] failed: %s", command, e)
raise e
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 3d2b2da3..10d6d197 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -21,7 +21,17 @@ key, and the fqdn of the cloud wil be used. If a fqdn specified with the
the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn``
will be used.
-**Internal name:** per instance
+This module will run in the init-local stage before networking is configured
+if the hostname is set by metadata or user data on the local system.
+
+This will occur on datasources like nocloud and ovf where metadata and user
+data are available locally. This ensures that the desired hostname is applied
+before any DHCP requests are preformed on these platforms where dynamic DNS is
+based on initial hostname.
+
+**Internal name:** ``cc_set_hostname``
+
+**Module frequency:** per always
**Supported distros:** all
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 5ef97376..4943d545 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -9,27 +9,40 @@
"""
Set Passwords
-------------
-**Summary:** Set user passwords
-
-Set system passwords and enable or disable ssh password authentication.
-The ``chpasswd`` config key accepts a dictionary containing a single one of two
-keys, either ``expire`` or ``list``. If ``expire`` is specified and is set to
-``false``, then the ``password`` global config key is used as the password for
-all user accounts. If the ``expire`` key is specified and is set to ``true``
-then user passwords will be expired, preventing the default system passwords
-from being used.
-
-If the ``list`` key is provided, a list of
-``username:password`` pairs can be specified. The usernames specified
-must already exist on the system, or have been created using the
-``cc_users_groups`` module. A password can be randomly generated using
-``username:RANDOM`` or ``username:R``. A hashed password can be specified
-using ``username:$6$salt$hash``. Password ssh authentication can be
-enabled, disabled, or left to system defaults using ``ssh_pwauth``.
+**Summary:** Set user passwords and enable/disable SSH password authentication
+
+This module consumes three top-level config keys: ``ssh_pwauth``, ``chpasswd``
+and ``password``.
+
+The ``ssh_pwauth`` config key determines whether or not sshd will be configured
+to accept password authentication. True values will enable password auth,
+false values will disable password auth, and the literal string ``unchanged``
+will leave it unchanged. Setting no value will also leave the current setting
+on-disk unchanged.
+
+The ``chpasswd`` config key accepts a dictionary containing either or both of
+``expire`` and ``list``.
+
+If the ``list`` key is provided, it should contain a list of
+``username:password`` pairs. This can be either a YAML list (of strings), or a
+multi-line string with one pair per line. Each user will have the
+corresponding password set. A password can be randomly generated by specifying
+``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool
+like ``mkpasswd``, can be specified; a regex
+(``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value
+should be treated as a hash.
.. note::
- if using ``expire: true`` then a ssh authkey should be specified or it may
- not be possible to login to the system
+ The users specified must already exist on the system. Users will have been
+ created by the ``cc_users_groups`` module at this point.
+
+By default, all users on the system will have their passwords expired (meaning
+that they will have to be reset the next time the user logs in). To disable
+this behaviour, set ``expire`` under ``chpasswd`` to a false value.
+
+If a ``list`` of user/password pairs is not specified under ``chpasswd``, then
+the value of the ``password`` config key will be used to set the default user's
+password.
**Internal name:** ``cc_set_passwords``
@@ -99,7 +112,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
elif util.is_false(pw_auth):
cfg_val = 'no'
else:
- bmsg = "Leaving ssh config '%s' unchanged." % cfg_name
+ bmsg = "Leaving SSH config '%s' unchanged." % cfg_name
if pw_auth is None or pw_auth.lower() == 'unchanged':
LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
else:
@@ -108,7 +121,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
updated = update_ssh_config({cfg_name: cfg_val})
if not updated:
- LOG.debug("No need to restart ssh service, %s not updated.", cfg_name)
+ LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
if 'systemctl' in service_cmd:
@@ -116,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
else:
cmd = list(service_cmd) + [service_name, "restart"]
util.subp(cmd)
- LOG.debug("Restarted the ssh daemon.")
+ LOG.debug("Restarted the SSH daemon.")
def handle(_name, cfg, cloud, log, args):
@@ -151,7 +164,7 @@ def handle(_name, cfg, cloud, log, args):
if user:
plist = ["%s:%s" % (user, password)]
else:
- log.warn("No default or defined user to change password for.")
+ log.warning("No default or defined user to change password for.")
errors = []
if plist:
@@ -160,24 +173,27 @@ def handle(_name, cfg, cloud, log, args):
hashed_users = []
randlist = []
users = []
- prog = re.compile(r'\$[1,2a,2y,5,6](\$.+){2}')
+ # N.B. This regex is included in the documentation (i.e. the module
+ # docstring), so any changes to it should be reflected there.
+ prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')
for line in plist:
u, p = line.split(':', 1)
if prog.match(p) is not None and ":" not in p:
- hashed_plist_in.append("%s:%s" % (u, p))
+ hashed_plist_in.append(line)
hashed_users.append(u)
else:
+ # in this else branch, we potentially change the password
+ # hence, a deviation from .append(line)
if p == "R" or p == "RANDOM":
p = rand_user_password()
randlist.append("%s:%s" % (u, p))
plist_in.append("%s:%s" % (u, p))
users.append(u)
-
ch_in = '\n'.join(plist_in) + '\n'
if users:
try:
log.debug("Changing password for %s:", users)
- util.subp(['chpasswd'], ch_in)
+ chpasswd(cloud.distro, ch_in)
except Exception as e:
errors.append(e)
util.logexc(
@@ -187,7 +203,7 @@ def handle(_name, cfg, cloud, log, args):
if hashed_users:
try:
log.debug("Setting hashed password for %s:", hashed_users)
- util.subp(['chpasswd', '-e'], hashed_ch_in)
+ chpasswd(cloud.distro, hashed_ch_in, hashed=True)
except Exception as e:
errors.append(e)
util.logexc(
@@ -203,7 +219,7 @@ def handle(_name, cfg, cloud, log, args):
expired_users = []
for u in users:
try:
- util.subp(['passwd', '--expire', u])
+ cloud.distro.expire_passwd(u)
expired_users.append(u)
except Exception as e:
errors.append(e)
@@ -220,7 +236,17 @@ def handle(_name, cfg, cloud, log, args):
raise errors[-1]
-def rand_user_password(pwlen=9):
+def rand_user_password(pwlen=20):
return util.rand_str(pwlen, select_from=PW_SET)
+
+def chpasswd(distro, plist_in, hashed=False):
+ if util.is_FreeBSD():
+ for pentry in plist_in.splitlines():
+ u, p = pentry.split(":")
+ distro.set_passwd(u, p, hashed=hashed)
+ else:
+ cmd = ['chpasswd'] + (['-e'] if hashed else [])
+ util.subp(cmd, plist_in)
+
# vi: ts=4 expandtab
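A minimal, illustrative sketch of how the hash-detection regex above splits a
``chpasswd`` list into hashed and plaintext entries; the sample passwords are
invented and ``split_plist`` is not a helper in the module::

    import re

    # Same pattern as in cc_set_passwords (and its module docstring).
    HASH_RE = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')

    def split_plist(plist):
        """Split 'user:password' entries into (hashed, plaintext) lists."""
        hashed, plain = [], []
        for line in plist:
            _user, pw = line.split(':', 1)
            # A crypt-style hash such as $6$<salt>$<digest> matches; 'RANDOM'
            # or an ordinary plaintext password does not.
            if HASH_RE.match(pw) is not None and ':' not in pw:
                hashed.append(line)
            else:
                plain.append(line)
        return hashed, plain

    sample = ['root:$6$saltsalt$notarealhash', 'ubuntu:RANDOM', 'demo:passw0rd']
    print(split_plist(sample))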
diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py
deleted file mode 100644
index afe297ee..00000000
--- a/cloudinit/config/cc_snap_config.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Ryan Harper <ryan.harper@canonical.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# RELEASE_BLOCKER: Remove this deprecated module in 18.3
-"""
-Snap Config
------------
-**Summary:** snap_config modules allows configuration of snapd.
-
-**Deprecated**: Use :ref:`snap` module instead. This module will not exist
-in cloud-init 18.3.
-
-This module uses the same ``snappy`` namespace for configuration but
-acts only only a subset of the configuration.
-
-If ``assertions`` is set and the user has included a list of assertions
-then cloud-init will collect the assertions into a single assertion file
-and invoke ``snap ack <path to file with assertions>`` which will attempt
-to load the provided assertions into the snapd assertion database.
-
-If ``email`` is set, this value is used to create an authorized user for
-contacting and installing snaps from the Ubuntu Store. This is done by
-calling ``snap create-user`` command.
-
-If ``known`` is set to True, then it is expected the user also included
-an assertion of type ``system-user``. When ``snap create-user`` is called
-cloud-init will append '--known' flag which instructs snapd to look for
-a system-user assertion with the details. If ``known`` is not set, then
-``snap create-user`` will contact the Ubuntu SSO for validating and importing
-a system-user for the instance.
-
-.. note::
- If the system is already managed, then cloud-init will not attempt to
- create a system-user.
-
-**Internal name:** ``cc_snap_config``
-
-**Module frequency:** per instance
-
-**Supported distros:** any with 'snapd' available
-
-**Config keys**::
-
- #cloud-config
- snappy:
- assertions:
- - |
- <assertion 1>
- - |
- <assertion 2>
- email: user@user.org
- known: true
-
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-SNAPPY_CMD = "snap"
-ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
-
-
-"""
-snappy:
- assertions:
- - |
- <snap assertion 1>
- - |
- <snap assertion 2>
- email: foo@foo.io
- known: true
-"""
-
-
-def add_assertions(assertions=None):
- """Import list of assertions.
-
- Import assertions by concatenating each assertion into a
- string separated by a '\n'. Write this string to a instance file and
- then invoke `snap ack /path/to/file` and check for errors.
- If snap exits 0, then all assertions are imported.
- """
- if not assertions:
- assertions = []
-
- if not isinstance(assertions, list):
- raise ValueError(
- 'assertion parameter was not a list: {assertions}'.format(
- assertions=assertions))
-
- snap_cmd = [SNAPPY_CMD, 'ack']
- combined = "\n".join(assertions)
- if len(combined) == 0:
- raise ValueError("Assertion list is empty")
-
- for asrt in assertions:
- LOG.debug('Acking: %s', asrt.split('\n')[0:2])
-
- util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
- util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
-
-
-def add_snap_user(cfg=None):
- """Add a snap system-user if provided with email under snappy config.
-
- - Check that system is not already managed.
- - Check that if using a system-user assertion, that it's
- imported into snapd.
-
- Returns a dictionary to be passed to Distro.create_user
- """
-
- if not cfg:
- cfg = {}
-
- if not isinstance(cfg, dict):
- raise ValueError(
- 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg))
-
- snapuser = cfg.get('email', None)
- if not snapuser:
- return
-
- usercfg = {
- 'snapuser': snapuser,
- 'known': cfg.get('known', False),
- }
-
- # query if we're already registered
- out, _ = util.subp([SNAPPY_CMD, 'managed'], capture=True)
- if out.strip() == "true":
- LOG.warning('This device is already managed. '
- 'Skipping system-user creation')
- return
-
- if usercfg.get('known'):
- # Check that we imported a system-user assertion
- out, _ = util.subp([SNAPPY_CMD, 'known', 'system-user'],
- capture=True)
- if len(out) == 0:
- LOG.error('Missing "system-user" assertion. '
- 'Check "snappy" user-data assertions.')
- return
-
- return usercfg
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snappy')
- if not cfgin:
- LOG.debug('No snappy config provided, skipping')
- return
-
- log.warning(
- 'DEPRECATION: snap_config module will be dropped in 18.3 release.'
- ' Use snap module instead')
- if not(util.system_is_snappy()):
- LOG.debug("%s: system not snappy", name)
- return
-
- assertions = cfgin.get('assertions', [])
- if len(assertions) > 0:
- LOG.debug('Importing user-provided snap assertions')
- add_assertions(assertions)
-
- # Create a snap user if requested.
- # Snap systems contact the store with a user's email
- # and extract information needed to create a local user.
- # A user may provide a 'system-user' assertion which includes
- # the required information. Using such an assertion to create
- # a local user requires specifying 'known: true' in the supplied
- # user-data.
- usercfg = add_snap_user(cfg=cfgin)
- if usercfg:
- cloud.distro.create_user(usercfg.get('snapuser'), **usercfg)
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
deleted file mode 100644
index 15bee2d3..00000000
--- a/cloudinit/config/cc_snappy.py
+++ /dev/null
@@ -1,321 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# RELEASE_BLOCKER: Remove this deprecated module in 18.3
-"""
-Snappy
-------
-**Summary:** snappy modules allows configuration of snappy.
-
-**Deprecated**: Use :ref:`snap` module instead. This module will not exist
-in cloud-init 18.3.
-
-The below example config config would install ``etcd``, and then install
-``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has
-``config-blob`` inside it. If ``pkgname`` is installed already, then
-``snappy config pkgname <file>``
-will be called where ``file`` has ``pkgname-config-blob`` as its content.
-
-Entries in ``config`` can be namespaced or non-namespaced for a package.
-In either case, the config provided to snappy command is non-namespaced.
-The package name is provided as it appears.
-
-If ``packages_dir`` has files in it that end in ``.snap``, then they are
-installed. Given 3 files:
-
- - <packages_dir>/foo.snap
- - <packages_dir>/foo.config
- - <packages_dir>/bar.snap
-
-cloud-init will invoke:
-
- - snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
- - snappy install <packages_dir>/bar.snap
-
-.. note::
- that if provided a ``config`` entry for ``ubuntu-core``, then
- cloud-init will invoke: snappy config ubuntu-core <config>
- Allowing you to configure ubuntu-core in this way.
-
-The ``ssh_enabled`` key controls the system's ssh service. The default value
-is ``auto``. Options are:
-
- - **True:** enable ssh service
- - **False:** disable ssh service
- - **auto:** enable ssh service if either ssh keys have been provided
- or user has requested password authentication (ssh_pwauth).
-
-**Internal name:** ``cc_snappy``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- #cloud-config
- snappy:
- system_snappy: auto
- ssh_enabled: auto
- packages: [etcd, pkg2.smoser]
- config:
- pkgname:
- key2: value2
- pkg2:
- key1: value1
- packages_dir: '/writable/user-data/cloud-init/snaps'
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import temp_utils
-from cloudinit import util
-
-import glob
-import os
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-SNAPPY_CMD = "snappy"
-NAMESPACE_DELIM = '.'
-
-BUILTIN_CFG = {
- 'packages': [],
- 'packages_dir': '/writable/user-data/cloud-init/snaps',
- 'ssh_enabled': "auto",
- 'system_snappy': "auto",
- 'config': {},
-}
-
-distros = ['ubuntu']
-
-
-def parse_filename(fname):
- fname = os.path.basename(fname)
- fname_noext = fname.rpartition(".")[0]
- name = fname_noext.partition("_")[0]
- shortname = name.partition(".")[0]
- return(name, shortname, fname_noext)
-
-
-def get_fs_package_ops(fspath):
- if not fspath:
- return []
- ops = []
- for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
- (name, shortname, fname_noext) = parse_filename(snapfile)
- cfg = None
- for cand in (fname_noext, name, shortname):
- fpcand = os.path.sep.join([fspath, cand]) + ".config"
- if os.path.isfile(fpcand):
- cfg = fpcand
- break
- ops.append(makeop('install', name, config=None,
- path=snapfile, cfgfile=cfg))
- return ops
-
-
-def makeop(op, name, config=None, path=None, cfgfile=None):
- return({'op': op, 'name': name, 'config': config, 'path': path,
- 'cfgfile': cfgfile})
-
-
-def get_package_config(configs, name):
- # load the package's config from the configs dict.
- # prefer full-name entry (config-example.canonical)
- # over short name entry (config-example)
- if name in configs:
- return configs[name]
- return configs.get(name.partition(NAMESPACE_DELIM)[0])
-
-
-def get_package_ops(packages, configs, installed=None, fspath=None):
- # get the install an config operations that should be done
- if installed is None:
- installed = read_installed_packages()
- short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
-
- if not packages:
- packages = []
- if not configs:
- configs = {}
-
- ops = []
- ops += get_fs_package_ops(fspath)
-
- for name in packages:
- ops.append(makeop('install', name, get_package_config(configs, name)))
-
- to_install = [f['name'] for f in ops]
- short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
-
- for name in configs:
- if name in to_install:
- continue
- shortname = name.partition(NAMESPACE_DELIM)[0]
- if shortname in short_to_install:
- continue
- if name in installed or shortname in short_installed:
- ops.append(makeop('config', name,
- config=get_package_config(configs, name)))
-
- # prefer config entries to filepath entries
- for op in ops:
- if op['op'] != 'install' or not op['cfgfile']:
- continue
- name = op['name']
- fromcfg = get_package_config(configs, op['name'])
- if fromcfg:
- LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
- op['cfgfile'] = None
- op['config'] = fromcfg
-
- return ops
-
-
-def render_snap_op(op, name, path=None, cfgfile=None, config=None):
- if op not in ('install', 'config'):
- raise ValueError("cannot render op '%s'" % op)
-
- shortname = name.partition(NAMESPACE_DELIM)[0]
- try:
- cfg_tmpf = None
- if config is not None:
- # input to 'snappy config packagename' must have nested data. odd.
- # config:
- # packagename:
- # config
- # Note, however, we do not touch config files on disk.
- nested_cfg = {'config': {shortname: config}}
- (fd, cfg_tmpf) = temp_utils.mkstemp()
- os.write(fd, util.yaml_dumps(nested_cfg).encode())
- os.close(fd)
- cfgfile = cfg_tmpf
-
- cmd = [SNAPPY_CMD, op]
- if op == 'install':
- if path:
- cmd.append("--allow-unauthenticated")
- cmd.append(path)
- else:
- cmd.append(name)
- if cfgfile:
- cmd.append(cfgfile)
- elif op == 'config':
- cmd += [name, cfgfile]
-
- util.subp(cmd)
-
- finally:
- if cfg_tmpf:
- os.unlink(cfg_tmpf)
-
-
-def read_installed_packages():
- ret = []
- for (name, _date, _version, dev) in read_pkg_data():
- if dev:
- ret.append(NAMESPACE_DELIM.join([name, dev]))
- else:
- ret.append(name)
- return ret
-
-
-def read_pkg_data():
- out, _err = util.subp([SNAPPY_CMD, "list"])
- pkg_data = []
- for line in out.splitlines()[1:]:
- toks = line.split(sep=None, maxsplit=3)
- if len(toks) == 3:
- (name, date, version) = toks
- dev = None
- else:
- (name, date, version, dev) = toks
- pkg_data.append((name, date, version, dev,))
- return pkg_data
-
-
-def disable_enable_ssh(enabled):
- LOG.debug("setting enablement of ssh to: %s", enabled)
- # do something here that would enable or disable
- not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
- if enabled:
- util.del_file(not_to_be_run)
- # this is an indempotent operation
- util.subp(["systemctl", "start", "ssh"])
- else:
- # this is an indempotent operation
- util.subp(["systemctl", "stop", "ssh"])
- util.write_file(not_to_be_run, "cloud-init\n")
-
-
-def set_snappy_command():
- global SNAPPY_CMD
- if util.which("snappy-go"):
- SNAPPY_CMD = "snappy-go"
- elif util.which("snappy"):
- SNAPPY_CMD = "snappy"
- else:
- SNAPPY_CMD = "snap"
- LOG.debug("snappy command is '%s'", SNAPPY_CMD)
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snappy')
- if not cfgin:
- cfgin = {}
- mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
- sys_snappy = str(mycfg.get("system_snappy", "auto"))
- if util.is_false(sys_snappy):
- LOG.debug("%s: System is not snappy. disabling", name)
- return
-
- if sys_snappy.lower() == "auto" and not(util.system_is_snappy()):
- LOG.debug("%s: 'auto' mode, and system not snappy", name)
- return
-
- log.warning(
- 'DEPRECATION: snappy module will be dropped in 18.3 release.'
- ' Use snap module instead')
-
- set_snappy_command()
-
- pkg_ops = get_package_ops(packages=mycfg['packages'],
- configs=mycfg['config'],
- fspath=mycfg['packages_dir'])
-
- fails = []
- for pkg_op in pkg_ops:
- try:
- render_snap_op(**pkg_op)
- except Exception as e:
- fails.append((pkg_op, e,))
- LOG.warning("'%s' failed for '%s': %s",
- pkg_op['op'], pkg_op['name'], e)
-
- # Default to disabling SSH
- ssh_enabled = mycfg.get('ssh_enabled', "auto")
-
- # If the user has not explicitly enabled or disabled SSH, then enable it
- # when password SSH authentication is requested or there are SSH keys
- if ssh_enabled == "auto":
- user_ssh_keys = cloud.get_public_ssh_keys() or None
- password_auth_enabled = cfg.get('ssh_pwauth', False)
- if user_ssh_keys:
- LOG.debug("Enabling SSH, ssh keys found in datasource")
- ssh_enabled = True
- elif cfg.get('ssh_authorized_keys'):
- LOG.debug("Enabling SSH, ssh keys found in config")
- elif password_auth_enabled:
- LOG.debug("Enabling SSH, password authentication requested")
- ssh_enabled = True
- elif ssh_enabled not in (True, False):
- LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled)
-
- disable_enable_ssh(ssh_enabled)
-
- if fails:
- raise Exception("failed to install/configure snaps")
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index f8f7cb35..163cce99 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -9,43 +9,23 @@
"""
SSH
---
-**Summary:** configure ssh and ssh keys
+**Summary:** configure SSH and SSH keys (host and authorized)
-This module handles most configuration for ssh and ssh keys. Many images have
-default ssh keys, which can be removed using ``ssh_deletekeys``. Since removing
-default keys is usually the desired behavior this option is enabled by default.
+This module handles most configuration for SSH and both host and authorized SSH
+keys.
-Keys can be added using the ``ssh_keys`` configuration key. The argument to
-this config key should be a dictionary entries for the public and private keys
-of each desired key type. Entries in the ``ssh_keys`` config dict should
-have keys in the format ``<key type>_private`` and ``<key type>_public``, e.g.
-``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported key
-types. Not all key types have to be specified, ones left unspecified will not
-be used. If this config option is used, then no keys will be generated.
+Authorized Keys
+^^^^^^^^^^^^^^^
-.. note::
- when specifying private keys in cloud-config, care should be taken to
- ensure that the communication between the data source and the instance is
- secure
+Authorized keys are a list of public SSH keys that are allowed to connect to
+a user account on a system. They are stored in ``.ssh/authorized_keys`` in that
+account's home directory. Authorized keys for the default user defined in
+``users`` can be specified using ``ssh_authorized_keys``. Keys
+should be specified as a list of public keys.
.. note::
- to specify multiline private keys, use yaml multiline syntax
-
-If no keys are specified using ``ssh_keys``, then keys will be generated using
-``ssh-keygen``. By default one public/private pair of each supported key type
-will be generated. The key types to generate can be specified using the
-``ssh_genkeytypes`` config flag, which accepts a list of key types to use. For
-each key type for which this module has been instructed to create a keypair, if
-a key of the same type is already present on the system (i.e. if
-``ssh_deletekeys`` was false), no key will be generated.
-
-Supported key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` config
-flags are:
-
- - rsa
- - dsa
- - ecdsa
- - ed25519
+ see the ``cc_set_passwords`` module documentation to enable/disable SSH
+ password authentication
Root login can be enabled/disabled using the ``disable_root`` config key. Root
login options can be manually specified with ``disable_root_opts``. If
@@ -55,13 +35,46 @@ root login is disabled, and root login opts are set to::
no-port-forwarding,no-agent-forwarding,no-X11-forwarding
-Authorized keys for the default user/first user defined in ``users`` can be
-specified using `ssh_authorized_keys``. Keys should be specified as a list of
-public keys.
+Host Keys
+^^^^^^^^^
+
+Host keys are for authenticating a specific instance. Many images have default
+host SSH keys, which can be removed using ``ssh_deletekeys``. This prevents
+re-use of a private host key from an image on multiple machines. Since
+removing default host keys is usually the desired behavior this option is
+enabled by default.
+
+Host keys can be added using the ``ssh_keys`` configuration key. The argument
+to this config key should be a dictionary of entries for the public and private
+keys of each desired key type. Entries in the ``ssh_keys`` config dict should
+have keys in the format ``<key type>_private`` and ``<key type>_public``,
+e.g. ``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported
+key types. Not all key types have to be specified, ones left unspecified will
+not be used. If this config option is used, then no keys will be generated.
.. note::
- see the ``cc_set_passwords`` module documentation to enable/disable ssh
- password authentication
+ when specifying private host keys in cloud-config, care should be taken to
+ ensure that the communication between the data source and the instance is
+ secure
+
+.. note::
+ to specify multiline private host keys, use yaml multiline syntax
+
+If no host keys are specified using ``ssh_keys``, then keys will be generated
+using ``ssh-keygen``. By default one public/private pair of each supported
+host key type will be generated. The key types to generate can be specified
+using the ``ssh_genkeytypes`` config flag, which accepts a list of host key
+types to use. For each host key type for which this module has been instructed
+to create a keypair, if a key of the same type is already present on the
+system (i.e. if ``ssh_deletekeys`` was false), no key will be generated.
+
+Supported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``
+config flags are:
+
+ - rsa
+ - dsa
+ - ecdsa
+ - ed25519
**Internal name:** ``cc_ssh``
@@ -91,6 +104,10 @@ public keys.
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
+ allow_public_ssh_keys: <true/false>
+ ssh_publish_hostkeys:
+ enabled: <true/false> (Defaults to true)
+ blacklist: <list of key types> (Defaults to [dsa])
"""
import glob
@@ -104,6 +121,10 @@ from cloudinit import util
GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+PUBLISH_HOST_KEYS = True
+# Don't publish the dsa hostkey by default since OpenSSH recommends not using
+# it.
+HOST_KEY_PUBLISH_BLACKLIST = ['dsa']
CONFIG_KEY_TO_FILE = {}
PRIV_TO_PUB = {}
@@ -176,6 +197,23 @@ def handle(_name, cfg, cloud, log, _args):
util.logexc(log, "Failed generating key type %s to "
"file %s", keytype, keyfile)
+ if "ssh_publish_hostkeys" in cfg:
+ host_key_blacklist = util.get_cfg_option_list(
+ cfg["ssh_publish_hostkeys"], "blacklist",
+ HOST_KEY_PUBLISH_BLACKLIST)
+ publish_hostkeys = util.get_cfg_option_bool(
+ cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
+ else:
+ host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
+ publish_hostkeys = PUBLISH_HOST_KEYS
+
+ if publish_hostkeys:
+ hostkeys = get_public_host_keys(blacklist=host_key_blacklist)
+ try:
+ cloud.datasource.publish_host_keys(hostkeys)
+ except Exception:
+ util.logexc(log, "Publishing host keys failed!")
+
try:
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
@@ -183,14 +221,20 @@ def handle(_name, cfg, cloud, log, _args):
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
ssh_util.DISABLE_USER_OPTS)
- keys = cloud.get_public_ssh_keys() or []
+ keys = []
+ if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True):
+ keys = cloud.get_public_ssh_keys() or []
+ else:
+ log.debug('Skipping import of public SSH keys per '
+ 'config setting: allow_public_ssh_keys=False')
+
if "ssh_authorized_keys" in cfg:
cfgkeys = cfg["ssh_authorized_keys"]
keys.extend(cfgkeys)
apply_credentials(keys, user, disable_root, disable_root_opts)
except Exception:
- util.logexc(log, "Applying ssh credentials failed!")
+ util.logexc(log, "Applying SSH credentials failed!")
def apply_credentials(keys, user, disable_root, disable_root_opts):
@@ -209,4 +253,35 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
+
+def get_public_host_keys(blacklist=None):
+ """Read host keys from /etc/ssh/*.pub files and return them as a list.
+
+ @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa']
+ @returns: List of keys, each formatted as a two-element tuple.
+ e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')]
+ """
+ public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,)
+ key_list = []
+ blacklist_files = []
+ if blacklist:
+ # Convert blacklist to filenames:
+ # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub'
+ blacklist_files = [public_key_file_tmpl % (key_type,)
+ for key_type in blacklist]
+ # Get list of public key files and filter out blacklisted files.
+ file_list = [hostfile for hostfile
+ in glob.glob(public_key_file_tmpl % ('*',))
+ if hostfile not in blacklist_files]
+
+ # Read host key files, retrieve first two fields as a tuple and
+ # append that tuple to key_list.
+ for file_name in file_list:
+ file_contents = util.load_file(file_name)
+ key_data = file_contents.split()
+ if key_data and len(key_data) > 1:
+ key_list.append(tuple(key_data[:2]))
+ return key_list
+
+
# vi: ts=4 expandtab
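An illustrative sketch (not part of the module) of the host-key publishing
logic added above: resolving the ``ssh_publish_hostkeys`` defaults and turning
the key-type blacklist into the ``/etc/ssh`` filenames that
``get_public_host_keys()`` skips. The plain dict lookups stand in for
``util.get_cfg_option_bool``/``util.get_cfg_option_list``::

    import glob

    KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'          # as in cc_ssh
    PUBLISH_HOST_KEYS = True
    HOST_KEY_PUBLISH_BLACKLIST = ['dsa']

    def resolve_publish_cfg(cfg):
        """Approximate the defaulting done in handle() for ssh_publish_hostkeys."""
        section = cfg.get('ssh_publish_hostkeys', {})
        enabled = section.get('enabled', PUBLISH_HOST_KEYS)
        blacklist = section.get('blacklist', HOST_KEY_PUBLISH_BLACKLIST)
        return enabled, blacklist

    enabled, blacklist = resolve_publish_cfg({'ssh_publish_hostkeys': {}})
    pub_tmpl = '%s.pub' % KEY_FILE_TPL
    skipped = [pub_tmpl % key_type for key_type in blacklist]
    candidates = [f for f in glob.glob(pub_tmpl % '*') if f not in skipped]
    print(enabled, skipped, candidates)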
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 98b0e665..7ac1c8cf 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -7,11 +7,11 @@
"""
SSH Authkey Fingerprints
------------------------
-**Summary:** log fingerprints of user ssh keys
+**Summary:** log fingerprints of user SSH keys
Write fingerprints of authorized keys for each user to log. This is enabled by
default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
-the keys can be specified, but defaults to ``md5``.
+the keys can be specified, but defaults to ``sha256``.
**Internal name:** `` cc_ssh_authkey_fingerprints``
@@ -42,7 +42,7 @@ def _split_hash(bin_hash):
return split_up
-def _gen_fingerprint(b64_text, hash_meth='md5'):
+def _gen_fingerprint(b64_text, hash_meth='sha256'):
if not b64_text:
return ''
# TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
@@ -65,10 +65,10 @@ def _is_printable_key(entry):
return False
-def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
+def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
prefix='ci-info: '):
if not key_entries:
- message = ("%sno authorized ssh keys fingerprints found for user %s.\n"
+ message = ("%sno authorized SSH key fingerprints found for user %s.\n"
% (prefix, user))
util.multi_log(message)
return
@@ -98,10 +98,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
def handle(name, cfg, cloud, log, _args):
if util.is_true(cfg.get('no_ssh_fingerprints', False)):
log.debug(("Skipping module named %s, "
- "logging of ssh fingerprints disabled"), name)
+ "logging of SSH fingerprints disabled"), name)
return
- hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
+ hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
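A rough sketch of what the ``md5`` to ``sha256`` default change means for a
key fingerprint, using plain ``hashlib``; the key blob is a placeholder and
this is not the module's exact output format::

    import base64
    import hashlib

    def fingerprint(b64_key, hash_meth='sha256'):
        """Digest the decoded key material with the chosen hash."""
        hasher = hashlib.new(hash_meth)
        hasher.update(base64.b64decode(b64_key))
        return hasher.hexdigest()

    sample = base64.b64encode(b'not-a-real-key').decode()   # placeholder key blob
    print(fingerprint(sample, 'sha256'))
    print(fingerprint(sample, 'md5'))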
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 6b46dafe..63f87298 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -9,9 +9,9 @@
"""
SSH Import Id
-------------
-**Summary:** import ssh id
+**Summary:** import SSH id
-This module imports ssh keys from either a public keyserver, usually launchpad
+This module imports SSH keys from either a public keyserver, usually launchpad
or github using ``ssh-import-id``. Keys are referenced by the username they are
associated with on the keyserver. The keyserver can be specified by prepending
either ``lp:`` for launchpad or ``gh:`` for github to the username.
@@ -98,12 +98,12 @@ def import_ssh_ids(ids, user, log):
raise exc
cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
- log.debug("Importing ssh ids for user %s.", user)
+ log.debug("Importing SSH ids for user %s.", user)
try:
util.subp(cmd, capture=False)
except util.ProcessExecutionError as exc:
- util.logexc(log, "Failed to run command to import %s ssh ids", user)
+ util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
# vi: ts=4 expandtab
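A small illustration of how ``lp:``/``gh:`` prefixed ids end up on the command
line assembled in ``import_ssh_ids()``; the usernames are made up and nothing
is executed::

    def build_import_cmd(user, ids):
        # Mirrors the command built in cc_ssh_import_id.import_ssh_ids().
        return ["sudo", "-Hu", user, "ssh-import-id"] + list(ids)

    print(build_import_cmd("ubuntu", ["lp:alice", "gh:bob"]))
    # ['sudo', '-Hu', 'ubuntu', 'ssh-import-id', 'lp:alice', 'gh:bob']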
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 5e082bd6..8b6d2a1a 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -1,150 +1,141 @@
-# Copyright (C) 2018 Canonical Ltd.
-#
# This file is part of cloud-init. See LICENSE file for license information.
-"""Ubuntu advantage: manage ubuntu-advantage offerings from Canonical."""
+"""ubuntu_advantage: Configure Ubuntu Advantage support services"""
-import sys
from textwrap import dedent
-from cloudinit import log as logging
from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
-from cloudinit.subp import prepend_base_command
from cloudinit import util
-distros = ['ubuntu']
-frequency = PER_INSTANCE
+UA_URL = 'https://ubuntu.com/advantage'
-LOG = logging.getLogger(__name__)
+distros = ['ubuntu']
schema = {
'id': 'cc_ubuntu_advantage',
'name': 'Ubuntu Advantage',
- 'title': 'Install, configure and manage ubuntu-advantage offerings',
+ 'title': 'Configure Ubuntu Advantage support services',
'description': dedent("""\
- This module provides configuration options to setup ubuntu-advantage
- subscriptions.
-
- .. note::
- Both ``commands`` value can be either a dictionary or a list. If
- the configuration provided is a dictionary, the keys are only used
- to order the execution of the commands and the dictionary is
- merged with any vendor-data ubuntu-advantage configuration
- provided. If a ``commands`` is provided as a list, any vendor-data
- ubuntu-advantage ``commands`` are ignored.
-
- Ubuntu-advantage ``commands`` is a dictionary or list of
- ubuntu-advantage commands to run on the deployed machine.
- These commands can be used to enable or disable subscriptions to
- various ubuntu-advantage products. See 'man ubuntu-advantage' for more
- information on supported subcommands.
-
- .. note::
- Each command item can be a string or list. If the item is a list,
- 'ubuntu-advantage' can be omitted and it will automatically be
- inserted as part of the command.
+ Attach machine to an existing Ubuntu Advantage support contract and
+ enable or disable support services such as Livepatch, ESM,
+ FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage,
+ one can also specify services to enable. When the 'enable'
+ list is present, any named service will be enabled and all absent
+ services will remain disabled.
+
+ Note that when enabling FIPS or FIPS updates you will need to schedule
+ a reboot to ensure the machine is running the FIPS-compliant kernel.
+ See :ref:`Power State Change` for information on how to configure
+ cloud-init to perform this reboot.
"""),
'distros': distros,
'examples': [dedent("""\
- # Enable Extended Security Maintenance using your service auth token
- ubuntu-advantage:
- commands:
- 00: ubuntu-advantage enable-esm <token>
- """), dedent("""\
- # Enable livepatch by providing your livepatch token
+ # Attach the machine to an Ubuntu Advantage support contract with a
+ # UA contract token obtained from %s.
+ ubuntu_advantage:
+ token: <ua_contract_token>
+ """ % UA_URL), dedent("""\
+ # Attach the machine to an Ubuntu Advantage support contract enabling
+ # only fips and esm services. Services will only be enabled if
+ # the environment supports said service. Otherwise warnings will
+ # be logged for incompatible services specified.
ubuntu-advantage:
- commands:
- 00: ubuntu-advantage enable-livepatch <livepatch-token>
-
+ token: <ua_contract_token>
+ enable:
+ - fips
+ - esm
"""), dedent("""\
- # Convenience: the ubuntu-advantage command can be omitted when
- # specifying commands as a list and 'ubuntu-advantage' will
- # automatically be prepended.
- # The following commands are equivalent
+ # Attach the machine to an Ubuntu Advantage support contract and enable
+ # the FIPS service. Perform a reboot once cloud-init has
+ # completed.
+ power_state:
+ mode: reboot
ubuntu-advantage:
- commands:
- 00: ['enable-livepatch', 'my-token']
- 01: ['ubuntu-advantage', 'enable-livepatch', 'my-token']
- 02: ubuntu-advantage enable-livepatch my-token
- 03: 'ubuntu-advantage enable-livepatch my-token'
- """)],
+ token: <ua_contract_token>
+ enable:
+ - fips
+ """)],
'frequency': PER_INSTANCE,
'type': 'object',
'properties': {
- 'ubuntu-advantage': {
+ 'ubuntu_advantage': {
'type': 'object',
'properties': {
- 'commands': {
- 'type': ['object', 'array'], # Array of strings or dict
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
- },
- 'additionalItems': False, # Reject non-string & non-list
- 'minItems': 1,
- 'minProperties': 1,
+ 'enable': {
+ 'type': 'array',
+ 'items': {'type': 'string'},
+ },
+ 'token': {
+ 'type': 'string',
+ 'description': (
+ 'A contract token obtained from %s.' % UA_URL)
}
},
- 'additionalProperties': False, # Reject keys not in schema
- 'required': ['commands']
+ 'required': ['token'],
+ 'additionalProperties': False
}
}
}
-# TODO schema for 'assertions' and 'commands' are too permissive at the moment.
-# Once python-jsonschema supports schema draft 6 add support for arbitrary
-# object keys with 'patternProperties' constraint to validate string values.
-
__doc__ = get_schema_doc(schema) # Supplement python help()
-UA_CMD = "ubuntu-advantage"
-
-
-def run_commands(commands):
- """Run the commands provided in ubuntu-advantage:commands config.
+LOG = logging.getLogger(__name__)
- Commands are run individually. Any errors are collected and reported
- after attempting all commands.
- @param commands: A list or dict containing commands to run. Keys of a
- dict will be used to order the commands provided as dict values.
- """
- if not commands:
- return
- LOG.debug('Running user-provided ubuntu-advantage commands')
- if isinstance(commands, dict):
- # Sort commands based on dictionary key
- commands = [v for _, v in sorted(commands.items())]
- elif not isinstance(commands, list):
- raise TypeError(
- 'commands parameter was not a list or dict: {commands}'.format(
- commands=commands))
-
- fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands)
-
- cmd_failures = []
- for command in fixed_ua_commands:
- shell = isinstance(command, str)
- try:
- util.subp(command, shell=shell, status_cb=sys.stderr.write)
- except util.ProcessExecutionError as e:
- cmd_failures.append(str(e))
- if cmd_failures:
- msg = (
- 'Failures running ubuntu-advantage commands:\n'
- '{cmd_failures}'.format(
- cmd_failures=cmd_failures))
+def configure_ua(token=None, enable=None):
+ """Call ua commandline client to attach or enable services."""
+ error = None
+ if not token:
+ error = ('ubuntu_advantage: token must be provided')
+ LOG.error(error)
+ raise RuntimeError(error)
+
+ if enable is None:
+ enable = []
+ elif isinstance(enable, str):
+ LOG.warning('ubuntu_advantage: enable should be a list, not'
+ ' a string; treating as a single enable')
+ enable = [enable]
+ elif not isinstance(enable, list):
+ LOG.warning('ubuntu_advantage: enable should be a list, not'
+ ' a %s; skipping enabling services',
+ type(enable).__name__)
+ enable = []
+
+ attach_cmd = ['ua', 'attach', token]
+ LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
+ try:
+ util.subp(attach_cmd)
+ except util.ProcessExecutionError as e:
+ msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
+ error=str(e))
util.logexc(LOG, msg)
raise RuntimeError(msg)
+ enable_errors = []
+ for service in enable:
+ try:
+ cmd = ['ua', 'enable', service]
+ util.subp(cmd, capture=True)
+ except util.ProcessExecutionError as e:
+ enable_errors.append((service, e))
+ if enable_errors:
+ for service, error in enable_errors:
+ msg = 'Failure enabling "{service}":\n{error}'.format(
+ service=service, error=str(error))
+ util.logexc(LOG, msg)
+ raise RuntimeError(
+ 'Failure enabling Ubuntu Advantage service(s): {}'.format(
+ ', '.join('"{}"'.format(service)
+ for service, _ in enable_errors)))
def maybe_install_ua_tools(cloud):
"""Install ubuntu-advantage-tools if not present."""
- if util.which('ubuntu-advantage'):
+ if util.which('ua'):
return
try:
cloud.distro.update_package_sources()
@@ -159,14 +150,28 @@ def maybe_install_ua_tools(cloud):
def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('ubuntu-advantage')
- if cfgin is None:
- LOG.debug(("Skipping module named %s,"
- " no 'ubuntu-advantage' key in configuration"), name)
+ ua_section = None
+ if 'ubuntu-advantage' in cfg:
+ LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.'
+ ' Expected underscore delimited "ubuntu_advantage"; will'
+ ' attempt to continue.')
+ ua_section = cfg['ubuntu-advantage']
+ if 'ubuntu_advantage' in cfg:
+ ua_section = cfg['ubuntu_advantage']
+ if ua_section is None:
+ LOG.debug("Skipping module named %s,"
+ " no 'ubuntu_advantage' configuration found", name)
return
-
validate_cloudconfig_schema(cfg, schema)
+ if 'commands' in ua_section:
+ msg = (
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"')
+ LOG.error(msg)
+ raise RuntimeError(msg)
+
maybe_install_ua_tools(cloud)
- run_commands(cfgin.get('commands', []))
+ configure_ua(token=ua_section.get('token'),
+ enable=ua_section.get('enable'))
# vi: ts=4 expandtab
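Purely illustrative: how an ``ubuntu_advantage`` section maps onto the ``ua``
invocations built by ``configure_ua()``; the token is a placeholder and no
command is run here::

    ua_section = {'token': '<ua_contract_token>', 'enable': ['fips', 'esm']}

    attach_cmd = ['ua', 'attach', ua_section['token']]
    enable_cmds = [['ua', 'enable', service]
                   for service in ua_section.get('enable', [])]

    print(attach_cmd)    # ['ua', 'attach', '<ua_contract_token>']
    print(enable_cmds)   # [['ua', 'enable', 'fips'], ['ua', 'enable', 'esm']]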
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
new file mode 100644
index 00000000..297451d6
--- /dev/null
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -0,0 +1,160 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Ubuntu Drivers: Interact with third party drivers in Ubuntu."""
+
+import os
+from textwrap import dedent
+
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import temp_utils
+from cloudinit import type_utils
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+distros = ['ubuntu']
+schema = {
+ 'id': 'cc_ubuntu_drivers',
+ 'name': 'Ubuntu Drivers',
+ 'title': 'Interact with third party drivers in Ubuntu.',
+ 'description': dedent("""\
+ This module interacts with the 'ubuntu-drivers' command to install
+ third party driver packages."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ drivers:
+ nvidia:
+ license-accepted: true
+ """)],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'drivers': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'nvidia': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'required': ['license-accepted'],
+ 'properties': {
+ 'license-accepted': {
+ 'type': 'boolean',
+ 'description': ("Do you accept the NVIDIA driver"
+ " license?"),
+ },
+ 'version': {
+ 'type': 'string',
+ 'description': (
+ 'The version of the driver to install (e.g.'
+ ' "390", "410"). Defaults to the latest'
+ ' version.'),
+ },
+ },
+ },
+ },
+ },
+ },
+}
+OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install'")
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
+
+# Use a debconf template to configure a global debconf variable
+# (linux/nvidia/latelink) setting this to "true" allows the
+# 'linux-restricted-modules' deb to accept the NVIDIA EULA and the package
+# will automatically link the drivers to the running kernel.
+
+# EOL_XENIAL: can then drop this script and use python3-debconf which is only
+# available in Bionic and later. Can't use python3-debconf currently as it
+# isn't in Xenial and doesn't yet support X_LOADTEMPLATEFILE debconf command.
+
+NVIDIA_DEBCONF_CONTENT = """\
+Template: linux/nvidia/latelink
+Type: boolean
+Default: true
+Description: Late-link NVIDIA kernel modules?
+ Enable this to link the NVIDIA kernel modules in cloud-init and
+ make them available for use.
+"""
+
+NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT = """\
+#!/bin/sh
+# Allow cloud-init to trigger EULA acceptance via registering a debconf
+# template to set linux/nvidia/latelink true
+. /usr/share/debconf/confmodule
+db_x_loadtemplatefile "$1" cloud-init
+"""
+
+
+def install_drivers(cfg, pkg_install_func):
+ if not isinstance(cfg, dict):
+ raise TypeError(
+ "'drivers' config expected dict, found '%s': %s" %
+ (type_utils.obj_name(cfg), cfg))
+
+ cfgpath = 'nvidia/license-accepted'
+ # Call translate_bool to ensure that we treat string values like "yes" as
+ # acceptance and _don't_ treat string values like "nah" as acceptance
+ # because they're True-ish
+ nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath))
+ if not nv_acc:
+ LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
+ return
+
+ if not util.which('ubuntu-drivers'):
+ LOG.debug("'ubuntu-drivers' command not available. "
+ "Installing ubuntu-drivers-common")
+ pkg_install_func(['ubuntu-drivers-common'])
+
+ driver_arg = 'nvidia'
+ version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version')
+ if version_cfg:
+ driver_arg += ':{}'.format(version_cfg)
+
+ LOG.debug("Installing and activating NVIDIA drivers (%s=%s, version=%s)",
+ cfgpath, nv_acc, version_cfg if version_cfg else 'latest')
+
+ # Register and set debconf selection linux/nvidia/latelink = true
+ tdir = temp_utils.mkdtemp(needs_exe=True)
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ debconf_script = os.path.join(tdir, 'nvidia-debconf.sh')
+ try:
+ util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT)
+ util.write_file(
+ debconf_script,
+ util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
+ mode=0o755)
+ util.subp([debconf_script, debconf_file])
+ except Exception as e:
+ util.logexc(
+ LOG, "Failed to register NVIDIA debconf template: %s", str(e))
+ raise
+ finally:
+ if os.path.isdir(tdir):
+ util.del_dir(tdir)
+
+ try:
+ util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
+ except util.ProcessExecutionError as exc:
+ if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
+ LOG.warning('the available version of ubuntu-drivers is'
+ ' too old to perform requested driver installation')
+ elif 'No drivers found for installation.' in exc.stdout:
+ LOG.warning('ubuntu-drivers found no drivers for installation')
+ raise
+
+
+def handle(name, cfg, cloud, log, _args):
+ if "drivers" not in cfg:
+ log.debug("Skipping module named %s, no 'drivers' key in config", name)
+ return
+
+ validate_cloudconfig_schema(cfg, schema)
+ install_drivers(cfg['drivers'], cloud.distro.install_packages)
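A sketch of how the optional ``version`` key changes the argument handed to
``ubuntu-drivers`` in ``install_drivers()``; the dict lookup stands in for
``util.get_cfg_by_path`` and no command is executed::

    def build_driver_cmd(cfg):
        driver_arg = 'nvidia'
        version = cfg.get('nvidia', {}).get('version')
        if version:
            driver_arg += ':{}'.format(version)
        return ['ubuntu-drivers', 'install', '--gpgpu', driver_arg]

    print(build_driver_cmd({'nvidia': {'license-accepted': True}}))
    print(build_driver_cmd({'nvidia': {'license-accepted': True, 'version': '430'}}))
    # ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia']
    # ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:430']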
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index c96eede1..03fffb96 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -62,8 +62,8 @@ def handle(name, cfg, cloud, log, _args):
if util.translate_bool(manage_hosts, addons=['template']):
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(("Option 'manage_etc_hosts' was set,"
+ " but no hostname was found"))
return
# Render from a template file
@@ -80,8 +80,8 @@ def handle(name, cfg, cloud, log, _args):
elif manage_hosts == "localhost":
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(("Option 'manage_etc_hosts' was set,"
+ " but no hostname was found"))
return
log.debug("Managing localhost in /etc/hosts")
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index c32a743a..13764e60 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -51,14 +51,14 @@ config keys for an entry in ``users`` are as follows:
a Snappy user through ``snap create-user``. If an Ubuntu SSO account is
associated with the address, username and SSH keys will be requested from
there. Default: none
- - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
+ - ``ssh_authorized_keys``: Optional. List of SSH keys to add to user's
authkeys file. Default: none. This key can not be combined with
``ssh_redirect_user``.
- ``ssh_import_id``: Optional. SSH id to import for user. Default: none.
This key can not be combined with ``ssh_redirect_user``.
- ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH
- logins for this user. When specified, all cloud meta-data public ssh
- keys will be set up in a disabled state for this username. Any ssh login
+ logins for this user. When specified, all cloud meta-data public SSH
+ keys will be set up in a disabled state for this username. Any SSH login
as this username will timeout and prompt with a message to login instead
as the configured <default_username> for this instance. Default: false.
This key can not be combined with ``ssh_import_id`` or
diff --git a/cloudinit/config/cc_vyos.py b/cloudinit/config/cc_vyos.py
index bd595397..e51ed7f2 100644
--- a/cloudinit/config/cc_vyos.py
+++ b/cloudinit/config/cc_vyos.py
@@ -24,17 +24,22 @@ import os
import re
import sys
import ast
-import subprocess
-from ipaddress import IPv4Network
+import ipaddress
from cloudinit import stages
from cloudinit import util
from cloudinit.distros import ug_util
from cloudinit.settings import PER_INSTANCE
+from cloudinit import handlers
+from cloudinit import log as logging
from vyos.configtree import ConfigTree
+# configure logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
frequency = PER_INSTANCE
class VyosError(Exception):
@@ -43,6 +48,7 @@ class VyosError(Exception):
"""
pass
+# configure user account with password
def set_pass_login(config, user, password, encrypted_pass):
if encrypted_pass:
config.set(['system', 'login', 'user', user, 'authentication', 'encrypted-password'], value=password, replace=True)
@@ -50,16 +56,15 @@ def set_pass_login(config, user, password, encrypted_pass):
config.set(['system', 'login', 'user', user, 'authentication', 'plaintext-password'], value=password, replace=True)
config.set_tag(['system', 'login', 'user'])
- config.set(['system', 'login', 'user', user, 'level'], value='admin', replace=True)
-
-def set_ssh_login(config, log, user, key_string, key_x):
+# configure user account with ssh key
+def set_ssh_login(config, user, key_string, key_x):
key_type = None
key_data = None
key_name = None
if key_string == '':
- log.debug("No keys found.")
+ logger.error("No keys found.")
return
key_parts = key_string.split(None)
@@ -72,11 +77,11 @@ def set_ssh_login(config, log, user, key_string, key_x):
key_data = key
if not key_type:
- util.logexc(log, 'Key type not defined, wrong ssh key format.')
+ logger.error("Key type not defined, wrong ssh key format.")
return
if not key_data:
- util.logexc(log, 'Key base64 not defined, wrong ssh key format.')
+ logger.error("Key base64 not defined, wrong ssh key format.")
return
if len(key_parts) > 2:
@@ -91,9 +96,9 @@ def set_ssh_login(config, log, user, key_string, key_x):
config.set(['system', 'login', 'user', user, 'authentication', 'public-keys', key_name , 'type'], value=key_type, replace=True)
config.set_tag(['system', 'login', 'user'])
config.set_tag(['system', 'login', 'user', user, 'authentication', 'public-keys'])
- config.set(['system', 'login', 'user', user, 'level'], value='admin', replace=True)
+# configure system parameters from OVF template
def set_config_ovf(config, hostname, metadata):
ip_0 = metadata['ip0']
mask_0 = metadata['netmask0']
@@ -105,7 +110,7 @@ def set_config_ovf(config, hostname, metadata):
APIDEBUG = metadata['APIDEBUG']
if ip_0 and ip_0 != 'null' and mask_0 and mask_0 != 'null' and gateway and gateway != 'null':
- cidr = str(IPv4Network('0.0.0.0/' + mask_0).prefixlen)
+ cidr = str(ipaddress.IPv4Network('0.0.0.0/' + mask_0).prefixlen)
ipcidr = ip_0 + '/' + cidr
config.set(['interfaces', 'ethernet', 'eth0', 'address'], value=ipcidr, replace=True)
@@ -148,59 +153,83 @@ def set_config_ovf(config, hostname, metadata):
config.set(['system', 'host-name'], value='vyos', replace=True)
-def set_config_interfaces(config, interface):
- for item in interface['subnets']:
- if item['type'] == 'static':
- if 'address' in item and runcommand("/usr/bin/ipaddrcheck --is-ipv4 " + item['address']) == 0:
- cidr = str(IPv4Network('0.0.0.0/' + item['netmask']).prefixlen)
- ipcidr = item['address'] + '/' + cidr
- config.set(['interfaces', 'ethernet', interface['name'], 'address'], value=ipcidr, replace=True)
- config.set_tag(['interfaces', 'ethernet'])
- if item['gateway']:
- config.set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'], value=item['gateway'], replace=True)
- config.set_tag(['protocols', 'static', 'route'])
- config.set_tag(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'])
+# configure interface
+def set_config_interfaces(config, iface_name, iface_config):
+ # configure DHCP client
+ if 'dhcp4' in iface_config:
+ if iface_config['dhcp4'] == True:
+ config.set(['interfaces', 'ethernet', iface_name, 'address'], value='dhcp', replace=True)
+ config.set_tag(['interfaces', 'ethernet'])
+ if 'dhcp6' in iface_config:
+ if iface_config['dhcp6'] == True:
+ config.set(['interfaces', 'ethernet', iface_name, 'address'], value='dhcp6', replace=True)
+ config.set_tag(['interfaces', 'ethernet'])
- if 'address' in item and runcommand("/usr/bin/ipaddrcheck --is-ipv6 " + item['address']) == 0:
- config.set(['interfaces', 'ethernet', interface['name'], 'address'], value=item['address'], replace=False)
- config.set_tag(['interfaces', 'ethernet'])
- if item['gateway']:
- config.set(['protocols', 'static', 'route6', '::/0', 'next-hop'], value=item['gateway'], replace=True)
- config.set_tag(['protocols', 'static', 'route6'])
- config.set_tag(['protocols', 'static', 'route6', '::/0', 'next-hop'])
- else:
- config.set(['interfaces', 'ethernet', interface['name'], 'address'], value='dhcp', replace=True)
+ # configure static addresses
+ if 'addresses' in iface_config:
+ for item in iface_config['addresses']:
+ config.set(['interfaces', 'ethernet', iface_name, 'address'], value=item, replace=True)
config.set_tag(['interfaces', 'ethernet'])
+ # configure gateways
+ if 'gateway4' in iface_config:
+ config.set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'], value=iface_config['gateway4'], replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'])
+ if 'gateway6' in iface_config:
+ config.set(['protocols', 'static', 'route6', '::/0', 'next-hop'], value=iface_config['gateway6'], replace=True)
+ config.set_tag(['protocols', 'static', 'route6'])
+ config.set_tag(['protocols', 'static', 'route6', '::/0', 'next-hop'])
+
+ # configure MTU
+ if 'mtu' in iface_config:
+ config.set(['interfaces', 'ethernet', iface_name, 'mtu'], value=iface_config['mtu'], replace=True)
+ config.set_tag(['interfaces', 'ethernet'])
+
+ # configure routes
+ if 'routes' in iface_config:
+ for item in iface_config['routes']:
+ try:
+ if ipaddress.ip_network(item['to']).version == 4:
+ config.set(['protocols', 'static', 'route', item['to'], 'next-hop'], value=item['via'], replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', item['to'], 'next-hop'])
+ if ipaddress.ip_network(item['to']).version == 6:
+ config.set(['protocols', 'static', 'route6', item['to'], 'next-hop'], value=item['via'], replace=True)
+ config.set_tag(['protocols', 'static', 'route6'])
+ config.set_tag(['protocols', 'static', 'route6', item['to'], 'next-hop'])
+ except Exception as err:
+ logger.error("Unable to determine IP protocol version: {}".format(err))
-def set_config_nameserver(config, log, interface):
- if 'address' in interface:
- for server in interface['address']:
- config.set(['system', 'name-server'], value=server, replace=False)
- else:
- log.debug("No name-servers found.")
- if 'search' in interface:
- for server in interface['search']:
- config.set(['system', 'domain-search'], value=server, replace=False)
- else:
- log.debug("No search-domains found.")
+ # configure nameservers
+ if 'nameservers' in iface_config:
+ if 'search' in iface_config['nameservers']:
+ for item in iface_config['nameservers']['search']:
+ config.set(['system', 'domain-search'], value=item, replace=False)
+ if 'addresses' in iface_config['nameservers']:
+ for item in iface_config['nameservers']['addresses']:
+ config.set(['system', 'name-server'], value=item, replace=False)
+# configure DHCP client for interface
def set_config_dhcp(config):
config.set(['interfaces', 'ethernet', 'eth0', 'address'], value='dhcp', replace=True)
config.set_tag(['interfaces', 'ethernet'])
+# configure SSH server service
def set_config_ssh(config):
config.set(['service', 'ssh'], replace=True)
config.set(['service', 'ssh', 'port'], value='22', replace=True)
config.set(['service', 'ssh', 'client-keepalive-interval'], value='180', replace=True)
+# configure hostname
def set_config_hostname(config, hostname):
config.set(['system', 'host-name'], value=hostname, replace=True)
+# configure SSH, eth0 interface and hostname
def set_config_cloud(config, hostname):
config.set(['service', 'ssh'], replace=True)
config.set(['service', 'ssh', 'port'], value='22', replace=True)
@@ -210,16 +239,7 @@ def set_config_cloud(config, hostname):
config.set(['system', 'host-name'], value=hostname, replace=True)
-def runcommand(cmd):
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- universal_newlines=True)
- std_out, std_err = proc.communicate()
- return proc.returncode
-
-
+# main config handler
def handle(name, cfg, cloud, log, _args):
init = stages.Init()
dc = init.fetch()
@@ -256,7 +276,7 @@ def handle(name, cfg, cloud, log, _args):
vyos_keys = metadata['public-keys']
for ssh_key in vyos_keys:
- set_ssh_login(config, log, user, ssh_key, key_x)
+ set_ssh_login(config, user, ssh_key, key_x)
key_x = key_x + 1
else:
encrypted_pass = False
@@ -284,20 +304,17 @@ def handle(name, cfg, cloud, log, _args):
vyos_keys.extend(cfgkeys)
for ssh_key in vyos_keys:
- set_ssh_login(config, log, user, ssh_key, key_x)
+ set_ssh_login(config, user, ssh_key, key_x)
key_x = key_x + 1
if 'OVF' in dc.dsname:
set_config_ovf(config, hostname, metadata)
key_y = 1
elif netcfg:
- for interface in netcfg['config']:
- if interface['type'] == 'physical':
- key_y = 1
- set_config_interfaces(config, interface)
-
- if interface['type'] == 'nameserver':
- set_config_nameserver(config, log, interface)
+ if 'ethernets' in netcfg:
+ key_y = 1
+ for interface_name, interface_config in netcfg['ethernets'].items():
+ set_config_interfaces(config, interface_name, interface_config)
set_config_ssh(config)
set_config_hostname(config, hostname)
@@ -313,4 +330,4 @@ def handle(name, cfg, cloud, log, _args):
with open(cfg_file_name, 'w') as f:
f.write(config.to_string())
except Exception as e:
- util.logexc(log, "Failed to write configs into file %s error %s", file_name, e)
+ logger.error("Failed to write config into file %s: %s", cfg_file_name, e)
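An illustrative walk through the netplan-style ``ethernets`` handling in
``set_config_interfaces()``, collecting the VyOS config paths it would set; a
plain list stands in for ``vyos.configtree.ConfigTree`` and the addresses are
examples only::

    import ipaddress

    def interface_paths(iface_name, iface_config):
        """Collect (path, value) pairs the way set_config_interfaces() does."""
        paths = []
        if iface_config.get('dhcp4') is True:
            paths.append((['interfaces', 'ethernet', iface_name, 'address'], 'dhcp'))
        for addr in iface_config.get('addresses', []):
            paths.append((['interfaces', 'ethernet', iface_name, 'address'], addr))
        if 'gateway4' in iface_config:
            paths.append((['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'],
                          iface_config['gateway4']))
        for route in iface_config.get('routes', []):
            key = 'route' if ipaddress.ip_network(route['to']).version == 4 else 'route6'
            paths.append((['protocols', 'static', key, route['to'], 'next-hop'],
                          route['via']))
        return paths

    netcfg = {'ethernets': {'eth0': {'addresses': ['192.0.2.10/24'],
                                     'gateway4': '192.0.2.1'}}}
    for name, icfg in netcfg['ethernets'].items():
        for path, value in interface_paths(name, icfg):
            print(' '.join(path), '=', value)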
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 0b6546e2..bd87e9e5 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -57,7 +57,6 @@ binary gzip data can be specified and will be decoded before being written.
import base64
import os
-import six
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
@@ -126,7 +125,7 @@ def decode_perms(perm, default):
if perm is None:
return default
try:
- if isinstance(perm, six.integer_types + (float,)):
+ if isinstance(perm, (int, float)):
# Just 'downcast' it (if a float)
return int(perm)
else:
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 6a42f499..3673166a 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -30,13 +30,9 @@ entry, the config entry will be skipped.
# any repository configuration options (see man yum.conf)
"""
+import io
import os
-
-try:
- from configparser import ConfigParser
-except ImportError:
- from ConfigParser import ConfigParser
-import six
+from configparser import ConfigParser
from cloudinit import util
@@ -57,7 +53,7 @@ def _format_repo_value(val):
# Can handle 'lists' in certain cases
# See: https://linux.die.net/man/5/yum.conf
return "\n".join([_format_repo_value(v) for v in val])
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
return str(val)
return val
@@ -72,7 +68,7 @@ def _format_repository_config(repo_id, repo_config):
# For now assume that people using this know
# the format of yum and don't verify keys/values further
to_be.set(repo_id, k, _format_repo_value(v))
- to_be_stream = six.StringIO()
+ to_be_stream = io.StringIO()
to_be.write(to_be_stream)
to_be_stream.seek(0)
lines = to_be_stream.readlines()
@@ -113,16 +109,16 @@ def handle(name, cfg, _cloud, log, _args):
missing_required = 0
for req_field in ['baseurl']:
if req_field not in repo_config:
- log.warn(("Repository %s does not contain a %s"
- " configuration 'required' entry"),
- repo_id, req_field)
+ log.warning(("Repository %s does not contain a %s"
+ " configuration 'required' entry"),
+ repo_id, req_field)
missing_required += 1
if not missing_required:
repo_configs[canon_repo_id] = repo_config
repo_locations[canon_repo_id] = repo_fn_pth
else:
- log.warn("Repository %s is missing %s required fields, skipping!",
- repo_id, missing_required)
+ log.warning("Repository %s is missing %s required fields, "
+ "skipping!", repo_id, missing_required)
for (c_repo_id, path) in repo_locations.items():
repo_blob = _format_repository_config(c_repo_id,
repo_configs.get(c_repo_id))
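
The py3-only rewrite above serializes each repository section through configparser into an in-memory io.StringIO buffer instead of six.StringIO. A minimal, self-contained illustration of that pattern (the repo id and options are made up, not taken from the module):

    # Minimal illustration of the ConfigParser + io.StringIO pattern.
    import io
    from configparser import ConfigParser

    parser = ConfigParser()
    parser.add_section('epel-testing')
    parser.set('epel-testing', 'baseurl', 'http://example.com/epel')
    parser.set('epel-testing', 'enabled', '1')

    buf = io.StringIO()
    parser.write(buf)
    print(buf.getvalue())
    # [epel-testing]
    # baseurl = http://example.com/epel
    # enabled = 1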
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index aba26952..05855b0c 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -7,7 +7,6 @@
import configobj
import os
-from six import string_types
from textwrap import dedent
from cloudinit.config.schema import get_schema_doc
@@ -110,7 +109,7 @@ def _format_repo_value(val):
return 1 if val else 0
if isinstance(val, (list, tuple)):
return "\n ".join([_format_repo_value(v) for v in val])
- if not isinstance(val, string_types):
+ if not isinstance(val, str):
return str(val)
return val
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 080a6d06..807c3eee 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -367,7 +367,7 @@ def handle_schema_args(name, args):
if not args.annotate:
error(str(e))
except RuntimeError as e:
- error(str(e))
+ error(str(e))
else:
print("Valid cloud-config file {0}".format(args.config_file))
if args.doc:
diff --git a/cloudinit/config/tests/test_apt_pipelining.py b/cloudinit/config/tests/test_apt_pipelining.py
new file mode 100644
index 00000000..2a6bb10b
--- /dev/null
+++ b/cloudinit/config/tests/test_apt_pipelining.py
@@ -0,0 +1,28 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_apt_pipelining handler"""
+
+import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+
+class TestAptPipelining(CiTestCase):
+
+ @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
+ def test_not_disabled_by_default(self, m_write_file):
+ """ensure that default behaviour is to not disable pipelining"""
+ cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None)
+ self.assertEqual(0, m_write_file.call_count)
+
+ @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
+ def test_false_disables_pipelining(self, m_write_file):
+ """ensure that pipelining can be disabled with correct config"""
+ cc_apt_pipelining.handle(
+ 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None)
+ self.assertEqual(1, m_write_file.call_count)
+ args, _ = m_write_file.call_args
+ self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0])
+ self.assertIn('Pipeline-Depth "0"', args[1])
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index b051ec82..8247c388 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -1,6 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import mock
+from unittest import mock
from cloudinit.config import cc_set_passwords as setpass
from cloudinit.tests.helpers import CiTestCase
@@ -45,7 +45,7 @@ class TestHandleSshPwauth(CiTestCase):
"""If config is not updated, then no system restart should be done."""
setpass.handle_ssh_pwauth(True)
m_subp.assert_not_called()
- self.assertIn("No need to restart ssh", self.logs.getvalue())
+ self.assertIn("No need to restart SSH", self.logs.getvalue())
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
@mock.patch(MODPATH + "util.subp")
@@ -68,4 +68,88 @@ class TestHandleSshPwauth(CiTestCase):
m_update.assert_called_with({optname: optval})
m_subp.assert_not_called()
+
+class TestSetPasswordsHandle(CiTestCase):
+ """Test cc_set_passwords.handle"""
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestSetPasswordsHandle, self).setUp()
+ self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err')
+
+ def test_handle_on_empty_config(self, *args):
+ """handle logs that no password has changed when config is empty."""
+ cloud = self.tmp_cloud(distro='ubuntu')
+ setpass.handle(
+ 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[])
+ self.assertEqual(
+ "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
+ 'ssh_pwauth=None\n',
+ self.logs.getvalue())
+
+ @mock.patch(MODPATH + "util.subp")
+ def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
+        """handle parses common password hashes in the chpasswd list."""
+ cloud = self.tmp_cloud(distro='ubuntu')
+ valid_hashed_pwds = [
+ 'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/'
+ 'Dlew1Va',
+ 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
+ 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
+ cfg = {'chpasswd': {'list': valid_hashed_pwds}}
+ with mock.patch(MODPATH + 'util.subp') as m_subp:
+ setpass.handle(
+ 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+ self.assertIn(
+ 'DEBUG: Handling input for chpasswd as list.',
+ self.logs.getvalue())
+ self.assertIn(
+ "DEBUG: Setting hashed password for ['root', 'ubuntu']",
+ self.logs.getvalue())
+ self.assertEqual(
+ [mock.call(['chpasswd', '-e'],
+ '\n'.join(valid_hashed_pwds) + '\n')],
+ m_subp.call_args_list)
+
+ @mock.patch(MODPATH + "util.is_FreeBSD")
+ @mock.patch(MODPATH + "util.subp")
+ def test_freebsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
+ self, m_subp, m_is_freebsd):
+ """FreeBSD calls custom pw commands instead of chpasswd and passwd"""
+ m_is_freebsd.return_value = True
+ cloud = self.tmp_cloud(distro='freebsd')
+ valid_pwds = ['ubuntu:passw0rd']
+ cfg = {'chpasswd': {'list': valid_pwds}}
+ setpass.handle(
+ 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+ self.assertEqual([
+ mock.call(['pw', 'usermod', 'ubuntu', '-h', '0'], data='passw0rd',
+ logstring="chpasswd for ubuntu"),
+ mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
+ m_subp.call_args_list)
+
+ @mock.patch(MODPATH + "util.is_FreeBSD")
+ @mock.patch(MODPATH + "util.subp")
+ def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
+ m_is_freebsd):
+        """handle sets random passwords when RANDOM or R is requested."""
+ m_is_freebsd.return_value = False
+ cloud = self.tmp_cloud(distro='ubuntu')
+ valid_random_pwds = [
+ 'root:R',
+ 'ubuntu:RANDOM']
+ cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
+ with mock.patch(MODPATH + 'util.subp') as m_subp:
+ setpass.handle(
+ 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+ self.assertIn(
+ 'DEBUG: Handling input for chpasswd as list.',
+ self.logs.getvalue())
+ self.assertNotEqual(
+ [mock.call(['chpasswd'],
+ '\n'.join(valid_random_pwds) + '\n')],
+ m_subp.call_args_list)
+
+
# vi: ts=4 expandtab
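
The hashed-password test above pins down the input format handed to "chpasswd -e": one user:hash entry per line, joined with newlines and ending with a trailing newline. A standalone sketch of building that blob (the entries are made up, and the subp call is only indicated in a comment):

    # Sketch of the 'user:hashed-password' input the test expects to be piped
    # to `chpasswd -e`; the entries below are made-up examples.
    hashed_pwds = [
        'root:$6$examplesalt$examplehashvalue',
        'ubuntu:$6$othersalt$otherhashvalue',
    ]
    chpasswd_input = '\n'.join(hashed_pwds) + '\n'
    print(chpasswd_input, end='')
    # the module then runs something along the lines of:
    #   util.subp(['chpasswd', '-e'], chpasswd_input)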
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index 3c472891..cbbb173d 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import re
-from six import StringIO
+from io import StringIO
from cloudinit.config.cc_snap import (
ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse,
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
index c8a4271f..0c554414 100644
--- a/cloudinit/config/tests/test_ssh.py
+++ b/cloudinit/config/tests/test_ssh.py
@@ -1,9 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os.path
from cloudinit.config import cc_ssh
from cloudinit import ssh_util
from cloudinit.tests.helpers import CiTestCase, mock
+import logging
+
+LOG = logging.getLogger(__name__)
MODPATH = "cloudinit.config.cc_ssh."
@@ -12,6 +16,25 @@ MODPATH = "cloudinit.config.cc_ssh."
class TestHandleSsh(CiTestCase):
"""Test cc_ssh handling of ssh config."""
+ def _publish_hostkey_test_setup(self):
+ self.test_hostkeys = {
+ 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'),
+ 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'),
+ 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'),
+ 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'),
+ }
+ self.test_hostkey_files = []
+ hostkey_tmpdir = self.tmp_dir()
+ for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']:
+ key_data = self.test_hostkeys[key_type]
+ filename = 'ssh_host_%s_key.pub' % key_type
+ filepath = os.path.join(hostkey_tmpdir, filename)
+ self.test_hostkey_files.append(filepath)
+ with open(filepath, 'w') as f:
+ f.write(' '.join(key_data))
+
+ cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key')
+
def test_apply_credentials_with_user(self, m_setup_keys):
"""Apply keys for the given user and root."""
keys = ["key1"]
@@ -64,9 +87,10 @@ class TestHandleSsh(CiTestCase):
         # Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ([], {})
+ cc_ssh.PUBLISH_HOST_KEYS = False
cloud = self.tmp_cloud(
distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, None, None)
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
options = options.replace("$DISABLE_USER", "root")
m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
@@ -82,6 +106,31 @@ class TestHandleSsh(CiTestCase):
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
+ def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test allow_public_ssh_keys=False ignores ssh public keys from
+ platform.
+ """
+ cfg = {"allow_public_ssh_keys": False}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(), user),
+ mock.call(set(), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
m_glob, m_setup_keys):
"""Test handle with no config and a default distro user."""
@@ -94,7 +143,7 @@ class TestHandleSsh(CiTestCase):
m_nug.return_value = ({user: {"default": user}}, {})
cloud = self.tmp_cloud(
distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, None, None)
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
@@ -119,7 +168,7 @@ class TestHandleSsh(CiTestCase):
m_nug.return_value = ({user: {"default": user}}, {})
cloud = self.tmp_cloud(
distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, None, None)
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
@@ -144,8 +193,153 @@ class TestHandleSsh(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', metadata={'public-keys': keys})
cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
- cc_ssh.handle("name", cfg, cloud, None, None)
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
self.assertEqual([mock.call(set(keys), user),
mock.call(set(keys), "root", options="")],
m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_default(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle publishes host keys by default when supported."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {}
+ expected_call = [self.test_hostkeys[key_type] for key_type
+ in ['ecdsa', 'ed25519', 'rsa']]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual([mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_config_enable(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle publishes host keys when enabled in config."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = False
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {'ssh_publish_hostkeys': {'enabled': True}}
+ expected_call = [self.test_hostkeys[key_type] for key_type
+ in ['ecdsa', 'ed25519', 'rsa']]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual([mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_config_disable(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle does not publish host keys when disabled in config."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {'ssh_publish_hostkeys': {'enabled': False}}
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertFalse(cloud.datasource.publish_host_keys.call_args_list)
+ cloud.datasource.publish_host_keys.assert_not_called()
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_config_blacklist(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle skips blacklisted key types when publishing."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {'ssh_publish_hostkeys': {'enabled': True,
+ 'blacklist': ['dsa', 'rsa']}}
+ expected_call = [self.test_hostkeys[key_type] for key_type
+ in ['ecdsa', 'ed25519']]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual([mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_empty_blacklist(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle publishes all key types when blacklist is empty."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {'ssh_publish_hostkeys': {'enabled': True,
+ 'blacklist': []}}
+ expected_call = [self.test_hostkeys[key_type] for key_type
+ in ['dsa', 'ecdsa', 'ed25519', 'rsa']]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual([mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list)
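
All of the publish-hostkeys tests above exercise the same config shape, so the filtering they assert on can be shown in isolation. The snippet below is an illustrative filter only, not cc_ssh's actual helper, and the key data is abbreviated:

    # Illustrative blacklist filtering of host key types, mirroring what the
    # tests above assert; not cc_ssh's real implementation.
    hostkeys = {
        'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'),
        'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'),
        'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'),
        'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'),
    }
    cfg = {'ssh_publish_hostkeys': {'enabled': True,
                                    'blacklist': ['dsa', 'rsa']}}

    blacklist = cfg['ssh_publish_hostkeys'].get('blacklist', [])
    to_publish = [key for key_type, key in sorted(hostkeys.items())
                  if key_type not in blacklist]
    print(to_publish)  # only the ecdsa and ed25519 entries remain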
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index b7cf9bee..8c4161ef 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -1,10 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import re
-from six import StringIO
-
from cloudinit.config.cc_ubuntu_advantage import (
- handle, maybe_install_ua_tools, run_commands, schema)
+ configure_ua, handle, maybe_install_ua_tools, schema)
from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import util
from cloudinit.tests.helpers import (
@@ -20,90 +17,120 @@ class FakeCloud(object):
self.distro = distro
-class TestRunCommands(CiTestCase):
+class TestConfigureUA(CiTestCase):
with_logs = True
allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
def setUp(self):
- super(TestRunCommands, self).setUp()
+ super(TestConfigureUA, self).setUp()
self.tmp = self.tmp_dir()
@mock.patch('%s.util.subp' % MPATH)
- def test_run_commands_on_empty_list(self, m_subp):
- """When provided with an empty list, run_commands does nothing."""
- run_commands([])
- self.assertEqual('', self.logs.getvalue())
- m_subp.assert_not_called()
-
- def test_run_commands_on_non_list_or_dict(self):
- """When provided an invalid type, run_commands raises an error."""
- with self.assertRaises(TypeError) as context_manager:
- run_commands(commands="I'm Not Valid")
+ def test_configure_ua_attach_error(self, m_subp):
+ """Errors from ua attach command are raised."""
+ m_subp.side_effect = util.ProcessExecutionError(
+ 'Invalid token SomeToken')
+ with self.assertRaises(RuntimeError) as context_manager:
+ configure_ua(token='SomeToken')
self.assertEqual(
- "commands parameter was not a list or dict: I'm Not Valid",
+ 'Failure attaching Ubuntu Advantage:\nUnexpected error while'
+ ' running command.\nCommand: -\nExit code: -\nReason: -\n'
+ 'Stdout: Invalid token SomeToken\nStderr: -',
str(context_manager.exception))
- def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
- """All exit codes are logged to stderr."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
-
- cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'bogus command'
- cmd3 = 'echo "MOM" >> %s' % outfile
- commands = [cmd1, cmd2, cmd3]
-
- mock_path = '%s.sys.stderr' % MPATH
- with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
- with self.assertRaises(RuntimeError) as context_manager:
- run_commands(commands=commands)
-
- self.assertIsNotNone(
- re.search(r'bogus: (command )?not found',
- str(context_manager.exception)),
- msg='Expected bogus command not found')
- expected_stderr_log = '\n'.join([
- 'Begin run command: {cmd}'.format(cmd=cmd1),
- 'End run command: exit(0)',
- 'Begin run command: {cmd}'.format(cmd=cmd2),
- 'ERROR: End run command: exit(127)',
- 'Begin run command: {cmd}'.format(cmd=cmd3),
- 'End run command: exit(0)\n'])
- self.assertEqual(expected_stderr_log, m_stderr.getvalue())
-
- def test_run_command_as_lists(self):
- """When commands are specified as a list, run them in order."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
-
- cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'echo "MOM" >> %s' % outfile
- commands = [cmd1, cmd2]
- with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):
- run_commands(commands=commands)
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_token(self, m_subp):
+ """When token is provided, attach the machine to ua using the token."""
+ configure_ua(token='SomeToken')
+ m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
+ self.assertEqual(
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+ self.logs.getvalue())
+
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_on_service_error(self, m_subp):
+        """All services should be enabled and any failures raised afterwards."""
+ def fake_subp(cmd, capture=None):
+ fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
+ if cmd in fail_cmds and capture:
+ svc = cmd[-1]
+ raise util.ProcessExecutionError(
+ 'Invalid {} credentials'.format(svc.upper()))
+
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(RuntimeError) as context_manager:
+ configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips'])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [mock.call(['ua', 'attach', 'SomeToken']),
+ mock.call(['ua', 'enable', 'esm'], capture=True),
+ mock.call(['ua', 'enable', 'cc'], capture=True),
+ mock.call(['ua', 'enable', 'fips'], capture=True)])
self.assertIn(
- 'DEBUG: Running user-provided ubuntu-advantage commands',
+ 'WARNING: Failure enabling "esm":\nUnexpected error'
+ ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
+ 'Stdout: Invalid ESM credentials\nStderr: -\n',
self.logs.getvalue())
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
self.assertIn(
- 'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage'
- ' config:',
+ 'WARNING: Failure enabling "cc":\nUnexpected error'
+ ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
+ 'Stdout: Invalid CC credentials\nStderr: -\n',
+ self.logs.getvalue())
+ self.assertEqual(
+ 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
+ str(context_manager.exception))
+
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_empty_services(self, m_subp):
+        """When enable is an empty list, attach but do not enable any services."""
+ configure_ua(token='SomeToken', enable=[])
+ m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
+ self.assertEqual(
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- def test_run_command_dict_sorted_as_command_script(self):
- """When commands are a dict, sort them and run."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
- cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'echo "MOM" >> %s' % outfile
- commands = {'02': cmd1, '01': cmd2}
- with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):
- run_commands(commands=commands)
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_specific_services(self, m_subp):
+        """When enable is a list, only enable the specified services."""
+ configure_ua(token='SomeToken', enable=['fips'])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [mock.call(['ua', 'attach', 'SomeToken']),
+ mock.call(['ua', 'enable', 'fips'], capture=True)])
+ self.assertEqual(
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+ self.logs.getvalue())
+
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_string_services(self, m_subp):
+        """When enable is a string, treat it as a singleton list and warn"""
+ configure_ua(token='SomeToken', enable='fips')
+ self.assertEqual(
+ m_subp.call_args_list,
+ [mock.call(['ua', 'attach', 'SomeToken']),
+ mock.call(['ua', 'enable', 'fips'], capture=True)])
+ self.assertEqual(
+ 'WARNING: ubuntu_advantage: enable should be a list, not a'
+ ' string; treating as a single enable\n'
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+ self.logs.getvalue())
- expected_messages = [
- 'DEBUG: Running user-provided ubuntu-advantage commands']
- for message in expected_messages:
- self.assertIn(message, self.logs.getvalue())
- self.assertEqual('MOM\nHI\n', util.load_file(outfile))
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_weird_services(self, m_subp):
+        """When enable is neither a string nor a list, warn but still attach"""
+ configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
+ self.assertEqual(
+ m_subp.call_args_list,
+ [mock.call(['ua', 'attach', 'SomeToken'])])
+ self.assertEqual(
+ 'WARNING: ubuntu_advantage: enable should be a list, not a'
+ ' dict; skipping enabling services\n'
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+ self.logs.getvalue())
@skipUnlessJsonSchema()
@@ -112,90 +139,50 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
with_logs = True
schema = schema
- def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):
- """If ubuntu-advantage configuration is not a dict, emit a warning."""
- validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema)
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
+ """If ubuntu_advantage configuration is not a dict, emit a warning."""
+ validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not"
+ "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not"
" of type 'object'\n",
self.logs.getvalue())
- @mock.patch('%s.run_commands' % MPATH)
- def test_schema_disallows_unknown_keys(self, _):
- """Unknown keys in ubuntu-advantage configuration emit warnings."""
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_schema_disallows_unknown_keys(self, _cfg, _):
+ """Unknown keys in ubuntu_advantage configuration emit warnings."""
validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}},
+ {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}},
schema)
self.assertIn(
- 'WARNING: Invalid config:\nubuntu-advantage: Additional properties'
+ 'WARNING: Invalid config:\nubuntu_advantage: Additional properties'
" are not allowed ('invalid-key' was unexpected)",
self.logs.getvalue())
- def test_warn_schema_requires_commands(self):
- """Warn when ubuntu-advantage configuration lacks commands."""
- validate_cloudconfig_schema(
- {'ubuntu-advantage': {}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a"
- " required property\n",
- self.logs.getvalue())
-
- @mock.patch('%s.run_commands' % MPATH)
- def test_warn_schema_commands_is_not_list_or_dict(self, _):
- """Warn when ubuntu-advantage:commands config is not a list or dict."""
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_warn_schema_requires_token(self, _cfg, _):
+ """Warn if ubuntu_advantage configuration lacks token."""
validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': 'broken'}}, schema)
+ {'ubuntu_advantage': {'enable': ['esm']}}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is"
- " not of type 'object', 'array'\n",
- self.logs.getvalue())
+ "WARNING: Invalid config:\nubuntu_advantage:"
+ " 'token' is a required property\n", self.logs.getvalue())
- @mock.patch('%s.run_commands' % MPATH)
- def test_warn_schema_when_commands_is_empty(self, _):
- """Emit warnings when ubuntu-advantage:commands is empty."""
- validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': []}}, schema)
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _):
+ """Warn when ubuntu_advantage:enable config is not a list."""
validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': {}}}, schema)
+ {'ubuntu_advantage': {'enable': 'needslist'}}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too"
- " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}"
- " does not have enough properties\n",
+ "WARNING: Invalid config:\nubuntu_advantage: 'token' is a"
+ " required property\nubuntu_advantage.enable: 'needslist'"
+ " is not of type 'array'\n",
self.logs.getvalue())
- @mock.patch('%s.run_commands' % MPATH)
- def test_schema_when_commands_are_list_or_dict(self, _):
- """No warnings when ubuntu-advantage:commands are a list or dict."""
- validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': ['valid']}}, schema)
- validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema)
- self.assertEqual('', self.logs.getvalue())
-
- def test_duplicates_are_fine_array_array(self):
- """Duplicated commands array/array entries are allowed."""
- self.assertSchemaValid(
- {'commands': [["echo", "bye"], ["echo" "bye"]]},
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_array_string(self):
- """Duplicated commands array/string entries are allowed."""
- self.assertSchemaValid(
- {'commands': ["echo bye", "echo bye"]},
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_dict_array(self):
- """Duplicated commands dict/array entries are allowed."""
- self.assertSchemaValid(
- {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_dict_string(self):
- """Duplicated commands dict/string entries are allowed."""
- self.assertSchemaValid(
- {'commands': {'00': "echo bye", '01': "echo bye"}},
- "command entries can be duplicate.")
-
class TestHandle(CiTestCase):
@@ -205,41 +192,89 @@ class TestHandle(CiTestCase):
super(TestHandle, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.run_commands' % MPATH)
@mock.patch('%s.validate_cloudconfig_schema' % MPATH)
- def test_handle_no_config(self, m_schema, m_run):
+ def test_handle_no_config(self, m_schema):
"""When no ua-related configuration is provided, nothing happens."""
cfg = {}
handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None)
self.assertIn(
- "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key"
- " in config",
+ "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'"
+ ' configuration found',
self.logs.getvalue())
m_schema.assert_not_called()
- m_run.assert_not_called()
+ @mock.patch('%s.configure_ua' % MPATH)
@mock.patch('%s.maybe_install_ua_tools' % MPATH)
- def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install):
+ def test_handle_tries_to_install_ubuntu_advantage_tools(
+ self, m_install, m_cfg):
"""If ubuntu_advantage is provided, try installing ua-tools package."""
- cfg = {'ubuntu-advantage': {}}
+ cfg = {'ubuntu_advantage': {'token': 'valid'}}
mycloud = FakeCloud(None)
handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
m_install.assert_called_once_with(mycloud)
+ @mock.patch('%s.configure_ua' % MPATH)
@mock.patch('%s.maybe_install_ua_tools' % MPATH)
- def test_handle_runs_commands_provided(self, m_install):
- """When commands are specified as a list, run them."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ def test_handle_passes_credentials_and_services_to_configure_ua(
+ self, m_install, m_configure_ua):
+ """All ubuntu_advantage config keys are passed to configure_ua."""
+ cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}}
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ m_configure_ua.assert_called_once_with(
+ token='token', enable=['esm'])
+
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config(
+ self, m_configure_ua):
+ """Warning when ubuntu-advantage key is present with new config"""
+ cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}}
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
+ ' provided. Expected underscore delimited "ubuntu_advantage";'
+ ' will attempt to continue.',
+ self.logs.getvalue().splitlines()[0])
+ m_configure_ua.assert_called_once_with(
+ token='token', enable=['esm'])
+
+ def test_handle_error_on_deprecated_commands_key_dashed(self):
+ """Error when commands is present in ubuntu-advantage key."""
+ cfg = {'ubuntu-advantage': {'commands': 'nogo'}}
+ with self.assertRaises(RuntimeError) as context_manager:
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"',
+ str(context_manager.exception))
+
+ def test_handle_error_on_deprecated_commands_key_underscored(self):
+ """Error when commands is present in ubuntu_advantage key."""
+ cfg = {'ubuntu_advantage': {'commands': 'nogo'}}
+ with self.assertRaises(RuntimeError) as context_manager:
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"',
+ str(context_manager.exception))
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_handle_prefers_new_style_config(
+ self, m_configure_ua):
+ """ubuntu_advantage should be preferred over ubuntu-advantage"""
cfg = {
- 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile,
- 'echo "MOM" >> %s' % outfile]}}
- mock_path = '%s.sys.stderr' % MPATH
- with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
- with mock.patch(mock_path, new_callable=StringIO):
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger,
- args=None)
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
+ 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']},
+ 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']},
+ }
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
+ ' provided. Expected underscore delimited "ubuntu_advantage";'
+ ' will attempt to continue.',
+ self.logs.getvalue().splitlines()[0])
+ m_configure_ua.assert_called_once_with(
+ token='token', enable=['esm'])
class TestMaybeInstallUATools(CiTestCase):
@@ -253,7 +288,7 @@ class TestMaybeInstallUATools(CiTestCase):
@mock.patch('%s.util.which' % MPATH)
def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
"""Do nothing if ubuntu-advantage-tools already exists."""
- m_which.return_value = '/usr/bin/ubuntu-advantage' # already installed
+ m_which.return_value = '/usr/bin/ua' # already installed
distro = mock.MagicMock()
distro.update_package_sources.side_effect = RuntimeError(
'Some apt error')
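
Taken together, the TestConfigureUA cases above pin down a call sequence: one 'ua attach <token>' followed by one 'ua enable <service>' per requested service, with enable failures collected and raised only after every service has been attempted. A rough sketch of that control flow, assuming util.subp as used in the tests rather than the module's actual code:

    # Rough sketch of the attach-then-enable flow the tests assert on; this is
    # not the module's implementation.
    from cloudinit import util

    def configure_ua_sketch(token, enable=None):
        enable = enable or []
        util.subp(['ua', 'attach', token])
        failures = []
        for service in enable:
            try:
                util.subp(['ua', 'enable', service], capture=True)
            except util.ProcessExecutionError:
                failures.append(service)
        if failures:
            raise RuntimeError(
                'Failure enabling Ubuntu Advantage service(s): ' +
                ', '.join('"%s"' % s for s in failures))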
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
new file mode 100644
index 00000000..46952692
--- /dev/null
+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
@@ -0,0 +1,237 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+
+from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
+from cloudinit.config.schema import (
+ SchemaValidationError, validate_cloudconfig_schema)
+from cloudinit.config import cc_ubuntu_drivers as drivers
+from cloudinit.util import ProcessExecutionError
+
+MPATH = "cloudinit.config.cc_ubuntu_drivers."
+M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
+OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install' "
+ "(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
+
+
+class AnyTempScriptAndDebconfFile(object):
+
+ def __init__(self, tmp_dir, debconf_file):
+ self.tmp_dir = tmp_dir
+ self.debconf_file = debconf_file
+
+ def __eq__(self, cmd):
+ if not len(cmd) == 2:
+ return False
+ script, debconf_file = cmd
+ if bool(script.startswith(self.tmp_dir) and script.endswith('.sh')):
+ return debconf_file == self.debconf_file
+ return False
+
+
+class TestUbuntuDrivers(CiTestCase):
+ cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}}
+ install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia']
+
+ with_logs = True
+
+ @skipUnlessJsonSchema()
+ def test_schema_requires_boolean_for_license_accepted(self):
+ with self.assertRaisesRegex(
+ SchemaValidationError, ".*license-accepted.*TRUE.*boolean"):
+ validate_cloudconfig_schema(
+ {'drivers': {'nvidia': {'license-accepted': "TRUE"}}},
+ schema=drivers.schema, strict=True)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def _assert_happy_path_taken(
+ self, config, m_which, m_subp, m_tmp):
+ """Positive path test through handle. Package should be installed."""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+ drivers.handle('ubuntu_drivers', config, myCloud, None, None)
+ self.assertEqual([mock.call(['ubuntu-drivers-common'])],
+ myCloud.distro.install_packages.call_args_list)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu)],
+ m_subp.call_args_list)
+
+ def test_handle_does_package_install(self):
+ self._assert_happy_path_taken(self.cfg_accepted)
+
+ def test_trueish_strings_are_considered_approval(self):
+ for true_value in ['yes', 'true', 'on', '1']:
+ new_config = copy.deepcopy(self.cfg_accepted)
+ new_config['drivers']['nvidia']['license-accepted'] = true_value
+ self._assert_happy_path_taken(new_config)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp")
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def test_handle_raises_error_if_no_drivers_found(
+ self, m_which, m_subp, m_tmp):
+ """If ubuntu-drivers doesn't install any drivers, raise an error."""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+
+ def fake_subp(cmd):
+ if cmd[0].startswith(tdir):
+ return
+ raise ProcessExecutionError(
+ stdout='No drivers found for installation.\n', exit_code=1)
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(Exception):
+ drivers.handle(
+ 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
+ self.assertEqual([mock.call(['ubuntu-drivers-common'])],
+ myCloud.distro.install_packages.call_args_list)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu)],
+ m_subp.call_args_list)
+ self.assertIn('ubuntu-drivers found no drivers for installation',
+ self.logs.getvalue())
+
+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def _assert_inert_with_config(self, config, m_which, m_subp):
+ """Helper to reduce repetition when testing negative cases"""
+ myCloud = mock.MagicMock()
+ drivers.handle('ubuntu_drivers', config, myCloud, None, None)
+ self.assertEqual(0, myCloud.distro.install_packages.call_count)
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_handle_inert_if_license_not_accepted(self):
+ """Ensure we don't do anything if the license is rejected."""
+ self._assert_inert_with_config(
+ {'drivers': {'nvidia': {'license-accepted': False}}})
+
+ def test_handle_inert_if_garbage_in_license_field(self):
+ """Ensure we don't do anything if unknown text is in license field."""
+ self._assert_inert_with_config(
+ {'drivers': {'nvidia': {'license-accepted': 'garbage'}}})
+
+ def test_handle_inert_if_no_license_key(self):
+ """Ensure we don't do anything if no license key."""
+ self._assert_inert_with_config({'drivers': {'nvidia': {}}})
+
+ def test_handle_inert_if_no_nvidia_key(self):
+ """Ensure we don't do anything if other license accepted."""
+ self._assert_inert_with_config(
+ {'drivers': {'acme': {'license-accepted': True}}})
+
+ def test_handle_inert_if_string_given(self):
+ """Ensure we don't do anything if string refusal given."""
+ for false_value in ['no', 'false', 'off', '0']:
+ self._assert_inert_with_config(
+ {'drivers': {'nvidia': {'license-accepted': false_value}}})
+
+ @mock.patch(MPATH + "install_drivers")
+ def test_handle_no_drivers_does_nothing(self, m_install_drivers):
+ """If no 'drivers' key in the config, nothing should be done."""
+ myCloud = mock.MagicMock()
+ myLog = mock.MagicMock()
+ drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None)
+ self.assertIn('Skipping module named',
+ myLog.debug.call_args_list[0][0][0])
+ self.assertEqual(0, m_install_drivers.call_count)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
+ @mock.patch(MPATH + "util.which", return_value=True)
+ def test_install_drivers_no_install_if_present(
+ self, m_which, m_subp, m_tmp):
+ """If 'ubuntu-drivers' is present, no package install should occur."""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ pkg_install = mock.MagicMock()
+ drivers.install_drivers(self.cfg_accepted['drivers'],
+ pkg_install_func=pkg_install)
+ self.assertEqual(0, pkg_install.call_count)
+ self.assertEqual([mock.call('ubuntu-drivers')],
+ m_which.call_args_list)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu)],
+ m_subp.call_args_list)
+
+ def test_install_drivers_rejects_invalid_config(self):
+ """install_drivers should raise TypeError if not given a config dict"""
+ pkg_install = mock.MagicMock()
+ with self.assertRaisesRegex(TypeError, ".*expected dict.*"):
+ drivers.install_drivers("mystring", pkg_install_func=pkg_install)
+ self.assertEqual(0, pkg_install.call_count)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp")
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
+ self, m_which, m_subp, m_tmp):
+ """Older ubuntu-drivers versions should emit message and raise error"""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+
+ def fake_subp(cmd):
+ if cmd[0].startswith(tdir):
+ return
+ raise ProcessExecutionError(
+ stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2)
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(Exception):
+ drivers.handle(
+ 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
+ self.assertEqual([mock.call(['ubuntu-drivers-common'])],
+ myCloud.distro.install_packages.call_args_list)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu)],
+ m_subp.call_args_list)
+ self.assertIn('WARNING: the available version of ubuntu-drivers is'
+ ' too old to perform requested driver installation',
+ self.logs.getvalue())
+
+
+# Sub-class TestUbuntuDrivers to run the same test cases, but with a version
+class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
+ cfg_accepted = {
+ 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}}
+ install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+ version_none_cfg = {
+ 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}}
+ drivers.handle(
+ 'ubuntu_drivers', version_none_cfg, myCloud, None, None)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])],
+ m_subp.call_args_list)
+
+ def test_specifying_a_version_doesnt_override_license_acceptance(self):
+ self._assert_inert_with_config({
+ 'drivers': {'nvidia': {'license-accepted': False,
+ 'version': '123'}}
+ })
+
+# vi: ts=4 expandtab
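
AnyTempScriptAndDebconfFile above relies on a handy unittest.mock idiom: any object with a custom __eq__ can stand in for "an argument matching this shape" when recorded calls are compared. A minimal standalone example of the same idiom, with a made-up matcher and arguments:

    # Minimal standalone example of the custom-__eq__ matcher idiom used by
    # AnyTempScriptAndDebconfFile; the matcher and arguments here are made up.
    from unittest import mock

    class AnyStringEndingWith(object):
        def __init__(self, suffix):
            self.suffix = suffix

        def __eq__(self, other):
            return isinstance(other, str) and other.endswith(self.suffix)

    m = mock.Mock()
    m('/tmp/abc123/setup.sh', '/tmp/abc123/nvidia.template')
    m.assert_called_once_with(
        AnyStringEndingWith('.sh'), AnyStringEndingWith('.template'))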
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
index ba0afae3..f620b597 100644
--- a/cloudinit/config/tests/test_users_groups.py
+++ b/cloudinit/config/tests/test_users_groups.py
@@ -46,6 +46,34 @@ class TestHandleUsersGroups(CiTestCase):
mock.call('me2', default=False)])
m_group.assert_not_called()
+ @mock.patch('cloudinit.distros.freebsd.Distro.create_group')
+ @mock.patch('cloudinit.distros.freebsd.Distro.create_user')
+ def test_handle_users_in_cfg_calls_create_users_on_bsd(
+ self,
+ m_fbsd_user,
+ m_fbsd_group,
+ m_linux_user,
+ m_linux_group,
+ ):
+        """When users are in the config, create them with freebsd.create_user."""
+ cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'freebsd', 'lock_passwd': True,
+ 'groups': ['wheel'],
+ 'shell': '/bin/tcsh'}}
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro='freebsd', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_fbsd_user.call_args_list,
+ [mock.call('freebsd', groups='wheel', lock_passwd=True,
+ shell='/bin/tcsh'),
+ mock.call('me2', default=False)])
+ m_fbsd_group.assert_not_called()
+ m_linux_group.assert_not_called()
+ m_linux_user.assert_not_called()
+
def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
"""When ssh_redirect_user is True pass default user and cloud keys."""
cfg = {