Diffstat (limited to 'cloudinit/config')
28 files changed, 1208 insertions, 273 deletions
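The largest single change in this diffstat is the cc_ntp.py rework, which adds an ``ntp_client`` selector and per-client ``config`` overrides to the ``ntp`` schema. As a quick orientation before the raw diff, the sketch below shows the new keys; it is adapted from the schema examples added in cc_ntp.py, and the ``config:`` block simply restates the built-in Ubuntu chrony defaults for illustration rather than anything a user would normally need to supply:

  #cloud-config
  # Select chrony instead of the distro-preferred NTP client (new ntp_client key)
  ntp:
    enabled: true
    ntp_client: chrony    # built-in clients: chrony, ntp, ntpdate, systemd-timesyncd
    # Optional overrides; keys mirror NTP_CLIENT_CONFIG / DISTRO_CLIENT_CONFIG below
    config:
      confpath: /etc/chrony/chrony.conf
      check_exe: chronyd
      packages: [chrony]
      service_name: chrony
    pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org]
    servers:
      - ntp.server.local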
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 5b9cbca0..e18944ec 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults -to ``^[\w-]+:\w`` +to ``^[\\w-]+:\\w`` **Add source list entries:** @@ -378,7 +378,7 @@ def apply_debconf_selections(cfg, target=None): # get a complete list of packages listed in input pkgs_cfgd = set() - for key, content in selsets.items(): + for _key, content in selsets.items(): for line in content.splitlines(): if line.startswith("#"): continue diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 233da1ef..6813f534 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -42,7 +42,13 @@ schema = { .. note:: bootcmd should only be used for things that could not be done later - in the boot process."""), + in the boot process. + + .. note:: + + when writing files, do not use /tmp dir as it races with + systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. + """), 'distros': distros, 'examples': [dedent("""\ bootcmd: @@ -63,7 +69,6 @@ schema = { 'additionalProperties': False, 'minItems': 1, 'required': [], - 'uniqueItems': True } } } diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index c56319b5..885b3138 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -32,13 +32,23 @@ from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject'] +REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject'] +REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254'] def handle(name, cfg, _cloud, log, _args): disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) if disabled: - util.subp(REJECT_CMD, capture=False) + reject_cmd = None + if util.which('ip'): + reject_cmd = REJECT_CMD_IP + elif util.which('ifconfig'): + reject_cmd = REJECT_CMD_IF + else: + log.error(('Neither "route" nor "ip" command found, unable to ' + 'manipulate routing table')) + return + util.subp(reject_cmd, capture=False) else: log.debug(("Skipping module named %s," " disabling the ec2 route not enabled"), name) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index c3e8c484..943089e0 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -680,13 +680,13 @@ def read_parttbl(device): reliable way to probe the partition table. 
""" blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] - udevadm_settle() + util.udevadm_settle() try: util.subp(blkdev_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) - udevadm_settle() + util.udevadm_settle() def exec_mkpart_mbr(device, layout): @@ -737,14 +737,10 @@ def exec_mkpart(table_type, device, layout): return get_dyn_func("exec_mkpart_%s", table_type, device, layout) -def udevadm_settle(): - util.subp(['udevadm', 'settle']) - - def assert_and_settle_device(device): """Assert that device exists and settle so it is fully recognized.""" if not os.path.exists(device): - udevadm_settle() + util.udevadm_settle() if not os.path.exists(device): raise RuntimeError("Device %s did not exist and was not created " "with a udevamd settle." % device) @@ -752,7 +748,7 @@ def assert_and_settle_device(device): # Whether or not the device existed above, it is possible that udev # events that would populate udev database (for reading by lsdname) have # not yet finished. So settle again. - udevadm_settle() + util.udevadm_settle() def mkpart(device, definition): diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 69dc2d5e..eb9fbe66 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -43,7 +43,7 @@ def is_upstart_system(): del myenv['UPSTART_SESSION'] check_cmd = ['initctl', 'version'] try: - (out, err) = util.subp(check_cmd, env=myenv) + (out, _err) = util.subp(check_cmd, env=myenv) return 'upstart' in out except util.ProcessExecutionError as e: LOG.debug("'%s' returned '%s', not using upstart", diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 09374d2e..24a8ebea 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly. 
domain: <domain> """ +from cloudinit import log as logging from cloudinit import util import os distros = ['ubuntu'] +LOG = logging.getLogger(__name__) + +_DEFAULT_NETWORK_NAME = "lxdbr0" + def handle(name, cfg, cloud, log, args): # Get config @@ -99,6 +104,7 @@ def handle(name, cfg, cloud, log, args): 'network_address', 'network_port', 'storage_backend', 'storage_create_device', 'storage_create_loop', 'storage_pool', 'trust_password') + util.subp(['lxd', 'waitready', '--timeout=300']) cmd = ['lxd', 'init', '--auto'] for k in init_keys: if init_cfg.get(k): @@ -109,6 +115,7 @@ def handle(name, cfg, cloud, log, args): # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" if bridge_cfg: + net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) if os.path.exists("/etc/default/lxd-bridge") \ and util.which(dconf_comm): # Bridge configured through packaging @@ -135,15 +142,18 @@ def handle(name, cfg, cloud, log, args): else: # Built-in LXD bridge support cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) + maybe_cleanup_default( + net_name=net_name, did_init=bool(init_cfg), + create=bool(cmd_create), attach=bool(cmd_attach)) if cmd_create: log.debug("Creating lxd bridge: %s" % " ".join(cmd_create)) - util.subp(cmd_create) + _lxc(cmd_create) if cmd_attach: log.debug("Setting up default lxd bridge: %s" % " ".join(cmd_create)) - util.subp(cmd_attach) + _lxc(cmd_attach) elif bridge_cfg: raise RuntimeError( @@ -204,10 +214,10 @@ def bridge_to_cmd(bridge_cfg): if bridge_cfg.get("mode") == "none": return None, None - bridge_name = bridge_cfg.get("name", "lxdbr0") + bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) cmd_create = [] - cmd_attach = ["lxc", "network", "attach-profile", bridge_name, - "default", "eth0", "--force-local"] + cmd_attach = ["network", "attach-profile", bridge_name, + "default", "eth0"] if bridge_cfg.get("mode") == "existing": return None, cmd_attach @@ -215,7 +225,7 @@ def bridge_to_cmd(bridge_cfg): if bridge_cfg.get("mode") != "new": raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) - cmd_create = ["lxc", "network", "create", bridge_name] + cmd_create = ["network", "create", bridge_name] if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): cmd_create.append("ipv4.address=%s/%s" % @@ -247,8 +257,49 @@ def bridge_to_cmd(bridge_cfg): if bridge_cfg.get("domain"): cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) - cmd_create.append("--force-local") - return cmd_create, cmd_attach + +def _lxc(cmd): + env = {'LC_ALL': 'C', + 'HOME': os.environ.get('HOME', '/root'), + 'USER': os.environ.get('USER', 'root')} + util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) + + +def maybe_cleanup_default(net_name, did_init, create, attach, + profile="default", nic_name="eth0"): + """Newer versions of lxc (3.0.1+) create a lxdbr0 network when + 'lxd init --auto' is run. Older versions did not. + + By removing ay that lxd-init created, we simply leave the add/attach + code in-tact. + + https://github.com/lxc/lxd/issues/4649""" + if net_name != _DEFAULT_NETWORK_NAME or not did_init: + return + + fail_assume_enoent = "failed. Assuming it did not exist." + succeeded = "succeeded." 
+ if create: + msg = "Deletion of lxd network '%s' %s" + try: + _lxc(["network", "delete", net_name]) + LOG.debug(msg, net_name, succeeded) + except util.ProcessExecutionError as e: + if e.exit_code != 1: + raise e + LOG.debug(msg, net_name, fail_assume_enoent) + + if attach: + msg = "Removal of device '%s' from profile '%s' %s" + try: + _lxc(["profile", "device", "remove", profile, nic_name]) + LOG.debug(msg, nic_name, profile, succeeded) + except util.ProcessExecutionError as e: + if e.exit_code != 1: + raise e + LOG.debug(msg, nic_name, profile, fail_assume_enoent) + + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index f14a4fc5..339baba9 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) FSTAB_PATH = "/etc/fstab" +MNT_COMMENT = "comment=cloudconfig" LOG = logging.getLogger(__name__) @@ -232,8 +233,8 @@ def setup_swapfile(fname, size=None, maxsize=None): if str(size).lower() == "auto": try: memsize = util.read_meminfo()['total'] - except IOError as e: - LOG.debug("Not creating swap. failed to read meminfo") + except IOError: + LOG.debug("Not creating swap: failed to read meminfo") return util.ensure_dir(tdir) @@ -280,17 +281,17 @@ def handle_swapcfg(swapcfg): if os.path.exists(fname): if not os.path.exists("/proc/swaps"): - LOG.debug("swap file %s existed. no /proc/swaps. Being safe.", - fname) + LOG.debug("swap file %s exists, but no /proc/swaps exists, " + "being safe", fname) return fname try: for line in util.load_file("/proc/swaps").splitlines(): if line.startswith(fname + " "): - LOG.debug("swap file %s already in use.", fname) + LOG.debug("swap file %s already in use", fname) return fname - LOG.debug("swap file %s existed, but not in /proc/swaps", fname) + LOG.debug("swap file %s exists, but not in /proc/swaps", fname) except Exception: - LOG.warning("swap file %s existed. Error reading /proc/swaps", + LOG.warning("swap file %s exists. 
Error reading /proc/swaps", fname) return fname @@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args): LOG.debug("mounts configuration is %s", cfgmnt) + fstab_lines = [] + fstab_devs = {} + fstab_removed = [] + + for line in util.load_file(FSTAB_PATH).splitlines(): + if MNT_COMMENT in line: + fstab_removed.append(line) + continue + + try: + toks = WS.split(line) + except Exception: + pass + fstab_devs[toks[0]] = line + fstab_lines.append(line) + for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): @@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args): start = str(cfgmnt[i][0]) sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + if sanitized != start: + log.debug("changed %s => %s" % (start, sanitized)) + if sanitized is None: - log.debug("Ignorming nonexistant named mount %s", start) + log.debug("Ignoring nonexistent named mount %s", start) + continue + elif sanitized in fstab_devs: + log.info("Device %s already defined in fstab: %s", + sanitized, fstab_devs[sanitized]) continue - if sanitized != start: - log.debug("changed %s => %s" % (start, sanitized)) cfgmnt[i][0] = sanitized # in case the user did not quote a field (likely fs-freq, fs_passno) @@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args): for defmnt in defmnts: start = defmnt[0] sanitized = sanitize_devname(start, cloud.device_name_to_device, log) - if sanitized is None: - log.debug("Ignoring nonexistant default named mount %s", start) - continue if sanitized != start: log.debug("changed default device %s => %s" % (start, sanitized)) + + if sanitized is None: + log.debug("Ignoring nonexistent default named mount %s", start) + continue + elif sanitized in fstab_devs: + log.debug("Device %s already defined in fstab: %s", + sanitized, fstab_devs[sanitized]) + continue + defmnt[0] = sanitized cfgmnt_has = False @@ -397,7 +425,7 @@ def handle(_name, cfg, cloud, log, _args): actlist = [] for x in cfgmnt: if x[1] is None: - log.debug("Skipping non-existent device named %s", x[0]) + log.debug("Skipping nonexistent device named %s", x[0]) else: actlist.append(x) @@ -406,34 +434,21 @@ def handle(_name, cfg, cloud, log, _args): actlist.append([swapret, "none", "swap", "sw", "0", "0"]) if len(actlist) == 0: - log.debug("No modifications to fstab needed.") + log.debug("No modifications to fstab needed") return - comment = "comment=cloudconfig" cc_lines = [] needswap = False dirs = [] for line in actlist: # write 'comment' in the fs_mntops, entry, claiming this - line[3] = "%s,%s" % (line[3], comment) + line[3] = "%s,%s" % (line[3], MNT_COMMENT) if line[2] == "swap": needswap = True if line[1].startswith("/"): dirs.append(line[1]) cc_lines.append('\t'.join(line)) - fstab_lines = [] - removed = [] - for line in util.load_file(FSTAB_PATH).splitlines(): - try: - toks = WS.split(line) - if toks[3].find(comment) != -1: - removed.append(line) - continue - except Exception: - pass - fstab_lines.append(line) - for d in dirs: try: util.ensure_dir(d) @@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Failed to make '%s' config-mount", d) sadds = [WS.sub(" ", n) for n in cc_lines] - sdrops = [WS.sub(" ", n) for n in removed] + sdrops = [WS.sub(" ", n) for n in fstab_removed] sops = (["- " + drop for drop in sdrops if drop not in sadds] + ["+ " + add for add in sadds if add not in sdrops]) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index cbd0237d..9e074bda 100644 --- a/cloudinit/config/cc_ntp.py +++ 
b/cloudinit/config/cc_ntp.py @@ -10,20 +10,95 @@ from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE +from cloudinit import temp_utils from cloudinit import templater from cloudinit import type_utils from cloudinit import util +import copy import os +import six from textwrap import dedent LOG = logging.getLogger(__name__) frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' -TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf' NR_POOL_SERVERS = 4 -distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu'] +distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu'] + +NTP_CLIENT_CONFIG = { + 'chrony': { + 'check_exe': 'chronyd', + 'confpath': '/etc/chrony.conf', + 'packages': ['chrony'], + 'service_name': 'chrony', + 'template_name': 'chrony.conf.{distro}', + 'template': None, + }, + 'ntp': { + 'check_exe': 'ntpd', + 'confpath': NTP_CONF, + 'packages': ['ntp'], + 'service_name': 'ntp', + 'template_name': 'ntp.conf.{distro}', + 'template': None, + }, + 'ntpdate': { + 'check_exe': 'ntpdate', + 'confpath': NTP_CONF, + 'packages': ['ntpdate'], + 'service_name': 'ntpdate', + 'template_name': 'ntp.conf.{distro}', + 'template': None, + }, + 'systemd-timesyncd': { + 'check_exe': '/lib/systemd/systemd-timesyncd', + 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf', + 'packages': [], + 'service_name': 'systemd-timesyncd', + 'template_name': 'timesyncd.conf', + 'template': None, + }, +} + +# This is Distro-specific configuration overrides of the base config +DISTRO_CLIENT_CONFIG = { + 'debian': { + 'chrony': { + 'confpath': '/etc/chrony/chrony.conf', + }, + }, + 'opensuse': { + 'chrony': { + 'service_name': 'chronyd', + }, + 'ntp': { + 'confpath': '/etc/ntp.conf', + 'service_name': 'ntpd', + }, + 'systemd-timesyncd': { + 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + }, + }, + 'sles': { + 'chrony': { + 'service_name': 'chronyd', + }, + 'ntp': { + 'confpath': '/etc/ntp.conf', + 'service_name': 'ntpd', + }, + 'systemd-timesyncd': { + 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + }, + }, + 'ubuntu': { + 'chrony': { + 'confpath': '/etc/chrony/chrony.conf', + }, + }, +} # The schema definition for each cloud-config module is a strict contract for @@ -48,7 +123,34 @@ schema = { 'distros': distros, 'examples': [ dedent("""\ + # Override ntp with chrony configuration on Ubuntu + ntp: + enabled: true + ntp_client: chrony # Uses cloud-init default chrony configuration + """), + dedent("""\ + # Provide a custom ntp client configuration ntp: + enabled: true + ntp_client: myntpclient + config: + confpath: /etc/myntpclient/myntpclient.conf + check_exe: myntpclientd + packages: + - myntpclient + service_name: myntpclient + template: | + ## template:jinja + # My NTP Client config + {% if pools -%}# pools{% endif %} + {% for pool in pools -%} + pool {{pool}} iburst + {% endfor %} + {%- if servers %}# servers + {% endif %} + {% for server in servers -%} + server {{server}} iburst + {% endfor %} pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org] servers: - ntp.server.local @@ -83,79 +185,159 @@ schema = { List of ntp servers. If both pools and servers are empty, 4 default pool servers will be provided with the format ``{0-3}.{distro}.pool.ntp.org``.""") - } + }, + 'ntp_client': { + 'type': 'string', + 'default': 'auto', + 'description': dedent("""\ + Name of an NTP client to use to configure system NTP. 
+ When unprovided or 'auto' the default client preferred + by the distribution will be used. The following + built-in client names can be used to override existing + configuration defaults: chrony, ntp, ntpdate, + systemd-timesyncd."""), + }, + 'enabled': { + 'type': 'boolean', + 'default': True, + 'description': dedent("""\ + Attempt to enable ntp clients if set to True. If set + to False, ntp client will not be configured or + installed"""), + }, + 'config': { + 'description': dedent("""\ + Configuration settings or overrides for the + ``ntp_client`` specified."""), + 'type': ['object'], + 'properties': { + 'confpath': { + 'type': 'string', + 'description': dedent("""\ + The path to where the ``ntp_client`` + configuration is written."""), + }, + 'check_exe': { + 'type': 'string', + 'description': dedent("""\ + The executable name for the ``ntp_client``. + For example, ntp service ``check_exe`` is + 'ntpd' because it runs the ntpd binary."""), + }, + 'packages': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + 'uniqueItems': True, + 'description': dedent("""\ + List of packages needed to be installed for the + selected ``ntp_client``."""), + }, + 'service_name': { + 'type': 'string', + 'description': dedent("""\ + The systemd or sysvinit service name used to + start and stop the ``ntp_client`` + service."""), + }, + 'template': { + 'type': 'string', + 'description': dedent("""\ + Inline template allowing users to define their + own ``ntp_client`` configuration template. + The value must start with '## template:jinja' + to enable use of templating support. + """), + }, + }, + # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override + # of builtin client values. + 'required': [], + 'minProperties': 1, # If we have config, define something + 'additionalProperties': False + }, }, 'required': [], 'additionalProperties': False } } } - -__doc__ = get_schema_doc(schema) # Supplement python help() +REQUIRED_NTP_CONFIG_KEYS = frozenset([ + 'check_exe', 'confpath', 'packages', 'service_name']) -def handle(name, cfg, cloud, log, _args): - """Enable and configure ntp.""" - if 'ntp' not in cfg: - LOG.debug( - "Skipping module named %s, not present or disabled by cfg", name) - return - ntp_cfg = cfg['ntp'] - if ntp_cfg is None: - ntp_cfg = {} # Allow empty config which will install the package +__doc__ = get_schema_doc(schema) # Supplement python help() - # TODO drop this when validate_cloudconfig_schema is strict=True - if not isinstance(ntp_cfg, (dict)): - raise RuntimeError( - "'ntp' key existed in config, but not a dictionary type," - " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) - validate_cloudconfig_schema(cfg, schema) - if ntp_installable(): - service_name = 'ntp' - confpath = NTP_CONF - template_name = None - packages = ['ntp'] - check_exe = 'ntpd' - else: - service_name = 'systemd-timesyncd' - confpath = TIMESYNCD_CONF - template_name = 'timesyncd.conf' - packages = [] - check_exe = '/lib/systemd/systemd-timesyncd' - - rename_ntp_conf() - # ensure when ntp is installed it has a configuration file - # to use instead of starting up with packaged defaults - write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name) - install_ntp(cloud.distro.install_packages, packages=packages, - check_exe=check_exe) +def distro_ntp_client_configs(distro): + """Construct a distro-specific ntp client config dictionary by merging + distro specific changes into base config. 
- try: - reload_ntp(service_name, systemd=cloud.distro.uses_systemd()) - except util.ProcessExecutionError as e: - LOG.exception("Failed to reload/start ntp service: %s", e) - raise + @param distro: String providing the distro class name. + @returns: Dict of distro configurations for ntp clients. + """ + dcfg = DISTRO_CLIENT_CONFIG + cfg = copy.copy(NTP_CLIENT_CONFIG) + if distro in dcfg: + cfg = util.mergemanydict([cfg, dcfg[distro]], reverse=True) + return cfg -def ntp_installable(): - """Check if we can install ntp package +def select_ntp_client(ntp_client, distro): + """Determine which ntp client is to be used, consulting the distro + for its preference. - Ubuntu-Core systems do not have an ntp package available, so - we always return False. Other systems require package managers to install - the ntp package If we fail to find one of the package managers, then we - cannot install ntp. + @param ntp_client: String name of the ntp client to use. + @param distro: Distro class instance. + @returns: Dict of the selected ntp client or {} if none selected. """ - if util.system_is_snappy(): - return False - if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])): - return True + # construct distro-specific ntp_client_config dict + distro_cfg = distro_ntp_client_configs(distro.name) + + # user specified client, return its config + if ntp_client and ntp_client != 'auto': + LOG.debug('Selected NTP client "%s" via user-data configuration', + ntp_client) + return distro_cfg.get(ntp_client, {}) + + # default to auto if unset in distro + distro_ntp_client = distro.get_option('ntp_client', 'auto') + + clientcfg = {} + if distro_ntp_client == "auto": + for client in distro.preferred_ntp_clients: + cfg = distro_cfg.get(client) + if util.which(cfg.get('check_exe')): + LOG.debug('Selected NTP client "%s", already installed', + client) + clientcfg = cfg + break + + if not clientcfg: + client = distro.preferred_ntp_clients[0] + LOG.debug( + 'Selected distro preferred NTP client "%s", not yet installed', + client) + clientcfg = distro_cfg.get(client) + else: + LOG.debug('Selected NTP client "%s" via distro system config', + distro_ntp_client) + clientcfg = distro_cfg.get(distro_ntp_client, {}) + + return clientcfg - return False +def install_ntp_client(install_func, packages=None, check_exe="ntpd"): + """Install ntp client package if not already installed. -def install_ntp(install_func, packages=None, check_exe="ntpd"): + @param install_func: function. This parameter is invoked with the contents + of the packages parameter. + @param packages: list. This parameter defaults to ['ntp']. + @param check_exe: string. The name of a binary that indicates the package + the specified package is already installed. + """ if util.which(check_exe): return if packages is None: @@ -164,15 +346,23 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): install_func(packages) -def rename_ntp_conf(config=None): - """Rename any existing ntp.conf file""" - if config is None: # For testing - config = NTP_CONF - if os.path.exists(config): - util.rename(config, config + ".dist") +def rename_ntp_conf(confpath=None): + """Rename any existing ntp client config file + + @param confpath: string. Specify a path to an existing ntp client + configuration file. + """ + if os.path.exists(confpath): + util.rename(confpath, confpath + ".dist") def generate_server_names(distro): + """Generate a list of server names to populate an ntp client configuration + file. + + @param distro: string. 
Specify the distro name + @returns: list: A list of strings representing ntp servers for this distro. + """ names = [] pool_distro = distro # For legal reasons x.pool.sles.ntp.org does not exist, @@ -185,34 +375,60 @@ def generate_server_names(distro): return names -def write_ntp_config_template(cfg, cloud, path, template=None): - servers = cfg.get('servers', []) - pools = cfg.get('pools', []) +def write_ntp_config_template(distro_name, servers=None, pools=None, + path=None, template_fn=None, template=None): + """Render a ntp client configuration for the specified client. + + @param distro_name: string. The distro class name. + @param servers: A list of strings specifying ntp servers. Defaults to empty + list. + @param pools: A list of strings specifying ntp pools. Defaults to empty + list. + @param path: A string to specify where to write the rendered template. + @param template_fn: A string to specify the template source file. + @param template: A string specifying the contents of the template. This + content will be written to a temporary file before being used to render + the configuration file. + + @raises: ValueError when path is None. + @raises: ValueError when template_fn is None and template is None. + """ + if not servers: + servers = [] + if not pools: + pools = [] if len(servers) == 0 and len(pools) == 0: - pools = generate_server_names(cloud.distro.name) + pools = generate_server_names(distro_name) LOG.debug( 'Adding distro default ntp pool servers: %s', ','.join(pools)) - params = { - 'servers': servers, - 'pools': pools, - } + if not path: + raise ValueError('Invalid value for path parameter') - if template is None: - template = 'ntp.conf.%s' % cloud.distro.name + if not template_fn and not template: + raise ValueError('Not template_fn or template provided') - template_fn = cloud.get_template_filename(template) - if not template_fn: - template_fn = cloud.get_template_filename('ntp.conf') - if not template_fn: - raise RuntimeError( - 'No template found, not rendering {path}'.format(path=path)) + params = {'servers': servers, 'pools': pools} + if template: + tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + template_fn = tfile[1] # filepath is second item in tuple + util.write_file(template_fn, content=template) templater.render_to_file(template_fn, path, params) + # clean up temporary template + if template: + util.del_file(template_fn) def reload_ntp(service, systemd=False): + """Restart or reload an ntp system service. + + @param service: A string specifying the name of the service to be affected. + @param systemd: A boolean indicating if the distro uses systemd, defaults + to False. + @returns: A tuple of stdout, stderr results from executing the action. + """ if systemd: cmd = ['systemctl', 'reload-or-restart', service] else: @@ -220,4 +436,117 @@ def reload_ntp(service, systemd=False): util.subp(cmd, capture=True) +def supplemental_schema_validation(ntp_config): + """Validate user-provided ntp:config option values. + + This function supplements flexible jsonschema validation with specific + value checks to aid in triage of invalid user-provided configuration. + + @param ntp_config: Dictionary of configuration value under 'ntp'. + + @raises: ValueError describing invalid values provided. 
+ """ + errors = [] + missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys())) + if missing: + keys = ', '.join(sorted(missing)) + errors.append( + 'Missing required ntp:config keys: {keys}'.format(keys=keys)) + elif not any([ntp_config.get('template'), + ntp_config.get('template_name')]): + errors.append( + 'Either ntp:config:template or ntp:config:template_name values' + ' are required') + for key, value in sorted(ntp_config.items()): + keypath = 'ntp:config:' + key + if key == 'confpath': + if not all([value, isinstance(value, six.string_types)]): + errors.append( + 'Expected a config file path {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + elif key == 'packages': + if not isinstance(value, list): + errors.append( + 'Expected a list of required package names for {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + elif key in ('template', 'template_name'): + if value is None: # Either template or template_name can be none + continue + if not isinstance(value, six.string_types): + errors.append( + 'Expected a string type for {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + elif not isinstance(value, six.string_types): + errors.append( + 'Expected a string type for {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + + if errors: + raise ValueError(r'Invalid ntp configuration:\n{errors}'.format( + errors='\n'.join(errors))) + + +def handle(name, cfg, cloud, log, _args): + """Enable and configure ntp.""" + if 'ntp' not in cfg: + LOG.debug( + "Skipping module named %s, not present or disabled by cfg", name) + return + ntp_cfg = cfg['ntp'] + if ntp_cfg is None: + ntp_cfg = {} # Allow empty config which will install the package + + # TODO drop this when validate_cloudconfig_schema is strict=True + if not isinstance(ntp_cfg, (dict)): + raise RuntimeError( + "'ntp' key existed in config, but not a dictionary type," + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) + + validate_cloudconfig_schema(cfg, schema) + + # Allow users to explicitly enable/disable + enabled = ntp_cfg.get('enabled', True) + if util.is_false(enabled): + LOG.debug("Skipping module named %s, disabled by cfg", name) + return + + # Select which client is going to be used and get the configuration + ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), + cloud.distro) + + # Allow user ntp config to override distro configurations + ntp_client_config = util.mergemanydict( + [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) + + supplemental_schema_validation(ntp_client_config) + rename_ntp_conf(confpath=ntp_client_config.get('confpath')) + + template_fn = None + if not ntp_client_config.get('template'): + template_name = ( + ntp_client_config.get('template_name').replace('{distro}', + cloud.distro.name)) + template_fn = cloud.get_template_filename(template_name) + if not template_fn: + msg = ('No template found, not rendering %s' % + ntp_client_config.get('template_name')) + raise RuntimeError(msg) + + write_ntp_config_template(cloud.distro.name, + servers=ntp_cfg.get('servers', []), + pools=ntp_cfg.get('pools', []), + path=ntp_client_config.get('confpath'), + template_fn=template_fn, + template=ntp_client_config.get('template')) + + install_ntp_client(cloud.distro.install_packages, + packages=ntp_client_config['packages'], + check_exe=ntp_client_config['check_exe']) + try: + reload_ntp(ntp_client_config['service_name'], + systemd=cloud.distro.uses_systemd()) + except 
util.ProcessExecutionError as e: + LOG.exception("Failed to reload/start ntp service: %s", e) + raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 878069b7..3be0d1c1 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -41,6 +41,7 @@ keys to post. Available keys are: """ from cloudinit import templater +from cloudinit import url_helper from cloudinit import util from cloudinit.settings import PER_INSTANCE @@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args): } url = templater.render_string(url, url_params) try: - util.read_file_or_url(url, data=real_submit_keys, - retries=tries, sec_between=3, - ssl_details=util.fetch_ssl_details(cloud.paths)) + url_helper.read_file_or_url( + url, data=real_submit_keys, retries=tries, sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths)) except Exception: util.logexc(log, "Failed to post phone home data to %s in %s tries", url, tries) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 4da3a588..50b37470 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -74,7 +74,7 @@ def givecmdline(pid): if util.is_FreeBSD(): (output, _err) = util.subp(['procstat', '-c', str(pid)]) line = output.splitlines()[1] - m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line) + m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) return m.group(2) else: return util.load_file("/proc/%s/cmdline" % pid) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 013e69b5..2edddd0c 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth): def _resize_ufs(mount_point, devpth): - return ('growfs', devpth) + return ('growfs', '-y', devpth) def _resize_zfs(mount_point, devpth): @@ -89,13 +89,11 @@ def _resize_zfs(mount_point, devpth): def _get_dumpfs_output(mount_point): - dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) - return dumpfs_res + return util.subp(['dumpfs', '-m', mount_point])[0] def _get_gpart_output(part): - gpart_res, err = util.subp(['gpart', 'show', part]) - return gpart_res + return util.subp(['gpart', 'show', part])[0] def _can_skip_resize_ufs(mount_point, devpth): @@ -113,7 +111,7 @@ def _can_skip_resize_ufs(mount_point, devpth): if not line.startswith('#'): newfs_cmd = shlex.split(line) opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' - optlist, args = getopt.getopt(newfs_cmd[1:], opt_value) + optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value) for o, a in optlist: if o == "-s": cur_fs_sz = int(a) diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 530808ce..edee01e5 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -126,7 +126,6 @@ class SubscriptionManager(object): self.enable_repo = self.rhel_cfg.get('enable-repo') self.disable_repo = self.rhel_cfg.get('disable-repo') self.servicelevel = self.rhel_cfg.get('service-level') - self.subman = ['subscription-manager'] def log_success(self, msg): '''Simple wrapper for logging info messages. 
Useful for unittests''' @@ -173,21 +172,12 @@ class SubscriptionManager(object): cmd = ['identity'] try: - self._sub_man_cli(cmd) + _sub_man_cli(cmd) except util.ProcessExecutionError: return False return True - def _sub_man_cli(self, cmd, logstring_val=False): - ''' - Uses the prefered cloud-init subprocess def of util.subp - and runs subscription-manager. Breaking this to a - separate function for later use in mocking and unittests - ''' - cmd = self.subman + cmd - return util.subp(cmd, logstring=logstring_val) - def rhn_register(self): ''' Registers the system by userid and password or activation key @@ -209,8 +199,7 @@ class SubscriptionManager(object): cmd.append("--serverurl={0}".format(self.server_hostname)) try: - return_out, return_err = self._sub_man_cli(cmd, - logstring_val=True) + return_out = _sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -233,8 +222,7 @@ class SubscriptionManager(object): # Attempting to register the system only try: - return_out, return_err = self._sub_man_cli(cmd, - logstring_val=True) + return_out = _sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -257,7 +245,7 @@ class SubscriptionManager(object): .format(self.servicelevel)] try: - return_out, return_err = self._sub_man_cli(cmd) + return_out = _sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: if e.stdout.rstrip() != '': for line in e.stdout.split("\n"): @@ -275,7 +263,7 @@ class SubscriptionManager(object): def _set_auto_attach(self): cmd = ['attach', '--auto'] try: - return_out, return_err = self._sub_man_cli(cmd) + return_out = _sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: self.log_warn("Auto-attach failed with: {0}".format(e)) return False @@ -294,12 +282,12 @@ class SubscriptionManager(object): # Get all available pools cmd = ['list', '--available', '--pool-only'] - results, errors = self._sub_man_cli(cmd) + results = _sub_man_cli(cmd)[0] available = (results.rstrip()).split("\n") # Get all consumed pools cmd = ['list', '--consumed', '--pool-only'] - results, errors = self._sub_man_cli(cmd) + results = _sub_man_cli(cmd)[0] consumed = (results.rstrip()).split("\n") return available, consumed @@ -311,14 +299,14 @@ class SubscriptionManager(object): ''' cmd = ['repos', '--list-enabled'] - return_out, return_err = self._sub_man_cli(cmd) + return_out = _sub_man_cli(cmd)[0] active_repos = [] for repo in return_out.split("\n"): if "Repo ID:" in repo: active_repos.append((repo.split(':')[1]).strip()) cmd = ['repos', '--list-disabled'] - return_out, return_err = self._sub_man_cli(cmd) + return_out = _sub_man_cli(cmd)[0] inactive_repos = [] for repo in return_out.split("\n"): @@ -348,7 +336,7 @@ class SubscriptionManager(object): if len(pool_list) > 0: cmd.extend(pool_list) try: - self._sub_man_cli(cmd) + _sub_man_cli(cmd) self.log.debug("Attached the following pools to your " "system: %s", (", ".join(pool_list)) .replace('--pool=', '')) @@ -425,7 +413,7 @@ class SubscriptionManager(object): cmd.extend(enable_list) try: - self._sub_man_cli(cmd) + _sub_man_cli(cmd) except util.ProcessExecutionError as e: self.log_warn("Unable to alter repos due to {0}".format(e)) return False @@ -441,4 +429,15 @@ class SubscriptionManager(object): def is_configured(self): return bool((self.userid and self.password) or self.activation_key) + +def _sub_man_cli(cmd, logstring_val=False): + ''' + Uses the prefered 
cloud-init subprocess def of util.subp + and runs subscription-manager. Breaking this to a + separate function for later use in mocking and unittests + ''' + return util.subp(['subscription-manager'] + cmd, + logstring=logstring_val) + + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index af08788c..27d2366c 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -203,8 +203,8 @@ LOG = logging.getLogger(__name__) COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') HOST_PORT_RE = re.compile( r'^(?P<proto>[@]{0,2})' - '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' - '([:](?P<port>[0-9]+))?$') + r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' + r'([:](?P<port>[0-9]+))?$') def reload_syslog(command=DEF_RELOAD, systemd=False): diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 539cbd5d..1f75d6c5 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -42,6 +42,11 @@ schema = { all commands must be proper yaml, so you have to quote any characters yaml would eat (':' can be problematic) + + .. note:: + + when writing files, do not use /tmp dir as it races with + systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. """), 'distros': distros, 'examples': [dedent("""\ @@ -66,7 +71,6 @@ schema = { 'additionalProperties': False, 'minItems': 1, 'required': [], - 'uniqueItems': True } } } diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index bb24d57f..5ef97376 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -68,16 +68,57 @@ import re import sys from cloudinit.distros import ug_util -from cloudinit import ssh_util +from cloudinit import log as logging +from cloudinit.ssh_util import update_ssh_config from cloudinit import util from string import ascii_letters, digits +LOG = logging.getLogger(__name__) + # We are removing certain 'painful' letters/numbers PW_SET = (''.join([x for x in ascii_letters + digits if x not in 'loLOI01'])) +def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): + """Apply sshd PasswordAuthentication changes. + + @param pw_auth: config setting from 'pw_auth'. + Best given as True, False, or "unchanged". + @param service_cmd: The service command list (['service']) + @param service_name: The name of the sshd service for the system. + + @return: None""" + cfg_name = "PasswordAuthentication" + if service_cmd is None: + service_cmd = ["service"] + + if util.is_true(pw_auth): + cfg_val = 'yes' + elif util.is_false(pw_auth): + cfg_val = 'no' + else: + bmsg = "Leaving ssh config '%s' unchanged." 
% cfg_name + if pw_auth is None or pw_auth.lower() == 'unchanged': + LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) + else: + LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth) + return + + updated = update_ssh_config({cfg_name: cfg_val}) + if not updated: + LOG.debug("No need to restart ssh service, %s not updated.", cfg_name) + return + + if 'systemctl' in service_cmd: + cmd = list(service_cmd) + ["restart", service_name] + else: + cmd = list(service_cmd) + [service_name, "restart"] + util.subp(cmd) + LOG.debug("Restarted the ssh daemon.") + + def handle(_name, cfg, cloud, log, args): if len(args) != 0: # if run from command line, and give args, wipe the chpasswd['list'] @@ -170,65 +211,9 @@ def handle(_name, cfg, cloud, log, args): if expired_users: log.debug("Expired passwords for: %s users", expired_users) - change_pwauth = False - pw_auth = None - if 'ssh_pwauth' in cfg: - if util.is_true(cfg['ssh_pwauth']): - change_pwauth = True - pw_auth = 'yes' - elif util.is_false(cfg['ssh_pwauth']): - change_pwauth = True - pw_auth = 'no' - elif str(cfg['ssh_pwauth']).lower() == 'unchanged': - log.debug('Leaving auth line unchanged') - change_pwauth = False - elif not str(cfg['ssh_pwauth']).strip(): - log.debug('Leaving auth line unchanged') - change_pwauth = False - elif not cfg['ssh_pwauth']: - log.debug('Leaving auth line unchanged') - change_pwauth = False - else: - msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth'] - util.logexc(log, msg) - - if change_pwauth: - replaced_auth = False - - # See: man sshd_config - old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG) - new_lines = [] - i = 0 - for (i, line) in enumerate(old_lines): - # Keywords are case-insensitive and arguments are case-sensitive - if line.key == 'passwordauthentication': - log.debug("Replacing auth line %s with %s", i + 1, pw_auth) - replaced_auth = True - line.value = pw_auth - new_lines.append(line) - - if not replaced_auth: - log.debug("Adding new auth line %s", i + 1) - replaced_auth = True - new_lines.append(ssh_util.SshdConfigLine('', - 'PasswordAuthentication', - pw_auth)) - - lines = [str(l) for l in new_lines] - util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines), - copy_mode=True) - - try: - cmd = cloud.distro.init_cmd # Default service - cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh')) - cmd.append('restart') - if 'systemctl' in cmd: # Switch action ordering - cmd[1], cmd[2] = cmd[2], cmd[1] - cmd = filter(None, cmd) # Remove empty arguments - util.subp(cmd) - log.debug("Restarted the ssh daemon") - except Exception: - util.logexc(log, "Restarting of the ssh daemon failed") + handle_ssh_pwauth( + cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd, + service_name=cloud.distro.get_option('ssh_svcname', 'ssh')) if len(errors): log.debug("%s errors occured, re-raising the last one", len(errors)) diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index 34a53fd4..90724b81 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -110,7 +110,6 @@ schema = { 'additionalItems': False, # Reject non-string & non-list 'minItems': 1, 'minProperties': 1, - 'uniqueItems': True }, 'squashfuse_in_container': { 'type': 'boolean' @@ -204,12 +203,12 @@ def maybe_install_squashfuse(cloud): return try: cloud.distro.update_package_sources() - except Exception as e: + except Exception: util.logexc(LOG, "Package update failed") raise try: cloud.distro.install_packages(['squashfuse']) - except Exception as e: + except Exception: util.logexc(LOG, 
"Failed to install squashfuse") raise diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index bab80bbe..15bee2d3 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -213,7 +213,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None): def read_installed_packages(): ret = [] - for (name, date, version, dev) in read_pkg_data(): + for (name, _date, _version, dev) in read_pkg_data(): if dev: ret.append(NAMESPACE_DELIM.join([name, dev])) else: @@ -222,7 +222,7 @@ def read_installed_packages(): def read_pkg_data(): - out, err = util.subp([SNAPPY_CMD, "list"]) + out, _err = util.subp([SNAPPY_CMD, "list"]) pkg_data = [] for line in out.splitlines()[1:]: toks = line.split(sep=None, maxsplit=3) diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 45204a07..f8f7cb35 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -101,10 +101,6 @@ from cloudinit.distros import ug_util from cloudinit import ssh_util from cloudinit import util -DISABLE_ROOT_OPTS = ( - "no-port-forwarding,no-agent-forwarding," - "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\"" - " rather than the user \\\"root\\\".\';echo;sleep 10\"") GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' @@ -185,7 +181,7 @@ def handle(_name, cfg, cloud, log, _args): (user, _user_config) = ug_util.extract_default(users) disable_root = util.get_cfg_option_bool(cfg, "disable_root", True) disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts", - DISABLE_ROOT_OPTS) + ssh_util.DISABLE_USER_OPTS) keys = cloud.get_public_ssh_keys() or [] if "ssh_authorized_keys" in cfg: @@ -207,6 +203,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): if not user: user = "NONE" key_prefix = disable_root_opts.replace('$USER', user) + key_prefix = key_prefix.replace('$DISABLE_USER', 'root') else: key_prefix = '' diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index 16b1868b..5e082bd6 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -87,7 +87,6 @@ schema = { 'additionalItems': False, # Reject non-string & non-list 'minItems': 1, 'minProperties': 1, - 'uniqueItems': True } }, 'additionalProperties': False, # Reject keys not in schema @@ -149,12 +148,12 @@ def maybe_install_ua_tools(cloud): return try: cloud.distro.update_package_sources() - except Exception as e: + except Exception: util.logexc(LOG, "Package update failed") raise try: cloud.distro.install_packages(['ubuntu-advantage-tools']) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to install ubuntu-advantage-tools") raise diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index b215e95a..c32a743a 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -52,10 +52,20 @@ config keys for an entry in ``users`` are as follows: associated with the address, username and SSH keys will be requested from there. Default: none - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's - authkeys file. Default: none - - ``ssh_import_id``: Optional. SSH id to import for user. Default: none - - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use. - Default: none. + authkeys file. Default: none. This key can not be combined with + ``ssh_redirect_user``. + - ``ssh_import_id``: Optional. 
SSH id to import for user. Default: none. + This key can not be combined with ``ssh_redirect_user``. + - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH + logins for this user. When specified, all cloud meta-data public ssh + keys will be set up in a disabled state for this username. Any ssh login + as this username will timeout and prompt with a message to login instead + as the configured <default_username> for this instance. Default: false. + This key can not be combined with ``ssh_import_id`` or + ``ssh_authorized_keys``. + - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False. + Default: none. An absence of sudo key, or a value of none or false + will result in no sudo rules being written for the user. - ``system``: Optional. Create user as system user with no home directory. Default: false - ``uid``: Optional. The user's ID. Default: The next available value. @@ -82,6 +92,9 @@ config keys for an entry in ``users`` are as follows: users: - default + # User explicitly omitted from sudo permission; also default behavior. + - name: <some_restricted_user> + sudo: false - name: <username> expiredate: <date> gecos: <comment> @@ -97,6 +110,7 @@ config keys for an entry in ``users`` are as follows: selinux_user: <selinux username> shell: <shell path> snapuser: <email> + ssh_redirect_user: <true/false> ssh_authorized_keys: - <key> - <key> @@ -110,17 +124,44 @@ config keys for an entry in ``users`` are as follows: # since the module attribute 'distros' # is a list of distros that are supported, not a sub-module from cloudinit.distros import ug_util +from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE +LOG = logging.getLogger(__name__) + frequency = PER_INSTANCE def handle(name, cfg, cloud, _log, _args): (users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro) + (default_user, _user_config) = ug_util.extract_default(users) + cloud_keys = cloud.get_public_ssh_keys() or [] for (name, members) in groups.items(): cloud.distro.create_group(name, members) for (user, config) in users.items(): + ssh_redirect_user = config.pop("ssh_redirect_user", False) + if ssh_redirect_user: + if 'ssh_authorized_keys' in config or 'ssh_import_id' in config: + raise ValueError( + 'Not creating user %s. ssh_redirect_user cannot be' + ' provided with ssh_import_id or ssh_authorized_keys' % + user) + if ssh_redirect_user not in (True, 'default'): + raise ValueError( + 'Not creating user %s. Invalid value of' + ' ssh_redirect_user: %s. Expected values: true, default' + ' or false.' % (user, ssh_redirect_user)) + if default_user is None: + LOG.warning( + 'Ignoring ssh_redirect_user: %s for %s.' + ' No default_user defined.' + ' Perhaps missing cloud configuration users: ' + ' [default, ..].', + ssh_redirect_user, user) + else: + config['ssh_redirect_user'] = default_user + config['cloud_public_ssh_keys'] = cloud_keys cloud.distro.create_user(user, **config) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 54ae3a68..31d1db61 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -15,9 +15,14 @@ binary gzip data can be specified and will be decoded before being written. .. note:: if multiline data is provided, care should be taken to ensure that it - follows yaml formatting standargs. to specify binary data, use the yaml + follows yaml formatting standards. to specify binary data, use the yaml option ``!!binary`` +.. 
note:: + Do not write files under /tmp during boot because of a race with + systemd-tmpfiles-clean that can cause temp files to get cleaned during + the early boot process. Use /run/somedir instead to avoid race LP:1707222. + **Internal name:** ``cc_write_files`` **Module frequency:** per instance diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index ca7d0d5b..080a6d06 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -4,7 +4,7 @@ from __future__ import print_function from cloudinit import importer -from cloudinit.util import find_modules, read_file_or_url +from cloudinit.util import find_modules, load_file import argparse from collections import defaultdict @@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False): def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): """Return contents of the cloud-config file annotated with schema errors. - @param cloudconfig: YAML-loaded object from the original_content. + @param cloudconfig: YAML-loaded dict from the original_content or empty + dict if unparseable. @param original_content: The contents of a cloud-config file @param schema_errors: List of tuples from a JSONSchemaValidationError. The tuples consist of (schemapath, error_message). """ if not schema_errors: return original_content - schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content) + schemapaths = {} + if cloudconfig: + schemapaths = _schemapath_for_cloudconfig( + cloudconfig, original_content) errors_by_line = defaultdict(list) error_count = 1 error_footer = [] annotated_content = [] for path, msg in schema_errors: - errors_by_line[schemapaths[path]].append(msg) + match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path) + if match: + line, col = match.groups() + errors_by_line[int(line)].append(msg) + else: + col = None + errors_by_line[schemapaths[path]].append(msg) + if col is not None: + msg = 'Line {line} column {col}: {msg}'.format( + line=line, col=col, msg=msg) error_footer.append('# E{0}: {1}'.format(error_count, msg)) error_count += 1 lines = original_content.decode().split('\n') @@ -139,21 +152,34 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): """ if not os.path.exists(config_path): raise RuntimeError('Configfile {0} does not exist'.format(config_path)) - content = read_file_or_url('file://{0}'.format(config_path)).contents + content = load_file(config_path, decode=False) if not content.startswith(CLOUD_CONFIG_HEADER): errors = ( - ('header', 'File {0} needs to begin with "{1}"'.format( + ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format( config_path, CLOUD_CONFIG_HEADER.decode())),) - raise SchemaValidationError(errors) - + error = SchemaValidationError(errors) + if annotate: + print(annotated_cloudconfig_file({}, content, error.schema_errors)) + raise error try: cloudconfig = yaml.safe_load(content) - except yaml.parser.ParserError as e: - errors = ( - ('format', 'File {0} is not valid yaml. {1}'.format( - config_path, str(e))),) - raise SchemaValidationError(errors) - + except (yaml.YAMLError) as e: + line = column = 1 + mark = None + if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): + mark = getattr(e, 'context_mark') + elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): + mark = getattr(e, 'problem_mark') + if mark: + line = mark.line + 1 + column = mark.column + 1 + errors = (('format-l{line}.c{col}'.format(line=line, col=column), + 'File {0} is not valid yaml. 
{1}'.format( + config_path, str(e))),) + error = SchemaValidationError(errors) + if annotate: + print(annotated_cloudconfig_file({}, content, error.schema_errors)) + raise error try: validate_cloudconfig_schema( cloudconfig, schema, strict=True) @@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content): list_index = 0 RE_YAML_INDENT = r'^(\s*)' scopes = [] - for line_number, line in enumerate(content_lines): + for line_number, line in enumerate(content_lines, 1): indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) line = line.strip() if not line or line.startswith('#'): @@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content): scopes.append((indent_depth + 2, key + '.0')) for inner_list_index in range(0, len(yaml.safe_load(value))): list_key = key + '.' + str(inner_list_index) - schema_line_numbers[list_key] = line_number + 1 - schema_line_numbers[key] = line_number + 1 + schema_line_numbers[list_key] = line_number + schema_line_numbers[key] = line_number return schema_line_numbers @@ -297,8 +323,8 @@ def get_schema(): configs_dir = os.path.dirname(os.path.abspath(__file__)) potential_handlers = find_modules(configs_dir) - for (fname, mod_name) in potential_handlers.items(): - mod_locs, looked_locs = importer.find_module( + for (_fname, mod_name) in potential_handlers.items(): + mod_locs, _looked_locs = importer.find_module( mod_name, ['cloudinit.config'], ['schema']) if mod_locs: mod = importer.import_module(mod_locs[0]) @@ -337,9 +363,11 @@ def handle_schema_args(name, args): try: validate_cloudconfig_file( args.config_file, full_schema, args.annotate) - except (SchemaValidationError, RuntimeError) as e: + except SchemaValidationError as e: if not args.annotate: error(str(e)) + except RuntimeError as e: + error(str(e)) else: print("Valid cloud-config file {0}".format(args.config_file)) if args.doc: diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py new file mode 100644 index 00000000..67646b03 --- /dev/null +++ b/cloudinit/config/tests/test_disable_ec2_metadata.py @@ -0,0 +1,50 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Tests cc_disable_ec2_metadata handler""" + +import cloudinit.config.cc_disable_ec2_metadata as ec2_meta + +from cloudinit.tests.helpers import CiTestCase, mock + +import logging + +LOG = logging.getLogger(__name__) + +DISABLE_CFG = {'disable_ec2_metadata': 'true'} + + +class TestEC2MetadataRoute(CiTestCase): + + with_logs = True + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + def test_disable_ifconfig(self, m_subp, m_which): + """Set the route if ifconfig command is available""" + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_subp.assert_called_with( + ['route', 'add', '-host', '169.254.169.254', 'reject'], + capture=False) + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + def test_disable_ip(self, m_subp, m_which): + """Set the route if ip command is available""" + m_which.side_effect = lambda x: x if x == 'ip' else None + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_subp.assert_called_with( + ['ip', 'route', 'add', 'prohibit', '169.254.169.254'], + capture=False) + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + def test_disable_no_tool(self, m_subp, m_which): + """Log error when neither route nor ip commands are available""" + m_which.return_value = None # Find neither ifconfig nor ip + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + self.assertEqual( + [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list) + m_subp.assert_not_called() + +# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py new file mode 100644 index 00000000..b051ec82 --- /dev/null +++ b/cloudinit/config/tests/test_set_passwords.py @@ -0,0 +1,71 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import mock + +from cloudinit.config import cc_set_passwords as setpass +from cloudinit.tests.helpers import CiTestCase +from cloudinit import util + +MODPATH = "cloudinit.config.cc_set_passwords." 
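As a hedged illustration of the command shapes the tests below assert for handle_ssh_pwauth (the helper name here is made up and is not part of cc_set_passwords): systemctl takes the action before the unit name, while the legacy service wrapper takes it after.

def restart_cmd(service_cmd, service_name):
    """Hypothetical helper mirroring the subp calls asserted below."""
    if 'systemctl' in service_cmd:
        return list(service_cmd) + ['restart', service_name]
    return list(service_cmd) + [service_name, 'restart']


assert restart_cmd(['systemctl'], 'myssh') == ['systemctl', 'restart', 'myssh']
assert restart_cmd(['service'], 'myssh') == ['service', 'myssh', 'restart']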
+ + +class TestHandleSshPwauth(CiTestCase): + """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth.""" + + with_logs = True + + @mock.patch(MODPATH + "util.subp") + def test_unknown_value_logs_warning(self, m_subp): + setpass.handle_ssh_pwauth("floo") + self.assertIn("Unrecognized value: ssh_pwauth=floo", + self.logs.getvalue()) + m_subp.assert_not_called() + + @mock.patch(MODPATH + "update_ssh_config", return_value=True) + @mock.patch(MODPATH + "util.subp") + def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config): + """If systemctl in service cmd: systemctl restart name.""" + setpass.handle_ssh_pwauth( + True, service_cmd=["systemctl"], service_name="myssh") + self.assertEqual(mock.call(["systemctl", "restart", "myssh"]), + m_subp.call_args) + + @mock.patch(MODPATH + "update_ssh_config", return_value=True) + @mock.patch(MODPATH + "util.subp") + def test_service_as_service_cmd(self, m_subp, m_update_ssh_config): + """If systemctl in service cmd: systemctl restart name.""" + setpass.handle_ssh_pwauth( + True, service_cmd=["service"], service_name="myssh") + self.assertEqual(mock.call(["service", "myssh", "restart"]), + m_subp.call_args) + + @mock.patch(MODPATH + "update_ssh_config", return_value=False) + @mock.patch(MODPATH + "util.subp") + def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config): + """If config is not updated, then no system restart should be done.""" + setpass.handle_ssh_pwauth(True) + m_subp.assert_not_called() + self.assertIn("No need to restart ssh", self.logs.getvalue()) + + @mock.patch(MODPATH + "update_ssh_config", return_value=True) + @mock.patch(MODPATH + "util.subp") + def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config): + """If 'unchanged', then no updates to config and no restart.""" + setpass.handle_ssh_pwauth( + "unchanged", service_cmd=["systemctl"], service_name="myssh") + m_update_ssh_config.assert_not_called() + m_subp.assert_not_called() + + @mock.patch(MODPATH + "util.subp") + def test_valid_change_values(self, m_subp): + """If value is a valid changen value, then update should be called.""" + upname = MODPATH + "update_ssh_config" + optname = "PasswordAuthentication" + for value in util.FALSE_STRINGS + util.TRUE_STRINGS: + optval = "yes" if value in util.TRUE_STRINGS else "no" + with mock.patch(upname, return_value=False) as m_update: + setpass.handle_ssh_pwauth(value) + m_update.assert_called_with({optname: optval}) + m_subp.assert_not_called() + +# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py index c5b4a9de..3c472891 100644 --- a/cloudinit/config/tests/test_snap.py +++ b/cloudinit/config/tests/test_snap.py @@ -9,7 +9,7 @@ from cloudinit.config.cc_snap import ( from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util from cloudinit.tests.helpers import ( - CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema) + CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema) SYSTEM_USER_ASSERTION = """\ @@ -162,6 +162,7 @@ class TestAddAssertions(CiTestCase): class TestRunCommands(CiTestCase): with_logs = True + allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] def setUp(self): super(TestRunCommands, self).setUp() @@ -245,9 +246,10 @@ class TestRunCommands(CiTestCase): @skipUnlessJsonSchema() -class TestSchema(CiTestCase): +class TestSchema(CiTestCase, SchemaTestCaseMixin): with_logs = True + schema = schema def test_schema_warns_on_snap_not_as_dict(self): """If the snap configuration is not 
a dict, emit a warning.""" @@ -340,6 +342,30 @@ class TestSchema(CiTestCase): {'snap': {'assertions': {'01': 'also valid'}}}, schema) self.assertEqual('', self.logs.getvalue()) + def test_duplicates_are_fine_array_array(self): + """Duplicated commands array/array entries are allowed.""" + self.assertSchemaValid( + {'commands': [["echo", "bye"], ["echo" "bye"]]}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_array_string(self): + """Duplicated commands array/string entries are allowed.""" + self.assertSchemaValid( + {'commands': ["echo bye", "echo bye"]}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_dict_array(self): + """Duplicated commands dict/array entries are allowed.""" + self.assertSchemaValid( + {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_dict_string(self): + """Duplicated commands dict/string entries are allowed.""" + self.assertSchemaValid( + {'commands': {'00': "echo bye", '01': "echo bye"}}, + "command entries can be duplicate.") + class TestHandle(CiTestCase): @@ -399,8 +425,10 @@ class TestHandle(CiTestCase): 'snap': {'commands': ['echo "HI" >> %s' % outfile, 'echo "MOM" >> %s' % outfile]}} mock_path = 'cloudinit.config.cc_snap.sys.stderr' - with mock.patch(mock_path, new_callable=StringIO): - handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) + with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): + with mock.patch(mock_path, new_callable=StringIO): + handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual('HI\nMOM\n', util.load_file(outfile)) @mock.patch('cloudinit.config.cc_snap.util.subp') diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py new file mode 100644 index 00000000..c8a4271f --- /dev/null +++ b/cloudinit/config/tests/test_ssh.py @@ -0,0 +1,151 @@ +# This file is part of cloud-init. See LICENSE file for license information. + + +from cloudinit.config import cc_ssh +from cloudinit import ssh_util +from cloudinit.tests.helpers import CiTestCase, mock + +MODPATH = "cloudinit.config.cc_ssh." 
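For orientation, a small sketch (not from the patch) of the substitution the tests below apply to ssh_util.DISABLE_USER_OPTS before comparing it against the options passed for the root key; it assumes only that the template contains $USER and $DISABLE_USER placeholders.

def expected_root_options(template, user):
    """Mirror the replace() calls done in the assertions below."""
    options = template.replace('$USER', user if user else 'NONE')
    return options.replace('$DISABLE_USER', 'root')


# Usage with a made-up template string:
print(expected_root_options('login as "$USER" rather than "$DISABLE_USER"', 'clouduser'))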
+ + +@mock.patch(MODPATH + "ssh_util.setup_user_keys") +class TestHandleSsh(CiTestCase): + """Test cc_ssh handling of ssh config.""" + + def test_apply_credentials_with_user(self, m_setup_keys): + """Apply keys for the given user and root.""" + keys = ["key1"] + user = "clouduser" + cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS) + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options="")], + m_setup_keys.call_args_list) + + def test_apply_credentials_with_no_user(self, m_setup_keys): + """Apply keys for root only.""" + keys = ["key1"] + user = None + cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS) + self.assertEqual([mock.call(set(keys), "root", options="")], + m_setup_keys.call_args_list) + + def test_apply_credentials_with_user_disable_root(self, m_setup_keys): + """Apply keys for the given user and disable root ssh.""" + keys = ["key1"] + user = "clouduser" + options = ssh_util.DISABLE_USER_OPTS + cc_ssh.apply_credentials(keys, user, True, options) + options = options.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys): + """Apply keys no user and disable root ssh.""" + keys = ["key1"] + user = None + options = ssh_util.DISABLE_USER_OPTS + cc_ssh.apply_credentials(keys, user, True, options) + options = options.replace("$USER", "NONE") + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_no_cfg(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test handle with no config ignores generating existing keyfiles.""" + cfg = {} + keys = ["key1"] + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ([], {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", cfg, cloud, None, None) + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE") + options = options.replace("$DISABLE_USER", "root") + m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*') + self.assertIn( + [mock.call('/etc/ssh/ssh_host_rsa_key'), + mock.call('/etc/ssh/ssh_host_dsa_key'), + mock.call('/etc/ssh/ssh_host_ecdsa_key'), + mock.call('/etc/ssh/ssh_host_ed25519_key')], + m_path_exists.call_args_list) + self.assertEqual([mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test handle with no config and a default distro user.""" + cfg = {} + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", 
cfg, cloud, None, None) + + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test handle with explicit disable_root and a default distro user.""" + # This test is identical to test_handle_no_cfg_and_default_root, + # except this uses an explicit cfg value + cfg = {"disable_root": True} + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", cfg, cloud, None, None) + + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test handle with disable_root == False.""" + # When disable_root == False, the ssh redirect for root is skipped + cfg = {"disable_root": False} + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.get_public_ssh_keys = mock.Mock(return_value=keys) + cc_ssh.handle("name", cfg, cloud, None, None) + + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options="")], + m_setup_keys.call_args_list) diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py index f2a59faf..b7cf9bee 100644 --- a/cloudinit/config/tests/test_ubuntu_advantage.py +++ b/cloudinit/config/tests/test_ubuntu_advantage.py @@ -7,7 +7,8 @@ from cloudinit.config.cc_ubuntu_advantage import ( handle, maybe_install_ua_tools, run_commands, schema) from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema +from cloudinit.tests.helpers import ( + CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) # Module path used in mocks @@ -22,6 +23,7 @@ class FakeCloud(object): class TestRunCommands(CiTestCase): with_logs = True + allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] def setUp(self): super(TestRunCommands, self).setUp() @@ -105,9 +107,10 @@ class TestRunCommands(CiTestCase): @skipUnlessJsonSchema() -class TestSchema(CiTestCase): +class TestSchema(CiTestCase, SchemaTestCaseMixin): with_logs = True + schema = schema def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): """If ubuntu-advantage configuration is not a dict, emit a warning.""" @@ -169,6 +172,30 @@ 
class TestSchema(CiTestCase): {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) self.assertEqual('', self.logs.getvalue()) + def test_duplicates_are_fine_array_array(self): + """Duplicated commands array/array entries are allowed.""" + self.assertSchemaValid( + {'commands': [["echo", "bye"], ["echo" "bye"]]}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_array_string(self): + """Duplicated commands array/string entries are allowed.""" + self.assertSchemaValid( + {'commands': ["echo bye", "echo bye"]}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_dict_array(self): + """Duplicated commands dict/array entries are allowed.""" + self.assertSchemaValid( + {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_dict_string(self): + """Duplicated commands dict/string entries are allowed.""" + self.assertSchemaValid( + {'commands': {'00': "echo bye", '01': "echo bye"}}, + "command entries can be duplicate.") + class TestHandle(CiTestCase): @@ -208,8 +235,10 @@ class TestHandle(CiTestCase): 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, 'echo "MOM" >> %s' % outfile]}} mock_path = '%s.sys.stderr' % MPATH - with mock.patch(mock_path, new_callable=StringIO): - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): + with mock.patch(mock_path, new_callable=StringIO): + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, + args=None) self.assertEqual('HI\nMOM\n', util.load_file(outfile)) diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py new file mode 100644 index 00000000..ba0afae3 --- /dev/null +++ b/cloudinit/config/tests/test_users_groups.py @@ -0,0 +1,144 @@ +# This file is part of cloud-init. See LICENSE file for license information. + + +from cloudinit.config import cc_users_groups +from cloudinit.tests.helpers import CiTestCase, mock + +MODPATH = "cloudinit.config.cc_users_groups" + + +@mock.patch('cloudinit.distros.ubuntu.Distro.create_group') +@mock.patch('cloudinit.distros.ubuntu.Distro.create_user') +class TestHandleUsersGroups(CiTestCase): + """Test cc_users_groups handling of config.""" + + with_logs = True + + def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group): + """Test handle with no config will not create users or groups.""" + cfg = {} # merged cloud-config + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + m_user.assert_not_called() + m_group.assert_not_called() + + def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group): + """When users in config, create users with distro.create_user.""" + cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config + # System config defines a default user for the distro. 
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertItemsEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', default=False)]) + m_group.assert_not_called() + + def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group): + """When ssh_redirect_user is True pass default user and cloud keys.""" + cfg = { + 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertItemsEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, + ssh_redirect_user='ubuntu')]) + m_group.assert_not_called() + + def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group): + """When ssh_redirect_user is 'default' pass default username.""" + cfg = { + 'users': ['default', {'name': 'me2', + 'ssh_redirect_user': 'default'}]} + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertItemsEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, + ssh_redirect_user='ubuntu')]) + m_group.assert_not_called() + + def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group): + """Warn when ssh_redirect_user is not 'default'.""" + cfg = { + 'users': ['default', {'name': 'me2', + 'ssh_redirect_user': 'snowflake'}]} + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + with self.assertRaises(ValueError) as context_manager: + cc_users_groups.handle('modulename', cfg, cloud, None, None) + m_group.assert_not_called() + self.assertEqual( + 'Not creating user me2. Invalid value of ssh_redirect_user:' + ' snowflake. Expected values: true, default or false.', + str(context_manager.exception)) + + def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group): + """When unspecified ssh_redirect_user is false and not set up.""" + cfg = {'users': ['default', {'name': 'me2'}]} + # System config defines a default user for the distro. 
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertItemsEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', default=False)]) + m_group.assert_not_called() + + def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group): + """Warn when ssh_redirect_user is True and no default user present.""" + cfg = { + 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} + # System config defines *no* default user for the distro. + sys_cfg = {} + metadata = {} # no public-keys defined + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + m_user.assert_called_once_with('me2', default=False) + m_group.assert_not_called() + self.assertEqual( + 'WARNING: Ignoring ssh_redirect_user: True for me2. No' + ' default_user defined. Perhaps missing' + ' cloud configuration users: [default, ..].\n', + self.logs.getvalue()) |
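To summarise the ssh_redirect_user behaviour these tests pin down, a minimal sketch follows; the function name is hypothetical, while the accepted values (true, 'default', false), the missing-default-user warning path, and the error text are taken from the assertions above.

def resolve_ssh_redirect_user(value, user_name, default_user):
    """Return the user whose keys a redirected user should point at."""
    if value in (None, False):
        return None                      # feature off (the default)
    if value in (True, 'default'):
        if not default_user:
            # cc_users_groups warns "Ignoring ssh_redirect_user ..." here
            return None
        return default_user              # e.g. 'ubuntu'
    raise ValueError(
        'Not creating user {0}. Invalid value of ssh_redirect_user: {1}.'
        ' Expected values: true, default or false.'.format(user_name, value))


assert resolve_ssh_redirect_user(True, 'me2', 'ubuntu') == 'ubuntu'
assert resolve_ssh_redirect_user('default', 'me2', 'ubuntu') == 'ubuntu'
assert resolve_ssh_redirect_user(None, 'me2', 'ubuntu') is None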