summaryrefslogtreecommitdiff
path: root/cloudinit/config
diff options
context:
space:
mode:
authorzdc <zdc@users.noreply.github.com>2020-09-15 21:35:20 +0300
committerGitHub <noreply@github.com>2020-09-15 21:35:20 +0300
commit76adf82b8a4dbcf636151d292175b7d1ac182fcf (patch)
treef57f3db085a724df237ffa64b589c6bb6dd3b28f /cloudinit/config
parent1a790ee102fd405e5c3a20a17a69ba0c118ed874 (diff)
parent7cd260b313267dc7123cb99a75d4555e24909cca (diff)
downloadvyos-cloud-init-76adf82b8a4dbcf636151d292175b7d1ac182fcf.tar.gz
vyos-cloud-init-76adf82b8a4dbcf636151d292175b7d1ac182fcf.zip
Merge pull request #18 from zdc/T2117-equuleus-20.3
T2117: Cloud-init updated to 20.3
Diffstat (limited to 'cloudinit/config')
-rw-r--r--cloudinit/config/cc_apk_configure.py263
-rw-r--r--cloudinit/config/cc_apt_configure.py582
-rw-r--r--cloudinit/config/cc_apt_pipelining.py2
-rw-r--r--cloudinit/config/cc_bootcmd.py3
-rwxr-xr-xcloudinit/config/cc_byobu.py3
-rw-r--r--cloudinit/config/cc_ca_certs.py45
-rw-r--r--cloudinit/config/cc_chef.py403
-rw-r--r--cloudinit/config/cc_disable_ec2_metadata.py7
-rw-r--r--cloudinit/config/cc_disk_setup.py83
-rw-r--r--cloudinit/config/cc_emit_upstart.py8
-rw-r--r--cloudinit/config/cc_fan.py7
-rw-r--r--cloudinit/config/cc_final_message.py2
-rw-r--r--cloudinit/config/cc_growpart.py33
-rw-r--r--cloudinit/config/cc_grub_dpkg.py98
-rw-r--r--cloudinit/config/cc_keys_to_console.py3
-rw-r--r--cloudinit/config/cc_landscape.py3
-rw-r--r--cloudinit/config/cc_locale.py65
-rw-r--r--cloudinit/config/cc_lxd.py23
-rw-r--r--cloudinit/config/cc_mcollective.py3
-rw-r--r--cloudinit/config/cc_mounts.py67
-rw-r--r--cloudinit/config/cc_ntp.py110
-rw-r--r--cloudinit/config/cc_package_update_upgrade_install.py3
-rw-r--r--cloudinit/config/cc_phone_home.py3
-rw-r--r--cloudinit/config/cc_power_state_change.py72
-rw-r--r--cloudinit/config/cc_puppet.py13
-rw-r--r--cloudinit/config/cc_resizefs.py23
-rw-r--r--cloudinit/config/cc_resolv_conf.py4
-rw-r--r--cloudinit/config/cc_rh_subscription.py19
-rw-r--r--cloudinit/config/cc_rsyslog.py11
-rw-r--r--cloudinit/config/cc_salt_minion.py4
-rw-r--r--cloudinit/config/cc_scripts_per_boot.py4
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py4
-rw-r--r--cloudinit/config/cc_scripts_per_once.py4
-rw-r--r--cloudinit/config/cc_scripts_user.py4
-rw-r--r--cloudinit/config/cc_scripts_vendor.py3
-rw-r--r--cloudinit/config/cc_seed_random.py5
-rw-r--r--cloudinit/config/cc_set_hostname.py3
-rwxr-xr-xcloudinit/config/cc_set_passwords.py7
-rw-r--r--cloudinit/config/cc_snap.py39
-rw-r--r--cloudinit/config/cc_spacewalk.py8
-rwxr-xr-xcloudinit/config/cc_ssh.py43
-rwxr-xr-xcloudinit/config/cc_ssh_authkey_fingerprints.py6
-rwxr-xr-xcloudinit/config/cc_ssh_import_id.py5
-rw-r--r--cloudinit/config/cc_ubuntu_advantage.py13
-rw-r--r--cloudinit/config/cc_ubuntu_drivers.py9
-rw-r--r--cloudinit/config/cc_users_groups.py7
-rw-r--r--cloudinit/config/cc_write_files.py191
-rw-r--r--cloudinit/config/cc_yum_add_repo.py4
-rw-r--r--cloudinit/config/schema.py137
-rw-r--r--cloudinit/config/tests/test_disable_ec2_metadata.py14
-rw-r--r--cloudinit/config/tests/test_final_message.py46
-rw-r--r--cloudinit/config/tests/test_grub_dpkg.py176
-rw-r--r--cloudinit/config/tests/test_mounts.py28
-rw-r--r--cloudinit/config/tests/test_resolv_conf.py86
-rw-r--r--cloudinit/config/tests/test_set_passwords.py38
-rw-r--r--cloudinit/config/tests/test_snap.py60
-rw-r--r--cloudinit/config/tests/test_ubuntu_advantage.py28
-rw-r--r--cloudinit/config/tests/test_ubuntu_drivers.py33
-rw-r--r--cloudinit/config/tests/test_users_groups.py10
59 files changed, 2223 insertions, 757 deletions
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py
new file mode 100644
index 00000000..84d7a0b6
--- /dev/null
+++ b/cloudinit/config/cc_apk_configure.py
@@ -0,0 +1,263 @@
+# Copyright (c) 2020 Dermot Bradley
+#
+# Author: Dermot Bradley <dermot_bradley@yahoo.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Apk Configure: Configures apk repositories file."""
+
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit import temp_utils
+from cloudinit import templater
+from cloudinit import util
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+# If no mirror is specified then use this one
+DEFAULT_MIRROR = "https://alpine.global.ssl.fastly.net/alpine"
+
+
+REPOSITORIES_TEMPLATE = """\
+## template:jinja
+#
+# Created by cloud-init
+#
+# This file is written on first boot of an instance
+#
+
+{{ alpine_baseurl }}/{{ alpine_version }}/main
+{% if community_enabled -%}
+{{ alpine_baseurl }}/{{ alpine_version }}/community
+{% endif -%}
+{% if testing_enabled -%}
+{% if alpine_version != 'edge' %}
+#
+# Testing - using with non-Edge installation may cause problems!
+#
+{% endif %}
+{{ alpine_baseurl }}/edge/testing
+{% endif %}
+{% if local_repo != '' %}
+
+#
+# Local repo
+#
+{{ local_repo }}/{{ alpine_version }}
+{% endif %}
+
+"""
+
+
+frequency = PER_INSTANCE
+distros = ['alpine']
+schema = {
+ 'id': 'cc_apk_configure',
+ 'name': 'APK Configure',
+ 'title': 'Configure apk repositories file',
+ 'description': dedent("""\
+ This module handles configuration of the /etc/apk/repositories file.
+
+ .. note::
+ To ensure that apk configuration is valid yaml, any strings
+ containing special characters, especially ``:`` should be quoted.
+ """),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Keep the existing /etc/apk/repositories file unaltered.
+ apk_repos:
+ preserve_repositories: true
+ """),
+ dedent("""\
+ # Create repositories file for Alpine v3.12 main and community
+ # using default mirror site.
+ apk_repos:
+ alpine_repo:
+ community_enabled: true
+ version: 'v3.12'
+ """),
+ dedent("""\
+ # Create repositories file for Alpine Edge main, community, and
+ # testing using a specified mirror site and also a local repo.
+ apk_repos:
+ alpine_repo:
+ base_url: 'https://some-alpine-mirror/alpine'
+ community_enabled: true
+ testing_enabled: true
+ version: 'edge'
+ local_repo_base_url: 'https://my-local-server/local-alpine'
+ """),
+ ],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'apk_repos': {
+ 'type': 'object',
+ 'properties': {
+ 'preserve_repositories': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ By default, cloud-init will generate a new repositories
+ file ``/etc/apk/repositories`` based on any valid
+ configuration settings specified within a apk_repos
+ section of cloud config. To disable this behavior and
+ preserve the repositories file from the pristine image,
+ set ``preserve_repositories`` to ``true``.
+
+ The ``preserve_repositories`` option overrides
+ all other config keys that would alter
+ ``/etc/apk/repositories``.
+ """)
+ },
+ 'alpine_repo': {
+ 'type': ['object', 'null'],
+ 'properties': {
+ 'base_url': {
+ 'type': 'string',
+ 'default': DEFAULT_MIRROR,
+ 'description': dedent("""\
+ The base URL of an Alpine repository, or
+ mirror, to download official packages from.
+ If not specified then it defaults to ``{}``
+ """.format(DEFAULT_MIRROR))
+ },
+ 'community_enabled': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Whether to add the Community repo to the
+ repositories file. By default the Community
+ repo is not included.
+ """)
+ },
+ 'testing_enabled': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Whether to add the Testing repo to the
+ repositories file. By default the Testing
+ repo is not included. It is only recommended
+ to use the Testing repo on a machine running
+ the ``Edge`` version of Alpine as packages
+ installed from Testing may have dependencies
+ that conflict with those in non-Edge Main or
+ Community repos.
+ """)
+ },
+ 'version': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The Alpine version to use (e.g. ``v3.12`` or
+ ``edge``)
+ """)
+ },
+ },
+ 'required': ['version'],
+ 'minProperties': 1,
+ 'additionalProperties': False,
+ },
+ 'local_repo_base_url': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The base URL of an Alpine repository containing
+ unofficial packages
+ """)
+ }
+ },
+ 'required': [],
+ 'minProperties': 1, # Either preserve_repositories or alpine_repo
+ 'additionalProperties': False,
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema)
+
+
+def handle(name, cfg, cloud, log, _args):
+ """
+ Call to handle apk_repos sections in cloud-config file.
+
+ @param name: The module name "apk-configure" from cloud.cfg
+ @param cfg: A nested dict containing the entire cloud config contents.
+ @param cloud: The CloudInit object in use.
+ @param log: Pre-initialized Python logger object to use for logging.
+ @param _args: Any module arguments from cloud.cfg
+ """
+
+ # If there is no "apk_repos" section in the configuration
+ # then do nothing.
+ apk_section = cfg.get('apk_repos')
+ if not apk_section:
+ LOG.debug(("Skipping module named %s,"
+ " no 'apk_repos' section found"), name)
+ return
+
+ validate_cloudconfig_schema(cfg, schema)
+
+ # If "preserve_repositories" is explicitly set to True in
+ # the configuration do nothing.
+ if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False):
+ LOG.debug(("Skipping module named %s,"
+ " 'preserve_repositories' is set"), name)
+ return
+
+ # If there is no "alpine_repo" subsection of "apk_repos" present in the
+ # configuration then do nothing, as at least "version" is required to
+ # create valid repositories entries.
+ alpine_repo = apk_section.get('alpine_repo')
+ if not alpine_repo:
+ LOG.debug(("Skipping module named %s,"
+ " no 'alpine_repo' configuration found"), name)
+ return
+
+ # If there is no "version" value present in configuration then do nothing.
+ alpine_version = alpine_repo.get('version')
+ if not alpine_version:
+ LOG.debug(("Skipping module named %s,"
+ " 'version' not specified in alpine_repo"), name)
+ return
+
+ local_repo = apk_section.get('local_repo_base_url', '')
+
+ _write_repositories_file(alpine_repo, alpine_version, local_repo)
+
+
+def _write_repositories_file(alpine_repo, alpine_version, local_repo):
+ """
+ Write the /etc/apk/repositories file with the specified entries.
+
+ @param alpine_repo: A nested dict of the alpine_repo configuration.
+ @param alpine_version: A string of the Alpine version to use.
+ @param local_repo: A string containing the base URL of a local repo.
+ """
+
+ repo_file = '/etc/apk/repositories'
+
+ alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR)
+
+ params = {'alpine_baseurl': alpine_baseurl,
+ 'alpine_version': alpine_version,
+ 'community_enabled': alpine_repo.get('community_enabled'),
+ 'testing_enabled': alpine_repo.get('testing_enabled'),
+ 'local_repo': local_repo}
+
+ tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ template_fn = tfile[1] # Filepath is second item in tuple
+ util.write_file(template_fn, content=REPOSITORIES_TEMPLATE)
+
+ LOG.debug('Generating Alpine repository configuration file: %s',
+ repo_file)
+ templater.render_to_file(template_fn, repo_file, params)
+ # Clean up temporary template
+ util.del_file(template_fn)
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index c44dec45..73d8719f 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -6,228 +6,372 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Apt Configure
--------------
-**Summary:** configure apt
-
-This module handles both configuration of apt options and adding source lists.
-There are configuration options such as ``apt_get_wrapper`` and
-``apt_get_command`` that control how cloud-init invokes apt-get.
-These configuration options are handled on a per-distro basis, so consult
-documentation for cloud-init's distro support for instructions on using
-these config options.
-
-.. note::
- To ensure that apt configuration is valid yaml, any strings containing
- special characters, especially ``:`` should be quoted.
-
-.. note::
- For more information about apt configuration, see the
- ``Additional apt configuration`` example.
-
-**Preserve sources.list:**
-
-By default, cloud-init will generate a new sources list in
-``/etc/apt/sources.list.d`` based on any changes specified in cloud config.
-To disable this behavior and preserve the sources list from the pristine image,
-set ``preserve_sources_list`` to ``true``.
-
-.. note::
- The ``preserve_sources_list`` option overrides all other config keys that
- would alter ``sources.list`` or ``sources.list.d``, **except** for
- additional sources to be added to ``sources.list.d``.
-
-**Disable source suites:**
-
-Entries in the sources list can be disabled using ``disable_suites``, which
-takes a list of suites to be disabled. If the string ``$RELEASE`` is present in
-a suite in the ``disable_suites`` list, it will be replaced with the release
-name. If a suite specified in ``disable_suites`` is not present in
-``sources.list`` it will be ignored. For convenience, several aliases are
-provided for ``disable_suites``:
-
- - ``updates`` => ``$RELEASE-updates``
- - ``backports`` => ``$RELEASE-backports``
- - ``security`` => ``$RELEASE-security``
- - ``proposed`` => ``$RELEASE-proposed``
- - ``release`` => ``$RELEASE``
-
-.. note::
- When a suite is disabled using ``disable_suites``, its entry in
- ``sources.list`` is not deleted; it is just commented out.
-
-**Configure primary and security mirrors:**
-
-The primary and security archive mirrors can be specified using the ``primary``
-and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys
-take a list of configs, allowing mirrors to be specified on a per-architecture
-basis. Each config is a dictionary which must have an entry for ``arches``,
-specifying which architectures that config entry is for. The keyword
-``default`` applies to any architecture not explicitly listed. The mirror url
-can be specified with the ``uri`` key, or a list of mirrors to check can be
-provided in order, with the first mirror that can be resolved being selected.
-This allows the same configuration to be used in different environment, with
-different hosts used for a local apt mirror. If no mirror is provided by
-``uri`` or ``search``, ``search_dns`` may be used to search for dns names in
-the format ``<distro>-mirror`` in each of the following:
-
- - fqdn of this host per cloud metadata
- - localdomain
- - domains listed in ``/etc/resolv.conf``
-
-If there is a dns entry for ``<distro>-mirror``, then it is assumed that there
-is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the
-``primary`` key is defined, but not the ``security`` key, then then
-configuration for ``primary`` is also used for ``security``. If ``search_dns``
-is used for the ``security`` key, the search pattern will be.
-``<distro>-security-mirror``.
-
-If no mirrors are specified, or all lookups fail, then default mirrors defined
-in the datasource are used. If none are present in the datasource either the
-following defaults are used:
-
- - primary: ``http://archive.ubuntu.com/ubuntu``
- - security: ``http://security.ubuntu.com/ubuntu``
-
-**Specify sources.list template:**
-
-A custom template for rendering ``sources.list`` can be specefied with
-``sources_list``. If no ``sources_list`` template is given, cloud-init will
-use sane default. Within this template, the following strings will be replaced
-with the appropriate values:
-
- - ``$MIRROR``
- - ``$RELEASE``
- - ``$PRIMARY``
- - ``$SECURITY``
-
-**Pass configuration to apt:**
-
-Apt configuration can be specified using ``conf``. Configuration is specified
-as a string. For multiline apt configuration, make sure to follow yaml syntax.
-
-**Configure apt proxy:**
-
-Proxy configuration for apt can be specified using ``conf``, but proxy config
-keys also exist for convenience. The proxy config keys, ``http_proxy``,
-``ftp_proxy``, and ``https_proxy`` may be used to specify a proxy for http, ftp
-and https protocols respectively. The ``proxy`` key also exists as an alias for
-``http_proxy``. Proxy url is specified in the format
-``<protocol>://[[user][:pass]@]host[:port]/``.
-
-**Add apt repos by regex:**
+"""Apt Configure: Configure apt for the user."""
-All source entries in ``apt-sources`` that match regex in
-``add_apt_repo_match`` will be added to the system using
-``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
-to ``^[\\w-]+:\\w``
-
-**Add source list entries:**
-
-Source list entries can be specified as a dictionary under the ``sources``
-config key, with key in the dict representing a different source file. The key
-of each source entry will be used as an id that can be referenced in
-other config entries, as well as the filename for the source's configuration
-under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``,
-it will be appended. If there is no configuration for a key in ``sources``, no
-file will be written, but the key may still be referred to as an id in other
-``sources`` entries.
-
-Each entry under ``sources`` is a dictionary which may contain any of the
-following optional keys:
-
- - ``source``: a sources.list entry (some variable replacements apply)
- - ``keyid``: a key to import via shortid or fingerprint
- - ``key``: a raw PGP key
- - ``keyserver``: alternate keyserver to pull ``keyid`` key from
-
-The ``source`` key supports variable replacements for the following strings:
-
- - ``$MIRROR``
- - ``$PRIMARY``
- - ``$SECURITY``
- - ``$RELEASE``
-
-**Internal name:** ``cc_apt_configure``
+import glob
+import os
+import re
+from textwrap import dedent
-**Module frequency:** per instance
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import gpg
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import templater
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
-**Supported distros:** ubuntu, debian
+LOG = logging.getLogger(__name__)
-**Config keys**::
+# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
- apt:
- preserve_sources_list: <true/false>
- disable_suites:
+frequency = PER_INSTANCE
+distros = ["ubuntu", "debian"]
+mirror_property = {
+ 'type': 'array',
+ 'item': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'required': ['arches'],
+ 'properties': {
+ 'arches': {
+ 'type': 'array',
+ 'item': {
+ 'type': 'string'
+ },
+ 'minItems': 1
+ },
+ 'uri': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'search': {
+ 'type': 'array',
+ 'item': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'minItems': 1
+ },
+ 'search_dns': {
+ 'type': 'boolean',
+ }
+ }
+ }
+}
+schema = {
+ 'id': 'cc_apt_configure',
+ 'name': 'Apt Configure',
+ 'title': 'Configure apt for the user',
+ 'description': dedent("""\
+ This module handles both configuration of apt options and adding
+ source lists. There are configuration options such as
+ ``apt_get_wrapper`` and ``apt_get_command`` that control how
+ cloud-init invokes apt-get. These configuration options are
+ handled on a per-distro basis, so consult documentation for
+ cloud-init's distro support for instructions on using
+ these config options.
+
+ .. note::
+ To ensure that apt configuration is valid yaml, any strings
+ containing special characters, especially ``:`` should be quoted.
+
+ .. note::
+ For more information about apt configuration, see the
+ ``Additional apt configuration`` example."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ apt:
+ preserve_sources_list: false
+ disable_suites:
- $RELEASE-updates
- backports
- $RELEASE
- mysuite
- primary:
+ primary:
- arches:
- amd64
- i386
- default
- uri: "http://us.archive.ubuntu.com/ubuntu"
+ uri: 'http://us.archive.ubuntu.com/ubuntu'
search:
- - "http://cool.but-sometimes-unreachable.com/ubuntu"
- - "http://us.archive.ubuntu.com/ubuntu"
+ - 'http://cool.but-sometimes-unreachable.com/ubuntu'
+ - 'http://us.archive.ubuntu.com/ubuntu'
search_dns: <true/false>
- arches:
- s390x
- arm64
- uri: "http://archive-to-use-for-arm64.example.com/ubuntu"
- security:
+ uri: 'http://archive-to-use-for-arm64.example.com/ubuntu'
+ security:
- arches:
- default
search_dns: true
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- debconf_selections:
- set1: the-package the-package/some-flag boolean true
- conf: |
- APT {
- Get {
- Assume-Yes "true";
- Fix-Broken "true";
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ debconf_selections:
+ set1: the-package the-package/some-flag boolean true
+ conf: |
+ APT {
+ Get {
+ Assume-Yes 'true';
+ Fix-Broken 'true';
+ }
+ }
+ proxy: 'http://[[user][:pass]@]host[:port]/'
+ http_proxy: 'http://[[user][:pass]@]host[:port]/'
+ ftp_proxy: 'ftp://[[user][:pass]@]host[:port]/'
+ https_proxy: 'https://[[user][:pass]@]host[:port]/'
+ sources:
+ source1:
+ keyid: 'keyid'
+ keyserver: 'keyserverurl'
+ source: 'deb http://<url>/ xenial main'
+ source2:
+ source: 'ppa:<ppa-name>'
+ source3:
+ source: 'deb $MIRROR $RELEASE multiverse'
+ key: |
+ ------BEGIN PGP PUBLIC KEY BLOCK-------
+ <key data>
+ ------END PGP PUBLIC KEY BLOCK-------""")],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'apt': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'preserve_sources_list': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ By default, cloud-init will generate a new sources
+ list in ``/etc/apt/sources.list.d`` based on any
+ changes specified in cloud config. To disable this
+ behavior and preserve the sources list from the
+ pristine image, set ``preserve_sources_list``
+ to ``true``.
+
+ The ``preserve_sources_list`` option overrides
+ all other config keys that would alter
+ ``sources.list`` or ``sources.list.d``,
+ **except** for additional sources to be added
+ to ``sources.list.d``.""")
+ },
+ 'disable_suites': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ Entries in the sources list can be disabled using
+ ``disable_suites``, which takes a list of suites
+ to be disabled. If the string ``$RELEASE`` is
+ present in a suite in the ``disable_suites`` list,
+ it will be replaced with the release name. If a
+ suite specified in ``disable_suites`` is not
+ present in ``sources.list`` it will be ignored.
+ For convenience, several aliases are provided for
+ ``disable_suites``:
+
+ - ``updates`` => ``$RELEASE-updates``
+ - ``backports`` => ``$RELEASE-backports``
+ - ``security`` => ``$RELEASE-security``
+ - ``proposed`` => ``$RELEASE-proposed``
+ - ``release`` => ``$RELEASE``.
+
+ When a suite is disabled using ``disable_suites``,
+ its entry in ``sources.list`` is not deleted; it
+ is just commented out.""")
+ },
+ 'primary': {
+ **mirror_property,
+ 'description': dedent("""\
+ The primary and security archive mirrors can
+ be specified using the ``primary`` and
+ ``security`` keys, respectively. Both the
+ ``primary`` and ``security`` keys take a list
+ of configs, allowing mirrors to be specified
+ on a per-architecture basis. Each config is a
+ dictionary which must have an entry for
+ ``arches``, specifying which architectures
+ that config entry is for. The keyword
+ ``default`` applies to any architecture not
+ explicitly listed. The mirror url can be specified
+ with the ``uri`` key, or a list of mirrors to
+ check can be provided in order, with the first
+ mirror that can be resolved being selected. This
+ allows the same configuration to be used in
+ different environments, with different hosts used
+ for a local apt mirror. If no mirror is provided
+ by ``uri`` or ``search``, ``search_dns`` may be
+ used to search for dns names in the format
+ ``<distro>-mirror`` in each of the following:
+
+ - fqdn of this host per cloud metadata,
+ - localdomain,
+ - domains listed in ``/etc/resolv.conf``.
+
+ If there is a dns entry for ``<distro>-mirror``,
+ then it is assumed that there is a distro mirror
+ at ``http://<distro>-mirror.<domain>/<distro>``.
+ If the ``primary`` key is defined, but not the
+ ``security`` key, then the configuration for
+ ``primary`` is also used for ``security``.
+ If ``search_dns`` is used for the ``security``
+ key, the search pattern will be
+ ``<distro>-security-mirror``.
+
+ If no mirrors are specified, or all lookups fail,
+ then default mirrors defined in the datasource
+ are used. If none are present in the datasource
+ either the following defaults are used:
+
+ - ``primary`` => \
+ ``http://archive.ubuntu.com/ubuntu``.
+ - ``security`` => \
+ ``http://security.ubuntu.com/ubuntu``
+ """)},
+ 'security': {
+ **mirror_property,
+ 'description': dedent("""\
+ Please refer to the primary config documentation""")
+ },
+ 'add_apt_repo_match': {
+ 'type': 'string',
+ 'default': ADD_APT_REPO_MATCH,
+ 'description': dedent("""\
+ All source entries in ``apt-sources`` that match
+ regex in ``add_apt_repo_match`` will be added to
+ the system using ``add-apt-repository``. If
+ ``add_apt_repo_match`` is not specified, it
+ defaults to ``{}``""".format(ADD_APT_REPO_MATCH))
+ },
+ 'debconf_selections': {
+ 'type': 'object',
+ 'items': {'type': 'string'},
+ 'description': dedent("""\
+ Debconf additional configurations can be specified as a
+ dictionary under the ``debconf_selections`` config
+ key, with each key in the dict representing a
+ different set of configurations. The value of each key
+ must be a string containing all the debconf
+ configurations that must be applied. We will bundle
+ all of the values and pass them to
+ ``debconf-set-selections``. Therefore, each value line
+ must be a valid entry for ``debconf-set-selections``,
+ meaning that they must possess four distinct fields:
+
+ ``pkgname question type answer``
+
+ Where:
+
+ - ``pkgname`` is the name of the package.
+ - ``question`` is the name of the question.
+ - ``type`` is the type of question.
+ - ``answer`` is the value used to answer the \
+ question.
+
+ For example: \
+ ``ippackage ippackage/ip string 127.0.0.1``
+ """)
+ },
+ 'sources_list': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Specifies a custom template for rendering
+ ``sources.list`` . If no ``sources_list`` template
+ is given, cloud-init will use sane default. Within
+ this template, the following strings will be
+ replaced with the appropriate values:
+
+ - ``$MIRROR``
+ - ``$RELEASE``
+ - ``$PRIMARY``
+ - ``$SECURITY``""")
+ },
+ 'conf': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Specify configuration for apt, such as proxy
+ configuration. This configuration is specified as a
+ string. For multiline apt configuration, make sure
+ to follow yaml syntax.""")
+ },
+ 'https_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify https apt proxy.
+ https proxy url is specified in the format
+ ``https://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'http_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify http apt proxy.
+ http proxy url is specified in the format
+ ``http://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'proxy': {
+ 'type': 'string',
+ 'description': 'Alias for defining a http apt proxy.'
+ },
+ 'ftp_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify ftp apt proxy.
+ ftp proxy url is specified in the format
+ ``ftp://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'sources': {
+ 'type': 'object',
+ 'items': {'type': 'string'},
+ 'description': dedent("""\
+ Source list entries can be specified as a
+ dictionary under the ``sources`` config key, with
+ each key in the dict representing a different source
+ file. The key of each source entry will be used
+ as an id that can be referenced in other config
+ entries, as well as the filename for the source's
+ configuration under ``/etc/apt/sources.list.d``.
+ If the name does not end with ``.list``, it will
+ be appended. If there is no configuration for a
+ key in ``sources``, no file will be written, but
+ the key may still be referred to as an id in other
+ ``sources`` entries.
+
+ Each entry under ``sources`` is a dictionary which
+ may contain any of the following optional keys:
+
+ - ``source``: a sources.list entry \
+ (some variable replacements apply).
+ - ``keyid``: a key to import via shortid or \
+ fingerprint.
+ - ``key``: a raw PGP key.
+ - ``keyserver``: alternate keyserver to pull \
+ ``keyid`` key from.
+
+ The ``source`` key supports variable
+ replacements for the following strings:
+
+ - ``$MIRROR``
+ - ``$PRIMARY``
+ - ``$SECURITY``
+ - ``$RELEASE``""")
}
}
- proxy: "http://[[user][:pass]@]host[:port]/"
- http_proxy: "http://[[user][:pass]@]host[:port]/"
- ftp_proxy: "ftp://[[user][:pass]@]host[:port]/"
- https_proxy: "https://[[user][:pass]@]host[:port]/"
- sources:
- source1:
- keyid: "keyid"
- keyserver: "keyserverurl"
- source: "deb http://<url>/ xenial main"
- source2:
- source: "ppa:<ppa-name>"
- source3:
- source: "deb $MIRROR $RELEASE multiverse"
- key: |
- ------BEGIN PGP PUBLIC KEY BLOCK-------
- <key data>
- ------END PGP PUBLIC KEY BLOCK-------
-"""
-
-import glob
-import os
-import re
-
-from cloudinit import gpg
-from cloudinit import log as logging
-from cloudinit import templater
-from cloudinit import util
+ }
+ }
+}
-LOG = logging.getLogger(__name__)
+__doc__ = get_schema_doc(schema)
-# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
# place where apt stores cached repository data
APT_LISTS = "/var/lib/apt/lists"
@@ -279,6 +423,7 @@ def handle(name, ocfg, cloud, log, _):
"Expected dictionary for 'apt' config, found {config_type}".format(
config_type=type(cfg)))
+ validate_cloudconfig_schema(cfg, schema)
apply_debconf_selections(cfg, target)
apply_apt(cfg, cloud, target)
@@ -287,7 +432,7 @@ def _should_configure_on_empty_apt():
# if no config was provided, should apt configuration be done?
if util.system_is_snappy():
return False, "system is snappy."
- if not (util.which('apt-get') or util.which('apt')):
+ if not (subp.which('apt-get') or subp.which('apt')):
return False, "no apt commands."
return True, "Apt is available."
@@ -334,7 +479,7 @@ def apply_apt(cfg, cloud, target):
def debconf_set_selections(selections, target=None):
if not selections.endswith(b'\n'):
selections += b'\n'
- util.subp(['debconf-set-selections'], data=selections, target=target,
+ subp.subp(['debconf-set-selections'], data=selections, target=target,
capture=True)
@@ -359,7 +504,7 @@ def dpkg_reconfigure(packages, target=None):
"but cannot be unconfigured: %s", unhandled)
if len(to_config):
- util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
+ subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
list(to_config), data=None, target=target, capture=True)
@@ -402,7 +547,7 @@ def apply_debconf_selections(cfg, target=None):
def clean_cloud_init(target):
"""clean out any local cloud-init config"""
flist = glob.glob(
- util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+ subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
LOG.debug("cleaning cloud-init config from: %s", flist)
for dpkg_cfg in flist:
@@ -431,7 +576,7 @@ def rename_apt_lists(new_mirrors, target, arch):
"""rename_apt_lists - rename apt lists to preserve old cache data"""
default_mirrors = get_default_mirrors(arch)
- pre = util.target_path(target, APT_LISTS)
+ pre = subp.target_path(target, APT_LISTS)
for (name, omirror) in default_mirrors.items():
nmirror = new_mirrors.get(name)
if not nmirror:
@@ -550,8 +695,8 @@ def add_apt_key_raw(key, target=None):
"""
LOG.debug("Adding key:\n'%s'", key)
try:
- util.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
- except util.ProcessExecutionError:
+ subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
+ except subp.ProcessExecutionError:
LOG.exception("failed to add apt GPG Key to apt keyring")
raise
@@ -614,13 +759,13 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
if aa_repo_match(source):
try:
- util.subp(["add-apt-repository", source], target=target)
- except util.ProcessExecutionError:
+ subp.subp(["add-apt-repository", source], target=target)
+ except subp.ProcessExecutionError:
LOG.exception("add-apt-repository failed.")
raise
continue
- sourcefn = util.target_path(target, ent['filename'])
+ sourcefn = subp.target_path(target, ent['filename'])
try:
contents = "%s\n" % (source)
util.write_file(sourcefn, contents, omode="a")
@@ -763,25 +908,6 @@ def convert_to_v3_apt_format(cfg):
return cfg
-def search_for_mirror(candidates):
- """
- Search through a list of mirror urls for one that works
- This needs to return quickly.
- """
- if candidates is None:
- return None
-
- LOG.debug("search for mirror in candidates: '%s'", candidates)
- for cand in candidates:
- try:
- if util.is_resolvable_url(cand):
- LOG.debug("found working mirror: '%s'", cand)
- return cand
- except Exception:
- pass
- return None
-
-
def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
"""
Try to resolve a list of predefines DNS names to pick mirrors
@@ -813,7 +939,7 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
for post in doms:
mirror_list.append(mirrorfmt % (post))
- mirror = search_for_mirror(mirror_list)
+ mirror = util.search_for_mirror(mirror_list)
return mirror
@@ -876,7 +1002,7 @@ def get_mirror(cfg, mirrortype, arch, cloud):
# fallback to search if specified
if mirror is None:
# list of mirrors to try to resolve
- mirror = search_for_mirror(mcfg.get("search", None))
+ mirror = util.search_for_mirror(mcfg.get("search", None))
# fallback to search_dns if specified
if mirror is None:
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 225d0905..aa186ce2 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -9,7 +9,7 @@ Apt Pipelining
--------------
**Summary:** configure apt pipelining
-This module configures apt's ``Acquite::http::Pipeline-Depth`` option, whcih
+This module configures apt's ``Acquite::http::Pipeline-Depth`` option, which
controls how apt handles HTTP pipelining. It may be useful for pipelining to be
disabled, because some web servers, such as S3 do not pipeline properly (LP:
#948461). The ``apt_pipelining`` config key may be set to ``false`` to disable
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 6813f534..246e4497 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -16,6 +16,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
from cloudinit import temp_utils
+from cloudinit import subp
from cloudinit import util
frequency = PER_ALWAYS
@@ -99,7 +100,7 @@ def handle(name, cfg, cloud, log, _args):
if iid:
env['INSTANCE_ID'] = str(iid)
cmd = ['/bin/sh', tmpf.name]
- util.subp(cmd, env=env, capture=False)
+ subp.subp(cmd, env=env, capture=False)
except Exception:
util.logexc(log, "Failed to run bootcmd module %s", name)
raise
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 0b4352c8..9fdaeba1 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -39,6 +39,7 @@ Valid configuration options for this module are:
"""
from cloudinit.distros import ug_util
+from cloudinit import subp
from cloudinit import util
distros = ['ubuntu', 'debian']
@@ -93,6 +94,6 @@ def handle(name, cfg, cloud, log, args):
if len(shcmd):
cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
log.debug("Setting byobu to %s", value)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 64bc900e..3c453d91 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -16,11 +16,16 @@ can be removed from the system with the configuration option
certificates must be specified using valid yaml. in order to specify a
multiline certificate, the yaml multiline list syntax must be used
+.. note::
+ For Alpine Linux the "remove-defaults" functionality works if the
+ ca-certificates package is installed but not if the
+ ca-certificates-bundle package is installed.
+
**Internal name:** ``cc_ca_certs``
**Module frequency:** per instance
-**Supported distros:** ubuntu, debian
+**Supported distros:** alpine, debian, ubuntu
**Config keys**::
@@ -36,6 +41,7 @@ can be removed from the system with the configuration option
import os
+from cloudinit import subp
from cloudinit import util
CA_CERT_PATH = "/usr/share/ca-certificates/"
@@ -44,14 +50,14 @@ CA_CERT_CONFIG = "/etc/ca-certificates.conf"
CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-distros = ['ubuntu', 'debian']
+distros = ['alpine', 'debian', 'ubuntu']
def update_ca_certs():
"""
Updates the CA certificate cache on the current machine.
"""
- util.subp(["update-ca-certificates"], capture=False)
+ subp.subp(["update-ca-certificates"], capture=False)
def add_ca_certs(certs):
@@ -66,17 +72,23 @@ def add_ca_certs(certs):
cert_file_contents = "\n".join([str(c) for c in certs])
util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
- # causes subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(CA_CERT_CONFIG)
- cur_cont = '\n'.join([line for line in orig.splitlines()
- if line != CA_CERT_FILENAME])
- out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
+ if os.stat(CA_CERT_CONFIG).st_size == 0:
+ # If the CA_CERT_CONFIG file is empty (i.e. all existing
+ # CA certs have been deleted) then simply output a single
+ # line with the cloud-init cert filename.
+ out = "%s\n" % CA_CERT_FILENAME
+ else:
+ # Append cert filename to CA_CERT_CONFIG file.
+ # We have to strip the content because blank lines in the file
+ # causes subsequent entries to be ignored. (LP: #1077020)
+ orig = util.load_file(CA_CERT_CONFIG)
+ cur_cont = '\n'.join([line for line in orig.splitlines()
+ if line != CA_CERT_FILENAME])
+ out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
util.write_file(CA_CERT_CONFIG, out, omode="wb")
-def remove_default_ca_certs():
+def remove_default_ca_certs(distro_name):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
@@ -84,11 +96,14 @@ def remove_default_ca_certs():
util.delete_dir_contents(CA_CERT_PATH)
util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
util.write_file(CA_CERT_CONFIG, "", mode=0o644)
- debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
- util.subp(('debconf-set-selections', '-'), debconf_sel)
+
+ if distro_name != 'alpine':
+ debconf_sel = (
+ "ca-certificates ca-certificates/trust_new_crts " + "select no")
+ subp.subp(('debconf-set-selections', '-'), debconf_sel)
-def handle(name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
"""
Call to handle ca-cert sections in cloud-config file.
@@ -110,7 +125,7 @@ def handle(name, cfg, _cloud, log, _args):
# default trusted CA certs first.
if ca_cert_cfg.get("remove-defaults", False):
log.debug("Removing default certificates")
- remove_default_ca_certs()
+ remove_default_ca_certs(cloud.distro.name)
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 01d61fa1..aaf71366 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -6,78 +6,22 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Chef
-----
-**Summary:** module that configures, starts and installs chef.
-
-This module enables chef to be installed (from packages or
-from gems, or from omnibus). Before this occurs chef configurations are
-written to disk (validation.pem, client.pem, firstboot.json, client.rb),
-and needed chef folders/directories are created (/etc/chef and /var/log/chef
-and so-on). Then once installing proceeds correctly if configured chef will
-be started (in daemon mode or in non-daemon mode) and then once that has
-finished (if ran in non-daemon mode this will be when chef finishes
-converging, if ran in daemon mode then no further actions are possible since
-chef will have forked into its own process) then a post run function can
-run that can do finishing activities (such as removing the validation pem
-file).
-
-**Internal name:** ``cc_chef``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- chef:
- directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
- /var/cache/chef, /var/backups/chef, /var/run/chef)
- validation_cert: (optional string to be written to file validation_key)
- special value 'system' means set use existing file
- validation_key: (optional the path for validation_cert. default
- /etc/chef/validation.pem)
- firstboot_path: (path to write run_list and initial_attributes keys that
- should also be present in this configuration, defaults
- to /etc/chef/firstboot.json)
- exec: boolean to run or not run chef (defaults to false, unless
- a gem installed is requested
- where this will then default
- to true)
-
- chef.rb template keys (if falsey, then will be skipped and not
- written to /etc/chef/client.rb)
-
- chef:
- client_key:
- encrypted_data_bag_secret:
- environment:
- file_backup_path:
- file_cache_path:
- json_attribs:
- log_level:
- log_location:
- node_name:
- omnibus_url:
- omnibus_url_retries:
- omnibus_version:
- pid_file:
- server_url:
- show_time:
- ssl_verify_mode:
- validation_cert:
- validation_key:
- validation_name:
-"""
+"""Chef: module that configures, starts and installs chef."""
import itertools
import json
import os
+from textwrap import dedent
+from cloudinit import subp
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import templater
+from cloudinit import temp_utils
from cloudinit import url_helper
from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
+
RUBY_VERSION_DEFAULT = "1.8"
@@ -98,6 +42,8 @@ OMNIBUS_URL = "https://www.chef.io/chef/install.sh"
OMNIBUS_URL_RETRIES = 5
CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
+CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret'
+CHEF_ENVIRONMENT = '_default'
CHEF_FB_PATH = '/etc/chef/firstboot.json'
CHEF_RB_TPL_DEFAULTS = {
# These are ruby symbols...
@@ -107,11 +53,11 @@ CHEF_RB_TPL_DEFAULTS = {
'log_location': '/var/log/chef/client.log',
'validation_key': CHEF_VALIDATION_PEM_PATH,
'validation_cert': None,
- 'client_key': "/etc/chef/client.pem",
+ 'client_key': '/etc/chef/client.pem',
'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': "/var/cache/chef",
- 'file_backup_path': "/var/backups/chef",
- 'pid_file': "/var/run/chef/client.pid",
+ 'file_cache_path': '/var/cache/chef',
+ 'file_backup_path': '/var/backups/chef',
+ 'pid_file': '/var/run/chef/client.pid',
'show_time': True,
'encrypted_data_bag_secret': None,
}
@@ -122,9 +68,9 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([
'client_key',
'file_cache_path',
'json_attribs',
- 'file_cache_path',
'pid_file',
'encrypted_data_bag_secret',
+ 'chef_license',
])
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
@@ -141,12 +87,277 @@ CHEF_EXEC_PATH = '/usr/bin/chef-client'
CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
-def is_installed():
- if not os.path.isfile(CHEF_EXEC_PATH):
- return False
- if not os.access(CHEF_EXEC_PATH, os.X_OK):
- return False
- return True
+frequency = PER_ALWAYS
+distros = ["all"]
+schema = {
+ 'id': 'cc_chef',
+ 'name': 'Chef',
+ 'title': 'module that configures, starts and installs chef',
+ 'description': dedent("""\
+ This module enables chef to be installed (from packages,
+ gems, or from omnibus). Before this occurs, chef configuration is
+ written to disk (validation.pem, client.pem, firstboot.json,
+ client.rb), and required directories are created (/etc/chef and
+ /var/log/chef and so-on). If configured, chef will be
+ installed and started in either daemon or non-daemon mode.
+ If run in non-daemon mode, post run actions are executed to do
+ finishing activities such as removing validation.pem."""),
+ 'distros': distros,
+ 'examples': [dedent("""
+ chef:
+ directories:
+ - /etc/chef
+ - /var/log/chef
+ validation_cert: system
+ install_type: omnibus
+ initial_attributes:
+ apache:
+ prefork:
+ maxclients: 100
+ keepalive: off
+ run_list:
+ - recipe[apache2]
+ - role[db]
+ encrypted_data_bag_secret: /etc/chef/encrypted_data_bag_secret
+ environment: _default
+ log_level: :auto
+ omnibus_url_retries: 2
+ server_url: https://chef.yourorg.com:4000
+ ssl_verify_mode: :verify_peer
+ validation_name: yourorg-validator""")],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'chef': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'directories': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ Create the necessary directories for chef to run. By
+ default, it creates the following directories:
+
+ {chef_dirs}""").format(
+ chef_dirs="\n".join(
+ [" - ``{}``".format(d) for d in CHEF_DIRS]
+ )
+ )
+ },
+ 'validation_cert': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Optional string to be written to file validation_key.
+ Special value ``system`` means set use existing file.
+ """)
+ },
+ 'validation_key': {
+ 'type': 'string',
+ 'default': CHEF_VALIDATION_PEM_PATH,
+ 'description': dedent("""\
+ Optional path for validation_cert. default to
+ ``{}``.""".format(CHEF_VALIDATION_PEM_PATH))
+ },
+ 'firstboot_path': {
+ 'type': 'string',
+ 'default': CHEF_FB_PATH,
+ 'description': dedent("""\
+ Path to write run_list and initial_attributes keys that
+ should also be present in this configuration, defaults
+ to ``{}``.""".format(CHEF_FB_PATH))
+ },
+ 'exec': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ define if we should run or not run chef (defaults to
+ false, unless a gem installed is requested where this
+ will then default to true).""")
+ },
+ 'client_key': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['client_key'],
+ 'description': dedent("""\
+ Optional path for client_cert. default to
+ ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key']))
+ },
+ 'encrypted_data_bag_secret': {
+ 'type': 'string',
+ 'default': None,
+ 'description': dedent("""\
+ Specifies the location of the secret key used by chef
+ to encrypt data items. By default, this path is set
+ to None, meaning that chef will have to look at the
+ path ``{}`` for it.
+ """.format(CHEF_ENCRYPTED_DATA_BAG_PATH))
+ },
+ 'environment': {
+ 'type': 'string',
+ 'default': CHEF_ENVIRONMENT,
+ 'description': dedent("""\
+ Specifies which environment chef will use. By default,
+ it will use the ``{}`` configuration.
+ """.format(CHEF_ENVIRONMENT))
+ },
+ 'file_backup_path': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'],
+ 'description': dedent("""\
+ Specifies the location in which backup files are
+ stored. By default, it uses the
+ ``{}`` location.""".format(
+ CHEF_RB_TPL_DEFAULTS['file_backup_path']))
+ },
+ 'file_cache_path': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'],
+ 'description': dedent("""\
+ Specifies the location in which chef cache files will
+ be saved. By default, it uses the ``{}``
+ location.""".format(
+ CHEF_RB_TPL_DEFAULTS['file_cache_path']))
+ },
+ 'json_attribs': {
+ 'type': 'string',
+ 'default': CHEF_FB_PATH,
+ 'description': dedent("""\
+ Specifies the location in which some chef json data is
+ stored. By default, it uses the
+ ``{}`` location.""".format(CHEF_FB_PATH))
+ },
+ 'log_level': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['log_level'],
+ 'description': dedent("""\
+ Defines the level of logging to be stored in the log
+ file. By default this value is set to ``{}``.
+ """.format(CHEF_RB_TPL_DEFAULTS['log_level']))
+ },
+ 'log_location': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['log_location'],
+ 'description': dedent("""\
+                        Specifies the location of the chef log file. By
+ default, the location is specified at
+ ``{}``.""".format(
+ CHEF_RB_TPL_DEFAULTS['log_location']))
+ },
+ 'node_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The name of the node to run. By default, we will
+                        use the instance id as the node name.""")
+ },
+ 'omnibus_url': {
+ 'type': 'string',
+ 'default': OMNIBUS_URL,
+ 'description': dedent("""\
+ Omnibus URL if chef should be installed through
+ Omnibus. By default, it uses the
+ ``{}``.""".format(OMNIBUS_URL))
+ },
+ 'omnibus_url_retries': {
+ 'type': 'integer',
+ 'default': OMNIBUS_URL_RETRIES,
+ 'description': dedent("""\
+ The number of retries that will be attempted to reach
+ the Omnibus URL""")
+ },
+ 'omnibus_version': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Optional version string to require for omnibus
+ install.""")
+ },
+ 'pid_file': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['pid_file'],
+ 'description': dedent("""\
+ The location in which a process identification
+ number (pid) is saved. By default, it saves
+ in the ``{}`` location.""".format(
+ CHEF_RB_TPL_DEFAULTS['pid_file']))
+ },
+ 'server_url': {
+ 'type': 'string',
+ 'description': 'The URL for the chef server'
+ },
+ 'show_time': {
+ 'type': 'boolean',
+ 'default': True,
+ 'description': 'Show time in chef logs'
+ },
+ 'ssl_verify_mode': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'],
+ 'description': dedent("""\
+ Set the verify mode for HTTPS requests. We can have
+ two possible values for this parameter:
+
+ - ``:verify_none``: No validation of SSL \
+ certificates.
+ - ``:verify_peer``: Validate all SSL certificates.
+
+ By default, the parameter is set as ``{}``.
+ """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode']))
+ },
+ 'validation_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The name of the chef-validator key that Chef Infra
+ Client uses to access the Chef Infra Server during
+ the initial Chef Infra Client run.""")
+ },
+ 'force_install': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ If set to ``True``, forces chef installation, even
+ if it is already installed.""")
+ },
+ 'initial_attributes': {
+ 'type': 'object',
+ 'items': {
+ 'type': 'string'
+ },
+ 'description': dedent("""\
+ Specify a list of initial attributes used by the
+ cookbooks.""")
+ },
+ 'install_type': {
+ 'type': 'string',
+ 'default': 'packages',
+ 'description': dedent("""\
+ The type of installation for chef. It can be one of
+ the following values:
+
+ - ``packages``
+ - ``gems``
+ - ``omnibus``""")
+ },
+ 'run_list': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'description': 'A run list for a first boot json.'
+ },
+ "chef_license": {
+ 'type': 'string',
+ 'description': dedent("""\
+ string that indicates if user accepts or not license
+ related to some of chef products""")
+ }
+ }
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema)
def post_run_chef(chef_cfg, log):
@@ -196,6 +407,8 @@ def handle(name, cfg, cloud, log, _args):
log.debug(("Skipping module named %s,"
" no 'chef' key in configuration"), name)
return
+
+ validate_cloudconfig_schema(cfg, schema)
chef_cfg = cfg['chef']
# Ensure the chef directories we use exist
@@ -223,7 +436,7 @@ def handle(name, cfg, cloud, log, _args):
iid = str(cloud.datasource.get_instance_id())
params = get_template_params(iid, chef_cfg, log)
# Do a best effort attempt to ensure that the template values that
- # are associated with paths have there parent directory created
+ # are associated with paths have their parent directory created
# before they are used by the chef-client itself.
param_paths = set()
for (k, v) in params.items():
@@ -253,9 +466,10 @@ def handle(name, cfg, cloud, log, _args):
# Try to install chef, if its not already installed...
force_install = util.get_cfg_option_bool(chef_cfg,
'force_install', default=False)
- if not is_installed() or force_install:
+ installed = subp.is_exe(CHEF_EXEC_PATH)
+ if not installed or force_install:
run = install_chef(cloud, chef_cfg, log)
- elif is_installed():
+ elif installed:
run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
else:
run = False
@@ -280,7 +494,32 @@ def run_chef(chef_cfg, log):
cmd.extend(CHEF_EXEC_DEF_ARGS)
else:
cmd.extend(CHEF_EXEC_DEF_ARGS)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
+
+
+def subp_blob_in_tempfile(blob, *args, **kwargs):
+ """Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
+
+ 'basename' as a kwarg allows providing the basename for the file.
+ The 'args' argument to subp will be updated with the full path to the
+ filename as the first argument.
+ """
+ basename = kwargs.pop('basename', "subp_blob")
+
+ if len(args) == 0 and 'args' not in kwargs:
+ args = [tuple()]
+
+ # Use tmpdir over tmpfile to avoid 'text file busy' on execute
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmpf = os.path.join(tmpd, basename)
+ if 'args' in kwargs:
+ kwargs['args'] = [tmpf] + list(kwargs['args'])
+ else:
+ args = list(args)
+ args[0] = [tmpf] + args[0]
+
+ util.write_file(tmpf, blob, mode=0o700)
+ return subp.subp(*args, **kwargs)
def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
@@ -303,7 +542,7 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
else:
args = ['-v', omnibus_version]
content = url_helper.readurl(url=url, retries=retries).contents
- return util.subp_blob_in_tempfile(
+ return subp_blob_in_tempfile(
blob=content, args=args,
basename='chef-omnibus-install', capture=False)
@@ -352,11 +591,11 @@ def install_chef_from_gems(ruby_version, chef_version, distro):
if not os.path.exists('/usr/bin/ruby'):
util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
if chef_version:
- util.subp(['/usr/bin/gem', 'install', 'chef',
+ subp.subp(['/usr/bin/gem', 'install', 'chef',
'-v %s' % chef_version, '--no-ri',
'--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
else:
- util.subp(['/usr/bin/gem', 'install', 'chef',
+ subp.subp(['/usr/bin/gem', 'install', 'chef',
'--no-ri', '--no-rdoc', '--bindir',
'/usr/bin', '-q'], capture=False)
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 885b3138..dff93245 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -26,6 +26,7 @@ by default.
disable_ec2_metadata: <true/false>
"""
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_ALWAYS
@@ -40,15 +41,15 @@ def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
reject_cmd = None
- if util.which('ip'):
+ if subp.which('ip'):
reject_cmd = REJECT_CMD_IP
- elif util.which('ifconfig'):
+ elif subp.which('ifconfig'):
reject_cmd = REJECT_CMD_IF
else:
log.error(('Neither "route" nor "ip" command found, unable to '
'manipulate routing table'))
return
- util.subp(reject_cmd, capture=False)
+ subp.subp(reject_cmd, capture=False)
else:
log.debug(("Skipping module named %s,"
" disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 0796cb7b..a7bdc703 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -99,6 +99,7 @@ specified using ``filesystem``.
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
+from cloudinit import subp
import logging
import os
import shlex
@@ -106,13 +107,13 @@ import shlex
frequency = PER_INSTANCE
# Define the commands to use
-UDEVADM_CMD = util.which('udevadm')
-SFDISK_CMD = util.which("sfdisk")
-SGDISK_CMD = util.which("sgdisk")
-LSBLK_CMD = util.which("lsblk")
-BLKID_CMD = util.which("blkid")
-BLKDEV_CMD = util.which("blockdev")
-WIPEFS_CMD = util.which("wipefs")
+UDEVADM_CMD = subp.which('udevadm')
+SFDISK_CMD = subp.which("sfdisk")
+SGDISK_CMD = subp.which("sgdisk")
+LSBLK_CMD = subp.which("lsblk")
+BLKID_CMD = subp.which("blkid")
+BLKDEV_CMD = subp.which("blockdev")
+WIPEFS_CMD = subp.which("wipefs")
LANG_C_ENV = {'LANG': 'C'}
@@ -163,7 +164,7 @@ def handle(_name, cfg, cloud, log, _args):
def update_disk_setup_devices(disk_setup, tformer):
# update 'disk_setup' dictionary anywhere were a device may occur
# update it with the response from 'tformer'
- for origname in disk_setup.keys():
+ for origname in list(disk_setup):
transformed = tformer(origname)
if transformed is None or transformed == origname:
continue
@@ -248,9 +249,11 @@ def enumerate_disk(device, nodeps=False):
info = None
try:
- info, _err = util.subp(lsblk_cmd)
+ info, _err = subp.subp(lsblk_cmd)
except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
+ raise Exception(
+ "Failed during disk check for %s\n%s" % (device, e)
+ ) from e
parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
@@ -310,9 +313,11 @@ def check_fs(device):
blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
try:
- out, _err = util.subp(blkid_cmd, rcs=[0, 2])
+ out, _err = subp.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
+ raise Exception(
+ "Failed during disk check for %s\n%s" % (device, e)
+ ) from e
if out:
if len(out.splitlines()) == 1:
@@ -427,16 +432,16 @@ def get_dyn_func(*args):
else:
return globals()[func_name]
- except KeyError:
- raise Exception("No such function %s to call!" % func_name)
+ except KeyError as e:
+ raise Exception("No such function %s to call!" % func_name) from e
def get_hdd_size(device):
try:
- size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device])
- sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device])
+ size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device])
+ sector_size, _ = subp.subp([BLKDEV_CMD, '--getss', device])
except Exception as e:
- raise Exception("Failed to get %s size\n%s" % (device, e))
+ raise Exception("Failed to get %s size\n%s" % (device, e)) from e
return int(size_in_bytes) / int(sector_size)
@@ -452,10 +457,11 @@ def check_partition_mbr_layout(device, layout):
read_parttbl(device)
prt_cmd = [SFDISK_CMD, "-l", device]
try:
- out, _err = util.subp(prt_cmd, data="%s\n" % layout)
+ out, _err = subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
+ raise Exception(
+ "Error running partition command on %s\n%s" % (device, e)
+ ) from e
found_layout = []
for line in out.splitlines():
@@ -482,10 +488,11 @@ def check_partition_mbr_layout(device, layout):
def check_partition_gpt_layout(device, layout):
prt_cmd = [SGDISK_CMD, '-p', device]
try:
- out, _err = util.subp(prt_cmd, update_env=LANG_C_ENV)
+ out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
+ raise Exception(
+ "Error running partition command on %s\n%s" % (device, e)
+ ) from e
out_lines = iter(out.splitlines())
# Skip header. Output looks like:
@@ -655,9 +662,11 @@ def purge_disk(device):
wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
try:
LOG.info("Purging filesystem on /dev/%s", d['name'])
- util.subp(wipefs_cmd)
- except Exception:
- raise Exception("Failed FS purge of /dev/%s" % d['name'])
+ subp.subp(wipefs_cmd)
+ except Exception as e:
+ raise Exception(
+ "Failed FS purge of /dev/%s" % d['name']
+ ) from e
purge_disk_ptable(device)
@@ -682,7 +691,7 @@ def read_parttbl(device):
blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
util.udevadm_settle()
try:
- util.subp(blkdev_cmd)
+ subp.subp(blkdev_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
@@ -697,25 +706,27 @@ def exec_mkpart_mbr(device, layout):
# Create the partitions
prt_cmd = [SFDISK_CMD, "--Linux", "--unit=S", "--force", device]
try:
- util.subp(prt_cmd, data="%s\n" % layout)
+ subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception("Failed to partition device %s\n%s" % (device, e))
+ raise Exception(
+ "Failed to partition device %s\n%s" % (device, e)
+ ) from e
read_parttbl(device)
def exec_mkpart_gpt(device, layout):
try:
- util.subp([SGDISK_CMD, '-Z', device])
+ subp.subp([SGDISK_CMD, '-Z', device])
for index, (partition_type, (start, end)) in enumerate(layout):
index += 1
- util.subp([SGDISK_CMD,
+ subp.subp([SGDISK_CMD,
'-n', '{}:{}:{}'.format(index, start, end), device])
if partition_type is not None:
# convert to a 4 char (or more) string right padded with 0
# 82 -> 8200. 'Linux' -> 'Linux'
pinput = str(partition_type).ljust(4, "0")
- util.subp(
+ subp.subp(
[SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device])
except Exception:
LOG.warning("Failed to partition device %s", device)
@@ -967,9 +978,9 @@ def mkfs(fs_cfg):
fs_cmd)
else:
# Find the mkfs command
- mkfs_cmd = util.which("mkfs.%s" % fs_type)
+ mkfs_cmd = subp.which("mkfs.%s" % fs_type)
if not mkfs_cmd:
- mkfs_cmd = util.which("mk%s" % fs_type)
+ mkfs_cmd = subp.which("mk%s" % fs_type)
if not mkfs_cmd:
LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
@@ -994,8 +1005,8 @@ def mkfs(fs_cfg):
LOG.debug("Creating file system %s on %s", label, device)
LOG.debug(" Using cmd: %s", str(fs_cmd))
try:
- util.subp(fs_cmd, shell=shell)
+ subp.subp(fs_cmd, shell=shell)
except Exception as e:
- raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
+ raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index b342e04d..b1d99f97 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -25,7 +25,7 @@ import os
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
+from cloudinit import subp
frequency = PER_ALWAYS
@@ -43,9 +43,9 @@ def is_upstart_system():
del myenv['UPSTART_SESSION']
check_cmd = ['initctl', 'version']
try:
- (out, _err) = util.subp(check_cmd, env=myenv)
+ (out, _err) = subp.subp(check_cmd, env=myenv)
return 'upstart' in out
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.debug("'%s' returned '%s', not using upstart",
' '.join(check_cmd), e.exit_code)
return False
@@ -66,7 +66,7 @@ def handle(name, _cfg, cloud, log, args):
for n in event_names:
cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
try:
- util.subp(cmd)
+ subp.subp(cmd)
except Exception as e:
# TODO(harlowja), use log exception from utils??
log.warning("Emission of upstart event %s failed due to: %s", n, e)
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 0a135bbe..77984bca 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -39,6 +39,7 @@ If cloud-init sees a ``fan`` entry in cloud-config it will:
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -62,8 +63,8 @@ def stop_update_start(service, config_file, content, systemd=False):
def run(cmd, msg):
try:
- return util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ return subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
LOG.warning("failed: %s (%s): %s", service, cmd, e)
return False
@@ -94,7 +95,7 @@ def handle(name, cfg, cloud, log, args):
util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
distro = cloud.distro
- if not util.which('fanctl'):
+ if not subp.which('fanctl'):
distro.install_packages(['ubuntu-fan'])
stop_update_start(
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index fd141541..3441f7a9 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -78,7 +78,7 @@ def handle(_name, cfg, cloud, log, args):
boot_fin_fn = cloud.paths.boot_finished
try:
contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
- util.write_file(boot_fin_fn, contents)
+ util.write_file(boot_fin_fn, contents, ensure_dir_exists=False)
except Exception:
util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 1b512a06..237c3d02 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -70,6 +70,7 @@ import stat
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
+from cloudinit import subp
from cloudinit import util
frequency = PER_ALWAYS
@@ -131,30 +132,30 @@ class ResizeGrowPart(object):
myenv['LANG'] = 'C'
try:
- (out, _err) = util.subp(["growpart", "--help"], env=myenv)
+ (out, _err) = subp.subp(["growpart", "--help"], env=myenv)
if re.search(r"--update\s+", out):
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return False
def resize(self, diskdev, partnum, partdev):
before = get_size(partdev)
try:
- util.subp(["growpart", '--dry-run', diskdev, partnum])
- except util.ProcessExecutionError as e:
+ subp.subp(["growpart", '--dry-run', diskdev, partnum])
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
diskdev, partnum)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
return (before, before)
try:
- util.subp(["growpart", diskdev, partnum])
- except util.ProcessExecutionError as e:
+ subp.subp(["growpart", diskdev, partnum])
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
return (before, get_size(partdev))
@@ -165,11 +166,11 @@ class ResizeGpart(object):
myenv['LANG'] = 'C'
try:
- (_out, err) = util.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
+ (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
if re.search(r"gpart recover ", err):
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return False
@@ -182,18 +183,18 @@ class ResizeGpart(object):
be recovered.
"""
try:
- util.subp(["gpart", "recover", diskdev])
- except util.ProcessExecutionError as e:
+ subp.subp(["gpart", "recover", diskdev])
+ except subp.ProcessExecutionError as e:
if e.exit_code != 0:
util.logexc(LOG, "Failed: gpart recover %s", diskdev)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
before = get_size(partdev)
try:
- util.subp(["gpart", "resize", "-i", partnum, diskdev])
- except util.ProcessExecutionError as e:
+ subp.subp(["gpart", "resize", "-i", partnum, diskdev])
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
# Since growing the FS requires a reboot, make sure we reboot
# first when this module has finished.
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index a323edfa..eb03c664 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -1,8 +1,9 @@
-# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2009-2010, 2020 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Matthew Ruffell <matthew.ruffell@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -15,15 +16,15 @@ Configure which device is used as the target for grub installation. This module
should work correctly by default without any user configuration. It can be
enabled/disabled using the ``enabled`` config key in the ``grub_dpkg`` config
dict. The global config key ``grub-dpkg`` is an alias for ``grub_dpkg``. If no
-installation device is specified this module will look for the first existing
-device in:
+installation device is specified this module will execute grub-probe to
+determine which disk the /boot directory is associated with.
- - ``/dev/sda``
- - ``/dev/vda``
- - ``/dev/xvda``
- - ``/dev/sda1``
- - ``/dev/vda1``
- - ``/dev/xvda1``
+The value which is placed into the debconf database is in the format which the
+grub postinstall script expects. Normally, this is a /dev/disk/by-id/ value,
+but we do fall back to the plain disk name if a by-id name is not present.
+
+If this module is executed inside a container, then the debconf database is
+seeded with empty values, and install_devices_empty is set to true.
**Internal name:** ``cc_grub_dpkg``
@@ -42,11 +43,68 @@ device in:
import os
+from cloudinit import subp
from cloudinit import util
+from cloudinit.subp import ProcessExecutionError
distros = ['ubuntu', 'debian']
+def fetch_idevs(log):
+ """
+ Fetches the /dev/disk/by-id device grub is installed to.
+ Falls back to plain disk name if no by-id entry is present.
+ """
+ disk = ""
+ devices = []
+
+ try:
+ # get the root disk where the /boot directory resides.
+ disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'],
+ capture=True)[0].strip()
+ except ProcessExecutionError as e:
+ # grub-common may not be installed, especially on containers
+ # FileNotFoundError is a nested exception of ProcessExecutionError
+ if isinstance(e.reason, FileNotFoundError):
+ log.debug("'grub-probe' not found in $PATH")
+ # disks from the container host are present in /proc and /sys
+ # which is where grub-probe determines where /boot is.
+ # it then checks for existence in /dev, which fails as host disks
+ # are not exposed to the container.
+ elif "failed to get canonical path" in e.stderr:
+ log.debug("grub-probe 'failed to get canonical path'")
+ else:
+ # something bad has happened, continue to log the error
+ raise
+ except Exception:
+ util.logexc(log, "grub-probe failed to execute for grub-dpkg")
+
+ if not disk or not os.path.exists(disk):
+ # If we failed to detect a disk, we can return early
+ return ''
+
+ try:
+ # check if disk exists and use udevadm to fetch symlinks
+ devices = subp.subp(
+ ['udevadm', 'info', '--root', '--query=symlink', disk],
+ capture=True
+ )[0].strip().split()
+ except Exception:
+ util.logexc(
+ log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk
+ )
+
+ log.debug('considering these device symlinks: %s', ','.join(devices))
+ # filter symlinks for /dev/disk/by-id entries
+ devices = [dev for dev in devices if 'disk/by-id' in dev]
+ log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices))
+ # select first device if there is one, else fall back to plain name
+ idevs = sorted(devices)[0] if devices else disk
+ log.debug('selected %s', idevs)
+
+ return idevs
+
+
def handle(name, cfg, _cloud, log, _args):
mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
@@ -62,22 +120,10 @@ def handle(name, cfg, _cloud, log, _args):
idevs_empty = util.get_cfg_option_str(
mycfg, "grub-pc/install_devices_empty", None)
- if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
- (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
- if idevs is None:
- idevs = ""
- if idevs_empty is None:
- idevs_empty = "true"
- else:
- if idevs_empty is None:
- idevs_empty = "false"
- if idevs is None:
- idevs = "/dev/sda"
- for dev in ("/dev/sda", "/dev/vda", "/dev/xvda",
- "/dev/sda1", "/dev/vda1", "/dev/xvda1"):
- if os.path.exists(dev):
- idevs = dev
- break
+ if idevs is None:
+ idevs = fetch_idevs(log)
+ if idevs_empty is None:
+ idevs_empty = "false" if idevs else "true"
# now idevs and idevs_empty are set to determined values
# or, those set by user
@@ -90,7 +136,7 @@ def handle(name, cfg, _cloud, log, _args):
(idevs, idevs_empty))
try:
- util.subp(['debconf-set-selections'], dconf_sel)
+ subp.subp(['debconf-set-selections'], dconf_sel)
except Exception:
util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 3d2ded3d..0f2be52b 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -33,6 +33,7 @@ key can be used. By default ``ssh-dss`` keys are not written to console.
import os
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -64,7 +65,7 @@ def handle(name, cfg, cloud, log, _args):
try:
cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)]
- (stdout, _stderr) = util.subp(cmd)
+ (stdout, _stderr) = subp.subp(cmd)
util.multi_log("%s\n" % (stdout.strip()),
stderr=False, console=True)
except Exception:
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index a9c04d86..299c4d01 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -61,6 +61,7 @@ from io import BytesIO
from configobj import ConfigObj
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -116,7 +117,7 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
- util.subp(["service", "landscape-client", "restart"])
+ subp.subp(["service", "landscape-client", "restart"])
def merge_together(objs):
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index f68c3cc7..4f8b7bf6 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -6,27 +6,58 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Locale
-------
-**Summary:** set system locale
+"""Locale: set system locale"""
-Configure the system locale and apply it system wide. By default use the locale
-specified by the datasource.
+from textwrap import dedent
-**Internal name:** ``cc_locale``
-
-**Module frequency:** per instance
+from cloudinit import util
+from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.settings import PER_INSTANCE
-**Supported distros:** all
-**Config keys**::
+frequency = PER_INSTANCE
+distros = ['all']
+schema = {
+ 'id': 'cc_locale',
+ 'name': 'Locale',
+ 'title': 'Set system locale',
+ 'description': dedent(
+ """\
+ Configure the system locale and apply it system wide. By default use
+ the locale specified by the datasource."""
+ ),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Set the locale to ar_AE
+ locale: ar_AE
+ """),
+ dedent("""\
+ # Set the locale to fr_CA in /etc/alternate_path/locale
+ locale: fr_CA
+ locale_configfile: /etc/alternate_path/locale
+ """),
+ ],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'locale': {
+ 'type': 'string',
+ 'description': (
+ "The locale to set as the system's locale (e.g. ar_PS)"
+ ),
+ },
+ 'locale_configfile': {
+ 'type': 'string',
+ 'description': (
+ "The file in which to write the locale configuration (defaults"
+ " to the distro's default location)"
+ ),
+ },
+ },
+}
- locale: <locale str>
- locale_configfile: <path to locale config file>
-"""
-
-from cloudinit import util
+__doc__ = get_schema_doc(schema) # Supplement python help()
def handle(name, cfg, cloud, log, args):
@@ -40,6 +71,8 @@ def handle(name, cfg, cloud, log, args):
name, locale)
return
+ validate_cloudconfig_schema(cfg, schema)
+
log.debug("Setting locale to %s", locale)
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 151a9844..7129c9c6 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -48,6 +48,7 @@ lxd-bridge will be configured accordingly.
"""
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
import os
@@ -85,16 +86,16 @@ def handle(name, cfg, cloud, log, args):
# Install the needed packages
packages = []
- if not util.which("lxd"):
+ if not subp.which("lxd"):
packages.append('lxd')
- if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
+ if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'):
packages.append('zfsutils-linux')
if len(packages):
try:
cloud.distro.install_packages(packages)
- except util.ProcessExecutionError as exc:
+ except subp.ProcessExecutionError as exc:
log.warning("failed to install packages %s: %s", packages, exc)
return
@@ -104,20 +105,20 @@ def handle(name, cfg, cloud, log, args):
'network_address', 'network_port', 'storage_backend',
'storage_create_device', 'storage_create_loop',
'storage_pool', 'trust_password')
- util.subp(['lxd', 'waitready', '--timeout=300'])
+ subp.subp(['lxd', 'waitready', '--timeout=300'])
cmd = ['lxd', 'init', '--auto']
for k in init_keys:
if init_cfg.get(k):
cmd.extend(["--%s=%s" %
(k.replace('_', '-'), str(init_cfg[k]))])
- util.subp(cmd)
+ subp.subp(cmd)
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
if bridge_cfg:
net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
if os.path.exists("/etc/default/lxd-bridge") \
- and util.which(dconf_comm):
+ and subp.which(dconf_comm):
# Bridge configured through packaging
debconf = bridge_to_debconf(bridge_cfg)
@@ -127,7 +128,7 @@ def handle(name, cfg, cloud, log, args):
log.debug("Setting lxd debconf via " + dconf_comm)
data = "\n".join(["set %s %s" % (k, v)
for k, v in debconf.items()]) + "\n"
- util.subp(['debconf-communicate'], data)
+ subp.subp(['debconf-communicate'], data)
except Exception:
util.logexc(log, "Failed to run '%s' for lxd with" %
dconf_comm)
@@ -137,7 +138,7 @@ def handle(name, cfg, cloud, log, args):
# Run reconfigure
log.debug("Running dpkg-reconfigure for lxd")
- util.subp(['dpkg-reconfigure', 'lxd',
+ subp.subp(['dpkg-reconfigure', 'lxd',
'--frontend=noninteractive'])
else:
# Built-in LXD bridge support
@@ -264,7 +265,7 @@ def _lxc(cmd):
env = {'LC_ALL': 'C',
'HOME': os.environ.get('HOME', '/root'),
'USER': os.environ.get('USER', 'root')}
- util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+ subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
def maybe_cleanup_default(net_name, did_init, create, attach,
@@ -286,7 +287,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
try:
_lxc(["network", "delete", net_name])
LOG.debug(msg, net_name, succeeded)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
LOG.debug(msg, net_name, fail_assume_enoent)
@@ -296,7 +297,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
try:
_lxc(["profile", "device", "remove", profile, nic_name])
LOG.debug(msg, nic_name, profile, succeeded)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 351183f1..41ea4fc9 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -56,6 +56,7 @@ import io
from configobj import ConfigObj
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
@@ -140,6 +141,6 @@ def handle(name, cfg, cloud, log, _args):
configure(config=mcollective_cfg['conf'])
# restart mcollective to handle updated config
- util.subp(['service', 'mcollective', 'restart'], capture=False)
+ subp.subp(['service', 'mcollective', 'restart'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 4ae3f1fc..54f2f878 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -65,15 +65,19 @@ swap file is created.
from string import whitespace
import logging
-import os.path
+import os
import re
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
+# Name matches 'server:/path'
+NETWORK_NAME_FILTER = r"^.+:.*"
+NETWORK_NAME_RE = re.compile(NETWORK_NAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
MNT_COMMENT = "comment=cloudconfig"
@@ -93,6 +97,13 @@ def is_meta_device_name(name):
return False
+def is_network_device(name):
+ # return true if this is a network device
+ if NETWORK_NAME_RE.match(name):
+ return True
+ return False
+
+
def _get_nth_partition_for_device(device_path, partition_number):
potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
'-part%s' % (partition_number,)]
@@ -122,6 +133,9 @@ def sanitize_devname(startname, transformer, log):
devname = "ephemeral0"
log.debug("Adjusted mount option from ephemeral to ephemeral0")
+ if is_network_device(startname):
+ return startname
+
device_path, partition_number = util.expand_dotted_devname(devname)
if is_meta_device_name(device_path):
@@ -223,24 +237,24 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
return size
-def create_swapfile(fname, size):
+def create_swapfile(fname: str, size: str) -> None:
"""Size is in MiB."""
- errmsg = "Failed to create swapfile '%s' of size %dMB via %s: %s"
+ errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s"
def create_swap(fname, size, method):
LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
fname, fstype, method)
if method == "fallocate":
- cmd = ['fallocate', '-l', '%dM' % size, fname]
+ cmd = ['fallocate', '-l', '%sM' % size, fname]
elif method == "dd":
cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
- 'count=%d' % size]
+ 'count=%s' % size]
try:
- util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
LOG.warning(errmsg, fname, size, method, e)
util.del_file(fname)
@@ -249,20 +263,22 @@ def create_swapfile(fname, size):
fstype = util.get_mount_info(swap_dir)[1]
- if fstype in ("xfs", "btrfs"):
+ if (fstype == "xfs" and
+ util.kernel_version() < (4, 18)) or fstype == "btrfs":
create_swap(fname, size, "dd")
else:
try:
create_swap(fname, size, "fallocate")
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.warning(errmsg, fname, size, "dd", e)
LOG.warning("Will attempt with dd.")
create_swap(fname, size, "dd")
- util.chmod(fname, 0o600)
+ if os.path.exists(fname):
+ util.chmod(fname, 0o600)
try:
- util.subp(['mkswap', fname])
- except util.ProcessExecutionError:
+ subp.subp(['mkswap', fname])
+ except subp.ProcessExecutionError:
util.del_file(fname)
raise
@@ -274,7 +290,6 @@ def setup_swapfile(fname, size=None, maxsize=None):
maxsize: the maximum size
"""
swap_dir = os.path.dirname(fname)
- mibsize = str(int(size / (2 ** 20)))
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
@@ -286,6 +301,7 @@ def setup_swapfile(fname, size=None, maxsize=None):
size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
memsize=memsize)
+ mibsize = str(int(size / (2 ** 20)))
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
@@ -365,17 +381,18 @@ def handle(_name, cfg, cloud, log, _args):
fstab_devs = {}
fstab_removed = []
- for line in util.load_file(FSTAB_PATH).splitlines():
- if MNT_COMMENT in line:
- fstab_removed.append(line)
- continue
+ if os.path.exists(FSTAB_PATH):
+ for line in util.load_file(FSTAB_PATH).splitlines():
+ if MNT_COMMENT in line:
+ fstab_removed.append(line)
+ continue
- try:
- toks = WS.split(line)
- except Exception:
- pass
- fstab_devs[toks[0]] = line
- fstab_lines.append(line)
+ try:
+ toks = WS.split(line)
+ except Exception:
+ pass
+ fstab_devs[toks[0]] = line
+ fstab_lines.append(line)
for i in range(len(cfgmnt)):
# skip something that wasn't a list
@@ -525,9 +542,9 @@ def handle(_name, cfg, cloud, log, _args):
for cmd in activate_cmds:
fmt = "Activate mounts: %s:" + ' '.join(cmd)
try:
- util.subp(cmd)
+ subp.subp(cmd)
log.debug(fmt, "PASS")
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
log.warning(fmt, "FAIL")
util.logexc(log, fmt, "FAIL")
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 5498bbaa..3d7279d6 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -14,6 +14,7 @@ from cloudinit import log as logging
from cloudinit import temp_utils
from cloudinit import templater
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
@@ -23,7 +24,8 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
-distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu']
+distros = ['alpine', 'centos', 'debian', 'fedora', 'opensuse', 'rhel',
+ 'sles', 'ubuntu']
NTP_CLIENT_CONFIG = {
'chrony': {
@@ -62,6 +64,17 @@ NTP_CLIENT_CONFIG = {
# This is Distro-specific configuration overrides of the base config
DISTRO_CLIENT_CONFIG = {
+ 'alpine': {
+ 'chrony': {
+ 'confpath': '/etc/chrony/chrony.conf',
+ 'service_name': 'chronyd',
+ },
+ 'ntp': {
+ 'confpath': '/etc/ntp.conf',
+ 'packages': [],
+ 'service_name': 'ntpd',
+ },
+ },
'debian': {
'chrony': {
'confpath': '/etc/chrony/chrony.conf',
@@ -113,11 +126,11 @@ schema = {
Handle ntp configuration. If ntp is not installed on the system and
ntp configuration is specified, ntp will be installed. If there is a
default ntp config file in the image or one is present in the
- distro's ntp package, it will be copied to ``/etc/ntp.conf.dist``
- before any changes are made. A list of ntp pools and ntp servers can
- be provided under the ``ntp`` config key. If no ntp ``servers`` or
- ``pools`` are provided, 4 pools will be used in the format
- ``{0-3}.{distro}.pool.ntp.org``."""),
+ distro's ntp package, it will be copied to a file with ``.dist``
+ appended to the filename before any changes are made. A list of ntp
+ pools and ntp servers can be provided under the ``ntp`` config key.
+ If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used
+ in the format ``{0-3}.{distro}.pool.ntp.org``."""),
'distros': distros,
'examples': [
dedent("""\
@@ -169,8 +182,11 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of ntp pools. If both pools and servers are
- empty, 4 default pool servers will be provided of
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ empty, 4 default pool servers will be provided of
+ the format ``{0-3}.{distro}.pool.ntp.org``. NOTE:
+ for Alpine Linux when using the Busybox NTP client
+ this setting will be ignored due to the limited
+ functionality of Busybox's ntpd.""")
},
'servers': {
'type': 'array',
@@ -181,46 +197,46 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of ntp servers. If both pools and servers are
- empty, 4 default pool servers will be provided with
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ empty, 4 default pool servers will be provided with
+ the format ``{0-3}.{distro}.pool.ntp.org``.""")
},
'ntp_client': {
'type': 'string',
'default': 'auto',
'description': dedent("""\
Name of an NTP client to use to configure system NTP.
- When unprovided or 'auto' the default client preferred
- by the distribution will be used. The following
- built-in client names can be used to override existing
- configuration defaults: chrony, ntp, ntpdate,
- systemd-timesyncd."""),
+ When unprovided or 'auto' the default client preferred
+ by the distribution will be used. The following
+ built-in client names can be used to override existing
+ configuration defaults: chrony, ntp, ntpdate,
+ systemd-timesyncd."""),
},
'enabled': {
'type': 'boolean',
'default': True,
'description': dedent("""\
Attempt to enable ntp clients if set to True. If set
- to False, ntp client will not be configured or
- installed"""),
+ to False, ntp client will not be configured or
+ installed"""),
},
'config': {
'description': dedent("""\
Configuration settings or overrides for the
- ``ntp_client`` specified."""),
+ ``ntp_client`` specified."""),
'type': ['object'],
'properties': {
'confpath': {
'type': 'string',
'description': dedent("""\
The path to where the ``ntp_client``
- configuration is written."""),
+ configuration is written."""),
},
'check_exe': {
'type': 'string',
'description': dedent("""\
The executable name for the ``ntp_client``.
- For example, ntp service ``check_exe`` is
- 'ntpd' because it runs the ntpd binary."""),
+ For example, ntp service ``check_exe`` is
+ 'ntpd' because it runs the ntpd binary."""),
},
'packages': {
'type': 'array',
@@ -230,22 +246,22 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of packages needed to be installed for the
- selected ``ntp_client``."""),
+ selected ``ntp_client``."""),
},
'service_name': {
'type': 'string',
'description': dedent("""\
The systemd or sysvinit service name used to
- start and stop the ``ntp_client``
- service."""),
+ start and stop the ``ntp_client``
+ service."""),
},
'template': {
'type': 'string',
'description': dedent("""\
Inline template allowing users to define their
- own ``ntp_client`` configuration template.
- The value must start with '## template:jinja'
- to enable use of templating support.
+ own ``ntp_client`` configuration template.
+ The value must start with '## template:jinja'
+ to enable use of templating support.
"""),
},
},
@@ -307,7 +323,7 @@ def select_ntp_client(ntp_client, distro):
if distro_ntp_client == "auto":
for client in distro.preferred_ntp_clients:
cfg = distro_cfg.get(client)
- if util.which(cfg.get('check_exe')):
+ if subp.which(cfg.get('check_exe')):
LOG.debug('Selected NTP client "%s", already installed',
client)
clientcfg = cfg
@@ -336,7 +352,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
@param check_exe: string. The name of a binary that indicates the package
the specified package is already installed.
"""
- if util.which(check_exe):
+ if subp.which(check_exe):
return
if packages is None:
packages = ['ntp']
@@ -363,21 +379,30 @@ def generate_server_names(distro):
"""
names = []
pool_distro = distro
- # For legal reasons x.pool.sles.ntp.org does not exist,
- # use the opensuse pool
+
if distro == 'sles':
+ # For legal reasons x.pool.sles.ntp.org does not exist,
+ # use the opensuse pool
pool_distro = 'opensuse'
+ elif distro == 'alpine':
+ # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist
+ # so use general x.pool.ntp.org instead.
+ pool_distro = ''
+
for x in range(0, NR_POOL_SERVERS):
- name = "%d.%s.pool.ntp.org" % (x, pool_distro)
- names.append(name)
+ names.append(".".join(
+ [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n]))
+
return names
-def write_ntp_config_template(distro_name, servers=None, pools=None,
- path=None, template_fn=None, template=None):
+def write_ntp_config_template(distro_name, service_name=None, servers=None,
+ pools=None, path=None, template_fn=None,
+ template=None):
"""Render a ntp client configuration for the specified client.
@param distro_name: string. The distro class name.
+ @param service_name: string. The name of the NTP client service.
@param servers: A list of strings specifying ntp servers. Defaults to empty
list.
@param pools: A list of strings specifying ntp pools. Defaults to empty
@@ -396,7 +421,14 @@ def write_ntp_config_template(distro_name, servers=None, pools=None,
if not pools:
pools = []
- if len(servers) == 0 and len(pools) == 0:
+ if (len(servers) == 0 and distro_name == 'alpine' and
+ service_name == 'ntpd'):
+ # Alpine's Busybox ntpd only understands "servers" configuration
+ # and not "pool" configuration.
+ servers = generate_server_names(distro_name)
+ LOG.debug(
+ 'Adding distro default ntp servers: %s', ','.join(servers))
+ elif len(servers) == 0 and len(pools) == 0:
pools = generate_server_names(distro_name)
LOG.debug(
'Adding distro default ntp pool servers: %s', ','.join(pools))
@@ -431,7 +463,7 @@ def reload_ntp(service, systemd=False):
cmd = ['systemctl', 'reload-or-restart', service]
else:
cmd = ['service', service, 'restart']
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def supplemental_schema_validation(ntp_config):
@@ -531,6 +563,8 @@ def handle(name, cfg, cloud, log, _args):
raise RuntimeError(msg)
write_ntp_config_template(cloud.distro.name,
+ service_name=ntp_client_config.get(
+ 'service_name'),
servers=ntp_cfg.get('servers', []),
pools=ntp_cfg.get('pools', []),
path=ntp_client_config.get('confpath'),
@@ -543,7 +577,7 @@ def handle(name, cfg, cloud, log, _args):
try:
reload_ntp(ntp_client_config['service_name'],
systemd=cloud.distro.uses_systemd())
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 86afffef..036baf85 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -43,6 +43,7 @@ import os
import time
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
REBOOT_FILE = "/var/run/reboot-required"
@@ -57,7 +58,7 @@ def _multi_cfg_bool_get(cfg, *keys):
def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
+ subp.subp(REBOOT_CMD)
start = time.time()
wait_time = initial_sleep
for _i in range(0, wait_attempts):
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index b8e27090..733c3910 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -19,6 +19,7 @@ keys to post. Available keys are:
- ``pub_key_dsa``
- ``pub_key_rsa``
- ``pub_key_ecdsa``
+ - ``pub_key_ed25519``
- ``instance_id``
- ``hostname``
- ``fdqn``
@@ -52,6 +53,7 @@ POST_LIST_ALL = [
'pub_key_dsa',
'pub_key_rsa',
'pub_key_ecdsa',
+ 'pub_key_ed25519',
'instance_id',
'hostname',
'fqdn'
@@ -105,6 +107,7 @@ def handle(name, cfg, cloud, log, args):
'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
+ 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub',
}
for (n, path) in pubkeys.items():
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 3e81a3c7..6fcb8a7d 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -22,9 +22,8 @@ The ``delay`` key specifies a duration to be added onto any shutdown command
used. Therefore, if a 5 minute delay and a 120 second shutdown are specified,
the maximum amount of time between cloud-init starting and the system shutting
down is 7 minutes, and the minimum amount of time is 5 minutes. The ``delay``
-key must have an argument in a form that the ``shutdown`` utility recognizes.
-The most common format is the form ``+5`` for 5 minutes. See ``man shutdown``
-for more options.
+key must have an argument in either the form ``+5`` for 5 minutes or ``now``
+for immediate shutdown.
Optionally, a command can be run to determine whether or not
the system should shut down. The command to be run should be specified in the
@@ -33,6 +32,10 @@ the system should shut down. The command to be run should be specified in the
``condition`` key is omitted or the command specified by the ``condition``
key returns 0.
+.. note::
+ With Alpine Linux any message value specified is ignored as Alpine's halt,
+ poweroff, and reboot commands do not support broadcasting a message.
+
**Internal name:** ``cc_power_state_change``
**Module frequency:** per instance
@@ -56,6 +59,7 @@ import subprocess
import time
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -71,7 +75,7 @@ def givecmdline(pid):
# PID COMM ARGS
# 1 init /bin/init --
if util.is_FreeBSD():
- (output, _err) = util.subp(['procstat', '-c', str(pid)])
+ (output, _err) = subp.subp(['procstat', '-c', str(pid)])
line = output.splitlines()[1]
m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
return m.group(2)
@@ -111,9 +115,9 @@ def check_condition(cond, log=None):
return False
-def handle(_name, cfg, _cloud, log, _args):
+def handle(_name, cfg, cloud, log, _args):
try:
- (args, timeout, condition) = load_power_state(cfg)
+ (args, timeout, condition) = load_power_state(cfg, cloud.distro.name)
if args is None:
log.debug("no power_state provided. doing nothing")
return
@@ -140,7 +144,19 @@ def handle(_name, cfg, _cloud, log, _args):
condition, execmd, [args, devnull_fp])
-def load_power_state(cfg):
+def convert_delay(delay, fmt=None, scale=None):
+ if not fmt:
+ fmt = "+%s"
+ if not scale:
+ scale = 1
+
+ if delay != "now":
+ delay = fmt % int(int(delay) * int(scale))
+
+ return delay
+
+
+def load_power_state(cfg, distro_name):
# returns a tuple of shutdown_command, timeout
# shutdown_command is None if no config found
pstate = cfg.get('power_state')
@@ -160,26 +176,42 @@ def load_power_state(cfg):
(','.join(opt_map.keys()), mode))
delay = pstate.get("delay", "now")
- # convert integer 30 or string '30' to '+30'
- try:
- delay = "+%s" % int(delay)
- except ValueError:
- pass
+ message = pstate.get("message")
+ scale = 1
+ fmt = "+%s"
+ command = ["shutdown", opt_map[mode]]
+
+ if distro_name == 'alpine':
+ # Convert integer 30 or string '30' to '1800' (seconds) as Alpine's
+ # halt/poweroff/reboot commands take seconds rather than minutes.
+ scale = 60
+ # No "+" in front of delay value as not supported by Alpine's commands.
+ fmt = "%s"
+ if delay == "now":
+ # Alpine's commands do not understand "now".
+ delay = "0"
+ command = [mode, "-d"]
+ # Alpine's commands don't support a message.
+ message = None
- if delay != "now" and not re.match(r"\+[0-9]+", delay):
+ try:
+ delay = convert_delay(delay, fmt=fmt, scale=scale)
+ except ValueError as e:
raise TypeError(
"power_state[delay] must be 'now' or '+m' (minutes)."
- " found '%s'." % delay)
+ " found '%s'." % delay
+ ) from e
- args = ["shutdown", opt_map[mode], delay]
- if pstate.get("message"):
- args.append(pstate.get("message"))
+ args = command + [delay]
+ if message:
+ args.append(message)
try:
timeout = float(pstate.get('timeout', 30.0))
- except ValueError:
- raise ValueError("failed to convert timeout '%s' to float." %
- pstate['timeout'])
+ except ValueError as e:
+ raise ValueError(
+ "failed to convert timeout '%s' to float." % pstate['timeout']
+ ) from e
condition = pstate.get("condition", True)
if not isinstance(condition, (str, list, bool)):
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index c01f5b8f..bc981cf4 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -83,6 +83,7 @@ import yaml
from io import StringIO
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
@@ -105,14 +106,14 @@ class PuppetConstants(object):
def _autostart_puppet(log):
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
- util.subp(['sed', '-i',
+ subp.subp(['sed', '-i',
'-e', 's/^START=.*/START=yes/',
'/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
- util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
+ subp.subp(['/bin/systemctl', 'enable', 'puppet.service'],
capture=False)
elif os.path.exists('/sbin/chkconfig'):
- util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+ subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
log.warning(("Sorry we do not know how to enable"
" puppet services on this system"))
@@ -159,9 +160,9 @@ def handle(name, cfg, cloud, log, _args):
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
# Move to puppet_config.read_file when dropping py2.7
- puppet_config.readfp( # pylint: disable=W1505
+ puppet_config.read_file(
StringIO(cleaned_contents),
- filename=p_constants.conf_path)
+ source=p_constants.conf_path)
for (cfg_name, cfg) in puppet_cfg['conf'].items():
# Cert configuration is a special case
# Dump the puppet master ca certificate in the correct place
@@ -203,6 +204,6 @@ def handle(name, cfg, cloud, log, _args):
_autostart_puppet(log)
# Start puppetd
- util.subp(['service', 'puppet', 'start'], capture=False)
+ subp.subp(['service', 'puppet', 'start'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 01dfc125..978d2ee0 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -19,6 +19,7 @@ from textwrap import dedent
from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
+from cloudinit import subp
from cloudinit import util
NOBLOCK = "noblock"
@@ -88,11 +89,11 @@ def _resize_zfs(mount_point, devpth):
def _get_dumpfs_output(mount_point):
- return util.subp(['dumpfs', '-m', mount_point])[0]
+ return subp.subp(['dumpfs', '-m', mount_point])[0]
def _get_gpart_output(part):
- return util.subp(['gpart', 'show', part])[0]
+ return subp.subp(['gpart', 'show', part])[0]
def _can_skip_resize_ufs(mount_point, devpth):
@@ -117,14 +118,12 @@ def _can_skip_resize_ufs(mount_point, devpth):
if o == "-f":
frag_sz = int(a)
# check the current partition size
- """
- # gpart show /dev/da0
-=> 40 62914480 da0 GPT (30G)
- 40 1024 1 freebsd-boot (512K)
- 1064 58719232 2 freebsd-ufs (28G)
- 58720296 3145728 3 freebsd-swap (1.5G)
- 61866024 1048496 - free - (512M)
- """
+ # Example output from `gpart show /dev/da0`:
+ # => 40 62914480 da0 GPT (30G)
+ # 40 1024 1 freebsd-boot (512K)
+ # 1064 58719232 2 freebsd-ufs (28G)
+ # 58720296 3145728 3 freebsd-swap (1.5G)
+ # 61866024 1048496 - free - (512M)
expect_sz = None
m = re.search('^(/dev/.+)p([0-9])$', devpth)
gpart_res = _get_gpart_output(m.group(1))
@@ -306,8 +305,8 @@ def handle(name, cfg, _cloud, log, args):
def do_resize(resize_cmd, log):
try:
- util.subp(resize_cmd)
- except util.ProcessExecutionError:
+ subp.subp(resize_cmd)
+ except subp.ProcessExecutionError:
util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
raise
# TODO(harlowja): Should we add a fsck check after this to make
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 69f4768a..519e66eb 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -30,7 +30,7 @@ are configured correctly.
**Module frequency:** per instance
-**Supported distros:** fedora, rhel, sles
+**Supported distros:** alpine, fedora, rhel, sles
**Config keys**::
@@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['fedora', 'opensuse', 'rhel', 'sles']
+distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles']
def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 28c79b83..28d62e9d 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -39,6 +39,7 @@ Subscription`` example config.
"""
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -173,7 +174,7 @@ class SubscriptionManager(object):
try:
_sub_man_cli(cmd)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
return False
return True
@@ -200,7 +201,7 @@ class SubscriptionManager(object):
try:
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
@@ -223,7 +224,7 @@ class SubscriptionManager(object):
# Attempting to register the system only
try:
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
@@ -246,7 +247,7 @@ class SubscriptionManager(object):
try:
return_out = _sub_man_cli(cmd)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
if line != '':
@@ -264,7 +265,7 @@ class SubscriptionManager(object):
cmd = ['attach', '--auto']
try:
return_out = _sub_man_cli(cmd)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
for line in return_out.split("\n"):
@@ -341,7 +342,7 @@ class SubscriptionManager(object):
"system: %s", (", ".join(pool_list))
.replace('--pool=', ''))
return True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Unable to attach pool {0} "
"due to {1}".format(pool, e))
return False
@@ -414,7 +415,7 @@ class SubscriptionManager(object):
try:
_sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Unable to alter repos due to {0}".format(e))
return False
@@ -432,11 +433,11 @@ class SubscriptionManager(object):
def _sub_man_cli(cmd, logstring_val=False):
'''
- Uses the prefered cloud-init subprocess def of util.subp
+ Uses the preferred cloud-init subprocess def of subp.subp
and runs subscription-manager. Breaking this to a
separate function for later use in mocking and unittests
'''
- return util.subp(['subscription-manager'] + cmd,
+ return subp.subp(['subscription-manager'] + cmd,
logstring=logstring_val)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 5df0137d..2a2bc931 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -182,6 +182,7 @@ import os
import re
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
DEF_FILENAME = "20-cloud-config.conf"
@@ -215,7 +216,7 @@ def reload_syslog(command=DEF_RELOAD, systemd=False):
cmd = ['service', service, 'restart']
else:
cmd = command
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def load_config(cfg):
@@ -346,8 +347,10 @@ class SyslogRemotesLine(object):
if self.port:
try:
int(self.port)
- except ValueError:
- raise ValueError("port '%s' is not an integer" % self.port)
+ except ValueError as e:
+ raise ValueError(
+ "port '%s' is not an integer" % self.port
+ ) from e
if not self.addr:
raise ValueError("address is required")
@@ -429,7 +432,7 @@ def handle(name, cfg, cloud, log, _args):
restarted = reload_syslog(
command=mycfg[KEYNAME_RELOAD],
systemd=cloud.distro.uses_systemd()),
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
restarted = False
log.warning("Failed to reload syslog", e)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 5dd8de37..b61876aa 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -45,7 +45,7 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
import os
-from cloudinit import safeyaml, util
+from cloudinit import safeyaml, subp, util
from cloudinit.distros import rhel_util
@@ -130,6 +130,6 @@ def handle(name, cfg, cloud, log, _args):
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
- util.subp(['service', const.srv_name, 'restart'], capture=False)
+ subp.subp(['service', const.srv_name, 'restart'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 588e1b03..1e3f419e 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -24,7 +24,7 @@ module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_ALWAYS
@@ -38,7 +38,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 75549b52..5966fb9a 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -27,7 +27,7 @@ the system. As a result per-instance scripts will run again.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_INSTANCE
@@ -41,7 +41,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index 259bdfab..bcca859e 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -25,7 +25,7 @@ be run in alphabetical order. This module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_ONCE
@@ -39,7 +39,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index d940dbd6..215703ef 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -27,7 +27,7 @@ This module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_INSTANCE
@@ -42,7 +42,7 @@ def handle(name, _cfg, cloud, log, _args):
# go here...
runparts_path = os.path.join(cloud.get_ipath_cur(), SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index faac9242..e0a4bfff 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -28,6 +28,7 @@ entry under the ``vendor_data`` config key.
import os
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -46,7 +47,7 @@ def handle(name, cfg, cloud, log, _args):
prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
try:
- util.runparts(runparts_path, exe_prefix=prefix)
+ subp.runparts(runparts_path, exe_prefix=prefix)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index b65f3ed9..4fb9b44e 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -65,6 +65,7 @@ from io import BytesIO
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -92,14 +93,14 @@ def handle_random_seed_command(command, required, env=None):
return
cmd = command[0]
- if not util.which(cmd):
+ if not subp.which(cmd):
if required:
raise ValueError(
"command '{cmd}' not found but required=true".format(cmd=cmd))
else:
LOG.debug("command '%s' not found for seed_command", cmd)
return
- util.subp(command, env=env, capture=False)
+ subp.subp(command, env=env, capture=False)
def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 10d6d197..1d23d80d 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -55,7 +55,6 @@ class SetHostnameError(Exception):
This may happen if we attempt to set the hostname early in cloud-init's
init-local timeframe as certain services may not be running yet.
"""
- pass
def handle(name, cfg, cloud, log, _args):
@@ -86,7 +85,7 @@ def handle(name, cfg, cloud, log, _args):
except Exception as e:
msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
util.logexc(log, msg)
- raise SetHostnameError("%s: %s" % (msg, e))
+ raise SetHostnameError("%s: %s" % (msg, e)) from e
write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 4943d545..d6b5682d 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -83,6 +83,7 @@ import sys
from cloudinit.distros import ug_util
from cloudinit import log as logging
from cloudinit.ssh_util import update_ssh_config
+from cloudinit import subp
from cloudinit import util
from string import ascii_letters, digits
@@ -128,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
cmd = list(service_cmd) + ["restart", service_name]
else:
cmd = list(service_cmd) + [service_name, "restart"]
- util.subp(cmd)
+ subp.subp(cmd)
LOG.debug("Restarted the SSH daemon.")
@@ -241,12 +242,12 @@ def rand_user_password(pwlen=20):
def chpasswd(distro, plist_in, hashed=False):
- if util.is_FreeBSD():
+ if util.is_BSD():
for pentry in plist_in.splitlines():
u, p = pentry.split(":")
distro.set_passwd(u, p, hashed=hashed)
else:
cmd = ['chpasswd'] + (['-e'] if hashed else [])
- util.subp(cmd, plist_in)
+ subp.subp(cmd, plist_in)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 90724b81..20ed7d2f 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -12,6 +12,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
+from cloudinit import subp
from cloudinit import util
@@ -61,9 +62,9 @@ schema = {
snap:
assertions:
00: |
- signed_assertion_blob_here
+ signed_assertion_blob_here
02: |
- signed_assertion_blob_here
+ signed_assertion_blob_here
commands:
00: snap create-user --sudoer --known <snap-user>@mydomain.com
01: snap install canonical-livepatch
@@ -85,6 +86,21 @@ schema = {
01: ['snap', 'install', 'vlc']
02: snap install vlc
03: 'snap install vlc'
+ """), dedent("""\
+ # You can use a list of commands
+ snap:
+ commands:
+ - ['install', 'vlc']
+ - ['snap', 'install', 'vlc']
+ - snap install vlc
+ - 'snap install vlc'
+ """), dedent("""\
+ # You can use a list of assertions
+ snap:
+ assertions:
+ - signed_assertion_blob_here
+ - |
+ signed_assertion_blob_here
""")],
'frequency': PER_INSTANCE,
'type': 'object',
@@ -98,7 +114,8 @@ schema = {
'additionalItems': False, # Reject items non-string
'minItems': 1,
'minProperties': 1,
- 'uniqueItems': True
+ 'uniqueItems': True,
+ 'additionalProperties': {'type': 'string'},
},
'commands': {
'type': ['object', 'array'], # Array of strings or dict
@@ -110,6 +127,12 @@ schema = {
'additionalItems': False, # Reject non-string & non-list
'minItems': 1,
'minProperties': 1,
+ 'additionalProperties': {
+ 'oneOf': [
+ {'type': 'string'},
+ {'type': 'array', 'items': {'type': 'string'}},
+ ],
+ },
},
'squashfuse_in_container': {
'type': 'boolean'
@@ -122,10 +145,6 @@ schema = {
}
}
-# TODO schema for 'assertions' and 'commands' are too permissive at the moment.
-# Once python-jsonschema supports schema draft 6 add support for arbitrary
-# object keys with 'patternProperties' constraint to validate string values.
-
__doc__ = get_schema_doc(schema) # Supplement python help()
SNAP_CMD = "snap"
@@ -157,7 +176,7 @@ def add_assertions(assertions):
LOG.debug('Snap acking: %s', asrt.split('\n')[0:2])
util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
- util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
+ subp.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
def run_commands(commands):
@@ -186,8 +205,8 @@ def run_commands(commands):
for command in fixed_snap_commands:
shell = isinstance(command, str)
try:
- util.subp(command, shell=shell, status_cb=sys.stderr.write)
- except util.ProcessExecutionError as e:
+ subp.subp(command, shell=shell, status_cb=sys.stderr.write)
+ except subp.ProcessExecutionError as e:
cmd_failures.append(str(e))
if cmd_failures:
msg = 'Failures running snap commands:\n{cmd_failures}'.format(
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 1020e944..95083607 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -27,7 +27,7 @@ For more information about spacewalk see: https://fedorahosted.org/spacewalk/
activation_key: <key>
"""
-from cloudinit import util
+from cloudinit import subp
distros = ['redhat', 'fedora']
@@ -41,9 +41,9 @@ def is_registered():
# assume we aren't registered; which is sorta ghetto...
already_registered = False
try:
- util.subp(['rhn-profile-sync', '--verbose'], capture=False)
+ subp.subp(['rhn-profile-sync', '--verbose'], capture=False)
already_registered = True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise
return already_registered
@@ -65,7 +65,7 @@ def do_register(server, profile_name,
cmd.extend(['--sslCACert', str(ca_cert_path)])
if activation_key:
cmd.extend(['--activationkey', str(activation_key)])
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 163cce99..9b2a333a 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -35,6 +35,42 @@ root login is disabled, and root login opts are set to::
no-port-forwarding,no-agent-forwarding,no-X11-forwarding
+Supported public key types for the ``ssh_authorized_keys`` are:
+
+ - dsa
+ - rsa
+ - ecdsa
+ - ed25519
+ - ecdsa-sha2-nistp256-cert-v01@openssh.com
+ - ecdsa-sha2-nistp256
+ - ecdsa-sha2-nistp384-cert-v01@openssh.com
+ - ecdsa-sha2-nistp384
+ - ecdsa-sha2-nistp521-cert-v01@openssh.com
+ - ecdsa-sha2-nistp521
+ - sk-ecdsa-sha2-nistp256-cert-v01@openssh.com
+ - sk-ecdsa-sha2-nistp256@openssh.com
+ - sk-ssh-ed25519-cert-v01@openssh.com
+ - sk-ssh-ed25519@openssh.com
+ - ssh-dss-cert-v01@openssh.com
+ - ssh-dss
+ - ssh-ed25519-cert-v01@openssh.com
+ - ssh-ed25519
+ - ssh-rsa-cert-v01@openssh.com
+ - ssh-rsa
+ - ssh-xmss-cert-v01@openssh.com
+ - ssh-xmss@openssh.com
+
+.. note::
+ this list has been filtered from the supported keytypes of
+ `OpenSSH`_ source, where the sigonly keys are removed. Please see
+ ``ssh_util`` for more information.
+
+ ``dsa``, ``rsa``, ``ecdsa`` and ``ed25519`` are added for legacy,
+ as they are valid public keys in some old distros. They can possibly
+ be removed in the future when support for the older distros is dropped.
+
+.. _OpenSSH: https://github.com/openssh/openssh-portable/blob/master/sshkey.c
+
Host Keys
^^^^^^^^^
@@ -116,6 +152,7 @@ import sys
from cloudinit.distros import ug_util
from cloudinit import ssh_util
+from cloudinit import subp
from cloudinit import util
@@ -164,7 +201,7 @@ def handle(_name, cfg, cloud, log, _args):
try:
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
log.debug("Generated a key for %s from %s", pair[0], pair[1])
except Exception:
util.logexc(log, "Failed generated a key for %s from %s",
@@ -186,9 +223,9 @@ def handle(_name, cfg, cloud, log, _args):
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
try:
- out, err = util.subp(cmd, capture=True, env=lang_c)
+ out, err = subp.subp(cmd, capture=True, env=lang_c)
sys.stdout.write(util.decode_binary(out))
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
err = util.decode_binary(e.stderr).lower()
if (e.exit_code == 1 and
err.lower().startswith("unknown key")):
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 7ac1c8cf..05d30ad1 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -13,7 +13,7 @@ Write fingerprints of authorized keys for each user to log. This is enabled by
default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
the keys can be specified, but defaults to ``sha256``.
-**Internal name:** `` cc_ssh_authkey_fingerprints``
+**Internal name:** ``cc_ssh_authkey_fingerprints``
**Module frequency:** per instance
@@ -59,8 +59,8 @@ def _gen_fingerprint(b64_text, hash_meth='sha256'):
def _is_printable_key(entry):
if any([entry.keytype, entry.base64, entry.comment, entry.options]):
- if (entry.keytype and
- entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
+ if (entry.keytype and entry.keytype.lower().strip()
+ in ssh_util.VALID_KEY_TYPES):
return True
return False
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 63f87298..856e5a9e 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -31,6 +31,7 @@ either ``lp:`` for launchpad or ``gh:`` for github to the username.
"""
from cloudinit.distros import ug_util
+from cloudinit import subp
from cloudinit import util
import pwd
@@ -101,8 +102,8 @@ def import_ssh_ids(ids, user, log):
log.debug("Importing SSH ids for user %s.", user)
try:
- util.subp(cmd, capture=False)
- except util.ProcessExecutionError as exc:
+ subp.subp(cmd, capture=False)
+ except subp.ProcessExecutionError as exc:
util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 8b6d2a1a..d61dc655 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -8,6 +8,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
@@ -109,18 +110,18 @@ def configure_ua(token=None, enable=None):
attach_cmd = ['ua', 'attach', token]
LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
try:
- util.subp(attach_cmd)
- except util.ProcessExecutionError as e:
+ subp.subp(attach_cmd)
+ except subp.ProcessExecutionError as e:
msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
error=str(e))
util.logexc(LOG, msg)
- raise RuntimeError(msg)
+ raise RuntimeError(msg) from e
enable_errors = []
for service in enable:
try:
cmd = ['ua', 'enable', service]
- util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
enable_errors.append((service, e))
if enable_errors:
for service, error in enable_errors:
@@ -135,7 +136,7 @@ def configure_ua(token=None, enable=None):
def maybe_install_ua_tools(cloud):
"""Install ubuntu-advantage-tools if not present."""
- if util.which('ua'):
+ if subp.which('ua'):
return
try:
cloud.distro.update_package_sources()
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 297451d6..2d1d2b32 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -9,6 +9,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import temp_utils
from cloudinit import type_utils
from cloudinit import util
@@ -108,7 +109,7 @@ def install_drivers(cfg, pkg_install_func):
LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
return
- if not util.which('ubuntu-drivers'):
+ if not subp.which('ubuntu-drivers'):
LOG.debug("'ubuntu-drivers' command not available. "
"Installing ubuntu-drivers-common")
pkg_install_func(['ubuntu-drivers-common'])
@@ -131,7 +132,7 @@ def install_drivers(cfg, pkg_install_func):
debconf_script,
util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
mode=0o755)
- util.subp([debconf_script, debconf_file])
+ subp.subp([debconf_script, debconf_file])
except Exception as e:
util.logexc(
LOG, "Failed to register NVIDIA debconf template: %s", str(e))
@@ -141,8 +142,8 @@ def install_drivers(cfg, pkg_install_func):
util.del_dir(tdir)
try:
- util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
- except util.ProcessExecutionError as exc:
+ subp.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
+ except subp.ProcessExecutionError as exc:
if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
LOG.warning('the available version of ubuntu-drivers is'
' too old to perform requested driver installation')
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index 13764e60..426498a3 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -78,6 +78,13 @@ config keys for an entry in ``users`` are as follows:
If specifying a sudo rule for a user, ensure that the syntax for the rule
is valid, as it is not checked by cloud-init.
+.. note::
+ Most of these configuration options will not be honored if the user
+ already exists. The following options are the exceptions, and they are
+ applicable to already-existing users:
+ - 'plain_text_passwd', 'hashed_passwd', 'lock_passwd', 'sudo',
+ 'ssh_authorized_keys', 'ssh_redirect_user'.
+
**Internal name:** ``cc_users_groups``
**Module frequency:** per instance
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index bd87e9e5..8601e707 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -4,60 +4,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Write Files
------------
-**Summary:** write arbitrary files
-
-Write out arbitrary content to files, optionally setting permissions. Content
-can be specified in plain text or binary. Data encoded with either base64 or
-binary gzip data can be specified and will be decoded before being written.
-
-.. note::
- if multiline data is provided, care should be taken to ensure that it
- follows yaml formatting standards. to specify binary data, use the yaml
- option ``!!binary``
-
-.. note::
- Do not write files under /tmp during boot because of a race with
- systemd-tmpfiles-clean that can cause temp files to get cleaned during
- the early boot process. Use /run/somedir instead to avoid race LP:1707222.
-
-**Internal name:** ``cc_write_files``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
- owner: root:root
- path: /etc/sysconfig/selinux
- permissions: '0644'
- - content: |
- # My new /etc/sysconfig/samba file
-
- SMDBOPTIONS="-D"
- path: /etc/sysconfig/samba
- - content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAA
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAAB
- ...
- path: /bin/arch
- permissions: '0555'
- - content: |
- 15 * * * * root ship_logs
- path: /etc/crontab
- append: true
-"""
+"""Write Files: write arbitrary files"""
import base64
import os
+from textwrap import dedent
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
@@ -71,6 +25,142 @@ UNKNOWN_ENC = 'text/plain'
LOG = logging.getLogger(__name__)
+distros = ['all']
+
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
+
+supported_encoding_types = [
+ 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64',
+ 'base64']
+
+schema = {
+ 'id': 'cc_write_files',
+ 'name': 'Write Files',
+ 'title': 'write arbitrary files',
+ 'description': dedent("""\
+ Write out arbitrary content to files, optionally setting permissions.
+ Parent folders in the path are created if absent.
+ Content can be specified in plain text or binary. Data encoded with
+ either base64 or binary gzip data can be specified and will be decoded
+ before being written. For empty file creation, content can be omitted.
+
+ .. note::
+ if multiline data is provided, care should be taken to ensure that it
+ follows yaml formatting standards. to specify binary data, use the yaml
+ option ``!!binary``
+
+ .. note::
+ Do not write files under /tmp during boot because of a race with
+ systemd-tmpfiles-clean that can cause temp files to get cleaned during
+ the early boot process. Use /run/somedir instead to avoid race
+ LP:1707222."""),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Write out base64 encoded content to /etc/sysconfig/selinux
+ write_files:
+ - encoding: b64
+ content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
+ owner: root:root
+ path: /etc/sysconfig/selinux
+ permissions: '0644'
+ """),
+ dedent("""\
+ # Appending content to an existing file
+ write_files:
+ - content: |
+ 15 * * * * root ship_logs
+ path: /etc/crontab
+ append: true
+ """),
+ dedent("""\
+            # Provide gzipped binary content
+ write_files:
+ - encoding: gzip
+ content: !!binary |
+ H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
+ path: /usr/bin/hello
+ permissions: '0755'
+ """),
+ dedent("""\
+ # Create an empty file on the system
+ write_files:
+ - path: /root/CLOUD_INIT_WAS_HERE
+ """)],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'write_files': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'path': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Path of the file to which ``content`` is decoded
+ and written
+ """),
+ },
+ 'content': {
+ 'type': 'string',
+ 'default': '',
+ 'description': dedent("""\
+ Optional content to write to the provided ``path``.
+ When content is present and encoding is not '%s',
+ decode the content prior to writing. Default:
+ **''**
+ """ % UNKNOWN_ENC),
+ },
+ 'owner': {
+ 'type': 'string',
+ 'default': DEFAULT_OWNER,
+ 'description': dedent("""\
+ Optional owner:group to chown on the file. Default:
+ **{owner}**
+ """.format(owner=DEFAULT_OWNER)),
+ },
+ 'permissions': {
+ 'type': 'string',
+ 'default': oct(DEFAULT_PERMS).replace('o', ''),
+ 'description': dedent("""\
+ Optional file permissions to set on ``path``
+ represented as an octal string '0###'. Default:
+ **'{perms}'**
+ """.format(perms=oct(DEFAULT_PERMS).replace('o', ''))),
+ },
+ 'encoding': {
+ 'type': 'string',
+ 'default': UNKNOWN_ENC,
+ 'enum': supported_encoding_types,
+ 'description': dedent("""\
+ Optional encoding type of the content. Default is
+ **text/plain** and no content decoding is
+ performed. Supported encoding types are:
+ %s.""" % ", ".join(supported_encoding_types)),
+ },
+ 'append': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Whether to append ``content`` to existing file if
+ ``path`` exists. Default: **false**.
+ """),
+ },
+ },
+ 'required': ['path'],
+ 'additionalProperties': False
+ },
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
def handle(name, cfg, _cloud, log, _args):
files = cfg.get('write_files')
@@ -78,6 +168,7 @@ def handle(name, cfg, _cloud, log, _args):
log.debug(("Skipping module named %s,"
" no/empty 'write_files' key in configuration"), name)
return
+ validate_cloudconfig_schema(cfg, schema)
write_files(name, files)
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 3673166a..01fe683c 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,7 +18,7 @@ entry, the config entry will be skipped.
**Module frequency:** per always
-**Supported distros:** fedora, rhel
+**Supported distros:** centos, fedora, rhel
**Config keys**::
@@ -36,7 +36,7 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['fedora', 'rhel']
+distros = ['centos', 'fedora', 'rhel']
def _canonicalize_id(repo_id):
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 807c3eee..8a966aee 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,8 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""schema.py: Set of module functions for processing cloud-config schema."""
-from __future__ import print_function
-
from cloudinit import importer
from cloudinit.util import find_modules, load_file
@@ -36,6 +34,8 @@ SCHEMA_DOC_TMPL = """
{examples}
"""
SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+SCHEMA_LIST_ITEM_TMPL = (
+ '{prefix}Each item in **{prop_name}** list supports the following keys:')
SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
@@ -58,6 +58,19 @@ class SchemaValidationError(ValueError):
super(SchemaValidationError, self).__init__(message)
+def is_schema_byte_string(checker, instance):
+ """TYPE_CHECKER override allowing bytes for string type
+
+ For jsonschema v. 3.0.0+
+ """
+ try:
+ from jsonschema import Draft4Validator
+ except ImportError:
+ return False
+ return (Draft4Validator.TYPE_CHECKER.is_type(instance, "string") or
+ isinstance(instance, (bytes,)))
+
+
def validate_cloudconfig_schema(config, schema, strict=False):
"""Validate provided config meets the schema definition.
@@ -73,11 +86,31 @@ def validate_cloudconfig_schema(config, schema, strict=False):
"""
try:
from jsonschema import Draft4Validator, FormatChecker
+ from jsonschema.validators import create, extend
except ImportError:
logging.debug(
'Ignoring schema validation. python-jsonschema is not present')
return
- validator = Draft4Validator(schema, format_checker=FormatChecker())
+
+ # Allow for bytes to be presented as an acceptable valid value for string
+ # type jsonschema attributes in cloud-init's schema.
+ # This allows #cloud-config to provide valid yaml "content: !!binary | ..."
+ if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+
+ type_checker = Draft4Validator.TYPE_CHECKER.redefine(
+ 'string', is_schema_byte_string)
+ cloudinitValidator = extend(Draft4Validator, type_checker=type_checker)
+ else: # jsonschema 2.6 workaround
+ types = Draft4Validator.DEFAULT_TYPES
+ # Allow bytes as well as string (and disable a spurious
+ # unsupported-assignment-operation pylint warning which appears because
+ # this code path isn't written against the latest jsonschema).
+ types['string'] = (str, bytes) # pylint: disable=E1137
+ cloudinitValidator = create(
+ meta_schema=Draft4Validator.META_SCHEMA,
+ validators=Draft4Validator.VALIDATORS,
+ version="draft4",
+ default_types=types)
+ validator = cloudinitValidator(schema, format_checker=FormatChecker())
errors = ()
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
path = '.'.join([str(p) for p in error.path])
@@ -106,7 +139,6 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
schemapaths = _schemapath_for_cloudconfig(
cloudconfig, original_content)
errors_by_line = defaultdict(list)
- error_count = 1
error_footer = []
annotated_content = []
for path, msg in schema_errors:
@@ -120,18 +152,17 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
if col is not None:
msg = 'Line {line} column {col}: {msg}'.format(
line=line, col=col, msg=msg)
- error_footer.append('# E{0}: {1}'.format(error_count, msg))
- error_count += 1
lines = original_content.decode().split('\n')
- error_count = 1
- for line_number, line in enumerate(lines):
- errors = errors_by_line[line_number + 1]
+ error_index = 1
+ for line_number, line in enumerate(lines, 1):
+ errors = errors_by_line[line_number]
if errors:
- error_label = ','.join(
- ['E{0}'.format(count + error_count)
- for count in range(0, len(errors))])
- error_count += len(errors)
- annotated_content.append(line + '\t\t# ' + error_label)
+ error_label = []
+ for error in errors:
+ error_label.append('E{0}'.format(error_index))
+ error_footer.append('# E{0}: {1}'.format(error_index, error))
+ error_index += 1
+ annotated_content.append(line + '\t\t# ' + ','.join(error_label))
else:
annotated_content.append(line)
annotated_content.append(
@@ -179,7 +210,7 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
error = SchemaValidationError(errors)
if annotate:
print(annotated_cloudconfig_file({}, content, error.schema_errors))
- raise error
+ raise error from e
try:
validate_cloudconfig_schema(
cloudconfig, schema, strict=True)
@@ -213,20 +244,34 @@ def _schemapath_for_cloudconfig(config, original_content):
previous_depth = -1
path_prefix = ''
if line.startswith('- '):
+ # Process list items adding a list_index to the path prefix
+ previous_list_idx = '.%d' % (list_index - 1)
+ if path_prefix and path_prefix.endswith(previous_list_idx):
+ path_prefix = path_prefix[:-len(previous_list_idx)]
key = str(list_index)
- value = line[1:]
+ schema_line_numbers[key] = line_number
+ item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0])
+ item_indent += 1 # For the leading '-' character
+ previous_depth = indent_depth
+ indent_depth += item_indent
+ line = line[item_indent:] # Strip leading list item + whitespace
list_index += 1
else:
+ # Process non-list lines setting value if present
list_index = 0
key, value = line.split(':', 1)
+ if path_prefix:
+ # Append any existing path_prefix for a fully-pathed key
+ key = path_prefix + '.' + key
while indent_depth <= previous_depth:
if scopes:
previous_depth, path_prefix = scopes.pop()
+ if list_index > 0 and indent_depth == previous_depth:
+ path_prefix = '.'.join(path_prefix.split('.')[:-1])
+ break
else:
previous_depth = -1
path_prefix = ''
- if path_prefix:
- key = path_prefix + '.' + key
scopes.append((indent_depth, key))
if value:
value = value.strip()
@@ -259,6 +304,28 @@ def _get_property_type(property_dict):
return property_type
+def _parse_description(description, prefix):
+ """Parse description from the schema in a format that we can better
+ display in our docs. This parser does three things:
+
+ - Guarantee that a paragraph will be in a single line
+ - Guarantee that each new paragraph will be aligned with
+ the first paragraph
+    - Properly align lists of items
+
+ @param description: The original description in the schema.
+ @param prefix: The number of spaces used to align the current description
+ """
+ list_paragraph = prefix * 3
+ description = re.sub(r"(\S)\n(\S)", r"\1 \2", description)
+ description = re.sub(
+ r"\n\n", r"\n\n{}".format(prefix), description)
+ description = re.sub(
+ r"\n( +)-", r"\n{}-".format(list_paragraph), description)
+
+ return description
+
+
def _get_property_doc(schema, prefix=' '):
"""Return restructured text describing the supported schema properties."""
new_prefix = prefix + ' '
@@ -266,11 +333,23 @@ def _get_property_doc(schema, prefix=' '):
for prop_key, prop_config in schema.get('properties', {}).items():
# Define prop_name and dscription for SCHEMA_PROPERTY_TMPL
description = prop_config.get('description', '')
+
properties.append(SCHEMA_PROPERTY_TMPL.format(
prefix=prefix,
prop_name=prop_key,
type=_get_property_type(prop_config),
- description=description.replace('\n', '')))
+ description=_parse_description(description, prefix)))
+ items = prop_config.get('items')
+ if items:
+ if isinstance(items, list):
+ for item in items:
+ properties.append(
+ _get_property_doc(item, prefix=new_prefix))
+ elif isinstance(items, dict) and items.get('properties'):
+ properties.append(SCHEMA_LIST_ITEM_TMPL.format(
+ prefix=new_prefix, prop_name=prop_key))
+ new_prefix += ' '
+ properties.append(_get_property_doc(items, prefix=new_prefix))
if 'properties' in prop_config:
properties.append(
_get_property_doc(prop_config, prefix=new_prefix))
@@ -346,8 +425,9 @@ def get_parser(parser=None):
description='Validate cloud-config files or document schema')
parser.add_argument('-c', '--config-file',
help='Path of the cloud-config yaml file to validate')
- parser.add_argument('-d', '--doc', action="store_true", default=False,
- help='Print schema documentation')
+ parser.add_argument('-d', '--docs', nargs='+',
+ help=('Print schema module docs. Choices: all or'
+ ' space-delimited cc_names.'))
parser.add_argument('--annotate', action="store_true", default=False,
help='Annotate existing cloud-config file with errors')
return parser
@@ -355,9 +435,9 @@ def get_parser(parser=None):
def handle_schema_args(name, args):
"""Handle provided schema args and perform the appropriate actions."""
- exclusive_args = [args.config_file, args.doc]
+ exclusive_args = [args.config_file, args.docs]
if not any(exclusive_args) or all(exclusive_args):
- error('Expected either --config-file argument or --doc')
+ error('Expected either --config-file argument or --docs')
full_schema = get_schema()
if args.config_file:
try:
@@ -370,9 +450,16 @@ def handle_schema_args(name, args):
error(str(e))
else:
print("Valid cloud-config file {0}".format(args.config_file))
- if args.doc:
+ elif args.docs:
+ schema_ids = [subschema['id'] for subschema in full_schema['allOf']]
+ schema_ids += ['all']
+ invalid_docs = set(args.docs).difference(set(schema_ids))
+ if invalid_docs:
+ error('Invalid --docs value {0}. Must be one of: {1}'.format(
+ list(invalid_docs), ', '.join(schema_ids)))
for subschema in full_schema['allOf']:
- print(get_schema_doc(subschema))
+ if 'all' in args.docs or subschema['id'] in args.docs:
+ print(get_schema_doc(subschema))
def main():
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
index 67646b03..b00f2083 100644
--- a/cloudinit/config/tests/test_disable_ec2_metadata.py
+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
@@ -15,10 +15,8 @@ DISABLE_CFG = {'disable_ec2_metadata': 'true'}
class TestEC2MetadataRoute(CiTestCase):
- with_logs = True
-
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_ifconfig(self, m_subp, m_which):
"""Set the route if ifconfig command is available"""
m_which.side_effect = lambda x: x if x == 'ifconfig' else None
@@ -27,8 +25,8 @@ class TestEC2MetadataRoute(CiTestCase):
['route', 'add', '-host', '169.254.169.254', 'reject'],
capture=False)
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_ip(self, m_subp, m_which):
"""Set the route if ip command is available"""
m_which.side_effect = lambda x: x if x == 'ip' else None
@@ -37,8 +35,8 @@ class TestEC2MetadataRoute(CiTestCase):
['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
capture=False)
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_no_tool(self, m_subp, m_which):
"""Log error when neither route nor ip commands are available"""
m_which.return_value = None # Find neither ifconfig nor ip
diff --git a/cloudinit/config/tests/test_final_message.py b/cloudinit/config/tests/test_final_message.py
new file mode 100644
index 00000000..46ba99b2
--- /dev/null
+++ b/cloudinit/config/tests/test_final_message.py
@@ -0,0 +1,46 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_final_message import handle
+
+
+class TestHandle:
+ # TODO: Expand these tests to cover full functionality; currently they only
+ # cover the logic around how the boot-finished file is written (and not its
+ # contents).
+
+ @pytest.mark.parametrize(
+ "instance_dir_exists,file_is_written,expected_log_substring",
+ [
+ (True, True, None),
+ (False, False, "Failed to write boot finished file "),
+ ],
+ )
+ def test_boot_finished_written(
+ self,
+ instance_dir_exists,
+ file_is_written,
+ expected_log_substring,
+ caplog,
+ tmpdir,
+ ):
+ instance_dir = tmpdir.join("var/lib/cloud/instance")
+ if instance_dir_exists:
+ instance_dir.ensure_dir()
+ boot_finished = instance_dir.join("boot-finished")
+
+ m_cloud = mock.Mock(
+ paths=mock.Mock(boot_finished=boot_finished.strpath)
+ )
+
+ handle(None, {}, m_cloud, logging.getLogger(), [])
+
+ # We should not change the status of the instance directory
+ assert instance_dir_exists == instance_dir.exists()
+ assert file_is_written == boot_finished.exists()
+
+ if expected_log_substring:
+ assert expected_log_substring in caplog.text
diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/cloudinit/config/tests/test_grub_dpkg.py
new file mode 100644
index 00000000..99c05bb5
--- /dev/null
+++ b/cloudinit/config/tests/test_grub_dpkg.py
@@ -0,0 +1,176 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import pytest
+
+from unittest import mock
+from logging import Logger
+from cloudinit.subp import ProcessExecutionError
+from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle
+
+
+class TestFetchIdevs:
+ """Tests cc_grub_dpkg.fetch_idevs()"""
+
+ # Note: udevadm info returns devices in a large single line string
+ @pytest.mark.parametrize(
+ "grub_output,path_exists,expected_log_call,udevadm_output"
+ ",expected_idevs",
+ [
+ # Inside a container, grub not installed
+ (
+ ProcessExecutionError(reason=FileNotFoundError()),
+ False,
+ mock.call("'grub-probe' not found in $PATH"),
+ '',
+ '',
+ ),
+ # Inside a container, grub installed
+ (
+ ProcessExecutionError(stderr="failed to get canonical path"),
+ False,
+ mock.call("grub-probe 'failed to get canonical path'"),
+ '',
+ '',
+ ),
+ # KVM Instance
+ (
+ ['/dev/vda'],
+ True,
+ None,
+ (
+ '/dev/disk/by-path/pci-0000:00:00.0 ',
+ '/dev/disk/by-path/virtio-pci-0000:00:00.0 '
+ ),
+ '/dev/vda',
+ ),
+ # Xen Instance
+ (
+ ['/dev/xvda'],
+ True,
+ None,
+ '',
+ '/dev/xvda',
+ ),
+ # NVMe Hardware Instance
+ (
+ ['/dev/nvme1n1'],
+ True,
+ None,
+ (
+ '/dev/disk/by-id/nvme-Company_hash000 ',
+ '/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ',
+ '/dev/disk/by-path/pci-0000:00:00.0-nvme-0 '
+ ),
+ '/dev/disk/by-id/nvme-Company_hash000',
+ ),
+ # SCSI Hardware Instance
+ (
+ ['/dev/sda'],
+ True,
+ None,
+ (
+ '/dev/disk/by-id/company-user-1 ',
+ '/dev/disk/by-id/scsi-0Company_user-1 ',
+ '/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 '
+ ),
+ '/dev/disk/by-id/company-user-1',
+ ),
+ ],
+ )
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output,
+ path_exists, expected_log_call, udevadm_output,
+ expected_idevs):
+ """Tests outputs from grub-probe and udevadm info against grub-dpkg"""
+ m_subp.side_effect = [
+ grub_output,
+ ["".join(udevadm_output)]
+ ]
+ m_exists.return_value = path_exists
+ log = mock.Mock(spec=Logger)
+ idevs = fetch_idevs(log)
+ assert expected_idevs == idevs
+ if expected_log_call is not None:
+ assert expected_log_call in log.debug.call_args_list
+
+
+class TestHandle:
+ """Tests cc_grub_dpkg.handle()"""
+
+ @pytest.mark.parametrize(
+ "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output",
+ [
+ (
+ # No configuration
+ None,
+ None,
+ '/dev/disk/by-id/nvme-Company_hash000',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/disk/by-id/nvme-Company_hash000','false'"
+ ),
+ ),
+ (
+ # idevs set, idevs_empty unset
+ '/dev/sda',
+ None,
+ '/dev/sda',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/sda','false'"
+ ),
+ ),
+ (
+ # idevs unset, idevs_empty set
+ None,
+ 'true',
+ '/dev/xvda',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/xvda','true'"
+ ),
+ ),
+ (
+ # idevs set, idevs_empty set
+ '/dev/vda',
+ 'false',
+ '/dev/disk/by-id/company-user-1',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/vda','false'"
+ ),
+ ),
+ (
+ # idevs set, idevs_empty set
+ # Respect what the user defines, even if its logically wrong
+ '/dev/nvme0n1',
+ 'true',
+ '',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/nvme0n1','true'"
+ ),
+ )
+ ],
+ )
+ @mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs,
+ cfg_idevs, cfg_idevs_empty, fetch_idevs_output,
+ expected_log_output):
+ """Test setting of correct debconf database entries"""
+ m_get_cfg_str.side_effect = [
+ cfg_idevs,
+ cfg_idevs_empty
+ ]
+ m_fetch_idevs.return_value = fetch_idevs_output
+ log = mock.Mock(spec=Logger)
+ handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock())
+ log.debug.assert_called_with("".join(expected_log_output))
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py
new file mode 100644
index 00000000..764a33e3
--- /dev/null
+++ b/cloudinit/config/tests/test_mounts.py
@@ -0,0 +1,28 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_mounts import create_swapfile
+
+
+M_PATH = 'cloudinit.config.cc_mounts.'
+
+
+class TestCreateSwapfile:
+
+ @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other'))
+ @mock.patch(M_PATH + 'util.get_mount_info')
+ @mock.patch(M_PATH + 'subp.subp')
+ def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
+ swap_file = tmpdir.join("swap-file")
+ fname = str(swap_file)
+
+ # Some of the calls to subp.subp should create the swap file; this
+ # roughly approximates that
+ m_subp.side_effect = lambda *args, **kwargs: swap_file.write('')
+
+ m_get_mount_info.return_value = (mock.ANY, fstype)
+
+ create_swapfile(fname, '')
+ assert mock.call(['mkswap', fname]) in m_subp.call_args_list
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
new file mode 100644
index 00000000..6546a0b5
--- /dev/null
+++ b/cloudinit/config/tests/test_resolv_conf.py
@@ -0,0 +1,86 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_resolv_conf import generate_resolv_conf
+
+
+EXPECTED_HEADER = """\
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#\n\n"""
+
+
+class TestGenerateResolvConf:
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file):
+ generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock())
+
+ assert [
+ mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_target_fname_is_used_if_passed(self, m_render_to_file):
+ generate_resolv_conf(
+ "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path"
+ )
+
+ assert [
+ mock.call(mock.ANY, "/use/this/path", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ # Patch in templater so we can assert on the actual generated content
+ @mock.patch("cloudinit.templater.util.write_file")
+ # Parameterise with the value to be passed to generate_resolv_conf as the
+ # params parameter, and the expected line after the header as
+ # expected_extra_line.
+ @pytest.mark.parametrize(
+ "params,expected_extra_line",
+ [
+ # No options
+ ({}, None),
+ # Just a true flag
+ ({"options": {"foo": True}}, "options foo"),
+ # Just a false flag
+ ({"options": {"foo": False}}, None),
+ # Just an option
+ ({"options": {"foo": "some_value"}}, "options foo:some_value"),
+ # A true flag and an option
+ (
+ {"options": {"foo": "some_value", "bar": True}},
+ "options bar foo:some_value",
+ ),
+ # Two options
+ (
+ {"options": {"foo": "some_value", "bar": "other_value"}},
+ "options bar:other_value foo:some_value",
+ ),
+ # Everything
+ (
+ {
+ "options": {
+ "foo": "some_value",
+ "bar": "other_value",
+ "baz": False,
+ "spam": True,
+ }
+ },
+ "options spam bar:other_value foo:some_value",
+ ),
+ ],
+ )
+ def test_flags_and_options(
+ self, m_write_file, params, expected_extra_line
+ ):
+ generate_resolv_conf("templates/resolv.conf.tmpl", params)
+
+ expected_content = EXPECTED_HEADER
+ if expected_extra_line is not None:
+ # If we have any extra lines, expect a trailing newline
+ expected_content += "\n".join([expected_extra_line, ""])
+ assert [
+ mock.call(mock.ANY, expected_content, mode=mock.ANY)
+ ] == m_write_file.call_args_list
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index 8247c388..daa1ef51 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -14,7 +14,7 @@ class TestHandleSshPwauth(CiTestCase):
with_logs = True
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_unknown_value_logs_warning(self, m_subp):
setpass.handle_ssh_pwauth("floo")
self.assertIn("Unrecognized value: ssh_pwauth=floo",
@@ -22,7 +22,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.assert_not_called()
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
setpass.handle_ssh_pwauth(
@@ -31,7 +31,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.call_args)
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
setpass.handle_ssh_pwauth(
@@ -40,7 +40,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.call_args)
@mock.patch(MODPATH + "update_ssh_config", return_value=False)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
"""If config is not updated, then no system restart should be done."""
setpass.handle_ssh_pwauth(True)
@@ -48,7 +48,7 @@ class TestHandleSshPwauth(CiTestCase):
self.assertIn("No need to restart SSH", self.logs.getvalue())
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
"""If 'unchanged', then no updates to config and no restart."""
setpass.handle_ssh_pwauth(
@@ -56,7 +56,7 @@ class TestHandleSshPwauth(CiTestCase):
m_update_ssh_config.assert_not_called()
m_subp.assert_not_called()
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_valid_change_values(self, m_subp):
"""If value is a valid changen value, then update should be called."""
upname = MODPATH + "update_ssh_config"
@@ -88,7 +88,7 @@ class TestSetPasswordsHandle(CiTestCase):
'ssh_pwauth=None\n',
self.logs.getvalue())
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
"""handle parses command password hashes."""
cloud = self.tmp_cloud(distro='ubuntu')
@@ -98,7 +98,7 @@ class TestSetPasswordsHandle(CiTestCase):
'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
cfg = {'chpasswd': {'list': valid_hashed_pwds}}
- with mock.patch(MODPATH + 'util.subp') as m_subp:
+ with mock.patch(MODPATH + 'subp.subp') as m_subp:
setpass.handle(
'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
self.assertIn(
@@ -112,12 +112,12 @@ class TestSetPasswordsHandle(CiTestCase):
'\n'.join(valid_hashed_pwds) + '\n')],
m_subp.call_args_list)
- @mock.patch(MODPATH + "util.is_FreeBSD")
- @mock.patch(MODPATH + "util.subp")
- def test_freebsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
- self, m_subp, m_is_freebsd):
- """FreeBSD calls custom pw commands instead of chpasswd and passwd"""
- m_is_freebsd.return_value = True
+ @mock.patch(MODPATH + "util.is_BSD")
+ @mock.patch(MODPATH + "subp.subp")
+ def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
+ self, m_subp, m_is_bsd):
+        """BSD doesn't use chpasswd"""
+ m_is_bsd.return_value = True
cloud = self.tmp_cloud(distro='freebsd')
valid_pwds = ['ubuntu:passw0rd']
cfg = {'chpasswd': {'list': valid_pwds}}
@@ -129,18 +129,18 @@ class TestSetPasswordsHandle(CiTestCase):
mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
m_subp.call_args_list)
- @mock.patch(MODPATH + "util.is_FreeBSD")
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "util.is_BSD")
+ @mock.patch(MODPATH + "subp.subp")
def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
- m_is_freebsd):
+ m_is_bsd):
"""handle parses command set random passwords."""
- m_is_freebsd.return_value = False
+ m_is_bsd.return_value = False
cloud = self.tmp_cloud(distro='ubuntu')
valid_random_pwds = [
'root:R',
'ubuntu:RANDOM']
cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
- with mock.patch(MODPATH + 'util.subp') as m_subp:
+ with mock.patch(MODPATH + 'subp.subp') as m_subp:
setpass.handle(
'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
self.assertIn(
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index cbbb173d..6d4c014a 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -92,7 +92,7 @@ class TestAddAssertions(CiTestCase):
super(TestAddAssertions, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_on_empty_list(self, m_subp):
"""When provided with an empty list, add_assertions does nothing."""
add_assertions([])
@@ -107,7 +107,7 @@ class TestAddAssertions(CiTestCase):
"assertion parameter was not a list or dict: I'm Not Valid",
str(context_manager.exception))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_adds_assertions_as_list(self, m_subp):
"""When provided with a list, add_assertions adds all assertions."""
self.assertEqual(
@@ -130,7 +130,7 @@ class TestAddAssertions(CiTestCase):
self.assertEqual(
util.load_file(compare_file), util.load_file(assert_file))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_adds_assertions_as_dict(self, m_subp):
"""When provided with a dict, add_assertions adds all assertions."""
self.assertEqual(
@@ -168,7 +168,7 @@ class TestRunCommands(CiTestCase):
super(TestRunCommands, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_run_commands_on_empty_list(self, m_subp):
"""When provided with an empty list, run_commands does nothing."""
run_commands([])
@@ -310,6 +310,52 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
{'snap': {'commands': {'01': 'also valid'}}}, schema)
self.assertEqual('', self.logs.getvalue())
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_commands_values_are_invalid_type(self, _):
+ """Warnings when snap:commands values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {'snap': {'commands': [123]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'commands': {'01': 123}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.commands.0: 123 is not valid under any of the given"
+ " schemas\n"
+ "WARNING: Invalid config:\n"
+ "snap.commands.01: 123 is not valid under any of the given"
+ " schemas\n",
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_commands_list_values_are_invalid_type(self, _):
+ """Warnings when snap:commands list values are wrong type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {'snap': {'commands': [["snap", "install", 123]]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.commands.0: ['snap', 'install', 123] is not valid under any"
+ " of the given schemas\n"
+ "WARNING: Invalid config:\n"
+ "snap.commands.01: ['snap', 'install', 123] is not valid under any"
+ " of the given schemas\n",
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_assertions_values_are_invalid_type(self, _):
+ """Warnings when snap:assertions values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {'snap': {'assertions': [123]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'assertions': {'01': 123}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.assertions.0: 123 is not of type 'string'\n"
+ "WARNING: Invalid config:\n"
+ "snap.assertions.01: 123 is not of type 'string'\n",
+ self.logs.getvalue())
+
@mock.patch('cloudinit.config.cc_snap.add_assertions')
def test_warn_schema_assertions_is_not_list_or_dict(self, _):
"""Warn when snap:assertions config is not a list or dict."""
@@ -345,7 +391,7 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
def test_duplicates_are_fine_array_array(self):
"""Duplicated commands array/array entries are allowed."""
self.assertSchemaValid(
- {'commands': [["echo", "bye"], ["echo" "bye"]]},
+ {'commands': [["echo", "bye"], ["echo", "bye"]]},
"command entries can be duplicate.")
def test_duplicates_are_fine_array_string(self):
@@ -431,7 +477,7 @@ class TestHandle(CiTestCase):
self.assertEqual('HI\nMOM\n', util.load_file(outfile))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_handle_adds_assertions(self, m_subp):
"""Any configured snap assertions are provided to add_assertions."""
assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
@@ -447,7 +493,7 @@ class TestHandle(CiTestCase):
self.assertEqual(
util.load_file(compare_file), util.load_file(assert_file))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
@skipUnlessJsonSchema()
def test_handle_validates_schema(self, m_subp):
        """Any provided configuration is run through validate_cloudconfig_schema."""
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index 8c4161ef..db7fb726 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -3,7 +3,7 @@
from cloudinit.config.cc_ubuntu_advantage import (
configure_ua, handle, maybe_install_ua_tools, schema)
from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import (
CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
@@ -26,10 +26,10 @@ class TestConfigureUA(CiTestCase):
super(TestConfigureUA, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_error(self, m_subp):
"""Errors from ua attach command are raised."""
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
'Invalid token SomeToken')
with self.assertRaises(RuntimeError) as context_manager:
configure_ua(token='SomeToken')
@@ -39,7 +39,7 @@ class TestConfigureUA(CiTestCase):
'Stdout: Invalid token SomeToken\nStderr: -',
str(context_manager.exception))
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_token(self, m_subp):
"""When token is provided, attach the machine to ua using the token."""
configure_ua(token='SomeToken')
@@ -48,7 +48,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_on_service_error(self, m_subp):
"""all services should be enabled and then any failures raised"""
@@ -56,7 +56,7 @@ class TestConfigureUA(CiTestCase):
fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
if cmd in fail_cmds and capture:
svc = cmd[-1]
- raise util.ProcessExecutionError(
+ raise subp.ProcessExecutionError(
'Invalid {} credentials'.format(svc.upper()))
m_subp.side_effect = fake_subp
@@ -83,7 +83,7 @@ class TestConfigureUA(CiTestCase):
'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
str(context_manager.exception))
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_empty_services(self, m_subp):
"""When services is an empty list, do not auto-enable attach."""
configure_ua(token='SomeToken', enable=[])
@@ -92,7 +92,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_specific_services(self, m_subp):
        """When services is a list, only enable specific services."""
configure_ua(token='SomeToken', enable=['fips'])
@@ -105,7 +105,7 @@ class TestConfigureUA(CiTestCase):
self.logs.getvalue())
@mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_string_services(self, m_subp):
        """When services is a string, treat as singleton list and warn"""
configure_ua(token='SomeToken', enable='fips')
@@ -119,7 +119,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_weird_services(self, m_subp):
        """When services is not a string or list, warn but still attach"""
configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
@@ -285,7 +285,7 @@ class TestMaybeInstallUATools(CiTestCase):
super(TestMaybeInstallUATools, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
"""Do nothing if ubuntu-advantage-tools already exists."""
m_which.return_value = '/usr/bin/ua' # already installed
@@ -294,7 +294,7 @@ class TestMaybeInstallUATools(CiTestCase):
'Some apt error')
maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_raises_update_errors(self, m_which):
"""maybe_install_ua_tools logs and raises apt update errors."""
m_which.return_value = None
@@ -306,7 +306,7 @@ class TestMaybeInstallUATools(CiTestCase):
self.assertEqual('Some apt error', str(context_manager.exception))
self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_raises_install_errors(self, m_which):
"""maybe_install_ua_tools logs and raises package install errors."""
m_which.return_value = None
@@ -320,7 +320,7 @@ class TestMaybeInstallUATools(CiTestCase):
self.assertIn(
'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue())
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_happy_path(self, m_which):
"""maybe_install_ua_tools installs ubuntu-advantage-tools."""
m_which.return_value = None
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
index 46952692..504ba356 100644
--- a/cloudinit/config/tests/test_ubuntu_drivers.py
+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
@@ -7,7 +7,7 @@ from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
from cloudinit.config.schema import (
SchemaValidationError, validate_cloudconfig_schema)
from cloudinit.config import cc_ubuntu_drivers as drivers
-from cloudinit.util import ProcessExecutionError
+from cloudinit.subp import ProcessExecutionError
MPATH = "cloudinit.config.cc_ubuntu_drivers."
M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
@@ -16,6 +16,13 @@ OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
"(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
+# The tests in this module call helper methods which are decorated with
+# mock.patch. pylint doesn't understand that mock.patch passes parameters to
+# the decorated function, so it incorrectly reports that we aren't passing
+# values for all parameters. Instead of annotating every single call, we
+# disable it for the entire module:
+# pylint: disable=no-value-for-parameter
+
class AnyTempScriptAndDebconfFile(object):
def __init__(self, tmp_dir, debconf_file):
@@ -46,8 +53,8 @@ class TestUbuntuDrivers(CiTestCase):
schema=drivers.schema, strict=True)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def _assert_happy_path_taken(
self, config, m_which, m_subp, m_tmp):
"""Positive path test through handle. Package should be installed."""
@@ -73,8 +80,8 @@ class TestUbuntuDrivers(CiTestCase):
self._assert_happy_path_taken(new_config)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp")
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_handle_raises_error_if_no_drivers_found(
self, m_which, m_subp, m_tmp):
"""If ubuntu-drivers doesn't install any drivers, raise an error."""
@@ -102,8 +109,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertIn('ubuntu-drivers found no drivers for installation',
self.logs.getvalue())
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def _assert_inert_with_config(self, config, m_which, m_subp):
"""Helper to reduce repetition when testing negative cases"""
myCloud = mock.MagicMock()
@@ -147,8 +154,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertEqual(0, m_install_drivers.call_count)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=True)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=True)
def test_install_drivers_no_install_if_present(
self, m_which, m_subp, m_tmp):
"""If 'ubuntu-drivers' is present, no package install should occur."""
@@ -174,8 +181,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertEqual(0, pkg_install.call_count)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp")
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
self, m_which, m_subp, m_tmp):
"""Older ubuntu-drivers versions should emit message and raise error"""
@@ -212,8 +219,8 @@ class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
tdir = self.tmp_dir()
debconf_file = os.path.join(tdir, 'nvidia.template')
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
index f620b597..df89ddb3 100644
--- a/cloudinit/config/tests/test_users_groups.py
+++ b/cloudinit/config/tests/test_users_groups.py
@@ -39,7 +39,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -65,7 +65,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='freebsd', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_fbsd_user.call_args_list,
[mock.call('freebsd', groups='wheel', lock_passwd=True,
shell='/bin/tcsh'),
@@ -86,7 +86,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -107,7 +107,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -146,7 +146,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),