author     zsdc <taras@vyos.io>    2022-03-25 20:58:01 +0200
committer  zsdc <taras@vyos.io>    2022-03-25 21:42:00 +0200
commit     31448cccedd8f841fb3ac7d0f2e3cdefe08a53ba (patch)
tree       349631a02467dae0158f6f663cc8aa8537974a97 /cloudinit/config
parent     5c4b3943343a85fbe517e5ec1fc670b3a8566b4b (diff)
parent     8537237d80a48c8f0cbf8e66aa4826bbc882b022 (diff)
T2117: Cloud-init updated to 22.1
Merged with the 22.1 tag from the upstream Cloud-init repository. Our modules were slightly modified for compatibility with the new version.
Diffstat (limited to 'cloudinit/config')
-rw-r--r--  cloudinit/config/__init__.py  20
-rw-r--r--  cloudinit/config/cc_apk_configure.py  185
-rw-r--r--  cloudinit/config/cc_apt_configure.py  765
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py  74
-rw-r--r--  cloudinit/config/cc_bootcmd.py  69
-rwxr-xr-x  cloudinit/config/cc_byobu.py  61
-rw-r--r--  cloudinit/config/cc_ca_certs.py  231
-rw-r--r--  cloudinit/config/cc_chef.py  549
-rw-r--r--  cloudinit/config/cc_debug.py  72
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py  64
-rw-r--r--  cloudinit/config/cc_disk_setup.py  502
-rw-r--r--  cloudinit/config/cc_emit_upstart.py  26
-rw-r--r--  cloudinit/config/cc_fan.py  68
-rw-r--r--  cloudinit/config/cc_final_message.py  26
-rw-r--r--  cloudinit/config/cc_foo.py  1
-rw-r--r--  cloudinit/config/cc_growpart.py  162
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py  54
-rw-r--r--  cloudinit/config/cc_install_hotplug.py  151
-rw-r--r--  cloudinit/config/cc_keyboard.py  129
-rw-r--r--  cloudinit/config/cc_keys_to_console.py  59
-rw-r--r--  cloudinit/config/cc_landscape.py  24
-rw-r--r--  cloudinit/config/cc_locale.py  64
-rw-r--r--  cloudinit/config/cc_lxd.py  186
-rw-r--r--  cloudinit/config/cc_mcollective.py  50
-rw-r--r--  cloudinit/config/cc_migrator.py  27
-rw-r--r--  cloudinit/config/cc_mounts.py  185
-rw-r--r--  cloudinit/config/cc_ntp.py  572
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py  30
-rw-r--r--  cloudinit/config/cc_phone_home.py  111
-rw-r--r--  cloudinit/config/cc_power_state_change.py  58
-rw-r--r--  cloudinit/config/cc_puppet.py  274
-rw-r--r--  cloudinit/config/cc_refresh_rmc_and_interface.py  53
-rw-r--r--  cloudinit/config/cc_reset_rmc.py  43
-rw-r--r--  cloudinit/config/cc_resizefs.py  198
-rw-r--r--  cloudinit/config/cc_resizefs_vyos.py  333
-rw-r--r--  cloudinit/config/cc_resolv_conf.py  61
-rw-r--r--  cloudinit/config/cc_rh_subscription.py  248
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py  33
-rw-r--r--  cloudinit/config/cc_rsyslog.py  101
-rw-r--r--  cloudinit/config/cc_runcmd.py  95
-rw-r--r--  cloudinit/config/cc_salt_minion.py  71
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py  16
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py  14
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py  14
-rw-r--r--  cloudinit/config/cc_scripts_user.py  12
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py  22
-rw-r--r--  cloudinit/config/cc_seed_random.py  53
-rw-r--r--  cloudinit/config/cc_set_hostname.py  40
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py  85
-rw-r--r--  cloudinit/config/cc_snap.py  181
-rw-r--r--  cloudinit/config/cc_spacewalk.py  67
-rwxr-xr-x  cloudinit/config/cc_ssh.py  133
-rwxr-xr-x  cloudinit/config/cc_ssh_authkey_fingerprints.py  75
-rwxr-xr-x  cloudinit/config/cc_ssh_import_id.py  23
-rw-r--r--  cloudinit/config/cc_timezone.py  2
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py  166
-rw-r--r--  cloudinit/config/cc_ubuntu_drivers.py  145
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py  72
-rw-r--r--  cloudinit/config/cc_update_hostname.py  27
-rw-r--r--  cloudinit/config/cc_users_groups.py  39
-rw-r--r--  cloudinit/config/cc_vyos.py  5
-rw-r--r--  cloudinit/config/cc_vyos_userdata.py  5
-rw-r--r--  cloudinit/config/cc_write_files.py  273
-rw-r--r--  cloudinit/config/cc_write_files_deferred.py  56
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py  69
-rw-r--r--  cloudinit/config/cc_zypper_add_repo.py  167
-rw-r--r--  cloudinit/config/cloud-init-schema.json  560
-rw-r--r--  cloudinit/config/schema.py  658
-rw-r--r--  cloudinit/config/tests/test_apt_pipelining.py  28
-rw-r--r--  cloudinit/config/tests/test_disable_ec2_metadata.py  48
-rw-r--r--  cloudinit/config/tests/test_final_message.py  46
-rw-r--r--  cloudinit/config/tests/test_grub_dpkg.py  176
-rw-r--r--  cloudinit/config/tests/test_mounts.py  61
-rw-r--r--  cloudinit/config/tests/test_resolv_conf.py  86
-rw-r--r--  cloudinit/config/tests/test_set_passwords.py  155
-rw-r--r--  cloudinit/config/tests/test_snap.py  564
-rw-r--r--  cloudinit/config/tests/test_ssh.py  405
-rw-r--r--  cloudinit/config/tests/test_ubuntu_advantage.py  333
-rw-r--r--  cloudinit/config/tests/test_ubuntu_drivers.py  244
-rw-r--r--  cloudinit/config/tests/test_users_groups.py  172
80 files changed, 5515 insertions, 5867 deletions
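
Most of the per-module churn in the diffs below follows one pattern: the inline jsonschema dict that each module used to validate in handle() with validate_cloudconfig_schema() and document with get_schema_doc() is replaced by a MetaSchema metadata dict rendered with get_meta_doc(), while the property definitions move into cloud-init-schema.json and are validated centrally. A minimal sketch of the resulting 22.1-style module layout, using a hypothetical cc_example module (not a real cloud-init module), with the rest of the changes being black/isort-style reformatting:

from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE

# Only documentation metadata stays in the module; the JSON schema
# properties now live in cloudinit/config/cloud-init-schema.json.
meta: MetaSchema = {
    "id": "cc_example",
    "name": "Example",
    "title": "Configure an example feature",
    "description": "Illustrates the 22.1 metadata layout.",
    "distros": ["all"],
    "examples": ["example_key: example_value"],
    "frequency": PER_INSTANCE,
}
__doc__ = get_meta_doc(meta)


def handle(name, cfg, cloud, log, _args):
    # 22.0-style modules called validate_cloudconfig_schema(cfg, schema)
    # here; in 22.1 that per-module call is removed because validation
    # happens centrally against cloud-init-schema.json.
    log.debug("Module %s received config: %s", name, cfg)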
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
index 0ef9a748..ed124180 100644
--- a/cloudinit/config/__init__.py
+++ b/cloudinit/config/__init__.py
@@ -6,9 +6,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES)
-
from cloudinit import log as logging
+from cloudinit.settings import FREQUENCIES, PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -22,26 +21,27 @@ MOD_PREFIX = "cc_"
def form_module_name(name):
canon_name = name.replace("-", "_")
if canon_name.lower().endswith(".py"):
- canon_name = canon_name[0:(len(canon_name) - 3)]
+ canon_name = canon_name[0 : (len(canon_name) - 3)]
canon_name = canon_name.strip()
if not canon_name:
return None
if not canon_name.startswith(MOD_PREFIX):
- canon_name = '%s%s' % (MOD_PREFIX, canon_name)
+ canon_name = "%s%s" % (MOD_PREFIX, canon_name)
return canon_name
def fixup_module(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
+ if not hasattr(mod, "frequency"):
+ setattr(mod, "frequency", def_freq)
else:
freq = mod.frequency
if freq and freq not in FREQUENCIES:
LOG.warning("Module %s has an unknown frequency %s", mod, freq)
- if not hasattr(mod, 'distros'):
- setattr(mod, 'distros', [])
- if not hasattr(mod, 'osfamilies'):
- setattr(mod, 'osfamilies', [])
+ if not hasattr(mod, "distros"):
+ setattr(mod, "distros", [])
+ if not hasattr(mod, "osfamilies"):
+ setattr(mod, "osfamilies", [])
return mod
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py
index 84d7a0b6..0952c971 100644
--- a/cloudinit/config/cc_apk_configure.py
+++ b/cloudinit/config/cc_apk_configure.py
@@ -9,11 +9,8 @@
from textwrap import dedent
from cloudinit import log as logging
-from cloudinit import temp_utils
-from cloudinit import templater
-from cloudinit import util
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import temp_utils, templater, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -55,34 +52,41 @@ REPOSITORIES_TEMPLATE = """\
frequency = PER_INSTANCE
-distros = ['alpine']
-schema = {
- 'id': 'cc_apk_configure',
- 'name': 'APK Configure',
- 'title': 'Configure apk repositories file',
- 'description': dedent("""\
+distros = ["alpine"]
+meta: MetaSchema = {
+ "id": "cc_apk_configure",
+ "name": "APK Configure",
+ "title": "Configure apk repositories file",
+ "description": dedent(
+ """\
This module handles configuration of the /etc/apk/repositories file.
.. note::
To ensure that apk configuration is valid yaml, any strings
containing special characters, especially ``:`` should be quoted.
- """),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Keep the existing /etc/apk/repositories file unaltered.
apk_repos:
preserve_repositories: true
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Create repositories file for Alpine v3.12 main and community
# using default mirror site.
apk_repos:
alpine_repo:
community_enabled: true
version: 'v3.12'
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Create repositories file for Alpine Edge main, community, and
# testing using a specified mirror site and also a local repo.
apk_repos:
@@ -92,93 +96,13 @@ schema = {
testing_enabled: true
version: 'edge'
local_repo_base_url: 'https://my-local-server/local-alpine'
- """),
+ """
+ ),
],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'apk_repos': {
- 'type': 'object',
- 'properties': {
- 'preserve_repositories': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- By default, cloud-init will generate a new repositories
- file ``/etc/apk/repositories`` based on any valid
- configuration settings specified within a apk_repos
- section of cloud config. To disable this behavior and
- preserve the repositories file from the pristine image,
- set ``preserve_repositories`` to ``true``.
-
- The ``preserve_repositories`` option overrides
- all other config keys that would alter
- ``/etc/apk/repositories``.
- """)
- },
- 'alpine_repo': {
- 'type': ['object', 'null'],
- 'properties': {
- 'base_url': {
- 'type': 'string',
- 'default': DEFAULT_MIRROR,
- 'description': dedent("""\
- The base URL of an Alpine repository, or
- mirror, to download official packages from.
- If not specified then it defaults to ``{}``
- """.format(DEFAULT_MIRROR))
- },
- 'community_enabled': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- Whether to add the Community repo to the
- repositories file. By default the Community
- repo is not included.
- """)
- },
- 'testing_enabled': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- Whether to add the Testing repo to the
- repositories file. By default the Testing
- repo is not included. It is only recommended
- to use the Testing repo on a machine running
- the ``Edge`` version of Alpine as packages
- installed from Testing may have dependancies
- that conflict with those in non-Edge Main or
- Community repos."
- """)
- },
- 'version': {
- 'type': 'string',
- 'description': dedent("""\
- The Alpine version to use (e.g. ``v3.12`` or
- ``edge``)
- """)
- },
- },
- 'required': ['version'],
- 'minProperties': 1,
- 'additionalProperties': False,
- },
- 'local_repo_base_url': {
- 'type': 'string',
- 'description': dedent("""\
- The base URL of an Alpine repository containing
- unofficial packages
- """)
- }
- },
- 'required': [],
- 'minProperties': 1, # Either preserve_repositories or alpine_repo
- 'additionalProperties': False,
- }
- }
+ "frequency": frequency,
}
-__doc__ = get_schema_doc(schema)
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
@@ -194,38 +118,42 @@ def handle(name, cfg, cloud, log, _args):
# If there is no "apk_repos" section in the configuration
# then do nothing.
- apk_section = cfg.get('apk_repos')
+ apk_section = cfg.get("apk_repos")
if not apk_section:
- LOG.debug(("Skipping module named %s,"
- " no 'apk_repos' section found"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'apk_repos' section found", name
+ )
return
- validate_cloudconfig_schema(cfg, schema)
-
# If "preserve_repositories" is explicitly set to True in
# the configuration do nothing.
- if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False):
- LOG.debug(("Skipping module named %s,"
- " 'preserve_repositories' is set"), name)
+ if util.get_cfg_option_bool(apk_section, "preserve_repositories", False):
+ LOG.debug(
+ "Skipping module named %s, 'preserve_repositories' is set", name
+ )
return
# If there is no "alpine_repo" subsection of "apk_repos" present in the
# configuration then do nothing, as at least "version" is required to
# create valid repositories entries.
- alpine_repo = apk_section.get('alpine_repo')
+ alpine_repo = apk_section.get("alpine_repo")
if not alpine_repo:
- LOG.debug(("Skipping module named %s,"
- " no 'alpine_repo' configuration found"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'alpine_repo' configuration found",
+ name,
+ )
return
# If there is no "version" value present in configuration then do nothing.
- alpine_version = alpine_repo.get('version')
+ alpine_version = alpine_repo.get("version")
if not alpine_version:
- LOG.debug(("Skipping module named %s,"
- " 'version' not specified in alpine_repo"), name)
+ LOG.debug(
+ "Skipping module named %s, 'version' not specified in alpine_repo",
+ name,
+ )
return
- local_repo = apk_section.get('local_repo_base_url', '')
+ local_repo = apk_section.get("local_repo_base_url", "")
_write_repositories_file(alpine_repo, alpine_version, local_repo)
@@ -239,22 +167,23 @@ def _write_repositories_file(alpine_repo, alpine_version, local_repo):
@param local_repo: A string containing the base URL of a local repo.
"""
- repo_file = '/etc/apk/repositories'
+ repo_file = "/etc/apk/repositories"
- alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR)
+ alpine_baseurl = alpine_repo.get("base_url", DEFAULT_MIRROR)
- params = {'alpine_baseurl': alpine_baseurl,
- 'alpine_version': alpine_version,
- 'community_enabled': alpine_repo.get('community_enabled'),
- 'testing_enabled': alpine_repo.get('testing_enabled'),
- 'local_repo': local_repo}
+ params = {
+ "alpine_baseurl": alpine_baseurl,
+ "alpine_version": alpine_version,
+ "community_enabled": alpine_repo.get("community_enabled"),
+ "testing_enabled": alpine_repo.get("testing_enabled"),
+ "local_repo": local_repo,
+ }
- tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl")
template_fn = tfile[1] # Filepath is second item in tuple
util.write_file(template_fn, content=REPOSITORIES_TEMPLATE)
- LOG.debug('Generating Alpine repository configuration file: %s',
- repo_file)
+ LOG.debug("Generating Alpine repository configuration file: %s", repo_file)
templater.render_to_file(template_fn, repo_file, params)
# Clean up temporary template
util.del_file(template_fn)
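
The rewritten handle() above keeps the same short-circuit order as before: a missing apk_repos section, preserve_repositories set to true, a missing alpine_repo subsection, or a missing version each cause an early return before the repositories file is touched. A condensed, self-contained sketch of that decision chain (the helper name and sample config are illustrative, not part of the module):

def should_write_repositories(cfg):
    # Mirrors the guards in cc_apk_configure.handle(): return the data
    # needed to render /etc/apk/repositories, or None to do nothing.
    apk_section = cfg.get("apk_repos")
    if not apk_section:
        return None  # no 'apk_repos' section at all
    if apk_section.get("preserve_repositories", False):
        return None  # explicitly told to keep the pristine file
    alpine_repo = apk_section.get("alpine_repo")
    if not alpine_repo:
        return None  # nothing to build repository entries from
    if not alpine_repo.get("version"):
        return None  # 'version' is the one required key
    return alpine_repo, apk_section.get("local_repo_base_url", "")


print(should_write_repositories(
    {"apk_repos": {"alpine_repo": {"version": "v3.12"}}}
))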
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 73d8719f..c558311a 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -10,16 +10,14 @@
import glob
import os
+import pathlib
import re
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
from cloudinit import gpg
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import templater
-from cloudinit import util
+from cloudinit import subp, templater, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -27,45 +25,19 @@ LOG = logging.getLogger(__name__)
# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+APT_LOCAL_KEYS = "/etc/apt/trusted.gpg"
+APT_TRUSTED_GPG_DIR = "/etc/apt/trusted.gpg.d/"
+CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/"
+
frequency = PER_INSTANCE
distros = ["ubuntu", "debian"]
-mirror_property = {
- 'type': 'array',
- 'item': {
- 'type': 'object',
- 'additionalProperties': False,
- 'required': ['arches'],
- 'properties': {
- 'arches': {
- 'type': 'array',
- 'item': {
- 'type': 'string'
- },
- 'minItems': 1
- },
- 'uri': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'search': {
- 'type': 'array',
- 'item': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'minItems': 1
- },
- 'search_dns': {
- 'type': 'boolean',
- }
- }
- }
-}
-schema = {
- 'id': 'cc_apt_configure',
- 'name': 'Apt Configure',
- 'title': 'Configure apt for the user',
- 'description': dedent("""\
+
+meta: MetaSchema = {
+ "id": "cc_apt_configure",
+ "name": "Apt Configure",
+ "title": "Configure apt for the user",
+ "description": dedent(
+ """\
This module handles both configuration of apt options and adding
source lists. There are configuration options such as
``apt_get_wrapper`` and ``apt_get_command`` that control how
@@ -80,9 +52,12 @@ schema = {
.. note::
For more information about apt configuration, see the
- ``Additional apt configuration`` example."""),
- 'distros': distros,
- 'examples': [dedent("""\
+ ``Additional apt configuration`` example."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
apt:
preserve_sources_list: false
disable_suites:
@@ -99,11 +74,12 @@ schema = {
search:
- 'http://cool.but-sometimes-unreachable.com/ubuntu'
- 'http://us.archive.ubuntu.com/ubuntu'
- search_dns: <true/false>
+ search_dns: false
- arches:
- s390x
- arm64
uri: 'http://archive-to-use-for-arm64.example.com/ubuntu'
+
security:
- arches:
- default
@@ -130,7 +106,7 @@ schema = {
source1:
keyid: 'keyid'
keyserver: 'keyserverurl'
- source: 'deb http://<url>/ xenial main'
+ source: 'deb [signed-by=$KEY_FILE] http://<url>/ bionic main'
source2:
source: 'ppa:<ppa-name>'
source3:
@@ -138,239 +114,13 @@ schema = {
key: |
------BEGIN PGP PUBLIC KEY BLOCK-------
<key data>
- ------END PGP PUBLIC KEY BLOCK-------""")],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'apt': {
- 'type': 'object',
- 'additionalProperties': False,
- 'properties': {
- 'preserve_sources_list': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- By default, cloud-init will generate a new sources
- list in ``/etc/apt/sources.list.d`` based on any
- changes specified in cloud config. To disable this
- behavior and preserve the sources list from the
- pristine image, set ``preserve_sources_list``
- to ``true``.
-
- The ``preserve_sources_list`` option overrides
- all other config keys that would alter
- ``sources.list`` or ``sources.list.d``,
- **except** for additional sources to be added
- to ``sources.list.d``.""")
- },
- 'disable_suites': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
- Entries in the sources list can be disabled using
- ``disable_suites``, which takes a list of suites
- to be disabled. If the string ``$RELEASE`` is
- present in a suite in the ``disable_suites`` list,
- it will be replaced with the release name. If a
- suite specified in ``disable_suites`` is not
- present in ``sources.list`` it will be ignored.
- For convenience, several aliases are provided for
- ``disable_suites``:
-
- - ``updates`` => ``$RELEASE-updates``
- - ``backports`` => ``$RELEASE-backports``
- - ``security`` => ``$RELEASE-security``
- - ``proposed`` => ``$RELEASE-proposed``
- - ``release`` => ``$RELEASE``.
-
- When a suite is disabled using ``disable_suites``,
- its entry in ``sources.list`` is not deleted; it
- is just commented out.""")
- },
- 'primary': {
- **mirror_property,
- 'description': dedent("""\
- The primary and security archive mirrors can
- be specified using the ``primary`` and
- ``security`` keys, respectively. Both the
- ``primary`` and ``security`` keys take a list
- of configs, allowing mirrors to be specified
- on a per-architecture basis. Each config is a
- dictionary which must have an entry for
- ``arches``, specifying which architectures
- that config entry is for. The keyword
- ``default`` applies to any architecture not
- explicitly listed. The mirror url can be specified
- with the ``uri`` key, or a list of mirrors to
- check can be provided in order, with the first
- mirror that can be resolved being selected. This
- allows the same configuration to be used in
- different environment, with different hosts used
- for a local apt mirror. If no mirror is provided
- by ``uri`` or ``search``, ``search_dns`` may be
- used to search for dns names in the format
- ``<distro>-mirror`` in each of the following:
-
- - fqdn of this host per cloud metadata,
- - localdomain,
- - domains listed in ``/etc/resolv.conf``.
-
- If there is a dns entry for ``<distro>-mirror``,
- then it is assumed that there is a distro mirror
- at ``http://<distro>-mirror.<domain>/<distro>``.
- If the ``primary`` key is defined, but not the
- ``security`` key, then then configuration for
- ``primary`` is also used for ``security``.
- If ``search_dns`` is used for the ``security``
- key, the search pattern will be
- ``<distro>-security-mirror``.
-
- If no mirrors are specified, or all lookups fail,
- then default mirrors defined in the datasource
- are used. If none are present in the datasource
- either the following defaults are used:
-
- - ``primary`` => \
- ``http://archive.ubuntu.com/ubuntu``.
- - ``security`` => \
- ``http://security.ubuntu.com/ubuntu``
- """)},
- 'security': {
- **mirror_property,
- 'description': dedent("""\
- Please refer to the primary config documentation""")
- },
- 'add_apt_repo_match': {
- 'type': 'string',
- 'default': ADD_APT_REPO_MATCH,
- 'description': dedent("""\
- All source entries in ``apt-sources`` that match
- regex in ``add_apt_repo_match`` will be added to
- the system using ``add-apt-repository``. If
- ``add_apt_repo_match`` is not specified, it
- defaults to ``{}``""".format(ADD_APT_REPO_MATCH))
- },
- 'debconf_selections': {
- 'type': 'object',
- 'items': {'type': 'string'},
- 'description': dedent("""\
- Debconf additional configurations can be specified as a
- dictionary under the ``debconf_selections`` config
- key, with each key in the dict representing a
- different set of configurations. The value of each key
- must be a string containing all the debconf
- configurations that must be applied. We will bundle
- all of the values and pass them to
- ``debconf-set-selections``. Therefore, each value line
- must be a valid entry for ``debconf-set-selections``,
- meaning that they must possess for distinct fields:
-
- ``pkgname question type answer``
-
- Where:
-
- - ``pkgname`` is the name of the package.
- - ``question`` the name of the questions.
- - ``type`` is the type of question.
- - ``answer`` is the value used to ansert the \
- question.
-
- For example: \
- ``ippackage ippackage/ip string 127.0.01``
- """)
- },
- 'sources_list': {
- 'type': 'string',
- 'description': dedent("""\
- Specifies a custom template for rendering
- ``sources.list`` . If no ``sources_list`` template
- is given, cloud-init will use sane default. Within
- this template, the following strings will be
- replaced with the appropriate values:
-
- - ``$MIRROR``
- - ``$RELEASE``
- - ``$PRIMARY``
- - ``$SECURITY``""")
- },
- 'conf': {
- 'type': 'string',
- 'description': dedent("""\
- Specify configuration for apt, such as proxy
- configuration. This configuration is specified as a
- string. For multiline apt configuration, make sure
- to follow yaml syntax.""")
- },
- 'https_proxy': {
- 'type': 'string',
- 'description': dedent("""\
- More convenient way to specify https apt proxy.
- https proxy url is specified in the format
- ``https://[[user][:pass]@]host[:port]/``.""")
- },
- 'http_proxy': {
- 'type': 'string',
- 'description': dedent("""\
- More convenient way to specify http apt proxy.
- http proxy url is specified in the format
- ``http://[[user][:pass]@]host[:port]/``.""")
- },
- 'proxy': {
- 'type': 'string',
- 'description': 'Alias for defining a http apt proxy.'
- },
- 'ftp_proxy': {
- 'type': 'string',
- 'description': dedent("""\
- More convenient way to specify ftp apt proxy.
- ftp proxy url is specified in the format
- ``ftp://[[user][:pass]@]host[:port]/``.""")
- },
- 'sources': {
- 'type': 'object',
- 'items': {'type': 'string'},
- 'description': dedent("""\
- Source list entries can be specified as a
- dictionary under the ``sources`` config key, with
- each key in the dict representing a different source
- file. The key of each source entry will be used
- as an id that can be referenced in other config
- entries, as well as the filename for the source's
- configuration under ``/etc/apt/sources.list.d``.
- If the name does not end with ``.list``, it will
- be appended. If there is no configuration for a
- key in ``sources``, no file will be written, but
- the key may still be referred to as an id in other
- ``sources`` entries.
-
- Each entry under ``sources`` is a dictionary which
- may contain any of the following optional keys:
-
- - ``source``: a sources.list entry \
- (some variable replacements apply).
- - ``keyid``: a key to import via shortid or \
- fingerprint.
- - ``key``: a raw PGP key.
- - ``keyserver``: alternate keyserver to pull \
- ``keyid`` key from.
-
- The ``source`` key supports variable
- replacements for the following strings:
-
- - ``$MIRROR``
- - ``$PRIMARY``
- - ``$SECURITY``
- - ``$RELEASE``""")
- }
- }
- }
- }
+ ------END PGP PUBLIC KEY BLOCK-------"""
+ )
+ ],
+ "frequency": frequency,
}
-__doc__ = get_schema_doc(schema)
+__doc__ = get_meta_doc(meta)
# place where apt stores cached repository data
@@ -384,18 +134,22 @@ APT_PROXY_FN = "/etc/apt/apt.conf.d/90cloud-init-aptproxy"
DEFAULT_KEYSERVER = "keyserver.ubuntu.com"
# Default archive mirrors
-PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/",
- "SECURITY": "http://security.ubuntu.com/ubuntu/"}
-PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
- "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"}
-PRIMARY_ARCHES = ['amd64', 'i386']
-PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']
+PRIMARY_ARCH_MIRRORS = {
+ "PRIMARY": "http://archive.ubuntu.com/ubuntu/",
+ "SECURITY": "http://security.ubuntu.com/ubuntu/",
+}
+PORTS_MIRRORS = {
+ "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+ "SECURITY": "http://ports.ubuntu.com/ubuntu-ports",
+}
+PRIMARY_ARCHES = ["amd64", "i386"]
+PORTS_ARCHES = ["s390x", "arm64", "armhf", "powerpc", "ppc64el", "riscv64"]
def get_default_mirrors(arch=None, target=None):
"""returns the default mirrors for the target. These depend on the
- architecture, for more see:
- https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
+ architecture, for more see:
+ https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
if arch is None:
arch = util.get_dpkg_architecture(target)
if arch in PRIMARY_ARCHES:
@@ -407,8 +161,8 @@ def get_default_mirrors(arch=None, target=None):
def handle(name, ocfg, cloud, log, _):
"""process the config for apt_config. This can be called from
- curthooks if a global apt config was provided or via the "apt"
- standalone command."""
+ curthooks if a global apt config was provided or via the "apt"
+ standalone command."""
# keeping code close to curtin codebase via entry handler
target = None
if log is not None:
@@ -416,14 +170,15 @@ def handle(name, ocfg, cloud, log, _):
LOG = log
# feed back converted config, but only work on the subset under 'apt'
ocfg = convert_to_v3_apt_format(ocfg)
- cfg = ocfg.get('apt', {})
+ cfg = ocfg.get("apt", {})
if not isinstance(cfg, dict):
raise ValueError(
"Expected dictionary for 'apt' config, found {config_type}".format(
- config_type=type(cfg)))
+ config_type=type(cfg)
+ )
+ )
- validate_cloudconfig_schema(cfg, schema)
apply_debconf_selections(cfg, target)
apply_apt(cfg, cloud, target)
@@ -432,7 +187,7 @@ def _should_configure_on_empty_apt():
# if no config was provided, should apt configuration be done?
if util.system_is_snappy():
return False, "system is snappy."
- if not (subp.which('apt-get') or subp.which('apt')):
+ if not (subp.which("apt-get") or subp.which("apt")):
return False, "no apt commands."
return True, "Apt is available."
@@ -447,12 +202,13 @@ def apply_apt(cfg, cloud, target):
LOG.debug("handling apt config: %s", cfg)
- release = util.lsb_release(target=target)['codename']
+ release = util.lsb_release(target=target)["codename"]
arch = util.get_dpkg_architecture(target)
mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
LOG.debug("Apt Mirror info: %s", mirrors)
- if util.is_false(cfg.get('preserve_sources_list', False)):
+ if util.is_false(cfg.get("preserve_sources_list", False)):
+ add_mirror_keys(cfg, target)
generate_sources_list(cfg, release, mirrors, cloud)
rename_apt_lists(mirrors, target, arch)
@@ -462,25 +218,34 @@ def apply_apt(cfg, cloud, target):
LOG.exception("Failed to apply proxy or apt config info:")
# Process 'apt_source -> sources {dict}'
- if 'sources' in cfg:
+ if "sources" in cfg:
params = mirrors
- params['RELEASE'] = release
- params['MIRROR'] = mirrors["MIRROR"]
+ params["RELEASE"] = release
+ params["MIRROR"] = mirrors["MIRROR"]
matcher = None
- matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
+ matchcfg = cfg.get("add_apt_repo_match", ADD_APT_REPO_MATCH)
if matchcfg:
matcher = re.compile(matchcfg).search
- add_apt_sources(cfg['sources'], cloud, target=target,
- template_params=params, aa_repo_match=matcher)
+ add_apt_sources(
+ cfg["sources"],
+ cloud,
+ target=target,
+ template_params=params,
+ aa_repo_match=matcher,
+ )
def debconf_set_selections(selections, target=None):
- if not selections.endswith(b'\n'):
- selections += b'\n'
- subp.subp(['debconf-set-selections'], data=selections, target=target,
- capture=True)
+ if not selections.endswith(b"\n"):
+ selections += b"\n"
+ subp.subp(
+ ["debconf-set-selections"],
+ data=selections,
+ target=target,
+ capture=True,
+ )
def dpkg_reconfigure(packages, target=None):
@@ -500,12 +265,20 @@ def dpkg_reconfigure(packages, target=None):
unhandled.append(pkg)
if len(unhandled):
- LOG.warning("The following packages were installed and preseeded, "
- "but cannot be unconfigured: %s", unhandled)
+ LOG.warning(
+ "The following packages were installed and preseeded, "
+ "but cannot be unconfigured: %s",
+ unhandled,
+ )
if len(to_config):
- subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
- list(to_config), data=None, target=target, capture=True)
+ subp.subp(
+ ["dpkg-reconfigure", "--frontend=noninteractive"]
+ + list(to_config),
+ data=None,
+ target=target,
+ capture=True,
+ )
def apply_debconf_selections(cfg, target=None):
@@ -514,13 +287,12 @@ def apply_debconf_selections(cfg, target=None):
# set1: |
# cloud-init cloud-init/datasources multiselect MAAS
# set2: pkg pkg/value string bar
- selsets = cfg.get('debconf_selections')
+ selsets = cfg.get("debconf_selections")
if not selsets:
LOG.debug("debconf_selections was not set in config")
return
- selections = '\n'.join(
- [selsets[key] for key in sorted(selsets.keys())])
+ selections = "\n".join([selsets[key] for key in sorted(selsets.keys())])
debconf_set_selections(selections.encode(), target=target)
# get a complete list of packages listed in input
@@ -547,7 +319,8 @@ def apply_debconf_selections(cfg, target=None):
def clean_cloud_init(target):
"""clean out any local cloud-init config"""
flist = glob.glob(
- subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+ subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")
+ )
LOG.debug("cleaning cloud-init config from: %s", flist)
for dpkg_cfg in flist:
@@ -556,18 +329,18 @@ def clean_cloud_init(target):
def mirrorurl_to_apt_fileprefix(mirror):
"""mirrorurl_to_apt_fileprefix
- Convert a mirror url to the file prefix used by apt on disk to
- store cache information for that mirror.
- To do so do:
- - take off ???://
- - drop tailing /
- - convert in string / to _"""
+ Convert a mirror url to the file prefix used by apt on disk to
+ store cache information for that mirror.
+ To do so do:
+ - take off ???://
+ - drop tailing /
+ - convert in string / to _"""
string = mirror
if string.endswith("/"):
string = string[0:-1]
pos = string.find("://")
if pos >= 0:
- string = string[pos + 3:]
+ string = string[pos + 3 :]
string = string.replace("/", "_")
return string
@@ -599,8 +372,8 @@ def rename_apt_lists(new_mirrors, target, arch):
def mirror_to_placeholder(tmpl, mirror, placeholder):
"""mirror_to_placeholder
- replace the specified mirror in a template with a placeholder string
- Checks for existance of the expected mirror and warns if not found"""
+ replace the specified mirror in a template with a placeholder string
+ Checks for existance of the expected mirror and warns if not found"""
if mirror not in tmpl:
LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
return tmpl.replace(mirror, placeholder)
@@ -608,13 +381,15 @@ def mirror_to_placeholder(tmpl, mirror, placeholder):
def map_known_suites(suite):
"""there are a few default names which will be auto-extended.
- This comes at the inability to use those names literally as suites,
- but on the other hand increases readability of the cfg quite a lot"""
- mapping = {'updates': '$RELEASE-updates',
- 'backports': '$RELEASE-backports',
- 'security': '$RELEASE-security',
- 'proposed': '$RELEASE-proposed',
- 'release': '$RELEASE'}
+ This comes at the inability to use those names literally as suites,
+ but on the other hand increases readability of the cfg quite a lot"""
+ mapping = {
+ "updates": "$RELEASE-updates",
+ "backports": "$RELEASE-backports",
+ "security": "$RELEASE-security",
+ "proposed": "$RELEASE-proposed",
+ "release": "$RELEASE",
+ }
try:
retsuite = mapping[suite]
except KeyError:
@@ -624,14 +399,14 @@ def map_known_suites(suite):
def disable_suites(disabled, src, release):
"""reads the config for suites to be disabled and removes those
- from the template"""
+ from the template"""
if not disabled:
return src
retsrc = src
for suite in disabled:
suite = map_known_suites(suite)
- releasesuite = templater.render_string(suite, {'RELEASE': release})
+ releasesuite = templater.render_string(suite, {"RELEASE": release})
LOG.debug("Disabling suite %s as %s", suite, releasesuite)
newsrc = ""
@@ -653,109 +428,146 @@ def disable_suites(disabled, src, release):
break
if cols[pcol] == releasesuite:
- line = '# suite disabled by cloud-init: %s' % line
+ line = "# suite disabled by cloud-init: %s" % line
newsrc += line
retsrc = newsrc
return retsrc
+def add_mirror_keys(cfg, target):
+ """Adds any keys included in the primary/security mirror clauses"""
+ for key in ("primary", "security"):
+ for mirror in cfg.get(key, []):
+ add_apt_key(mirror, target, file_name=key)
+
+
def generate_sources_list(cfg, release, mirrors, cloud):
"""generate_sources_list
- create a source.list file based on a custom or default template
- by replacing mirrors and release in the template"""
+ create a source.list file based on a custom or default template
+ by replacing mirrors and release in the template"""
aptsrc = "/etc/apt/sources.list"
- params = {'RELEASE': release, 'codename': release}
+ params = {"RELEASE": release, "codename": release}
for k in mirrors:
params[k] = mirrors[k]
params[k.lower()] = mirrors[k]
- tmpl = cfg.get('sources_list', None)
+ tmpl = cfg.get("sources_list", None)
if tmpl is None:
LOG.info("No custom template provided, fall back to builtin")
- template_fn = cloud.get_template_filename('sources.list.%s' %
- (cloud.distro.name))
+ template_fn = cloud.get_template_filename(
+ "sources.list.%s" % (cloud.distro.name)
+ )
if not template_fn:
- template_fn = cloud.get_template_filename('sources.list')
+ template_fn = cloud.get_template_filename("sources.list")
if not template_fn:
- LOG.warning("No template found, "
- "not rendering /etc/apt/sources.list")
+ LOG.warning(
+ "No template found, not rendering /etc/apt/sources.list"
+ )
return
tmpl = util.load_file(template_fn)
rendered = templater.render_string(tmpl, params)
- disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
+ disabled = disable_suites(cfg.get("disable_suites"), rendered, release)
util.write_file(aptsrc, disabled, mode=0o644)
-def add_apt_key_raw(key, target=None):
+def add_apt_key_raw(key, file_name, hardened=False, target=None):
"""
actual adding of a key as defined in key argument
to the system
"""
LOG.debug("Adding key:\n'%s'", key)
try:
- subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
+ name = pathlib.Path(file_name).stem
+ return apt_key("add", output_file=name, data=key, hardened=hardened)
except subp.ProcessExecutionError:
LOG.exception("failed to add apt GPG Key to apt keyring")
raise
-def add_apt_key(ent, target=None):
+def add_apt_key(ent, target=None, hardened=False, file_name=None):
"""
Add key to the system as defined in ent (if any).
Supports raw keys or keyid's
The latter will as a first step fetched to get the raw key
"""
- if 'keyid' in ent and 'key' not in ent:
+ if "keyid" in ent and "key" not in ent:
keyserver = DEFAULT_KEYSERVER
- if 'keyserver' in ent:
- keyserver = ent['keyserver']
+ if "keyserver" in ent:
+ keyserver = ent["keyserver"]
- ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver)
+ ent["key"] = gpg.getkeybyid(ent["keyid"], keyserver)
- if 'key' in ent:
- add_apt_key_raw(ent['key'], target)
+ if "key" in ent:
+ return add_apt_key_raw(
+ ent["key"], file_name or ent["filename"], hardened=hardened
+ )
def update_packages(cloud):
cloud.distro.update_package_sources()
-def add_apt_sources(srcdict, cloud, target=None, template_params=None,
- aa_repo_match=None):
+def add_apt_sources(
+ srcdict, cloud, target=None, template_params=None, aa_repo_match=None
+):
"""
- add entries in /etc/apt/sources.list.d for each abbreviated
- sources.list entry in 'srcdict'. When rendering template, also
- include the values in dictionary searchList
+ install keys and repo source .list files defined in 'sources'
+
+ for each 'source' entry in the config:
+ 1. expand template variables and write source .list file in
+ /etc/apt/sources.list.d/
+ 2. install defined keys
+ 3. update packages via distro-specific method (i.e. apt-key update)
+
+
+ @param srcdict: a dict containing elements required
+ @param cloud: cloud instance object
+
+ Example srcdict value:
+ {
+ 'rio-grande-repo': {
+ 'source': 'deb [signed-by=$KEY_FILE] $MIRROR $RELEASE main',
+ 'keyid': 'B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77',
+ 'keyserver': 'pgp.mit.edu'
+ }
+ }
+
+ Note: Deb822 format is not supported
"""
if template_params is None:
template_params = {}
if aa_repo_match is None:
- raise ValueError('did not get a valid repo matcher')
+ raise ValueError("did not get a valid repo matcher")
if not isinstance(srcdict, dict):
- raise TypeError('unknown apt format: %s' % (srcdict))
+ raise TypeError("unknown apt format: %s" % (srcdict))
for filename in srcdict:
ent = srcdict[filename]
LOG.debug("adding source/key '%s'", ent)
- if 'filename' not in ent:
- ent['filename'] = filename
+ if "filename" not in ent:
+ ent["filename"] = filename
- add_apt_key(ent, target)
+ if "source" in ent and "$KEY_FILE" in ent["source"]:
+ key_file = add_apt_key(ent, target, hardened=True)
+ template_params["KEY_FILE"] = key_file
+ else:
+ key_file = add_apt_key(ent, target)
- if 'source' not in ent:
+ if "source" not in ent:
continue
- source = ent['source']
+ source = ent["source"]
source = templater.render_string(source, template_params)
- if not ent['filename'].startswith("/"):
- ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
- ent['filename'])
- if not ent['filename'].endswith(".list"):
- ent['filename'] += ".list"
+ if not ent["filename"].startswith("/"):
+ ent["filename"] = os.path.join(
+ "/etc/apt/sources.list.d/", ent["filename"]
+ )
+ if not ent["filename"].endswith(".list"):
+ ent["filename"] += ".list"
if aa_repo_match(source):
try:
@@ -765,7 +577,7 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
raise
continue
- sourcefn = subp.target_path(target, ent['filename'])
+ sourcefn = subp.target_path(target, ent["filename"])
try:
contents = "%s\n" % (source)
util.write_file(sourcefn, contents, omode="a")
@@ -781,17 +593,21 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
def convert_v1_to_v2_apt_format(srclist):
"""convert v1 apt format to v2 (dict in apt_sources)"""
srcdict = {}
+ LOG.warning(
+ "DEPRECATION: 'apt_sources' deprecated config key found."
+ " Use 'apt' instead"
+ )
if isinstance(srclist, list):
LOG.debug("apt config: convert V1 to V2 format (source list to dict)")
for srcent in srclist:
- if 'filename' not in srcent:
+ if "filename" not in srcent:
# file collides for multiple !filename cases for compatibility
# yet we need them all processed, so not same dictionary key
- srcent['filename'] = "cloud_config_sources.list"
+ srcent["filename"] = "cloud_config_sources.list"
key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
else:
# all with filename use that as key (matching new format)
- key = srcent['filename']
+ key = srcent["filename"]
srcdict[key] = srcent
elif isinstance(srclist, dict):
srcdict = srclist
@@ -803,7 +619,7 @@ def convert_v1_to_v2_apt_format(srclist):
def convert_key(oldcfg, aptcfg, oldkey, newkey):
"""convert an old key to the new one if the old one exists
- returns true if a key was found and converted"""
+ returns true if a key was found and converted"""
if oldcfg.get(oldkey, None) is not None:
aptcfg[newkey] = oldcfg.get(oldkey)
del oldcfg[oldkey]
@@ -813,33 +629,37 @@ def convert_key(oldcfg, aptcfg, oldkey, newkey):
def convert_mirror(oldcfg, aptcfg):
"""convert old apt_mirror keys into the new more advanced mirror spec"""
- keymap = [('apt_mirror', 'uri'),
- ('apt_mirror_search', 'search'),
- ('apt_mirror_search_dns', 'search_dns')]
+ keymap = [
+ ("apt_mirror", "uri"),
+ ("apt_mirror_search", "search"),
+ ("apt_mirror_search_dns", "search_dns"),
+ ]
converted = False
- newmcfg = {'arches': ['default']}
+ newmcfg = {"arches": ["default"]}
for oldkey, newkey in keymap:
if convert_key(oldcfg, newmcfg, oldkey, newkey):
converted = True
# only insert new style config if anything was converted
if converted:
- aptcfg['primary'] = [newmcfg]
+ aptcfg["primary"] = [newmcfg]
def convert_v2_to_v3_apt_format(oldcfg):
"""convert old to new keys and adapt restructured mirror spec"""
- mapoldkeys = {'apt_sources': 'sources',
- 'apt_mirror': None,
- 'apt_mirror_search': None,
- 'apt_mirror_search_dns': None,
- 'apt_proxy': 'proxy',
- 'apt_http_proxy': 'http_proxy',
- 'apt_ftp_proxy': 'https_proxy',
- 'apt_https_proxy': 'ftp_proxy',
- 'apt_preserve_sources_list': 'preserve_sources_list',
- 'apt_custom_sources_list': 'sources_list',
- 'add_apt_repo_match': 'add_apt_repo_match'}
+ mapoldkeys = {
+ "apt_sources": "sources",
+ "apt_mirror": None,
+ "apt_mirror_search": None,
+ "apt_mirror_search_dns": None,
+ "apt_proxy": "proxy",
+ "apt_http_proxy": "http_proxy",
+ "apt_ftp_proxy": "https_proxy",
+ "apt_https_proxy": "ftp_proxy",
+ "apt_preserve_sources_list": "preserve_sources_list",
+ "apt_custom_sources_list": "sources_list",
+ "add_apt_repo_match": "add_apt_repo_match",
+ }
needtoconvert = []
for oldkey in mapoldkeys:
if oldkey in oldcfg:
@@ -851,13 +671,19 @@ def convert_v2_to_v3_apt_format(oldcfg):
# no old config, so no new one to be created
if not needtoconvert:
return oldcfg
- LOG.debug("apt config: convert V2 to V3 format for keys '%s'",
- ", ".join(needtoconvert))
+ LOG.warning(
+ "DEPRECATION apt: converted deprecated config V2 to V3 format for"
+ " keys '%s'. Use updated config keys.",
+ ", ".join(needtoconvert),
+ )
# if old AND new config are provided, prefer the new one (LP #1616831)
- newaptcfg = oldcfg.get('apt', None)
+ newaptcfg = oldcfg.get("apt", None)
if newaptcfg is not None:
- LOG.debug("apt config: V1/2 and V3 format specified, preferring V3")
+ LOG.warning(
+ "DEPRECATION: apt config: deprecated V1/2 and V3 format specified,"
+ " preferring V3"
+ )
for oldkey in needtoconvert:
newkey = mapoldkeys[oldkey]
verify = oldcfg[oldkey] # drop, but keep a ref for verification
@@ -866,10 +692,11 @@ def convert_v2_to_v3_apt_format(oldcfg):
# no simple mapping or no collision on this particular key
continue
if verify != newaptcfg[newkey]:
- raise ValueError("Old and New apt format defined with unequal "
- "values %s vs %s @ %s" % (verify,
- newaptcfg[newkey],
- oldkey))
+ raise ValueError(
+ "Old and New apt format defined with unequal "
+ "values %s vs %s @ %s"
+ % (verify, newaptcfg[newkey], oldkey)
+ )
# return conf after clearing conflicting V1/2 keys
return oldcfg
@@ -889,17 +716,17 @@ def convert_v2_to_v3_apt_format(oldcfg):
raise ValueError("old apt key '%s' left after conversion" % oldkey)
# insert new format into config and return full cfg with only v3 content
- oldcfg['apt'] = aptcfg
+ oldcfg["apt"] = aptcfg
return oldcfg
def convert_to_v3_apt_format(cfg):
"""convert the old list based format to the new dict based one. After that
- convert the old dict keys/format to v3 a.k.a 'new apt config'"""
+ convert the old dict keys/format to v3 a.k.a 'new apt config'"""
# V1 -> V2, the apt_sources entry from list to dict
- apt_sources = cfg.get('apt_sources', None)
+ apt_sources = cfg.get("apt_sources", None)
if apt_sources is not None:
- cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources)
+ cfg["apt_sources"] = convert_v1_to_v2_apt_format(apt_sources)
# V2 -> V3, move all former globals under the "apt" key
# Restructure into new key names and mirror hierarchy
@@ -931,7 +758,12 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
if mydom:
doms.append(".%s" % mydom)
- doms.extend((".localdomain", "",))
+ doms.extend(
+ (
+ ".localdomain",
+ "",
+ )
+ )
mirror_list = []
distro = cloud.distro.name
@@ -946,12 +778,11 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
def update_mirror_info(pmirror, smirror, arch, cloud):
"""sets security mirror to primary if not defined.
- returns defaults if no mirrors are defined"""
+ returns defaults if no mirrors are defined"""
if pmirror is not None:
if smirror is None:
smirror = pmirror
- return {'PRIMARY': pmirror,
- 'SECURITY': smirror}
+ return {"PRIMARY": pmirror, "SECURITY": smirror}
# None specified at all, get default mirrors from cloud
mirror_info = cloud.datasource.get_package_mirror_info()
@@ -960,8 +791,8 @@ def update_mirror_info(pmirror, smirror, arch, cloud):
# arbitrary key/value pairs including 'primary' and 'security' keys.
# caller expects dict with PRIMARY and SECURITY.
m = mirror_info.copy()
- m['PRIMARY'] = m['primary']
- m['SECURITY'] = m['security']
+ m["PRIMARY"] = m["primary"]
+ m["SECURITY"] = m["security"]
return m
@@ -971,7 +802,7 @@ def update_mirror_info(pmirror, smirror, arch, cloud):
def get_arch_mirrorconfig(cfg, mirrortype, arch):
"""out of a list of potential mirror configurations select
- and return the one matching the architecture (or default)"""
+ and return the one matching the architecture (or default)"""
# select the mirror specification (if-any)
mirror_cfg_list = cfg.get(mirrortype, None)
if mirror_cfg_list is None:
@@ -980,7 +811,7 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch):
# select the specification matching the target arch
default = None
for mirror_cfg_elem in mirror_cfg_list:
- arches = mirror_cfg_elem.get("arches")
+ arches = mirror_cfg_elem.get("arches") or []
if arch in arches:
return mirror_cfg_elem
if "default" in arches:
@@ -990,8 +821,8 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch):
def get_mirror(cfg, mirrortype, arch, cloud):
"""pass the three potential stages of mirror specification
- returns None is neither of them found anything otherwise the first
- hit is returned"""
+ returns None is neither of them found anything otherwise the first
+ hit is returned"""
mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch)
if mcfg is None:
return None
@@ -1007,18 +838,19 @@ def get_mirror(cfg, mirrortype, arch, cloud):
# fallback to search_dns if specified
if mirror is None:
# list of mirrors to try to resolve
- mirror = search_for_mirror_dns(mcfg.get("search_dns", None),
- mirrortype, cfg, cloud)
+ mirror = search_for_mirror_dns(
+ mcfg.get("search_dns", None), mirrortype, cfg, cloud
+ )
return mirror
def find_apt_mirror_info(cfg, cloud, arch=None):
"""find_apt_mirror_info
- find an apt_mirror given the cfg provided.
- It can check for separate config of primary and security mirrors
- If only primary is given security is assumed to be equal to primary
- If the generic apt_mirror is given that is defining for both
+ find an apt_mirror given the cfg provided.
+ It can check for separate config of primary and security mirrors
+ If only primary is given security is assumed to be equal to primary
+ If the generic apt_mirror is given that is defining for both
"""
if arch is None:
@@ -1039,32 +871,115 @@ def find_apt_mirror_info(cfg, cloud, arch=None):
def apply_apt_config(cfg, proxy_fname, config_fname):
"""apply_apt_config
- Applies any apt*proxy config from if specified
+ Applies any apt*proxy config from if specified
"""
# Set up any apt proxy
- cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
- ('http_proxy', 'Acquire::http::Proxy "%s";'),
- ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
- ('https_proxy', 'Acquire::https::Proxy "%s";'))
+ cfgs = (
+ ("proxy", 'Acquire::http::Proxy "%s";'),
+ ("http_proxy", 'Acquire::http::Proxy "%s";'),
+ ("ftp_proxy", 'Acquire::ftp::Proxy "%s";'),
+ ("https_proxy", 'Acquire::https::Proxy "%s";'),
+ )
proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
if len(proxies):
LOG.debug("write apt proxy info to %s", proxy_fname)
- util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
+ util.write_file(proxy_fname, "\n".join(proxies) + "\n")
elif os.path.isfile(proxy_fname):
util.del_file(proxy_fname)
LOG.debug("no apt proxy configured, removed %s", proxy_fname)
- if cfg.get('conf', None):
+ if cfg.get("conf", None):
LOG.debug("write apt config info to %s", config_fname)
- util.write_file(config_fname, cfg.get('conf'))
+ util.write_file(config_fname, cfg.get("conf"))
elif os.path.isfile(config_fname):
util.del_file(config_fname)
LOG.debug("no apt config configured, removed %s", config_fname)
+def apt_key(
+ command, output_file=None, data=None, hardened=False, human_output=True
+):
+ """apt-key replacement
+
+ commands implemented: 'add', 'list', 'finger'
+
+ @param output_file: name of output gpg file (without .gpg or .asc)
+ @param data: key contents
+ @param human_output: list keys formatted for human parsing
+ @param hardened: write keys to to /etc/apt/cloud-init.gpg.d/ (referred to
+ with [signed-by] in sources file)
+ """
+
+ def _get_key_files():
+ """return all apt keys
+
+ /etc/apt/trusted.gpg (if it exists) and all keyfiles (and symlinks to
+ keyfiles) in /etc/apt/trusted.gpg.d/ are returned
+
+ based on apt-key implementation
+ """
+ key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else []
+
+ for file in os.listdir(APT_TRUSTED_GPG_DIR):
+ if file.endswith(".gpg") or file.endswith(".asc"):
+ key_files.append(APT_TRUSTED_GPG_DIR + file)
+ return key_files if key_files else ""
+
+ def apt_key_add():
+ """apt-key add <file>
+
+ returns filepath to new keyring, or '/dev/null' when an error occurs
+ """
+ file_name = "/dev/null"
+ if not output_file:
+ util.logexc(
+ LOG, 'Unknown filename, failed to add key: "{}"'.format(data)
+ )
+ else:
+ try:
+ key_dir = (
+ CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR
+ )
+ stdout = gpg.dearmor(data)
+ file_name = "{}{}.gpg".format(key_dir, output_file)
+ util.write_file(file_name, stdout)
+ except subp.ProcessExecutionError:
+ util.logexc(
+ LOG, "Gpg error, failed to add key: {}".format(data)
+ )
+ except UnicodeDecodeError:
+ util.logexc(
+ LOG, "Decode error, failed to add key: {}".format(data)
+ )
+ return file_name
+
+ def apt_key_list():
+ """apt-key list
+
+ returns string of all trusted keys (in /etc/apt/trusted.gpg and
+ /etc/apt/trusted.gpg.d/)
+ """
+ key_list = []
+ for key_file in _get_key_files():
+ try:
+ key_list.append(gpg.list(key_file, human_output=human_output))
+ except subp.ProcessExecutionError as error:
+ LOG.warning('Failed to list key "%s": %s', key_file, error)
+ return "\n".join(key_list)
+
+ if command == "add":
+ return apt_key_add()
+ elif command == "finger" or command == "list":
+ return apt_key_list()
+ else:
+ raise ValueError(
+ "apt_key() commands add, list, and finger are currently supported"
+ )
+
+
CONFIG_CLEANERS = {
- 'cloud-init': clean_cloud_init,
+ "cloud-init": clean_cloud_init,
}
# vi: ts=4 expandtab
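
The net effect of the new apt_key()/add_apt_key_raw() path above is that keys are no longer piped to the apt-key binary: they are dearmored via gpg and written as .gpg files under /etc/apt/trusted.gpg.d/, or under /etc/apt/cloud-init.gpg.d/ when hardened, and the returned path is what add_apt_sources() substitutes for $KEY_FILE in a [signed-by=...] source line. A rough usage sketch against the functions added above (the key material and output file name are placeholders; a real run needs root and a valid armored key, otherwise apt_key() logs the failure and returns /dev/null):

from cloudinit.config import cc_apt_configure

# Placeholder ASCII-armored key; in practice this comes from the 'key'
# or 'keyid' field of an apt.sources entry in cloud-config.
EXAMPLE_KEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----"""

# hardened=True targets /etc/apt/cloud-init.gpg.d/example-repo.gpg; the
# returned path is used for $KEY_FILE in a line such as
# 'deb [signed-by=$KEY_FILE] http://example.com/apt stable main'.
key_file = cc_apt_configure.apt_key(
    "add", output_file="example-repo", data=EXAMPLE_KEY, hardened=True
)
print(key_file)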
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index aa186ce2..901633d3 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -4,52 +4,59 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Apt Pipelining
---------------
-**Summary:** configure apt pipelining
+"""Apt Pipelining: configure apt pipelining."""
-This module configures apt's ``Acquite::http::Pipeline-Depth`` option, which
-controls how apt handles HTTP pipelining. It may be useful for pipelining to be
-disabled, because some web servers, such as S3 do not pipeline properly (LP:
-#948461). The ``apt_pipelining`` config key may be set to ``false`` to disable
-pipelining altogether. This is the default behavior. If it is set to ``none``,
-``unchanged``, or ``os``, no change will be made to apt configuration and the
-default setting for the distro will be used. The pipeline depth can also be
-manually specified by setting ``apt_pipelining`` to a number. However, this is
-not recommended.
+from textwrap import dedent
-**Internal name:** ``cc_apt_pipelining``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu, debian
-
-**Config keys**::
- apt_pipelining: <false/none/unchanged/os/number>
-"""
-
-from cloudinit.settings import PER_INSTANCE
from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-
-distros = ['ubuntu', 'debian']
-
+distros = ["ubuntu", "debian"]
DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
-
-APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
- 'Acquire::http::Pipeline-Depth "%s";\n')
-
+APT_PIPE_TPL = (
+ "//Written by cloud-init per 'apt_pipelining'\n"
+ 'Acquire::http::Pipeline-Depth "%s";\n'
+)
# Acquire::http::Pipeline-Depth can be a value
# from 0 to 5 indicating how many outstanding requests APT should send.
# A value of zero MUST be specified if the remote host does not properly linger
# on TCP connections - otherwise data corruption will occur.
+meta: MetaSchema = {
+ "id": "cc_apt_pipelining",
+ "name": "Apt Pipelining",
+ "title": "Configure apt pipelining",
+ "description": dedent(
+ """\
+ This module configures apt's ``Acquite::http::Pipeline-Depth`` option,
+ which controls how apt handles HTTP pipelining. It may be useful for
+ pipelining to be disabled, because some web servers, such as S3 do not
+ pipeline properly (LP: #948461).
+
+ Value configuration options for this module are:
+
+ * ``false`` (Default): disable pipelining altogether
+ * ``none``, ``unchanged``, or ``os``: use distro default
+ * ``<number>``: Manually specify pipeline depth. This is not recommended.""" # noqa: E501
+ ),
+ "distros": distros,
+ "frequency": frequency,
+ "examples": [
+ "apt_pipelining: false",
+ "apt_pipelining: none",
+ "apt_pipelining: unchanged",
+ "apt_pipelining: os",
+ "apt_pipelining: 3",
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
-def handle(_name, cfg, _cloud, log, _args):
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os')
+def handle(_name, cfg, _cloud, log, _args):
+ apt_pipe_value = cfg.get("apt_pipelining", "os")
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false":
@@ -69,4 +76,5 @@ def write_apt_snippet(setting, log, f_name):
util.write_file(f_name, file_contents)
log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
+
# vi: ts=4 expandtab
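
For a numeric apt_pipelining value, write_apt_snippet() renders APT_PIPE_TPL into /etc/apt/apt.conf.d/90cloud-init-pipelining. A small sketch of what the file contains for apt_pipelining: 3, reusing the template string shown above:

APT_PIPE_TPL = (
    "//Written by cloud-init per 'apt_pipelining'\n"
    'Acquire::http::Pipeline-Depth "%s";\n'
)

# write_apt_snippet() fills the depth into the template; for
# `apt_pipelining: 3` the resulting file content is:
print(APT_PIPE_TPL % 3)
# //Written by cloud-init per 'apt_pipelining'
# Acquire::http::Pipeline-Depth "3";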
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 246e4497..bd14aede 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -12,28 +12,20 @@
import os
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import subp, temp_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
-from cloudinit import temp_utils
-from cloudinit import subp
-from cloudinit import util
frequency = PER_ALWAYS
-# The schema definition for each cloud-config module is a strict contract for
-# describing supported configuration parameters for each cloud-config section.
-# It allows cloud-config to validate and alert users to invalid or ignored
-# configuration options before actually attempting to deploy with said
-# configuration.
+distros = ["all"]
-distros = ['all']
-
-schema = {
- 'id': 'cc_bootcmd',
- 'name': 'Bootcmd',
- 'title': 'Run arbitrary commands early in the boot process',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_bootcmd",
+ "name": "Bootcmd",
+ "title": "Run arbitrary commands early in the boot process",
+ "description": dedent(
+ """\
This module runs arbitrary commands very early in the boot process,
only slightly after a boothook would run. This is very similar to a
boothook, but more user friendly. The environment variable
@@ -49,42 +41,32 @@ schema = {
when writing files, do not use /tmp dir as it races with
systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
bootcmd:
- echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
- [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
- """)],
- 'frequency': PER_ALWAYS,
- 'type': 'object',
- 'properties': {
- 'bootcmd': {
- 'type': 'array',
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
- },
- 'additionalItems': False, # Reject items of non-string non-list
- 'additionalProperties': False,
- 'minItems': 1,
- 'required': [],
- }
- }
+ """
+ )
+ ],
+ "frequency": PER_ALWAYS,
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
if "bootcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'bootcmd' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s, no 'bootcmd' key in configuration", name
+ )
return
- validate_cloudconfig_schema(cfg, schema)
with temp_utils.ExtendedTemporaryFile(suffix=".sh") as tmpf:
try:
content = util.shellify(cfg["bootcmd"])
@@ -98,11 +80,12 @@ def handle(name, cfg, cloud, log, _args):
env = os.environ.copy()
iid = cloud.get_instance_id()
if iid:
- env['INSTANCE_ID'] = str(iid)
- cmd = ['/bin/sh', tmpf.name]
+ env["INSTANCE_ID"] = str(iid)
+ cmd = ["/bin/sh", tmpf.name]
subp.subp(cmd, env=env, capture=False)
except Exception:
util.logexc(log, "Failed to run bootcmd module %s", name)
raise
+
# vi: ts=4 expandtab
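For reference, a minimal stand-alone sketch (not part of this diff) of the flow handle() follows above: turn the ``bootcmd`` entries into a temporary shell script and run it with INSTANCE_ID exported. util.shellify and ExtendedTemporaryFile are cloud-init internals, so the helpers below are simplified stand-ins; list entries are joined without the shell quoting the real implementation applies.

import os
import subprocess
import tempfile

def run_bootcmd(commands, instance_id=None):
    # Build a small shell script from the bootcmd list: string entries are
    # taken verbatim, list entries are joined into a single command line.
    lines = ["#!/bin/sh"]
    for entry in commands:
        if isinstance(entry, (list, tuple)):
            lines.append(" ".join(str(part) for part in entry))
        else:
            lines.append(str(entry))
    env = os.environ.copy()
    if instance_id:
        env["INSTANCE_ID"] = str(instance_id)
    with tempfile.NamedTemporaryFile("w", suffix=".sh", delete=False) as tmpf:
        tmpf.write("\n".join(lines) + "\n")
        script = tmpf.name
    try:
        subprocess.run(["/bin/sh", script], env=env, check=True)
    finally:
        os.unlink(script)

run_bootcmd(['echo "booted instance $INSTANCE_ID"'], instance_id="i-abc123")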
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 9fdaeba1..fbc20410 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -6,11 +6,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Byobu
------
-**Summary:** enable/disable byobu system wide and for default user
+"""Byobu: Enable/disable byobu system wide and for default user."""
+
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ug_util
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module controls whether byobu is enabled or disabled system wide and for
the default system user. If byobu is to be enabled, this module will ensure it
is installed. Likewise, if it is to be disabled, it will be removed if
@@ -26,23 +29,23 @@ Valid configuration options for this module are:
- ``disable``: disable byobu for all users
- ``user``: alias for ``enable-user``
- ``system``: alias for ``enable-system``
-
-**Internal name:** ``cc_byobu``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu, debian
-
-**Config keys**::
-
- byobu_by_default: <user/system>
"""
+distros = ["ubuntu", "debian"]
-from cloudinit.distros import ug_util
-from cloudinit import subp
-from cloudinit import util
+meta: MetaSchema = {
+ "id": "cc_byobu",
+ "name": "Byobu",
+ "title": "Enable/disable byobu system wide and for default user",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ "byobu_by_default: enable-user",
+ "byobu_by_default: disable-system",
+ ],
+}
-distros = ['ubuntu', 'debian']
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, args):
@@ -58,8 +61,14 @@ def handle(name, cfg, cloud, log, args):
if value == "user" or value == "system":
value = "enable-%s" % value
- valid = ("enable-user", "enable-system", "enable",
- "disable-user", "disable-system", "disable")
+ valid = (
+ "enable-user",
+ "enable-system",
+ "enable",
+ "disable-user",
+ "disable-system",
+ "disable",
+ )
if value not in valid:
log.warning("Unknown value %s for byobu_by_default", value)
@@ -81,13 +90,16 @@ def handle(name, cfg, cloud, log, args):
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
if not user:
- log.warning(("No default byobu user provided, "
- "can not launch %s for the default user"), bl_inst)
+ log.warning(
+ "No default byobu user provided, "
+ "can not launch %s for the default user",
+ bl_inst,
+ )
else:
- shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
+ shcmd += ' sudo -Hu "%s" byobu-launcher-%s' % (user, bl_inst)
shcmd += " || X=$(($X+1)); "
if mod_sys:
- shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
+ shcmd += 'echo "%s" | debconf-set-selections' % dc_val
shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
shcmd += " || X=$(($X+1)); "
@@ -96,4 +108,5 @@ def handle(name, cfg, cloud, log, args):
log.debug("Setting byobu to %s", value)
subp.subp(cmd, capture=False)
+
# vi: ts=4 expandtab
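A rough sketch (not part of this diff) of how the ``byobu_by_default`` value maps onto the two actions the handler drives: the byobu launcher for the default user and the debconf/dpkg-reconfigure step for the system-wide setting. The real module assembles a single shell command, only fragments of which appear in the hunks above, so the breakdown below is an assumption for illustration.

def byobu_actions(value):
    # Normalize the 'user'/'system' aliases the handler accepts above.
    if value in ("user", "system"):
        value = "enable-%s" % value
    enable = not value.startswith("disable")
    touch_user = value.endswith("-user") or value in ("enable", "disable")
    touch_system = value.endswith("-system") or value in ("enable", "disable")
    return {"enable": enable, "user": touch_user, "system": touch_system}

# byobu_by_default: enable-user -> only the default user's launcher changes
assert byobu_actions("enable-user") == {
    "enable": True, "user": True, "system": False,
}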
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 3c453d91..6084cb4c 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -2,105 +2,161 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-CA Certs
---------
-**Summary:** add ca certificates
+"""CA Certs: Add ca certificates."""
+import os
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+DEFAULT_CONFIG = {
+ "ca_cert_path": "/usr/share/ca-certificates/",
+ "ca_cert_filename": "cloud-init-ca-certs.crt",
+ "ca_cert_config": "/etc/ca-certificates.conf",
+ "ca_cert_system_path": "/etc/ssl/certs/",
+ "ca_cert_update_cmd": ["update-ca-certificates"],
+}
+DISTRO_OVERRIDES = {
+ "rhel": {
+ "ca_cert_path": "/usr/share/pki/ca-trust-source/",
+ "ca_cert_filename": "anchors/cloud-init-ca-certs.crt",
+ "ca_cert_config": None,
+ "ca_cert_system_path": "/etc/pki/ca-trust/",
+ "ca_cert_update_cmd": ["update-ca-trust"],
+ }
+}
+
+MODULE_DESCRIPTION = """\
This module adds CA certificates to ``/etc/ca-certificates.conf`` and updates
the ssl cert cache using ``update-ca-certificates``. The default certificates
can be removed from the system with the configuration option
-``remove-defaults``.
+``remove_defaults``.
.. note::
Certificates must be specified using valid YAML. In order to specify a
multiline certificate, the YAML multiline list syntax must be used.
.. note::
- For Alpine Linux the "remove-defaults" functionality works if the
+ For Alpine Linux the "remove_defaults" functionality works if the
ca-certificates package is installed but not if the
ca-certificates-bundle package is installed.
-
-**Internal name:** ``cc_ca_certs``
-
-**Module frequency:** per instance
-
-**Supported distros:** alpine, debian, ubuntu
-
-**Config keys**::
-
- ca-certs:
- remove-defaults: <true/false>
- trusted:
- - <single line cert>
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
"""
-
-import os
-
-from cloudinit import subp
-from cloudinit import util
-
-CA_CERT_PATH = "/usr/share/ca-certificates/"
-CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
-CA_CERT_CONFIG = "/etc/ca-certificates.conf"
-CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
-CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-
-distros = ['alpine', 'debian', 'ubuntu']
+distros = ["alpine", "debian", "ubuntu", "rhel"]
+
+meta: MetaSchema = {
+ "id": "cc_ca_certs",
+ "name": "CA Certificates",
+ "title": "Add ca certificates",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ ca_certs:
+ remove_defaults: true
+ trusted:
+ - single_line_cert
+ - |
+ -----BEGIN CERTIFICATE-----
+ YOUR-ORGS-TRUSTED-CA-CERT-HERE
+ -----END CERTIFICATE-----
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
+
+def _distro_ca_certs_configs(distro_name):
+ """Return a distro-specific ca_certs config dictionary
+
+ @param distro_name: String providing the distro class name.
+ @returns: Dict of distro configurations for ca-cert.
+ """
+ cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG)
+ cfg["ca_cert_full_path"] = os.path.join(
+ cfg["ca_cert_path"], cfg["ca_cert_filename"]
+ )
+ return cfg
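Usage sketch for the helper above (not part of this diff); the values follow directly from DEFAULT_CONFIG and DISTRO_OVERRIDES defined earlier in this file.

debian_cfg = _distro_ca_certs_configs("debian")
# Falls back to DEFAULT_CONFIG:
#   debian_cfg["ca_cert_update_cmd"] == ["update-ca-certificates"]
#   debian_cfg["ca_cert_full_path"] ==
#       "/usr/share/ca-certificates/cloud-init-ca-certs.crt"

rhel_cfg = _distro_ca_certs_configs("rhel")
# Uses the rhel override:
#   rhel_cfg["ca_cert_update_cmd"] == ["update-ca-trust"]
#   rhel_cfg["ca_cert_full_path"] ==
#       "/usr/share/pki/ca-trust-source/anchors/cloud-init-ca-certs.crt"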
-def update_ca_certs():
+def update_ca_certs(distro_cfg):
"""
Updates the CA certificate cache on the current machine.
+
+ @param distro_cfg: A dict of distro-specific ca-cert paths and commands, as returned by _distro_ca_certs_configs().
"""
- subp.subp(["update-ca-certificates"], capture=False)
+ subp.subp(distro_cfg["ca_cert_update_cmd"], capture=False)
-def add_ca_certs(certs):
+def add_ca_certs(distro_cfg, certs):
"""
Adds certificates to the system. To actually apply the new certificates
you must also call L{update_ca_certs}.
+ @param distro_cfg: A dict of distro-specific ca-cert paths and commands, as returned by _distro_ca_certs_configs().
@param certs: A list of certificate strings.
"""
- if certs:
- # First ensure they are strings...
- cert_file_contents = "\n".join([str(c) for c in certs])
- util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
-
- if os.stat(CA_CERT_CONFIG).st_size == 0:
- # If the CA_CERT_CONFIG file is empty (i.e. all existing
- # CA certs have been deleted) then simply output a single
- # line with the cloud-init cert filename.
- out = "%s\n" % CA_CERT_FILENAME
- else:
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
- # causes subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(CA_CERT_CONFIG)
- cur_cont = '\n'.join([line for line in orig.splitlines()
- if line != CA_CERT_FILENAME])
- out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
- util.write_file(CA_CERT_CONFIG, out, omode="wb")
-
-
-def remove_default_ca_certs(distro_name):
+ if not certs:
+ return
+ # First ensure they are strings...
+ cert_file_contents = "\n".join([str(c) for c in certs])
+ util.write_file(
+ distro_cfg["ca_cert_full_path"], cert_file_contents, mode=0o644
+ )
+ update_cert_config(distro_cfg)
+
+
+def update_cert_config(distro_cfg):
+ """
+ Update the certificate config file to add the file path managed by cloud-init.
+
+ @param distro_cfg: A dict of distro-specific ca-cert paths and commands, as returned by _distro_ca_certs_configs().
+ """
+ if distro_cfg["ca_cert_config"] is None:
+ return
+ if os.stat(distro_cfg["ca_cert_config"]).st_size == 0:
+ # If the CA_CERT_CONFIG file is empty (i.e. all existing
+ # CA certs have been deleted) then simply output a single
+ # line with the cloud-init cert filename.
+ out = "%s\n" % distro_cfg["ca_cert_filename"]
+ else:
+ # Append cert filename to CA_CERT_CONFIG file.
+ # We have to strip the content because blank lines in the file
+ # cause subsequent entries to be ignored. (LP: #1077020)
+ orig = util.load_file(distro_cfg["ca_cert_config"])
+ cr_cont = "\n".join(
+ [
+ line
+ for line in orig.splitlines()
+ if line != distro_cfg["ca_cert_filename"]
+ ]
+ )
+ out = "%s\n%s\n" % (cr_cont.rstrip(), distro_cfg["ca_cert_filename"])
+ util.write_file(distro_cfg["ca_cert_config"], out, omode="wb")
+
+
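A pure-string sketch (not part of this diff) of the rewrite update_cert_config() performs above; the real function reads and writes distro_cfg["ca_cert_config"] through util.load_file/write_file.

def updated_config(existing, filename="cloud-init-ca-certs.crt"):
    # Empty config: emit a single line with the cloud-init cert filename.
    if not existing:
        return "%s\n" % filename
    # Otherwise drop any prior cloud-init entry, strip trailing blanks
    # (blank lines cause later entries to be ignored) and re-append it.
    kept = "\n".join(
        line for line in existing.splitlines() if line != filename
    )
    return "%s\n%s\n" % (kept.rstrip(), filename)

assert updated_config("") == "cloud-init-ca-certs.crt\n"
assert updated_config(
    "mozilla/Some_Root_CA.crt\ncloud-init-ca-certs.crt\n"
) == "mozilla/Some_Root_CA.crt\ncloud-init-ca-certs.crt\n"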
+def remove_default_ca_certs(distro_name, distro_cfg):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
+
+ @param distro_name: String providing the distro class name.
+ @param distro_cfg: A dict of distro-specific ca-cert paths and commands, as returned by _distro_ca_certs_configs().
"""
- util.delete_dir_contents(CA_CERT_PATH)
- util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
- util.write_file(CA_CERT_CONFIG, "", mode=0o644)
+ util.delete_dir_contents(distro_cfg["ca_cert_path"])
+ util.delete_dir_contents(distro_cfg["ca_cert_system_path"])
+ util.write_file(distro_cfg["ca_cert_config"], "", mode=0o644)
- if distro_name != 'alpine':
+ if distro_name in ["debian", "ubuntu"]:
debconf_sel = (
- "ca-certificates ca-certificates/trust_new_crts " + "select no")
- subp.subp(('debconf-set-selections', '-'), debconf_sel)
+ "ca-certificates ca-certificates/trust_new_crts " + "select no"
+ )
+ subp.subp(("debconf-set-selections", "-"), debconf_sel)
def handle(name, cfg, cloud, log, _args):
@@ -113,29 +169,50 @@ def handle(name, cfg, cloud, log, _args):
@param log: Pre-initialized Python logger object to use for logging.
@param args: Any module arguments from cloud.cfg
"""
- # If there isn't a ca-certs section in the configuration don't do anything
- if "ca-certs" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'ca-certs' key in configuration"), name)
+ if "ca-certs" in cfg:
+ log.warning(
+ "DEPRECATION: key 'ca-certs' is now deprecated. Use 'ca_certs'"
+ " instead."
+ )
+ elif "ca_certs" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'ca_certs' key in configuration",
+ name,
+ )
return
- ca_cert_cfg = cfg['ca-certs']
+ if "ca-certs" in cfg and "ca_certs" in cfg:
+ log.warning(
+ "Found both ca-certs (deprecated) and ca_certs config keys."
+ " Ignoring ca-certs."
+ )
+ ca_cert_cfg = cfg.get("ca_certs", cfg.get("ca-certs"))
+ distro_cfg = _distro_ca_certs_configs(cloud.distro.name)
- # If there is a remove-defaults option set to true, remove the system
+ # If there is a remove_defaults option set to true, remove the system
# default trusted CA certs first.
- if ca_cert_cfg.get("remove-defaults", False):
+ if "remove-defaults" in ca_cert_cfg:
+ log.warning(
+ "DEPRECATION: key 'ca-certs.remove-defaults' is now deprecated."
+ " Use 'ca_certs.remove_defaults' instead."
+ )
+ if ca_cert_cfg.get("remove-defaults", False):
+ log.debug("Removing default certificates")
+ remove_default_ca_certs(cloud.distro.name, distro_cfg)
+ elif ca_cert_cfg.get("remove_defaults", False):
log.debug("Removing default certificates")
- remove_default_ca_certs(cloud.distro.name)
+ remove_default_ca_certs(cloud.distro.name, distro_cfg)
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
if trusted_certs:
log.debug("Adding %d certificates" % len(trusted_certs))
- add_ca_certs(trusted_certs)
+ add_ca_certs(distro_cfg, trusted_certs)
# Update the system with the new cert configuration.
log.debug("Updating certificates")
- update_ca_certs()
+ update_ca_certs(distro_cfg)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index aaf71366..fdb3a6e3 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -13,87 +13,91 @@ import json
import os
from textwrap import dedent
-from cloudinit import subp
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
-from cloudinit import templater
-from cloudinit import temp_utils
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import subp, temp_utils, templater, url_helper, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
-
RUBY_VERSION_DEFAULT = "1.8"
-CHEF_DIRS = tuple([
- '/etc/chef',
- '/var/log/chef',
- '/var/lib/chef',
- '/var/cache/chef',
- '/var/backups/chef',
- '/var/run/chef',
-])
-REQUIRED_CHEF_DIRS = tuple([
- '/etc/chef',
-])
+CHEF_DIRS = tuple(
+ [
+ "/etc/chef",
+ "/var/log/chef",
+ "/var/lib/chef",
+ "/var/cache/chef",
+ "/var/backups/chef",
+ "/var/run/chef",
+ ]
+)
+REQUIRED_CHEF_DIRS = tuple(
+ [
+ "/etc/chef",
+ ]
+)
# Used if fetching chef from a omnibus style package
OMNIBUS_URL = "https://www.chef.io/chef/install.sh"
OMNIBUS_URL_RETRIES = 5
-CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
-CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret'
-CHEF_ENVIRONMENT = '_default'
-CHEF_FB_PATH = '/etc/chef/firstboot.json'
+CHEF_VALIDATION_PEM_PATH = "/etc/chef/validation.pem"
+CHEF_ENCRYPTED_DATA_BAG_PATH = "/etc/chef/encrypted_data_bag_secret"
+CHEF_ENVIRONMENT = "_default"
+CHEF_FB_PATH = "/etc/chef/firstboot.json"
CHEF_RB_TPL_DEFAULTS = {
# These are ruby symbols...
- 'ssl_verify_mode': ':verify_none',
- 'log_level': ':info',
+ "ssl_verify_mode": ":verify_none",
+ "log_level": ":info",
# These are not symbols...
- 'log_location': '/var/log/chef/client.log',
- 'validation_key': CHEF_VALIDATION_PEM_PATH,
- 'validation_cert': None,
- 'client_key': '/etc/chef/client.pem',
- 'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': '/var/cache/chef',
- 'file_backup_path': '/var/backups/chef',
- 'pid_file': '/var/run/chef/client.pid',
- 'show_time': True,
- 'encrypted_data_bag_secret': None,
+ "log_location": "/var/log/chef/client.log",
+ "validation_key": CHEF_VALIDATION_PEM_PATH,
+ "validation_cert": None,
+ "client_key": "/etc/chef/client.pem",
+ "json_attribs": CHEF_FB_PATH,
+ "file_cache_path": "/var/cache/chef",
+ "file_backup_path": "/var/backups/chef",
+ "pid_file": "/var/run/chef/client.pid",
+ "show_time": True,
+ "encrypted_data_bag_secret": None,
}
-CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
-CHEF_RB_TPL_PATH_KEYS = frozenset([
- 'log_location',
- 'validation_key',
- 'client_key',
- 'file_cache_path',
- 'json_attribs',
- 'pid_file',
- 'encrypted_data_bag_secret',
- 'chef_license',
-])
+CHEF_RB_TPL_BOOL_KEYS = frozenset(["show_time"])
+CHEF_RB_TPL_PATH_KEYS = frozenset(
+ [
+ "log_location",
+ "validation_key",
+ "client_key",
+ "file_cache_path",
+ "json_attribs",
+ "pid_file",
+ "encrypted_data_bag_secret",
+ ]
+)
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS)
-CHEF_RB_TPL_KEYS.extend([
- 'server_url',
- 'node_name',
- 'environment',
- 'validation_name',
-])
+CHEF_RB_TPL_KEYS.extend(
+ [
+ "server_url",
+ "node_name",
+ "environment",
+ "validation_name",
+ "chef_license",
+ ]
+)
CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS)
-CHEF_RB_PATH = '/etc/chef/client.rb'
-CHEF_EXEC_PATH = '/usr/bin/chef-client'
-CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
+CHEF_RB_PATH = "/etc/chef/client.rb"
+CHEF_EXEC_PATH = "/usr/bin/chef-client"
+CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"])
frequency = PER_ALWAYS
distros = ["all"]
-schema = {
- 'id': 'cc_chef',
- 'name': 'Chef',
- 'title': 'module that configures, starts and installs chef',
- 'description': dedent("""\
+
+meta: MetaSchema = {
+ "id": "cc_chef",
+ "name": "Chef",
+ "title": "module that configures, starts and installs chef",
+ "description": dedent(
+ """\
This module enables chef to be installed (from packages,
gems, or from omnibus). Before this occurs, chef configuration is
written to disk (validation.pem, client.pem, firstboot.json,
@@ -101,9 +105,12 @@ schema = {
/var/log/chef and so-on). If configured, chef will be
installed and started in either daemon or non-daemon mode.
If run in non-daemon mode, post run actions are executed to do
- finishing activities such as removing validation.pem."""),
- 'distros': distros,
- 'examples': [dedent("""
+ finishing activities such as removing validation.pem."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """
chef:
directories:
- /etc/chef
@@ -124,246 +131,19 @@ schema = {
omnibus_url_retries: 2
server_url: https://chef.yourorg.com:4000
ssl_verify_mode: :verify_peer
- validation_name: yourorg-validator""")],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'chef': {
- 'type': 'object',
- 'additionalProperties': False,
- 'properties': {
- 'directories': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
- Create the necessary directories for chef to run. By
- default, it creates the following directories:
-
- {chef_dirs}""").format(
- chef_dirs="\n".join(
- [" - ``{}``".format(d) for d in CHEF_DIRS]
- )
- )
- },
- 'validation_cert': {
- 'type': 'string',
- 'description': dedent("""\
- Optional string to be written to file validation_key.
- Special value ``system`` means use the existing file.
- """)
- },
- 'validation_key': {
- 'type': 'string',
- 'default': CHEF_VALIDATION_PEM_PATH,
- 'description': dedent("""\
- Optional path for validation_cert. default to
- ``{}``.""".format(CHEF_VALIDATION_PEM_PATH))
- },
- 'firstboot_path': {
- 'type': 'string',
- 'default': CHEF_FB_PATH,
- 'description': dedent("""\
- Path to write run_list and initial_attributes keys that
- should also be present in this configuration, defaults
- to ``{}``.""".format(CHEF_FB_PATH))
- },
- 'exec': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- define if we should run or not run chef (defaults to
- false, unless a gem installed is requested where this
- will then default to true).""")
- },
- 'client_key': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['client_key'],
- 'description': dedent("""\
- Optional path for client_cert. default to
- ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key']))
- },
- 'encrypted_data_bag_secret': {
- 'type': 'string',
- 'default': None,
- 'description': dedent("""\
- Specifies the location of the secret key used by chef
- to encrypt data items. By default, this path is set
- to None, meaning that chef will have to look at the
- path ``{}`` for it.
- """.format(CHEF_ENCRYPTED_DATA_BAG_PATH))
- },
- 'environment': {
- 'type': 'string',
- 'default': CHEF_ENVIRONMENT,
- 'description': dedent("""\
- Specifies which environment chef will use. By default,
- it will use the ``{}`` configuration.
- """.format(CHEF_ENVIRONMENT))
- },
- 'file_backup_path': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'],
- 'description': dedent("""\
- Specifies the location in which backup files are
- stored. By default, it uses the
- ``{}`` location.""".format(
- CHEF_RB_TPL_DEFAULTS['file_backup_path']))
- },
- 'file_cache_path': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'],
- 'description': dedent("""\
- Specifies the location in which chef cache files will
- be saved. By default, it uses the ``{}``
- location.""".format(
- CHEF_RB_TPL_DEFAULTS['file_cache_path']))
- },
- 'json_attribs': {
- 'type': 'string',
- 'default': CHEF_FB_PATH,
- 'description': dedent("""\
- Specifies the location in which some chef json data is
- stored. By default, it uses the
- ``{}`` location.""".format(CHEF_FB_PATH))
- },
- 'log_level': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['log_level'],
- 'description': dedent("""\
- Defines the level of logging to be stored in the log
- file. By default this value is set to ``{}``.
- """.format(CHEF_RB_TPL_DEFAULTS['log_level']))
- },
- 'log_location': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['log_location'],
- 'description': dedent("""\
- Specifies the location of the chef log file. By
- default, the location is specified at
- ``{}``.""".format(
- CHEF_RB_TPL_DEFAULTS['log_location']))
- },
- 'node_name': {
- 'type': 'string',
- 'description': dedent("""\
- The name of the node to run. By default, we will
- use the instance id as the node name.""")
- },
- 'omnibus_url': {
- 'type': 'string',
- 'default': OMNIBUS_URL,
- 'description': dedent("""\
- Omnibus URL if chef should be installed through
- Omnibus. By default, it uses the
- ``{}``.""".format(OMNIBUS_URL))
- },
- 'omnibus_url_retries': {
- 'type': 'integer',
- 'default': OMNIBUS_URL_RETRIES,
- 'description': dedent("""\
- The number of retries that will be attempted to reach
- the Omnibus URL""")
- },
- 'omnibus_version': {
- 'type': 'string',
- 'description': dedent("""\
- Optional version string to require for omnibus
- install.""")
- },
- 'pid_file': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['pid_file'],
- 'description': dedent("""\
- The location in which a process identification
- number (pid) is saved. By default, it saves
- in the ``{}`` location.""".format(
- CHEF_RB_TPL_DEFAULTS['pid_file']))
- },
- 'server_url': {
- 'type': 'string',
- 'description': 'The URL for the chef server'
- },
- 'show_time': {
- 'type': 'boolean',
- 'default': True,
- 'description': 'Show time in chef logs'
- },
- 'ssl_verify_mode': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'],
- 'description': dedent("""\
- Set the verify mode for HTTPS requests. We can have
- two possible values for this parameter:
-
- - ``:verify_none``: No validation of SSL \
- certificates.
- - ``:verify_peer``: Validate all SSL certificates.
-
- By default, the parameter is set as ``{}``.
- """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode']))
- },
- 'validation_name': {
- 'type': 'string',
- 'description': dedent("""\
- The name of the chef-validator key that Chef Infra
- Client uses to access the Chef Infra Server during
- the initial Chef Infra Client run.""")
- },
- 'force_install': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
- If set to ``True``, forces chef installation, even
- if it is already installed.""")
- },
- 'initial_attributes': {
- 'type': 'object',
- 'items': {
- 'type': 'string'
- },
- 'description': dedent("""\
- Specify a list of initial attributes used by the
- cookbooks.""")
- },
- 'install_type': {
- 'type': 'string',
- 'default': 'packages',
- 'description': dedent("""\
- The type of installation for chef. It can be one of
- the following values:
-
- - ``packages``
- - ``gems``
- - ``omnibus``""")
- },
- 'run_list': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'description': 'A run list for a first boot json.'
- },
- "chef_license": {
- 'type': 'string',
- 'description': dedent("""\
- string that indicates if user accepts or not license
- related to some of chef products""")
- }
- }
- }
- }
+ validation_name: yourorg-validator"""
+ )
+ ],
+ "frequency": frequency,
}
-__doc__ = get_schema_doc(schema)
+__doc__ = get_meta_doc(meta)
def post_run_chef(chef_cfg, log):
- delete_pem = util.get_cfg_option_bool(chef_cfg,
- 'delete_validation_post_exec',
- default=False)
+ delete_pem = util.get_cfg_option_bool(
+ chef_cfg, "delete_validation_post_exec", default=False
+ )
if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH):
os.unlink(CHEF_VALIDATION_PEM_PATH)
@@ -386,16 +166,20 @@ def get_template_params(iid, chef_cfg, log):
else:
params[k] = util.get_cfg_option_str(chef_cfg, k)
# These ones are overwritten to be exact values...
- params.update({
- 'generated_by': util.make_header(),
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- default=iid),
- 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
- default='_default'),
- # These two are mandatory...
- 'server_url': chef_cfg['server_url'],
- 'validation_name': chef_cfg['validation_name'],
- })
+ params.update(
+ {
+ "generated_by": util.make_header(),
+ "node_name": util.get_cfg_option_str(
+ chef_cfg, "node_name", default=iid
+ ),
+ "environment": util.get_cfg_option_str(
+ chef_cfg, "environment", default="_default"
+ ),
+ # These two are mandatory...
+ "server_url": chef_cfg["server_url"],
+ "validation_name": chef_cfg["validation_name"],
+ }
+ )
return params
@@ -403,35 +187,37 @@ def handle(name, cfg, cloud, log, _args):
"""Handler method activated by cloud-init."""
# If there isn't a chef key in the configuration don't do anything
- if 'chef' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'chef' key in configuration"), name)
+ if "chef" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'chef' key in configuration", name
+ )
return
- validate_cloudconfig_schema(cfg, schema)
- chef_cfg = cfg['chef']
+ chef_cfg = cfg["chef"]
# Ensure the chef directories we use exist
- chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
+ chef_dirs = util.get_cfg_option_list(chef_cfg, "directories")
if not chef_dirs:
chef_dirs = list(CHEF_DIRS)
for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
util.ensure_dir(d)
- vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
- vcert = chef_cfg.get('validation_cert')
+ vkey_path = chef_cfg.get("validation_key", CHEF_VALIDATION_PEM_PATH)
+ vcert = chef_cfg.get("validation_cert")
# special value 'system' means do not overwrite the file
# but still render the template to contain 'validation_key'
if vcert:
if vcert != "system":
util.write_file(vkey_path, vcert)
elif not os.path.isfile(vkey_path):
- log.warning("chef validation_cert provided as 'system', but "
- "validation_key path '%s' does not exist.",
- vkey_path)
+ log.warning(
+ "chef validation_cert provided as 'system', but "
+ "validation_key path '%s' does not exist.",
+ vkey_path,
+ )
# Create the chef config from template
- template_fn = cloud.get_template_filename('chef_client.rb')
+ template_fn = cloud.get_template_filename("chef_client.rb")
if template_fn:
iid = str(cloud.datasource.get_instance_id())
params = get_template_params(iid, chef_cfg, log)
@@ -445,32 +231,33 @@ def handle(name, cfg, cloud, log, _args):
util.ensure_dirs(param_paths)
templater.render_to_file(template_fn, CHEF_RB_PATH, params)
else:
- log.warning("No template found, not rendering to %s",
- CHEF_RB_PATH)
+ log.warning("No template found, not rendering to %s", CHEF_RB_PATH)
# Set the firstboot json
- fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
- default=CHEF_FB_PATH)
+ fb_filename = util.get_cfg_option_str(
+ chef_cfg, "firstboot_path", default=CHEF_FB_PATH
+ )
if not fb_filename:
log.info("First boot path empty, not writing first boot json file")
else:
initial_json = {}
- if 'run_list' in chef_cfg:
- initial_json['run_list'] = chef_cfg['run_list']
- if 'initial_attributes' in chef_cfg:
- initial_attributes = chef_cfg['initial_attributes']
+ if "run_list" in chef_cfg:
+ initial_json["run_list"] = chef_cfg["run_list"]
+ if "initial_attributes" in chef_cfg:
+ initial_attributes = chef_cfg["initial_attributes"]
for k in list(initial_attributes.keys()):
initial_json[k] = initial_attributes[k]
util.write_file(fb_filename, json.dumps(initial_json))
# Try to install chef, if its not already installed...
- force_install = util.get_cfg_option_bool(chef_cfg,
- 'force_install', default=False)
+ force_install = util.get_cfg_option_bool(
+ chef_cfg, "force_install", default=False
+ )
installed = subp.is_exe(CHEF_EXEC_PATH)
if not installed or force_install:
run = install_chef(cloud, chef_cfg, log)
elif installed:
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
+ run = util.get_cfg_option_bool(chef_cfg, "exec", default=False)
else:
run = False
if run:
@@ -479,18 +266,21 @@ def handle(name, cfg, cloud, log, _args):
def run_chef(chef_cfg, log):
- log.debug('Running chef-client')
+ log.debug("Running chef-client")
cmd = [CHEF_EXEC_PATH]
- if 'exec_arguments' in chef_cfg:
- cmd_args = chef_cfg['exec_arguments']
+ if "exec_arguments" in chef_cfg:
+ cmd_args = chef_cfg["exec_arguments"]
if isinstance(cmd_args, (list, tuple)):
cmd.extend(cmd_args)
elif isinstance(cmd_args, str):
cmd.append(cmd_args)
else:
- log.warning("Unknown type %s provided for chef"
- " 'exec_arguments' expected list, tuple,"
- " or string", type(cmd_args))
+ log.warning(
+ "Unknown type %s provided for chef"
+ " 'exec_arguments' expected list, tuple,"
+ " or string",
+ type(cmd_args),
+ )
cmd.extend(CHEF_EXEC_DEF_ARGS)
else:
cmd.extend(CHEF_EXEC_DEF_ARGS)
@@ -504,16 +294,16 @@ def subp_blob_in_tempfile(blob, *args, **kwargs):
The 'args' argument to subp will be updated with the full path to the
filename as the first argument.
"""
- basename = kwargs.pop('basename', "subp_blob")
+ basename = kwargs.pop("basename", "subp_blob")
- if len(args) == 0 and 'args' not in kwargs:
+ if len(args) == 0 and "args" not in kwargs:
args = [tuple()]
# Use tmpdir over tmpfile to avoid 'text file busy' on execute
with temp_utils.tempdir(needs_exe=True) as tmpd:
tmpf = os.path.join(tmpd, basename)
- if 'args' in kwargs:
- kwargs['args'] = [tmpf] + list(kwargs['args'])
+ if "args" in kwargs:
+ kwargs["args"] = [tmpf] + list(kwargs["args"])
else:
args = list(args)
args[0] = [tmpf] + args[0]
@@ -540,36 +330,39 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
if omnibus_version is None:
args = []
else:
- args = ['-v', omnibus_version]
+ args = ["-v", omnibus_version]
content = url_helper.readurl(url=url, retries=retries).contents
return subp_blob_in_tempfile(
- blob=content, args=args,
- basename='chef-omnibus-install', capture=False)
+ blob=content, args=args, basename="chef-omnibus-install", capture=False
+ )
def install_chef(cloud, chef_cfg, log):
# If chef is not installed, we install chef based on 'install_type'
- install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
- 'packages')
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
+ install_type = util.get_cfg_option_str(
+ chef_cfg, "install_type", "packages"
+ )
+ run = util.get_cfg_option_bool(chef_cfg, "exec", default=False)
if install_type == "gems":
# This will install and run the chef-client from gems
- chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
- ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
- RUBY_VERSION_DEFAULT)
+ chef_version = util.get_cfg_option_str(chef_cfg, "version", None)
+ ruby_version = util.get_cfg_option_str(
+ chef_cfg, "ruby_version", RUBY_VERSION_DEFAULT
+ )
install_chef_from_gems(ruby_version, chef_version, cloud.distro)
# Retain backwards compat, by preferring True instead of False
# when not provided/overridden...
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
- elif install_type == 'packages':
+ run = util.get_cfg_option_bool(chef_cfg, "exec", default=True)
+ elif install_type == "packages":
# This will install and run the chef-client from packages
- cloud.distro.install_packages(('chef',))
- elif install_type == 'omnibus':
+ cloud.distro.install_packages(("chef",))
+ elif install_type == "omnibus":
omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
install_chef_from_omnibus(
url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
- omnibus_version=omnibus_version)
+ omnibus_version=omnibus_version,
+ )
else:
log.warning("Unknown chef install type '%s'", install_type)
run = False
@@ -578,25 +371,47 @@ def install_chef(cloud, chef_cfg, log):
def get_ruby_packages(version):
# return a list of packages needed to install ruby at version
- pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
+ pkgs = ["ruby%s" % version, "ruby%s-dev" % version]
if version == "1.8":
- pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
+ pkgs.extend(("libopenssl-ruby1.8", "rubygems1.8"))
return pkgs
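For example (illustrative, not part of this diff), the helper above yields the following package lists:

assert get_ruby_packages("1.8") == [
    "ruby1.8", "ruby1.8-dev", "libopenssl-ruby1.8", "rubygems1.8"
]
assert get_ruby_packages("2.7") == ["ruby2.7", "ruby2.7-dev"]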
def install_chef_from_gems(ruby_version, chef_version, distro):
distro.install_packages(get_ruby_packages(ruby_version))
- if not os.path.exists('/usr/bin/gem'):
- util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
- if not os.path.exists('/usr/bin/ruby'):
- util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
+ if not os.path.exists("/usr/bin/gem"):
+ util.sym_link("/usr/bin/gem%s" % ruby_version, "/usr/bin/gem")
+ if not os.path.exists("/usr/bin/ruby"):
+ util.sym_link("/usr/bin/ruby%s" % ruby_version, "/usr/bin/ruby")
if chef_version:
- subp.subp(['/usr/bin/gem', 'install', 'chef',
- '-v %s' % chef_version, '--no-ri',
- '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
+ subp.subp(
+ [
+ "/usr/bin/gem",
+ "install",
+ "chef",
+ "-v %s" % chef_version,
+ "--no-ri",
+ "--no-rdoc",
+ "--bindir",
+ "/usr/bin",
+ "-q",
+ ],
+ capture=False,
+ )
else:
- subp.subp(['/usr/bin/gem', 'install', 'chef',
- '--no-ri', '--no-rdoc', '--bindir',
- '/usr/bin', '-q'], capture=False)
+ subp.subp(
+ [
+ "/usr/bin/gem",
+ "install",
+ "chef",
+ "--no-ri",
+ "--no-rdoc",
+ "--bindir",
+ "/usr/bin",
+ "-q",
+ ],
+ capture=False,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index 4d5a6aa2..c51818c3 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -2,46 +2,54 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Debug
------
-**Summary:** helper to debug cloud-init *internal* datastructures.
+"""Debug: Helper to debug cloud-init *internal* datastructures."""
+
+import copy
+from io import StringIO
+from textwrap import dedent
+
+from cloudinit import safeyaml, type_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+SKIP_KEYS = frozenset(["log_cfgs"])
+
+MODULE_DESCRIPTION = """\
+This module enables outputting various internal information that
+cloud-init sources provide, either to a file or to the console/log
+location that cloud-init has been configured with when running.
.. note::
Log configurations are not output.
-
-**Internal name:** ``cc_debug``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- debug:
- verbose: true/false (defaulting to true)
- output: (location to write output, defaulting to console + log)
"""
-import copy
-from io import StringIO
-
-from cloudinit import type_utils
-from cloudinit import util
-from cloudinit import safeyaml
-
-SKIP_KEYS = frozenset(['log_cfgs'])
+meta: MetaSchema = {
+ "id": "cc_debug",
+ "name": "Debug",
+ "title": "Helper to debug cloud-init *internal* datastructures",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ debug:
+ verbose: true
+ output: /tmp/my_debug.log
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def _make_header(text):
header = StringIO()
header.write("-" * 80)
header.write("\n")
- header.write(text.center(80, ' '))
+ header.write(text.center(80, " "))
header.write("\n")
header.write("-" * 80)
header.write("\n")
@@ -55,18 +63,16 @@ def _dumps(obj):
def handle(name, cfg, cloud, log, args):
"""Handler method activated by cloud-init."""
-
- verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
+ verbose = util.get_cfg_by_path(cfg, ("debug", "verbose"), default=True)
if args:
# if args are provided (from cmdline) then explicitly set verbose
out_file = args[0]
verbose = True
else:
- out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))
+ out_file = util.get_cfg_by_path(cfg, ("debug", "output"))
if not verbose:
- log.debug(("Skipping module named %s,"
- " verbose printing disabled"), name)
+ log.debug("Skipping module named %s, verbose printing disabled", name)
return
# Clean out some keys that we just don't care about showing...
dump_cfg = copy.deepcopy(cfg)
@@ -85,8 +91,9 @@ def handle(name, cfg, cloud, log, args):
to_print.write(_dumps(cloud.datasource.metadata))
to_print.write("\n")
to_print.write(_make_header("Misc"))
- to_print.write("Datasource: %s\n" %
- (type_utils.obj_name(cloud.datasource)))
+ to_print.write(
+ "Datasource: %s\n" % (type_utils.obj_name(cloud.datasource))
+ )
to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
@@ -102,4 +109,5 @@ def handle(name, cfg, cloud, log, args):
else:
util.multi_log("".join(content_to_file), console=True, stderr=False)
+
# vi: ts=4 expandtab
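To illustrate the output the handler above assembles: _make_header() wraps a centred title between 80-character rules before each section (Config, MetaData, Misc). A small check of that helper (not part of this diff; it assumes _make_header returns the accumulated StringIO contents, as its use in handle() implies):

hdr = _make_header("Config")
# hdr is three newline-terminated lines: an 80-dash rule, "Config"
# centred in an 80-column field, and another 80-dash rule.
assert hdr.splitlines()[0] == "-" * 80
assert hdr.splitlines()[1].strip() == "Config"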
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index dff93245..88cc28e2 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -6,52 +6,56 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Disable EC2 Metadata
---------------------
-**Summary:** disable aws ec2 metadata
+"""Disable EC2 Metadata: Disable AWS EC2 metadata."""
-This module can disable the ec2 datasource by rejecting the route to
-``169.254.169.254``, the usual route to the datasource. This module is disabled
-by default.
-
-**Internal name:** ``cc_disable_ec2_metadata``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- disable_ec2_metadata: <true/false>
-"""
-
-from cloudinit import subp
-from cloudinit import util
+from textwrap import dedent
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ALWAYS
-frequency = PER_ALWAYS
+REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"]
+REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"]
-REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject']
-REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254']
+meta: MetaSchema = {
+ "id": "cc_disable_ec2_metadata",
+ "name": "Disable EC2 Metadata",
+ "title": "Disable AWS EC2 Metadata",
+ "description": dedent(
+ """\
+ This module can disable the ec2 datasource by rejecting the route to
+ ``169.254.169.254``, the usual route to the datasource. This module
+ is disabled by default."""
+ ),
+ "distros": [ALL_DISTROS],
+ "frequency": PER_ALWAYS,
+ "examples": ["disable_ec2_metadata: true"],
+}
+
+__doc__ = get_meta_doc(meta)
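The handler below simply picks one of the two reject commands defined above. A stand-alone sketch of that fallback (not part of this diff) using the standard library, where shutil.which stands in for cloudinit.subp.which:

import shutil

def pick_reject_cmd():
    # Prefer iproute2, fall back to the legacy net-tools route command.
    if shutil.which("ip"):
        return ["ip", "route", "add", "prohibit", "169.254.169.254"]
    if shutil.which("ifconfig"):
        return ["route", "add", "-host", "169.254.169.254", "reject"]
    return None  # neither tool found; the module logs an error instead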
def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
reject_cmd = None
- if subp.which('ip'):
+ if subp.which("ip"):
reject_cmd = REJECT_CMD_IP
- elif subp.which('ifconfig'):
+ elif subp.which("ifconfig"):
reject_cmd = REJECT_CMD_IF
else:
- log.error(('Neither "route" nor "ip" command found, unable to '
- 'manipulate routing table'))
+ log.error(
+ 'Neither "route" nor "ip" command found, unable to '
+ "manipulate routing table"
+ )
return
subp.subp(reject_cmd, capture=False)
else:
- log.debug(("Skipping module named %s,"
- " disabling the ec2 route not enabled"), name)
+ log.debug(
+ "Skipping module named %s, disabling the ec2 route not enabled",
+ name,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index d1200694..ee05ea87 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -5,11 +5,31 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Disk Setup
-----------
-**Summary:** configure partitions and filesystems
+"""Disk Setup: Configure partitions and filesystems."""
+
+import logging
+import os
+import shlex
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+# Define the commands to use
+SFDISK_CMD = subp.which("sfdisk")
+SGDISK_CMD = subp.which("sgdisk")
+LSBLK_CMD = subp.which("lsblk")
+BLKID_CMD = subp.which("blkid")
+BLKDEV_CMD = subp.which("blockdev")
+PARTPROBE_CMD = subp.which("partprobe")
+WIPEFS_CMD = subp.which("wipefs")
+LANG_C_ENV = {"LANG": "C"}
+LOG = logging.getLogger(__name__)
+
+MODULE_DESCRIPTION = """\
This module is able to configure simple partition tables and filesystems.
.. note::
@@ -25,99 +45,45 @@ will refer to the block device of the ephemeral image.
Disk partitioning is done using the ``disk_setup`` directive. This config
directive accepts a dictionary where each key is either a path to a block
device or an alias specified in ``device_aliases``, and each value is the
-configuration options for the device. The ``table_type`` option specifies the
-partition table type, either ``mbr`` or ``gpt``. The ``layout`` option
-specifies how partitions on the device are to be arranged. If ``layout`` is set
-to ``true``, a single partition using all the space on the device will be
-created. If set to ``false``, no partitions will be created. Partitions can be
-specified by providing a list to ``layout``, where each entry in the list is
-either a size or a list containing a size and the numerical value for a
-partition type. The size for partitions is specified in **percentage** of disk
-space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space).
-The ``overwrite`` option controls whether this module tries to be safe about
-writing partition tables or not. If ``overwrite: false`` is set, the device
-will be checked for a partition table and for a file system and if either is
-found, the operation will be skipped. If ``overwrite: true`` is set, no checks
-will be performed.
-
-.. note::
- Using ``overwrite: true`` is dangerous and can lead to data loss, so double
- check that the correct device has been specified if using this option.
-
-File system configuration is done using the ``fs_setup`` directive. This config
-directive accepts a list of filesystem configs. The device to create the
-filesystem on may be specified either as a path or as an alias in the format
-``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device.
-The partition can also be specified by setting ``partition`` to the desired
-partition number. The ``partition`` option may also be set to ``auto``, in
-which case this module will search for the existence of a filesystem matching the
-``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip
-creating the filesystem if one is found. The ``partition`` option may also be
-set to ``any``, in which case any file system that matches ``type`` and
-``device`` will cause this module to skip filesystem creation for the
-``fs_setup`` entry, regardless of ``label`` matching or not. To write a
-filesystem directly to a device, use ``partition: none``. A label can be
-specified for the filesystem using ``label``, and the filesystem type can be
-specified using ``filesystem``.
-
-.. note::
- If specifying device using the ``<device name>.<partition number>`` format,
- the value of ``partition`` will be overwritten.
-
-.. note::
- Using ``overwrite: true`` for filesystems is dangerous and can lead to data
- loss, so double check the entry in ``fs_setup``.
-
-.. note::
- ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``.
-
-**Internal name:** ``cc_disk_setup``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- device_aliases:
- <alias name>: <device path>
- disk_setup:
- <alias name/path>:
- table_type: <'mbr'/'gpt'>
- layout:
- - [33,82]
- - 66
- overwrite: <true/false>
- fs_setup:
- - label: <label>
- filesystem: <filesystem type>
- device: <device>
- partition: <"auto"/"any"/"none"/<partition number>>
- overwrite: <true/false>
- replace_fs: <filesystem type>
+configuration options for the device. File system configuration is done using
+the ``fs_setup`` directive. This config directive accepts a list of
+filesystem configs.
"""
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-from cloudinit import subp
-import logging
-import os
-import shlex
-
-frequency = PER_INSTANCE
-
-# Define the commands to use
-UDEVADM_CMD = subp.which('udevadm')
-SFDISK_CMD = subp.which("sfdisk")
-SGDISK_CMD = subp.which("sgdisk")
-LSBLK_CMD = subp.which("lsblk")
-BLKID_CMD = subp.which("blkid")
-BLKDEV_CMD = subp.which("blockdev")
-WIPEFS_CMD = subp.which("wipefs")
-
-LANG_C_ENV = {'LANG': 'C'}
-
-LOG = logging.getLogger(__name__)
+meta: MetaSchema = {
+ "id": "cc_disk_setup",
+ "name": "Disk Setup",
+ "title": "Configure partitions and filesystems",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ device_aliases:
+ my_alias: /dev/sdb
+ disk_setup:
+ my_alias:
+ table_type: gpt
+ layout: [50, 50]
+ overwrite: true
+ fs_setup:
+ - label: fs1
+ filesystem: ext4
+ device: my_alias.1
+ cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
+ - label: fs2
+ device: my_alias.2
+ filesystem: ext4
+ mounts:
+ - ["my_alias.1", "/mnt1"]
+ - ["my_alias.2", "/mnt2"]
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def handle(_name, cfg, cloud, log, _args):
@@ -125,9 +91,15 @@ def handle(_name, cfg, cloud, log, _args):
See doc/examples/cloud-config-disk-setup.txt for documentation on the
format.
"""
+ device_aliases = cfg.get("device_aliases", {})
+
+ def alias_to_device(cand):
+ name = device_aliases.get(cand)
+ return cloud.device_name_to_device(name or cand) or name
+
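To make the alias handling concrete, here is a simplified stand-alone version of alias_to_device() above (not part of this diff). platform_map stands in for cloud.device_name_to_device, which may translate platform names such as "ephemeral0" to a real block device; the real helper returns None rather than the candidate when nothing resolves.

device_aliases = {"my_alias": "/dev/sdb"}

def resolve_device(cand, platform_map=None):
    # First try the user-supplied alias, then the platform mapping.
    name = device_aliases.get(cand)
    mapped = (platform_map or {}).get(name or cand)
    return mapped or name or cand

assert resolve_device("my_alias") == "/dev/sdb"
assert resolve_device("ephemeral0", {"ephemeral0": "/dev/xvdb"}) == "/dev/xvdb"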
disk_setup = cfg.get("disk_setup")
if isinstance(disk_setup, dict):
- update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
+ update_disk_setup_devices(disk_setup, alias_to_device)
log.debug("Partitioning disks: %s", str(disk_setup))
for disk, definition in disk_setup.items():
if not isinstance(definition, dict):
@@ -136,16 +108,19 @@ def handle(_name, cfg, cloud, log, _args):
try:
log.debug("Creating new partition table/disk")
- util.log_time(logfunc=LOG.debug,
- msg="Creating partition on %s" % disk,
- func=mkpart, args=(disk, definition))
+ util.log_time(
+ logfunc=LOG.debug,
+ msg="Creating partition on %s" % disk,
+ func=mkpart,
+ args=(disk, definition),
+ )
except Exception as e:
util.logexc(LOG, "Failed partitioning operation\n%s" % e)
fs_setup = cfg.get("fs_setup")
if isinstance(fs_setup, list):
log.debug("setting up filesystems: %s", str(fs_setup))
- update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
+ update_fs_setup_devices(fs_setup, alias_to_device)
for definition in fs_setup:
if not isinstance(definition, dict):
log.warning("Invalid file system definition: %s" % definition)
@@ -153,10 +128,13 @@ def handle(_name, cfg, cloud, log, _args):
try:
log.debug("Creating new filesystem.")
- device = definition.get('device')
- util.log_time(logfunc=LOG.debug,
- msg="Creating fs for %s" % device,
- func=mkfs, args=(definition,))
+ device = definition.get("device")
+ util.log_time(
+ logfunc=LOG.debug,
+ msg="Creating fs for %s" % device,
+ func=mkfs,
+ args=(definition,),
+ )
except Exception as e:
util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
@@ -169,15 +147,22 @@ def update_disk_setup_devices(disk_setup, tformer):
if transformed is None or transformed == origname:
continue
if transformed in disk_setup:
- LOG.info("Replacing %s in disk_setup for translation of %s",
- origname, transformed)
+ LOG.info(
+ "Replacing %s in disk_setup for translation of %s",
+ origname,
+ transformed,
+ )
del disk_setup[transformed]
disk_setup[transformed] = disk_setup[origname]
- disk_setup[transformed]['_origname'] = origname
+ if isinstance(disk_setup[transformed], dict):
+ disk_setup[transformed]["_origname"] = origname
del disk_setup[origname]
- LOG.debug("updated disk_setup device entry '%s' to '%s'",
- origname, transformed)
+ LOG.debug(
+ "updated disk_setup device entry '%s' to '%s'",
+ origname,
+ transformed,
+ )
def update_fs_setup_devices(disk_setup, tformer):
@@ -188,7 +173,7 @@ def update_fs_setup_devices(disk_setup, tformer):
LOG.warning("entry in disk_setup not a dict: %s", definition)
continue
- origname = definition.get('device')
+ origname = definition.get("device")
if origname is None:
continue
@@ -198,19 +183,24 @@ def update_fs_setup_devices(disk_setup, tformer):
tformed = tformer(dev)
if tformed is not None:
dev = tformed
- LOG.debug("%s is mapped to disk=%s part=%s",
- origname, tformed, part)
- definition['_origname'] = origname
- definition['device'] = tformed
+ LOG.debug(
+ "%s is mapped to disk=%s part=%s", origname, tformed, part
+ )
+ definition["_origname"] = origname
+ definition["device"] = tformed
if part:
# In origname with <dev>.N, N overrides 'partition' key.
- if 'partition' in definition:
- LOG.warning("Partition '%s' from dotted device name '%s' "
- "overrides 'partition' key in %s", part, origname,
- definition)
- definition['_partition'] = definition['partition']
- definition['partition'] = part
+ if "partition" in definition:
+ LOG.warning(
+ "Partition '%s' from dotted device name '%s' "
+ "overrides 'partition' key in %s",
+ part,
+ origname,
+ definition,
+ )
+ definition["_partition"] = definition["partition"]
+ definition["partition"] = part
def value_splitter(values, start=None):
@@ -222,7 +212,7 @@ def value_splitter(values, start=None):
if start:
_values = _values[start:]
- for key, value in [x.split('=') for x in _values]:
+ for key, value in [x.split("=") for x in _values]:
yield key, value
@@ -241,11 +231,16 @@ def enumerate_disk(device, nodeps=False):
name: the device name, i.e. sda
"""
- lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL',
- device]
+ lsblk_cmd = [
+ LSBLK_CMD,
+ "--pairs",
+ "--output",
+ "NAME,TYPE,FSTYPE,LABEL",
+ device,
+ ]
if nodeps:
- lsblk_cmd.append('--nodeps')
+ lsblk_cmd.append("--nodeps")
info = None
try:
@@ -259,10 +254,10 @@ def enumerate_disk(device, nodeps=False):
for part in parts:
d = {
- 'name': None,
- 'type': None,
- 'fstype': None,
- 'label': None,
+ "name": None,
+ "type": None,
+ "fstype": None,
+ "label": None,
}
for key, value in value_splitter(part):
@@ -293,9 +288,9 @@ def is_device_valid(name, partition=False):
LOG.warning("Query against device %s failed", name)
return False
- if partition and d_type == 'part':
+ if partition and d_type == "part":
return True
- elif not partition and d_type == 'disk':
+ elif not partition and d_type == "disk":
return True
return False
@@ -311,7 +306,7 @@ def check_fs(device):
"""
out, label, fs_type, uuid = None, None, None, None
- blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
+ blkid_cmd = [BLKID_CMD, "-c", "/dev/null", device]
try:
out, _err = subp.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
@@ -322,11 +317,11 @@ def check_fs(device):
if out:
if len(out.splitlines()) == 1:
for key, value in value_splitter(out, start=1):
- if key.lower() == 'label':
+ if key.lower() == "label":
label = value
- elif key.lower() == 'type':
+ elif key.lower() == "type":
fs_type = value
- elif key.lower() == 'uuid':
+ elif key.lower() == "uuid":
uuid = value
return label, fs_type, uuid
@@ -340,8 +335,14 @@ def is_filesystem(device):
return fs_type
-def find_device_node(device, fs_type=None, label=None, valid_targets=None,
- label_match=True, replace_fs=None):
+def find_device_node(
+ device,
+ fs_type=None,
+ label=None,
+ valid_targets=None,
+ label_match=True,
+ replace_fs=None,
+):
"""
Find a device that is either matches the spec, or the first
@@ -356,31 +357,32 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None,
label = ""
if not valid_targets:
- valid_targets = ['disk', 'part']
+ valid_targets = ["disk", "part"]
raw_device_used = False
for d in enumerate_disk(device):
- if d['fstype'] == replace_fs and label_match is False:
+ if d["fstype"] == replace_fs and label_match is False:
# We found a device where we want to replace the FS
- return ('/dev/%s' % d['name'], False)
+ return ("/dev/%s" % d["name"], False)
- if (d['fstype'] == fs_type and
- ((label_match and d['label'] == label) or not label_match)):
+ if d["fstype"] == fs_type and (
+ (label_match and d["label"] == label) or not label_match
+ ):
# If we find a matching device, we return that
- return ('/dev/%s' % d['name'], True)
+ return ("/dev/%s" % d["name"], True)
- if d['type'] in valid_targets:
+ if d["type"] in valid_targets:
- if d['type'] != 'disk' or d['fstype']:
+ if d["type"] != "disk" or d["fstype"]:
raw_device_used = True
- if d['type'] == 'disk':
+ if d["type"] == "disk":
# Skip the raw disk, it's the default
pass
- elif not d['fstype']:
- return ('/dev/%s' % d['name'], False)
+ elif not d["fstype"]:
+ return ("/dev/%s" % d["name"], False)
if not raw_device_used:
return (device, False)
@@ -423,7 +425,7 @@ def get_dyn_func(*args):
if len(args) < 2:
raise Exception("Unable to determine dynamic function name")
- func_name = (args[0] % args[1])
+ func_name = args[0] % args[1]
func_args = args[2:]
try:
@@ -438,8 +440,8 @@ def get_dyn_func(*args):
def get_hdd_size(device):
try:
- size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device])
- sector_size, _ = subp.subp([BLKDEV_CMD, '--getss', device])
+ size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device])
+ sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device])
except Exception as e:
raise Exception("Failed to get %s size\n%s" % (device, e)) from e
@@ -471,13 +473,13 @@ def check_partition_mbr_layout(device, layout):
if device in _line[0]:
# We don't understand extended partitions yet
- if _line[-1].lower() in ['extended', 'empty']:
+ if _line[-1].lower() in ["extended", "empty"]:
continue
# Find the partition types
type_label = None
for x in sorted(range(1, len(_line)), reverse=True):
- if _line[x].isdigit() and _line[x] != '/':
+ if _line[x].isdigit() and _line[x] != "/":
type_label = _line[x]
break
@@ -486,7 +488,7 @@ def check_partition_mbr_layout(device, layout):
def check_partition_gpt_layout(device, layout):
- prt_cmd = [SGDISK_CMD, '-p', device]
+ prt_cmd = [SGDISK_CMD, "-p", device]
try:
out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
@@ -512,7 +514,7 @@ def check_partition_gpt_layout(device, layout):
# Number Start (sector) End (sector) Size Code Name
# 1 2048 206847 100.0 MiB 0700 Microsoft basic data
for line in out_lines:
- if line.strip().startswith('Number'):
+ if line.strip().startswith("Number"):
break
codes = [line.strip().split()[5] for line in out_lines]
@@ -535,10 +537,16 @@ def check_partition_layout(table_type, device, layout):
function called check_partition_%s_layout
"""
found_layout = get_dyn_func(
- "check_partition_%s_layout", table_type, device, layout)
-
- LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s",
- table_type, device, layout, found_layout)
+ "check_partition_%s_layout", table_type, device, layout
+ )
+
+ LOG.debug(
+ "called check_partition_%s_layout(%s, %s), returned: %s",
+ table_type,
+ device,
+ layout,
+ found_layout,
+ )
if isinstance(layout, bool):
# if we are using auto partitioning, or "True" be happy
# if a single partition exists.
@@ -549,10 +557,12 @@ def check_partition_layout(table_type, device, layout):
elif len(found_layout) == len(layout):
# This just makes sure that the number of requested
# partitions and the type labels are right
- layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None
- for x in layout]
- LOG.debug("Layout types=%s. Found types=%s",
- layout_types, found_layout)
+ layout_types = [
+ str(x[1]) if isinstance(x, (tuple, list)) else None for x in layout
+ ]
+ LOG.debug(
+ "Layout types=%s. Found types=%s", layout_types, found_layout
+ )
for itype, ftype in zip(layout_types, found_layout):
if itype is not None and str(ftype) != str(itype):
return False
@@ -578,8 +588,9 @@ def get_partition_mbr_layout(size, layout):
# Create a single partition
return "0,"
- if ((len(layout) == 0 and isinstance(layout, list)) or
- not isinstance(layout, list)):
+ if (len(layout) == 0 and isinstance(layout, list)) or not isinstance(
+ layout, list
+ ):
raise Exception("Partition layout is invalid")
last_part_num = len(layout)
@@ -607,8 +618,10 @@ def get_partition_mbr_layout(size, layout):
sfdisk_definition = "\n".join(part_definition)
if len(part_definition) > 4:
- raise Exception("Calculated partition definition is too big\n%s" %
- sfdisk_definition)
+ raise Exception(
+ "Calculated partition definition is too big\n%s"
+ % sfdisk_definition
+ )
return sfdisk_definition
@@ -622,14 +635,15 @@ def get_partition_gpt_layout(size, layout):
if isinstance(partition, list):
if len(partition) != 2:
raise Exception(
- "Partition was incorrectly defined: %s" % partition)
+ "Partition was incorrectly defined: %s" % partition
+ )
percent, partition_type = partition
else:
percent = partition
partition_type = None
part_size = int(float(size) * (float(percent) / 100))
- partition_specs.append((partition_type, [0, '+{}'.format(part_size)]))
+ partition_specs.append((partition_type, [0, "+{}".format(part_size)]))
# The last partition should use up all remaining space
partition_specs[-1][-1][-1] = 0
@@ -639,7 +653,7 @@ def get_partition_gpt_layout(size, layout):
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
- null = '\0'
+ null = "\0"
start_len = 1024 * 1024
end_len = 1024 * 1024
with open(device, "rb+") as fp:
@@ -658,14 +672,14 @@ def purge_disk(device):
# wipe any file systems first
for d in enumerate_disk(device):
- if d['type'] not in ["disk", "crypt"]:
- wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
+ if d["type"] not in ["disk", "crypt"]:
+ wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d["name"]]
try:
- LOG.info("Purging filesystem on /dev/%s", d['name'])
+ LOG.info("Purging filesystem on /dev/%s", d["name"])
subp.subp(wipefs_cmd)
except Exception as e:
raise Exception(
- "Failed FS purge of /dev/%s" % d['name']
+ "Failed FS purge of /dev/%s" % d["name"]
) from e
purge_disk_ptable(device)
@@ -685,13 +699,16 @@ def get_partition_layout(table_type, size, layout):
def read_parttbl(device):
"""
- Use partprobe instead of 'udevadm'. Partprobe is the only
- reliable way to probe the partition table.
+ `Partprobe` is preferred over `blkdev` since it is more reliably
+ able to probe the partition table.
"""
- blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
+ if PARTPROBE_CMD is not None:
+ probe_cmd = [PARTPROBE_CMD, device]
+ else:
+ probe_cmd = [BLKDEV_CMD, "--rereadpt", device]
util.udevadm_settle()
try:
- subp.subp(blkdev_cmd)
+ subp.subp(probe_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
@@ -717,17 +734,24 @@ def exec_mkpart_mbr(device, layout):
def exec_mkpart_gpt(device, layout):
try:
- subp.subp([SGDISK_CMD, '-Z', device])
+ subp.subp([SGDISK_CMD, "-Z", device])
for index, (partition_type, (start, end)) in enumerate(layout):
index += 1
- subp.subp([SGDISK_CMD,
- '-n', '{}:{}:{}'.format(index, start, end), device])
+ subp.subp(
+ [
+ SGDISK_CMD,
+ "-n",
+ "{}:{}:{}".format(index, start, end),
+ device,
+ ]
+ )
if partition_type is not None:
# convert to a 4 char (or more) string right padded with 0
# 82 -> 8200. 'Linux' -> 'Linux'
pinput = str(partition_type).ljust(4, "0")
subp.subp(
- [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device])
+ [SGDISK_CMD, "-t", "{}:{}".format(index, pinput), device]
+ )
except Exception:
LOG.warning("Failed to partition device %s", device)
raise
@@ -753,8 +777,10 @@ def assert_and_settle_device(device):
if not os.path.exists(device):
util.udevadm_settle()
if not os.path.exists(device):
- raise RuntimeError("Device %s did not exist and was not created "
- "with a udevadm settle." % device)
+ raise RuntimeError(
+ "Device %s did not exist and was not created "
+ "with a udevadm settle." % device
+ )
# Whether or not the device existed above, it is possible that udev
# events that would populate udev database (for reading by lsdname) have
@@ -781,9 +807,9 @@ def mkpart(device, definition):
device = os.path.realpath(device)
LOG.debug("Checking values for %s definition", device)
- overwrite = definition.get('overwrite', False)
- layout = definition.get('layout', False)
- table_type = definition.get('table_type', 'mbr')
+ overwrite = definition.get("overwrite", False)
+ layout = definition.get("layout", False)
+ table_type = definition.get("table_type", "mbr")
# Check if the default device is a partition or not
LOG.debug("Checking against default devices")
@@ -796,7 +822,8 @@ def mkpart(device, definition):
LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
raise Exception(
- 'Device {device} is not a disk device!'.format(device=device))
+ "Device {device} is not a disk device!".format(device=device)
+ )
# Remove the partition table entries
if isinstance(layout, str) and layout.lower() == "remove":
@@ -832,21 +859,21 @@ def lookup_force_flag(fs):
A force flag might be -F or -f; this looks it up
"""
flags = {
- 'ext': '-F',
- 'btrfs': '-f',
- 'xfs': '-f',
- 'reiserfs': '-f',
- 'swap': '-f',
+ "ext": "-F",
+ "btrfs": "-f",
+ "xfs": "-f",
+ "reiserfs": "-f",
+ "swap": "-f",
}
- if 'ext' in fs.lower():
- fs = 'ext'
+ if "ext" in fs.lower():
+ fs = "ext"
if fs.lower() in flags:
return flags[fs]
LOG.warning("Force flag for %s is unknown.", fs)
- return ''
+ return ""
def mkfs(fs_cfg):
@@ -870,14 +897,14 @@ def mkfs(fs_cfg):
When 'cmd' is provided then no other parameter is required.
"""
- label = fs_cfg.get('label')
- device = fs_cfg.get('device')
- partition = str(fs_cfg.get('partition', 'any'))
- fs_type = fs_cfg.get('filesystem')
- fs_cmd = fs_cfg.get('cmd', [])
- fs_opts = fs_cfg.get('extra_opts', [])
- fs_replace = fs_cfg.get('replace_fs', False)
- overwrite = fs_cfg.get('overwrite', False)
+ label = fs_cfg.get("label")
+ device = fs_cfg.get("device")
+ partition = str(fs_cfg.get("partition", "any"))
+ fs_type = fs_cfg.get("filesystem")
+ fs_cmd = fs_cfg.get("cmd", [])
+ fs_opts = fs_cfg.get("extra_opts", [])
+ fs_replace = fs_cfg.get("replace_fs", False)
+ overwrite = fs_cfg.get("overwrite", False)
# ensure that we get a real device rather than a symbolic link
assert_and_settle_device(device)
@@ -890,14 +917,19 @@ def mkfs(fs_cfg):
# Handle manual definition of partition
if partition.isdigit():
device = "%s%s" % (device, partition)
- LOG.debug("Manual request of partition %s for %s",
- partition, device)
+ LOG.debug(
+ "Manual request of partition %s for %s", partition, device
+ )
# Check to see if the fs already exists
LOG.debug("Checking device %s", device)
check_label, check_fstype, _ = check_fs(device)
- LOG.debug("Device '%s' has check_label='%s' check_fstype=%s",
- device, check_label, check_fstype)
+ LOG.debug(
+ "Device '%s' has check_label='%s' check_fstype=%s",
+ device,
+ check_label,
+ check_fstype,
+ )
if check_label == label and check_fstype == fs_type:
LOG.debug("Existing file system found at %s", device)
@@ -911,19 +943,23 @@ def mkfs(fs_cfg):
else:
LOG.debug("Device %s is cleared for formating", device)
- elif partition and str(partition).lower() in ('auto', 'any'):
+ elif partition and str(partition).lower() in ("auto", "any"):
# For auto devices, we match if the filesystem does exist
odevice = device
LOG.debug("Identifying device to create %s filesytem on", label)
# any mean pick the first match on the device with matching fs_type
label_match = True
- if partition.lower() == 'any':
+ if partition.lower() == "any":
label_match = False
- device, reuse = find_device_node(device, fs_type=fs_type, label=label,
- label_match=label_match,
- replace_fs=fs_replace)
+ device, reuse = find_device_node(
+ device,
+ fs_type=fs_type,
+ label=label,
+ label_match=label_match,
+ replace_fs=fs_replace,
+ )
LOG.debug("Automatic device for %s identified as %s", odevice, device)
if reuse:
@@ -934,18 +970,25 @@ def mkfs(fs_cfg):
LOG.debug("Replacing file system on %s as instructed.", device)
if not device:
- LOG.debug("No device aviable that matches request. "
- "Skipping fs creation for %s", fs_cfg)
+ LOG.debug(
+ "No device available that matches request. "
+ "Skipping fs creation for %s",
+ fs_cfg,
+ )
return
- elif not partition or str(partition).lower() == 'none':
+ elif not partition or str(partition).lower() == "none":
LOG.debug("Using the raw device to place filesystem %s on", label)
else:
LOG.debug("Error in device identification handling.")
return
- LOG.debug("File system type '%s' with label '%s' will be created on %s",
- fs_type, label, device)
+ LOG.debug(
+ "File system type '%s' with label '%s' will be created on %s",
+ fs_type,
+ label,
+ device,
+ )
# Make sure the device is defined
if not device:
@@ -956,26 +999,29 @@ def mkfs(fs_cfg):
if not (fs_type or fs_cmd):
raise Exception(
"No way to create filesystem '{label}'. fs_type or fs_cmd "
- "must be set.".format(label=label))
+ "must be set.".format(label=label)
+ )
# Create the commands
shell = False
if fs_cmd:
- fs_cmd = fs_cfg['cmd'] % {
- 'label': label,
- 'filesystem': fs_type,
- 'device': device,
+ fs_cmd = fs_cfg["cmd"] % {
+ "label": label,
+ "filesystem": fs_type,
+ "device": device,
}
shell = True
if overwrite:
LOG.warning(
"fs_setup:overwrite ignored because cmd was specified: %s",
- fs_cmd)
+ fs_cmd,
+ )
if fs_opts:
LOG.warning(
"fs_setup:extra_opts ignored because cmd was specified: %s",
- fs_cmd)
+ fs_cmd,
+ )
else:
# Find the mkfs command
mkfs_cmd = subp.which("mkfs.%s" % fs_type)
@@ -983,8 +1029,11 @@ def mkfs(fs_cfg):
mkfs_cmd = subp.which("mk%s" % fs_type)
if not mkfs_cmd:
- LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
- fs_type, fs_type)
+ LOG.warning(
+ "Cannot create fstype '%s'. No mkfs.%s command",
+ fs_type,
+ fs_type,
+ )
return
fs_cmd = [mkfs_cmd, device]
@@ -1009,4 +1058,5 @@ def mkfs(fs_cfg):
except Exception as e:
raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
+
# vi: ts=4 expandtab
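
The dynamic dispatch used by check_partition_layout() resolves a handler name
from a format string and the partition-table type. A minimal, self-contained
sketch of that pattern follows; the handler bodies are placeholders standing in
for the real sfdisk/sgdisk probing in cc_disk_setup.

def check_partition_mbr_layout(device, layout):
    # placeholder for the real sfdisk-based inspection
    return "would inspect %s as mbr" % device

def check_partition_gpt_layout(device, layout):
    # placeholder for the real sgdisk-based inspection
    return "would inspect %s as gpt" % device

def get_dyn_func(*args):
    # same name-building idea as the module: "check_partition_%s_layout" % "gpt"
    if len(args) < 2:
        raise Exception("Unable to determine dynamic function name")
    func_name = args[0] % args[1]
    func_args = args[2:]
    func = globals()[func_name]
    return func(*func_args) if func_args else func()

print(get_dyn_func("check_partition_%s_layout", "gpt", "/dev/vdb", True))
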
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index b1d99f97..a928082b 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -16,7 +16,7 @@ user configuration should be required.
**Internal name:** ``cc_emit_upstart``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** ubuntu, debian
"""
@@ -24,12 +24,12 @@ user configuration should be required.
import os
from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
from cloudinit import subp
+from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
LOG = logging.getLogger(__name__)
@@ -39,15 +39,18 @@ def is_upstart_system():
return False
myenv = os.environ.copy()
- if 'UPSTART_SESSION' in myenv:
- del myenv['UPSTART_SESSION']
- check_cmd = ['initctl', 'version']
+ if "UPSTART_SESSION" in myenv:
+ del myenv["UPSTART_SESSION"]
+ check_cmd = ["initctl", "version"]
try:
(out, _err) = subp.subp(check_cmd, env=myenv)
- return 'upstart' in out
+ return "upstart" in out
except subp.ProcessExecutionError as e:
- LOG.debug("'%s' returned '%s', not using upstart",
- ' '.join(check_cmd), e.exit_code)
+ LOG.debug(
+ "'%s' returned '%s', not using upstart",
+ " ".join(check_cmd),
+ e.exit_code,
+ )
return False
@@ -56,7 +59,7 @@ def handle(name, _cfg, cloud, log, args):
if not event_names:
# Default to the 'cloud-config'
# event for backwards compat.
- event_names = ['cloud-config']
+ event_names = ["cloud-config"]
if not is_upstart_system():
log.debug("not upstart system, '%s' disabled", name)
@@ -64,11 +67,12 @@ def handle(name, _cfg, cloud, log, args):
cfgpath = cloud.paths.get_ipath_cur("cloud_config")
for n in event_names:
- cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
+ cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath]
try:
subp.subp(cmd)
except Exception as e:
# TODO(harlowja), use log exception from utils??
log.warning("Emission of upstart event %s failed due to: %s", n, e)
+
# vi: ts=4 expandtab
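
For each configured event name the module assembles an initctl invocation and
hands it to subp.subp(). A tiny standalone illustration of the command that is
built; the cfgpath value here is only an example, the module asks
cloud.paths.get_ipath_cur("cloud_config") for the real path.

cfgpath = "/var/lib/cloud/instance/cloud-config.txt"  # example path, not authoritative
for n in ["cloud-config"]:
    cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath]
    print(" ".join(cmd))
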
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 77984bca..50a81744 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -38,68 +38,62 @@ If cloud-init sees a ``fan`` entry in cloud-config it will:
"""
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
BUILTIN_CFG = {
- 'config': None,
- 'config_path': '/etc/network/fan',
+ "config": None,
+ "config_path": "/etc/network/fan",
}
-def stop_update_start(service, config_file, content, systemd=False):
- if systemd:
- cmds = {'stop': ['systemctl', 'stop', service],
- 'start': ['systemctl', 'start', service],
- 'enable': ['systemctl', 'enable', service]}
- else:
- cmds = {'stop': ['service', 'stop'],
- 'start': ['service', 'start']}
-
- def run(cmd, msg):
- try:
- return subp.subp(cmd, capture=True)
- except subp.ProcessExecutionError as e:
- LOG.warning("failed: %s (%s): %s", service, cmd, e)
- return False
-
- stop_failed = not run(cmds['stop'], msg='stop %s' % service)
- if not content.endswith('\n'):
- content += '\n'
- util.write_file(config_file, content, omode="w")
+def stop_update_start(distro, service, config_file, content):
+ try:
+ distro.manage_service("stop", service)
+ stop_failed = False
+ except subp.ProcessExecutionError as e:
+ stop_failed = True
+ LOG.warning("failed to stop %s: %s", service, e)
- ret = run(cmds['start'], msg='start %s' % service)
- if ret and stop_failed:
- LOG.warning("success: %s started", service)
+ if not content.endswith("\n"):
+ content += "\n"
+ util.write_file(config_file, content, omode="w")
- if 'enable' in cmds:
- ret = run(cmds['enable'], msg='enable %s' % service)
+ try:
+ distro.manage_service("start", service)
+ if stop_failed:
+ LOG.warning("success: %s started", service)
+ except subp.ProcessExecutionError as e:
+ LOG.warning("failed to start %s: %s", service, e)
- return ret
+ distro.manage_service("enable", service)
def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('fan')
+ cfgin = cfg.get("fan")
if not cfgin:
cfgin = {}
mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
- if not mycfg.get('config'):
+ if not mycfg.get("config"):
LOG.debug("%s: no 'fan' config entry. disabling", name)
return
- util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
+ util.write_file(mycfg.get("config_path"), mycfg.get("config"), omode="w")
distro = cloud.distro
- if not subp.which('fanctl'):
- distro.install_packages(['ubuntu-fan'])
+ if not subp.which("fanctl"):
+ distro.install_packages(["ubuntu-fan"])
stop_update_start(
- service='ubuntu-fan', config_file=mycfg.get('config_path'),
- content=mycfg.get('config'), systemd=distro.uses_systemd())
+ distro,
+ service="ubuntu-fan",
+ config_file=mycfg.get("config_path"),
+ content=mycfg.get("config"),
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index 3441f7a9..f443ccd8 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -21,7 +21,7 @@ specified as a jinja template with the following variables set:
**Internal name:** ``cc_final_message``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -31,10 +31,7 @@ specified as a jinja template with the following variables set:
"""
-from cloudinit import templater
-from cloudinit import util
-from cloudinit import version
-
+from cloudinit import templater, util, version
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
@@ -49,7 +46,7 @@ FINAL_MESSAGE_DEF = (
def handle(_name, cfg, cloud, log, args):
- msg_in = ''
+ msg_in = ""
if len(args) != 0:
msg_in = str(args[0])
else:
@@ -64,14 +61,18 @@ def handle(_name, cfg, cloud, log, args):
cver = version.version_string()
try:
subs = {
- 'uptime': uptime,
- 'timestamp': ts,
- 'version': cver,
- 'datasource': str(cloud.datasource),
+ "uptime": uptime,
+ "timestamp": ts,
+ "version": cver,
+ "datasource": str(cloud.datasource),
}
subs.update(dict([(k.upper(), v) for k, v in subs.items()]))
- util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
- console=False, stderr=True, log=log)
+ util.multi_log(
+ "%s\n" % (templater.render_string(msg_in, subs)),
+ console=False,
+ stderr=True,
+ log=log,
+ )
except Exception:
util.logexc(log, "Failed to render final message template")
@@ -85,4 +86,5 @@ def handle(_name, cfg, cloud, log, args):
if cloud.datasource.is_disconnected:
log.warning("Used fallback datasource")
+
# vi: ts=4 expandtab
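
The substitution table handed to the templater carries both lower- and
upper-case keys. A rough standalone sketch of how that table is built and
applied; the values and the message are invented, and the plain string
replacement merely stands in for templater.render_string().

subs = {
    "uptime": "12.05",
    "timestamp": "Mon, 01 Jan 2024 00:00:00 +0000",
    "version": "22.1",
    "datasource": "DataSourceNoCloud",
}
subs.update(dict((k.upper(), v) for k, v in subs.items()))

msg_in = "Cloud-init v. $version finished at $timestamp. Up $UPTIME seconds."
rendered = msg_in
for key, val in subs.items():
    rendered = rendered.replace("$" + key, val)  # stand-in for the real renderer
print(rendered)
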
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
index 924b967c..3c307153 100644
--- a/cloudinit/config/cc_foo.py
+++ b/cloudinit/config/cc_foo.py
@@ -53,4 +53,5 @@ frequency = PER_INSTANCE
def handle(name, _cfg, _cloud, log, _args):
log.debug("Hi from module %s", name)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 9f338ad1..43334caa 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -50,7 +50,7 @@ growpart is::
**Internal name:** ``cc_growpart``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -70,16 +70,15 @@ import re
import stat
from cloudinit import log as logging
+from cloudinit import subp, temp_utils, util
from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util
frequency = PER_ALWAYS
DEFAULT_CONFIG = {
- 'mode': 'auto',
- 'devices': ['/'],
- 'ignore_growroot_disabled': False,
+ "mode": "auto",
+ "devices": ["/"],
+ "ignore_growroot_disabled": False,
}
@@ -130,7 +129,7 @@ class ResizeFailedException(Exception):
class ResizeGrowPart(object):
def available(self):
myenv = os.environ.copy()
- myenv['LANG'] = 'C'
+ myenv["LANG"] = "C"
try:
(out, _err) = subp.subp(["growpart", "--help"], env=myenv)
@@ -142,21 +141,37 @@ class ResizeGrowPart(object):
return False
def resize(self, diskdev, partnum, partdev):
+ myenv = os.environ.copy()
+ myenv["LANG"] = "C"
before = get_size(partdev)
- try:
- subp.subp(["growpart", '--dry-run', diskdev, partnum])
- except subp.ProcessExecutionError as e:
- if e.exit_code != 1:
- util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
- diskdev, partnum)
- raise ResizeFailedException(e) from e
- return (before, before)
- try:
- subp.subp(["growpart", diskdev, partnum])
- except subp.ProcessExecutionError as e:
- util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
- raise ResizeFailedException(e) from e
+ # growpart uses tmp dir to store intermediate states
+ # and may conflict with systemd-tmpfiles-clean
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ growpart_tmp = os.path.join(tmpd, "growpart")
+ if not os.path.exists(growpart_tmp):
+ os.mkdir(growpart_tmp, 0o700)
+ myenv["TMPDIR"] = growpart_tmp
+ try:
+ subp.subp(
+ ["growpart", "--dry-run", diskdev, partnum], env=myenv
+ )
+ except subp.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ util.logexc(
+ LOG,
+ "Failed growpart --dry-run for (%s, %s)",
+ diskdev,
+ partnum,
+ )
+ raise ResizeFailedException(e) from e
+ return (before, before)
+
+ try:
+ subp.subp(["growpart", diskdev, partnum], env=myenv)
+ except subp.ProcessExecutionError as e:
+ util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
+ raise ResizeFailedException(e) from e
return (before, get_size(partdev))
@@ -164,7 +179,7 @@ class ResizeGrowPart(object):
class ResizeGpart(object):
def available(self):
myenv = os.environ.copy()
- myenv['LANG'] = 'C'
+ myenv["LANG"] = "C"
try:
(_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
@@ -222,7 +237,11 @@ def device_part_info(devpath):
# the device, like /dev/vtbd0p2.
if util.is_FreeBSD():
freebsd_part = "/dev/" + util.find_freebsd_part(devpath)
- m = re.search('^(/dev/.+)p([0-9])$', freebsd_part)
+ m = re.search("^(/dev/.+)p([0-9])$", freebsd_part)
+ return (m.group(1), m.group(2))
+ elif util.is_DragonFlyBSD():
+ dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath)
+ m = re.search("^(/dev/.+)s([0-9])$", dragonflybsd_part)
return (m.group(1), m.group(2))
if not os.path.exists(syspath):
@@ -259,7 +278,7 @@ def devent2dev(devent):
container = util.is_container()
# Ensure the path is a block device.
- if (dev == "/dev/root" and not container):
+ if dev == "/dev/root" and not container:
dev = util.rootdev_from_cmdline(util.get_cmdline())
if dev is None:
if os.path.exists(dev):
@@ -277,65 +296,102 @@ def resize_devices(resizer, devices):
try:
blockdev = devent2dev(devent)
except ValueError as e:
- info.append((devent, RESIZE.SKIPPED,
- "unable to convert to device: %s" % e,))
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "unable to convert to device: %s" % e,
+ )
+ )
continue
try:
statret = os.stat(blockdev)
except OSError as e:
- info.append((devent, RESIZE.SKIPPED,
- "stat of '%s' failed: %s" % (blockdev, e),))
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "stat of '%s' failed: %s" % (blockdev, e),
+ )
+ )
continue
- if (not stat.S_ISBLK(statret.st_mode) and
- not stat.S_ISCHR(statret.st_mode)):
- info.append((devent, RESIZE.SKIPPED,
- "device '%s' not a block device" % blockdev,))
+ if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(
+ statret.st_mode
+ ):
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "device '%s' not a block device" % blockdev,
+ )
+ )
continue
try:
(disk, ptnum) = device_part_info(blockdev)
except (TypeError, ValueError) as e:
- info.append((devent, RESIZE.SKIPPED,
- "device_part_info(%s) failed: %s" % (blockdev, e),))
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "device_part_info(%s) failed: %s" % (blockdev, e),
+ )
+ )
continue
try:
(old, new) = resizer.resize(disk, ptnum, blockdev)
if old == new:
- info.append((devent, RESIZE.NOCHANGE,
- "no change necessary (%s, %s)" % (disk, ptnum),))
+ info.append(
+ (
+ devent,
+ RESIZE.NOCHANGE,
+ "no change necessary (%s, %s)" % (disk, ptnum),
+ )
+ )
else:
- info.append((devent, RESIZE.CHANGED,
- "changed (%s, %s) from %s to %s" %
- (disk, ptnum, old, new),))
+ info.append(
+ (
+ devent,
+ RESIZE.CHANGED,
+ "changed (%s, %s) from %s to %s"
+ % (disk, ptnum, old, new),
+ )
+ )
except ResizeFailedException as e:
- info.append((devent, RESIZE.FAILED,
- "failed to resize: disk=%s, ptnum=%s: %s" %
- (disk, ptnum, e),))
+ info.append(
+ (
+ devent,
+ RESIZE.FAILED,
+ "failed to resize: disk=%s, ptnum=%s: %s"
+ % (disk, ptnum, e),
+ )
+ )
return info
def handle(_name, cfg, _cloud, log, _args):
- if 'growpart' not in cfg:
- log.debug("No 'growpart' entry in cfg. Using default: %s" %
- DEFAULT_CONFIG)
- cfg['growpart'] = DEFAULT_CONFIG
+ if "growpart" not in cfg:
+ log.debug(
+ "No 'growpart' entry in cfg. Using default: %s" % DEFAULT_CONFIG
+ )
+ cfg["growpart"] = DEFAULT_CONFIG
- mycfg = cfg.get('growpart')
+ mycfg = cfg.get("growpart")
if not isinstance(mycfg, dict):
log.warning("'growpart' in config was not a dict")
return
- mode = mycfg.get('mode', "auto")
+ mode = mycfg.get("mode", "auto")
if util.is_false(mode):
log.debug("growpart disabled: mode=%s" % mode)
return
- if util.is_false(mycfg.get('ignore_growroot_disabled', False)):
+ if util.is_false(mycfg.get("ignore_growroot_disabled", False)):
if os.path.isfile("/etc/growroot-disabled"):
log.debug("growpart disabled: /etc/growroot-disabled exists")
log.debug("use ignore_growroot_disabled to ignore")
@@ -354,8 +410,12 @@ def handle(_name, cfg, _cloud, log, _args):
raise e
return
- resized = util.log_time(logfunc=log.debug, msg="resize_devices",
- func=resize_devices, args=(resizer, devices))
+ resized = util.log_time(
+ logfunc=log.debug,
+ msg="resize_devices",
+ func=resize_devices,
+ args=(resizer, devices),
+ )
for (entry, action, msg) in resized:
if action == RESIZE.CHANGED:
log.info("'%s' resized: %s" % (entry, msg))
@@ -363,6 +423,6 @@ def handle(_name, cfg, _cloud, log, _args):
log.debug("'%s' %s: %s" % (entry, action, msg))
-RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
+RESIZERS = (("growpart", ResizeGrowPart), ("gpart", ResizeGpart))
# vi: ts=4 expandtab
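
The new DragonFlyBSD branch mirrors the FreeBSD one but splits the device path
on an 's' separator instead of 'p'. A quick runnable sketch of both regexes,
using made-up device names.

import re

freebsd_part = "/dev/vtbd0p2"
m = re.search("^(/dev/.+)p([0-9])$", freebsd_part)
print(m.group(1), m.group(2))   # -> /dev/vtbd0 2

dragonflybsd_part = "/dev/vbd0s1"
m = re.search("^(/dev/.+)s([0-9])$", dragonflybsd_part)
print(m.group(1), m.group(2))   # -> /dev/vbd0 1
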
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index eb03c664..ad7243d9 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -43,11 +43,10 @@ seeded with empty values, and install_devices_empty is set to true.
import os
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.subp import ProcessExecutionError
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
def fetch_idevs(log):
@@ -60,8 +59,9 @@ def fetch_idevs(log):
try:
# get the root disk where the /boot directory resides.
- disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'],
- capture=True)[0].strip()
+ disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[
+ 0
+ ].strip()
except ProcessExecutionError as e:
# grub-common may not be installed, especially on containers
# FileNotFoundError is a nested exception of ProcessExecutionError
@@ -81,26 +81,30 @@ def fetch_idevs(log):
if not disk or not os.path.exists(disk):
# If we failed to detect a disk, we can return early
- return ''
+ return ""
try:
# check if disk exists and use udevadm to fetch symlinks
- devices = subp.subp(
- ['udevadm', 'info', '--root', '--query=symlink', disk],
- capture=True
- )[0].strip().split()
+ devices = (
+ subp.subp(
+ ["udevadm", "info", "--root", "--query=symlink", disk],
+ capture=True,
+ )[0]
+ .strip()
+ .split()
+ )
except Exception:
util.logexc(
log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk
)
- log.debug('considering these device symlinks: %s', ','.join(devices))
+ log.debug("considering these device symlinks: %s", ",".join(devices))
# filter symlinks for /dev/disk/by-id entries
- devices = [dev for dev in devices if 'disk/by-id' in dev]
- log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices))
+ devices = [dev for dev in devices if "disk/by-id" in dev]
+ log.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices))
# select first device if there is one, else fall back to plain name
idevs = sorted(devices)[0] if devices else disk
- log.debug('selected %s', idevs)
+ log.debug("selected %s", idevs)
return idevs
@@ -111,14 +115,15 @@ def handle(name, cfg, _cloud, log, _args):
if not mycfg:
mycfg = {}
- enabled = mycfg.get('enabled', True)
+ enabled = mycfg.get("enabled", True)
if util.is_false(enabled):
log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
return
idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
idevs_empty = util.get_cfg_option_str(
- mycfg, "grub-pc/install_devices_empty", None)
+ mycfg, "grub-pc/install_devices_empty", None
+ )
if idevs is None:
idevs = fetch_idevs(log)
@@ -128,16 +133,21 @@ def handle(name, cfg, _cloud, log, _args):
# now idevs and idevs_empty are set to determined values
# or, those set by user
- dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
- "grub-pc grub-pc/install_devices_empty boolean %s\n") %
- (idevs, idevs_empty))
+ dconf_sel = (
+ "grub-pc grub-pc/install_devices string %s\n"
+ "grub-pc grub-pc/install_devices_empty boolean %s\n"
+ % (idevs, idevs_empty)
+ )
- log.debug("Setting grub debconf-set-selections with '%s','%s'" %
- (idevs, idevs_empty))
+ log.debug(
+ "Setting grub debconf-set-selections with '%s','%s'"
+ % (idevs, idevs_empty)
+ )
try:
- subp.subp(['debconf-set-selections'], dconf_sel)
+ subp.subp(["debconf-set-selections"], dconf_sel)
except Exception:
util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
+
# vi: ts=4 expandtab
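
The reflowed code still feeds a two-line selections document to
debconf-set-selections. A small illustration of the payload it builds, using
invented device values.

idevs = "/dev/disk/by-id/virtio-abc123"   # example value, normally from fetch_idevs()
idevs_empty = "false"
dconf_sel = (
    "grub-pc grub-pc/install_devices string %s\n"
    "grub-pc grub-pc/install_devices_empty boolean %s\n" % (idevs, idevs_empty)
)
print(dconf_sel)
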
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
new file mode 100644
index 00000000..34c4557e
--- /dev/null
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -0,0 +1,151 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Install hotplug udev rules if supported and enabled"""
+import os
+from textwrap import dedent
+
+from cloudinit import stages, subp, util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.event import EventScope, EventType
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+distros = [ALL_DISTROS]
+
+meta: MetaSchema = {
+ "id": "cc_install_hotplug",
+ "name": "Install Hotplug",
+ "title": "Install hotplug if supported and enabled",
+ "description": dedent(
+ """\
+ This module will install the udev rules to enable hotplug if
+ supported by the datasource and enabled in the userdata. The udev
+ rules will be installed as
+ ``/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules``.
+
+ When hotplug is enabled, newly added network devices will be added
+ to the system by cloud-init. After udev detects the event,
+        cloud-init will refresh the instance metadata from the datasource,
+ detect the device in the updated metadata, then apply the updated
+ network configuration.
+
+ Currently supported datasources: Openstack, EC2
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Enable hotplug of network devices
+ updates:
+ network:
+ when: ["hotplug"]
+ """
+ ),
+ dedent(
+ """\
+ # Enable network hotplug alongside boot event
+ updates:
+ network:
+ when: ["boot", "hotplug"]
+ """
+ ),
+ ],
+ "frequency": frequency,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "updates": {
+ "type": "object",
+ "additionalProperties": False,
+ "properties": {
+ "network": {
+ "type": "object",
+ "required": ["when"],
+ "additionalProperties": False,
+ "properties": {
+ "when": {
+ "type": "array",
+ "additionalProperties": False,
+ "items": {
+ "type": "string",
+ "additionalProperties": False,
+ "enum": [
+ "boot-new-instance",
+ "boot-legacy",
+ "boot",
+ "hotplug",
+ ],
+ },
+ }
+ },
+ }
+ },
+ }
+ },
+}
+
+__doc__ = get_meta_doc(meta, schema)
+
+
+HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
+HOTPLUG_UDEV_RULES_TEMPLATE = """\
+# Installed by cloud-init due to network hotplug userdata
+ACTION!="add|remove", GOTO="cloudinit_end"
+LABEL="cloudinit_hook"
+SUBSYSTEM=="net", RUN+="{libexecdir}/hook-hotplug"
+LABEL="cloudinit_end"
+"""
+
+
+def handle(_name, cfg, cloud, log, _args):
+ validate_cloudconfig_schema(cfg, schema)
+ network_hotplug_enabled = (
+ "updates" in cfg
+ and "network" in cfg["updates"]
+ and "when" in cfg["updates"]["network"]
+ and "hotplug" in cfg["updates"]["network"]["when"]
+ )
+ hotplug_supported = EventType.HOTPLUG in (
+ cloud.datasource.get_supported_events([EventType.HOTPLUG]).get(
+ EventScope.NETWORK, set()
+ )
+ )
+ hotplug_enabled = stages.update_event_enabled(
+ datasource=cloud.datasource,
+ cfg=cfg,
+ event_source_type=EventType.HOTPLUG,
+ scope=EventScope.NETWORK,
+ )
+ if not (hotplug_supported and hotplug_enabled):
+ if os.path.exists(HOTPLUG_UDEV_PATH):
+ log.debug("Uninstalling hotplug, not enabled")
+ util.del_file(HOTPLUG_UDEV_PATH)
+ subp.subp(["udevadm", "control", "--reload-rules"])
+ elif network_hotplug_enabled:
+ log.warning(
+ "Hotplug is unsupported by current datasource. "
+ "Udev rules will NOT be installed."
+ )
+ else:
+ log.debug("Skipping hotplug install, not enabled")
+ return
+ if not subp.which("udevadm"):
+ log.debug("Skipping hotplug install, udevadm not found")
+ return
+
+ # This may need to turn into a distro property at some point
+ libexecdir = "/usr/libexec/cloud-init"
+ if not os.path.exists(libexecdir):
+ libexecdir = "/usr/lib/cloud-init"
+ util.write_file(
+ filename=HOTPLUG_UDEV_PATH,
+ content=HOTPLUG_UDEV_RULES_TEMPLATE.format(libexecdir=libexecdir),
+ )
+ subp.subp(["udevadm", "control", "--reload-rules"])
diff --git a/cloudinit/config/cc_keyboard.py b/cloudinit/config/cc_keyboard.py
new file mode 100644
index 00000000..98ef326a
--- /dev/null
+++ b/cloudinit/config/cc_keyboard.py
@@ -0,0 +1,129 @@
+# Copyright (c) 2022 Floris Bos
+#
+# Author: Floris Bos <bos@je-eigen-domein.nl>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""keyboard: set keyboard layout"""
+
+from textwrap import dedent
+
+from cloudinit import distros
+from cloudinit import log as logging
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+# FIXME: setting keyboard layout should be supported by all OSes.
+# But currently only implemented for Linux distributions that use systemd.
+osfamilies = ["arch", "debian", "redhat", "suse"]
+distros = distros.Distro.expand_osfamily(osfamilies)
+
+DEFAULT_KEYBOARD_MODEL = "pc105"
+
+meta: MetaSchema = {
+ "id": "cc_keyboard",
+ "name": "Keyboard",
+ "title": "Set keyboard layout",
+ "description": dedent(
+ """\
+ Handle keyboard configuration.
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Set keyboard layout to "us"
+ keyboard:
+ layout: us
+ """
+ ),
+ dedent(
+ """\
+ # Set specific keyboard layout, model, variant, options
+ keyboard:
+ layout: de
+ model: pc105
+ variant: nodeadkeys
+ options: compose:rwin
+ """
+ ),
+ ],
+ "frequency": frequency,
+}
+
+
+schema = {
+ "type": "object",
+ "properties": {
+ "keyboard": {
+ "type": "object",
+ "properties": {
+ "layout": {
+ "type": "string",
+ "description": dedent(
+ """\
+ Required. Keyboard layout. Corresponds to XKBLAYOUT.
+ """
+ ),
+ },
+ "model": {
+ "type": "string",
+ "default": DEFAULT_KEYBOARD_MODEL,
+ "description": dedent(
+ """\
+ Optional. Keyboard model. Corresponds to XKBMODEL.
+ """
+ ),
+ },
+ "variant": {
+ "type": "string",
+ "description": dedent(
+ """\
+ Optional. Keyboard variant. Corresponds to XKBVARIANT.
+ """
+ ),
+ },
+ "options": {
+ "type": "string",
+ "description": dedent(
+ """\
+ Optional. Keyboard options. Corresponds to XKBOPTIONS.
+ """
+ ),
+ },
+ },
+ "required": ["layout"],
+ "additionalProperties": False,
+ }
+ },
+}
+
+__doc__ = get_meta_doc(meta, schema)
+
+LOG = logging.getLogger(__name__)
+
+
+def handle(name, cfg, cloud, log, args):
+ if "keyboard" not in cfg:
+ LOG.debug(
+ "Skipping module named %s, no 'keyboard' section found", name
+ )
+ return
+ validate_cloudconfig_schema(cfg, schema)
+ kb_cfg = cfg["keyboard"]
+ layout = kb_cfg["layout"]
+ model = kb_cfg.get("model", DEFAULT_KEYBOARD_MODEL)
+ variant = kb_cfg.get("variant", "")
+ options = kb_cfg.get("options", "")
+ LOG.debug("Setting keyboard layout to '%s'", layout)
+ cloud.distro.set_keymap(layout, model, variant, options)
+
+
+# vi: ts=4 expandtab
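
A toy walkthrough of how the new module pulls the keyboard settings out of
cloud-config and falls back to the pc105 model default; the print stands in for
the cloud.distro.set_keymap() call, and the config values are examples.

cfg = {"keyboard": {"layout": "de", "variant": "nodeadkeys"}}

kb_cfg = cfg["keyboard"]
layout = kb_cfg["layout"]                  # required by the schema
model = kb_cfg.get("model", "pc105")       # DEFAULT_KEYBOARD_MODEL
variant = kb_cfg.get("variant", "")
options = kb_cfg.get("options", "")
print(layout, model, variant, options)     # real module: cloud.distro.set_keymap(...)
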
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 0f2be52b..ab35e136 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -9,14 +9,17 @@
"""
Keys to Console
---------------
-**Summary:** control which SSH keys may be written to console
-
-For security reasons it may be desirable not to write SSH fingerprints and keys
-to the console. To avoid the fingerprint of types of SSH keys being written to
-console the ``ssh_fp_console_blacklist`` config key can be used. By default all
-types of keys will have their fingerprints written to console. To avoid keys
-of a key type being written to console the ``ssh_key_console_blacklist`` config
-key can be used. By default ``ssh-dss`` keys are not written to console.
+**Summary:** control which SSH host keys may be written to console
+
+For security reasons it may be desirable not to write SSH host keys and their
+fingerprints to the console. To avoid either being written to the console the
+``emit_keys_to_console`` config key under the main ``ssh`` config key can be
+used. To avoid the fingerprint of types of SSH host keys being written to
+console the ``ssh_fp_console_blacklist`` config key can be used. By default
+all types of keys will have their fingerprints written to console. To avoid
+host keys of a key type being written to console the
+``ssh_key_console_blacklist`` config key can be used. By default ``ssh-dss``
+host keys are not written to console.
**Internal name:** ``cc_keys_to_console``
@@ -26,50 +29,62 @@ key can be used. By default ``ssh-dss`` keys are not written to console.
**Config keys**::
+ ssh:
+ emit_keys_to_console: false
+
ssh_fp_console_blacklist: <list of key types>
ssh_key_console_blacklist: <list of key types>
"""
import os
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
frequency = PER_INSTANCE
# This is a tool that cloud init provides
-HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints'
+HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints"
def _get_helper_tool_path(distro):
try:
base_lib = distro.usr_lib_exec
except AttributeError:
- base_lib = '/usr/lib'
+ base_lib = "/usr/lib"
return HELPER_TOOL_TPL % base_lib
def handle(name, cfg, cloud, log, _args):
+ if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)):
+ log.debug(
+ "Skipping module named %s, logging of SSH host keys disabled", name
+ )
+ return
+
helper_path = _get_helper_tool_path(cloud.distro)
if not os.path.exists(helper_path):
- log.warning(("Unable to activate module %s,"
- " helper tool not found at %s"), name, helper_path)
+ log.warning(
+ "Unable to activate module %s, helper tool not found at %s",
+ name,
+ helper_path,
+ )
return
- fp_blacklist = util.get_cfg_option_list(cfg,
- "ssh_fp_console_blacklist", [])
- key_blacklist = util.get_cfg_option_list(cfg,
- "ssh_key_console_blacklist",
- ["ssh-dss"])
+ fp_blacklist = util.get_cfg_option_list(
+ cfg, "ssh_fp_console_blacklist", []
+ )
+ key_blacklist = util.get_cfg_option_list(
+ cfg, "ssh_key_console_blacklist", ["ssh-dss"]
+ )
try:
- cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)]
+ cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)]
(stdout, _stderr) = subp.subp(cmd)
- util.multi_log("%s\n" % (stdout.strip()),
- stderr=False, console=True)
+ util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True)
except Exception:
log.warning("Writing keys to the system console failed!")
raise
+
# vi: ts=4 expandtab
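
The reworked handler first honours ssh.emit_keys_to_console and then joins the
two blacklists into the helper-tool arguments. A rough standalone sketch with
example config values; only the ssh-dss default comes from the module, and the
simple truthiness test stands in for util.is_false().

cfg = {
    "ssh": {"emit_keys_to_console": True},
    "ssh_fp_console_blacklist": ["ssh-ed25519"],
    "ssh_key_console_blacklist": ["ssh-dss"],
}

if not cfg.get("ssh", {}).get("emit_keys_to_console", True):
    print("skipping: logging of SSH host keys disabled")
else:
    fp_blacklist = cfg.get("ssh_fp_console_blacklist", [])
    key_blacklist = cfg.get("ssh_key_console_blacklist", ["ssh-dss"])
    cmd = ["write-ssh-key-fingerprints", ",".join(fp_blacklist), ",".join(key_blacklist)]
    print(cmd)
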
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 299c4d01..03ebf411 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -60,10 +60,7 @@ from io import BytesIO
from configobj import ConfigObj
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, type_utils, util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
@@ -71,15 +68,15 @@ frequency = PER_INSTANCE
LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
LS_DEFAULT_FILE = "/etc/default/landscape-client"
-distros = ['ubuntu']
+distros = ["ubuntu"]
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
LSC_BUILTIN_CFG = {
- 'client': {
- 'log_level': "info",
- 'url': "https://landscape.canonical.com/message-system",
- 'ping_url': "http://landscape.canonical.com/ping",
- 'data_path': "/var/lib/landscape/client",
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
}
}
@@ -97,11 +94,13 @@ def handle(_name, cfg, cloud, log, _args):
raise RuntimeError(
"'landscape' key existed in config, but not a dictionary type,"
" is a {_type} instead".format(
- _type=type_utils.obj_name(ls_cloudcfg)))
+ _type=type_utils.obj_name(ls_cloudcfg)
+ )
+ )
if not ls_cloudcfg:
return
- cloud.distro.install_packages(('landscape-client',))
+ cloud.distro.install_packages(("landscape-client",))
merge_data = [
LSC_BUILTIN_CFG,
@@ -135,4 +134,5 @@ def merge_together(objs):
cfg.merge(ConfigObj(obj))
return cfg
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 4f8b7bf6..29f6a9b6 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -11,45 +11,55 @@
from textwrap import dedent
from cloudinit import util
-from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
-
frequency = PER_INSTANCE
-distros = ['all']
-schema = {
- 'id': 'cc_locale',
- 'name': 'Locale',
- 'title': 'Set system locale',
- 'description': dedent(
+distros = ["all"]
+meta: MetaSchema = {
+ "id": "cc_locale",
+ "name": "Locale",
+ "title": "Set system locale",
+ "description": dedent(
"""\
Configure the system locale and apply it system wide. By default use
the locale specified by the datasource."""
),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Set the locale to ar_AE
locale: ar_AE
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Set the locale to fr_CA in /etc/alternate_path/locale
locale: fr_CA
locale_configfile: /etc/alternate_path/locale
- """),
+ """
+ ),
],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'locale': {
- 'type': 'string',
- 'description': (
+ "frequency": frequency,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "locale": {
+ "type": "string",
+ "description": (
"The locale to set as the system's locale (e.g. ar_PS)"
),
},
- 'locale_configfile': {
- 'type': 'string',
- 'description': (
+ "locale_configfile": {
+ "type": "string",
+ "description": (
"The file in which to write the locale configuration (defaults"
" to the distro's default location)"
),
@@ -57,7 +67,7 @@ schema = {
},
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def handle(name, cfg, cloud, log, args):
@@ -67,8 +77,9 @@ def handle(name, cfg, cloud, log, args):
locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
if util.is_false(locale):
- log.debug("Skipping module named %s, disabled by config: %s",
- name, locale)
+ log.debug(
+ "Skipping module named %s, disabled by config: %s", name, locale
+ )
return
validate_cloudconfig_schema(cfg, schema)
@@ -77,4 +88,5 @@ def handle(name, cfg, cloud, log, args):
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 486037d9..13ddcbe9 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,12 +47,12 @@ lxd-bridge will be configured accordingly.
domain: <domain>
"""
-from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
import os
-distros = ['ubuntu']
+from cloudinit import log as logging
+from cloudinit import subp, util
+
+distros = ["ubuntu"]
LOG = logging.getLogger(__name__)
@@ -61,36 +61,42 @@ _DEFAULT_NETWORK_NAME = "lxdbr0"
def handle(name, cfg, cloud, log, args):
# Get config
- lxd_cfg = cfg.get('lxd')
+ lxd_cfg = cfg.get("lxd")
if not lxd_cfg:
- log.debug("Skipping module named %s, not present or disabled by cfg",
- name)
+ log.debug(
+ "Skipping module named %s, not present or disabled by cfg", name
+ )
return
if not isinstance(lxd_cfg, dict):
- log.warning("lxd config must be a dictionary. found a '%s'",
- type(lxd_cfg))
+ log.warning(
+ "lxd config must be a dictionary. found a '%s'", type(lxd_cfg)
+ )
return
# Grab the configuration
- init_cfg = lxd_cfg.get('init')
+ init_cfg = lxd_cfg.get("init")
if not isinstance(init_cfg, dict):
- log.warning("lxd/init config must be a dictionary. found a '%s'",
- type(init_cfg))
+ log.warning(
+ "lxd/init config must be a dictionary. found a '%s'",
+ type(init_cfg),
+ )
init_cfg = {}
- bridge_cfg = lxd_cfg.get('bridge', {})
+ bridge_cfg = lxd_cfg.get("bridge", {})
if not isinstance(bridge_cfg, dict):
- log.warning("lxd/bridge config must be a dictionary. found a '%s'",
- type(bridge_cfg))
+ log.warning(
+ "lxd/bridge config must be a dictionary. found a '%s'",
+ type(bridge_cfg),
+ )
bridge_cfg = {}
# Install the needed packages
packages = []
if not subp.which("lxd"):
- packages.append('lxd')
+ packages.append("lxd")
- if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'):
- packages.append('zfsutils-linux')
+ if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"):
+ packages.append("zfsutils-linux")
if len(packages):
try:
@@ -102,23 +108,30 @@ def handle(name, cfg, cloud, log, args):
# Set up lxd if init config is given
if init_cfg:
init_keys = (
- 'network_address', 'network_port', 'storage_backend',
- 'storage_create_device', 'storage_create_loop',
- 'storage_pool', 'trust_password')
- subp.subp(['lxd', 'waitready', '--timeout=300'])
- cmd = ['lxd', 'init', '--auto']
+ "network_address",
+ "network_port",
+ "storage_backend",
+ "storage_create_device",
+ "storage_create_loop",
+ "storage_pool",
+ "trust_password",
+ )
+ subp.subp(["lxd", "waitready", "--timeout=300"])
+ cmd = ["lxd", "init", "--auto"]
for k in init_keys:
if init_cfg.get(k):
- cmd.extend(["--%s=%s" %
- (k.replace('_', '-'), str(init_cfg[k]))])
+ cmd.extend(
+ ["--%s=%s" % (k.replace("_", "-"), str(init_cfg[k]))]
+ )
subp.subp(cmd)
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
if bridge_cfg:
net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
- if os.path.exists("/etc/default/lxd-bridge") \
- and subp.which(dconf_comm):
+ if os.path.exists("/etc/default/lxd-bridge") and subp.which(
+ dconf_comm
+ ):
# Bridge configured through packaging
debconf = bridge_to_debconf(bridge_cfg)
@@ -126,39 +139,47 @@ def handle(name, cfg, cloud, log, args):
# Update debconf database
try:
log.debug("Setting lxd debconf via " + dconf_comm)
- data = "\n".join(["set %s %s" % (k, v)
- for k, v in debconf.items()]) + "\n"
- subp.subp(['debconf-communicate'], data)
+ data = (
+ "\n".join(
+ ["set %s %s" % (k, v) for k, v in debconf.items()]
+ )
+ + "\n"
+ )
+ subp.subp(["debconf-communicate"], data)
except Exception:
- util.logexc(log, "Failed to run '%s' for lxd with" %
- dconf_comm)
+ util.logexc(
+ log, "Failed to run '%s' for lxd with" % dconf_comm
+ )
# Remove the existing configuration file (forces re-generation)
util.del_file("/etc/default/lxd-bridge")
# Run reconfigure
log.debug("Running dpkg-reconfigure for lxd")
- subp.subp(['dpkg-reconfigure', 'lxd',
- '--frontend=noninteractive'])
+ subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"])
else:
# Built-in LXD bridge support
cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
maybe_cleanup_default(
- net_name=net_name, did_init=bool(init_cfg),
- create=bool(cmd_create), attach=bool(cmd_attach))
+ net_name=net_name,
+ did_init=bool(init_cfg),
+ create=bool(cmd_create),
+ attach=bool(cmd_attach),
+ )
if cmd_create:
- log.debug("Creating lxd bridge: %s" %
- " ".join(cmd_create))
+ log.debug("Creating lxd bridge: %s" % " ".join(cmd_create))
_lxc(cmd_create)
if cmd_attach:
- log.debug("Setting up default lxd bridge: %s" %
- " ".join(cmd_attach))
+ log.debug(
+ "Setting up default lxd bridge: %s" % " ".join(cmd_attach)
+ )
_lxc(cmd_attach)
elif bridge_cfg:
raise RuntimeError(
- "Unable to configure lxd bridge without %s." + dconf_comm)
+ "Unable to configure lxd bridge without %s." + dconf_comm
+ )
def bridge_to_debconf(bridge_cfg):
@@ -180,33 +201,32 @@ def bridge_to_debconf(bridge_cfg):
if bridge_cfg.get("ipv4_address"):
debconf["lxd/bridge-ipv4"] = "true"
- debconf["lxd/bridge-ipv4-address"] = \
- bridge_cfg.get("ipv4_address")
- debconf["lxd/bridge-ipv4-netmask"] = \
- bridge_cfg.get("ipv4_netmask")
- debconf["lxd/bridge-ipv4-dhcp-first"] = \
- bridge_cfg.get("ipv4_dhcp_first")
- debconf["lxd/bridge-ipv4-dhcp-last"] = \
- bridge_cfg.get("ipv4_dhcp_last")
- debconf["lxd/bridge-ipv4-dhcp-leases"] = \
- bridge_cfg.get("ipv4_dhcp_leases")
- debconf["lxd/bridge-ipv4-nat"] = \
- bridge_cfg.get("ipv4_nat", "true")
+ debconf["lxd/bridge-ipv4-address"] = bridge_cfg.get("ipv4_address")
+ debconf["lxd/bridge-ipv4-netmask"] = bridge_cfg.get("ipv4_netmask")
+ debconf["lxd/bridge-ipv4-dhcp-first"] = bridge_cfg.get(
+ "ipv4_dhcp_first"
+ )
+ debconf["lxd/bridge-ipv4-dhcp-last"] = bridge_cfg.get(
+ "ipv4_dhcp_last"
+ )
+ debconf["lxd/bridge-ipv4-dhcp-leases"] = bridge_cfg.get(
+ "ipv4_dhcp_leases"
+ )
+ debconf["lxd/bridge-ipv4-nat"] = bridge_cfg.get("ipv4_nat", "true")
if bridge_cfg.get("ipv6_address"):
debconf["lxd/bridge-ipv6"] = "true"
- debconf["lxd/bridge-ipv6-address"] = \
- bridge_cfg.get("ipv6_address")
- debconf["lxd/bridge-ipv6-netmask"] = \
- bridge_cfg.get("ipv6_netmask")
- debconf["lxd/bridge-ipv6-nat"] = \
- bridge_cfg.get("ipv6_nat", "false")
+ debconf["lxd/bridge-ipv6-address"] = bridge_cfg.get("ipv6_address")
+ debconf["lxd/bridge-ipv6-netmask"] = bridge_cfg.get("ipv6_netmask")
+ debconf["lxd/bridge-ipv6-nat"] = bridge_cfg.get(
+ "ipv6_nat", "false"
+ )
if bridge_cfg.get("domain"):
debconf["lxd/bridge-domain"] = bridge_cfg.get("domain")
else:
- raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
+ raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
return debconf
@@ -217,37 +237,41 @@ def bridge_to_cmd(bridge_cfg):
bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
cmd_create = []
- cmd_attach = ["network", "attach-profile", bridge_name,
- "default", "eth0"]
+ cmd_attach = ["network", "attach-profile", bridge_name, "default", "eth0"]
if bridge_cfg.get("mode") == "existing":
return None, cmd_attach
if bridge_cfg.get("mode") != "new":
- raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
+ raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
cmd_create = ["network", "create", bridge_name]
if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
- cmd_create.append("ipv4.address=%s/%s" %
- (bridge_cfg.get("ipv4_address"),
- bridge_cfg.get("ipv4_netmask")))
+ cmd_create.append(
+ "ipv4.address=%s/%s"
+ % (bridge_cfg.get("ipv4_address"), bridge_cfg.get("ipv4_netmask"))
+ )
if bridge_cfg.get("ipv4_nat", "true") == "true":
cmd_create.append("ipv4.nat=true")
- if bridge_cfg.get("ipv4_dhcp_first") and \
- bridge_cfg.get("ipv4_dhcp_last"):
- dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"),
- bridge_cfg.get("ipv4_dhcp_last"))
+ if bridge_cfg.get("ipv4_dhcp_first") and bridge_cfg.get(
+ "ipv4_dhcp_last"
+ ):
+ dhcp_range = "%s-%s" % (
+ bridge_cfg.get("ipv4_dhcp_first"),
+ bridge_cfg.get("ipv4_dhcp_last"),
+ )
cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range)
else:
cmd_create.append("ipv4.address=none")
if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"):
- cmd_create.append("ipv6.address=%s/%s" %
- (bridge_cfg.get("ipv6_address"),
- bridge_cfg.get("ipv6_netmask")))
+ cmd_create.append(
+ "ipv6.address=%s/%s"
+ % (bridge_cfg.get("ipv6_address"), bridge_cfg.get("ipv6_netmask"))
+ )
if bridge_cfg.get("ipv6_nat", "false") == "true":
cmd_create.append("ipv6.nat=true")
@@ -262,14 +286,17 @@ def bridge_to_cmd(bridge_cfg):
def _lxc(cmd):
- env = {'LC_ALL': 'C',
- 'HOME': os.environ.get('HOME', '/root'),
- 'USER': os.environ.get('USER', 'root')}
- subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+ env = {
+ "LC_ALL": "C",
+ "HOME": os.environ.get("HOME", "/root"),
+ "USER": os.environ.get("USER", "root"),
+ }
+ subp.subp(["lxc"] + list(cmd) + ["--force-local"], update_env=env)
-def maybe_cleanup_default(net_name, did_init, create, attach,
- profile="default", nic_name="eth0"):
+def maybe_cleanup_default(
+ net_name, did_init, create, attach, profile="default", nic_name="eth0"
+):
"""Newer versions of lxc (3.0.1+) create a lxdbr0 network when
'lxd init --auto' is run. Older versions did not.
@@ -306,4 +333,5 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
raise e
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
+
# vi: ts=4 expandtab
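
bridge_to_cmd() turns the bridge config into an `lxc network` create/attach
command pair. A simplified standalone approximation that covers only the name,
mode and ipv4 address handling visible above; the input values are invented and
the real module handles several more keys (dhcp ranges, nat, ipv6, domain).

def bridge_to_cmd_sketch(bridge_cfg):
    name = bridge_cfg.get("name", "lxdbr0")
    attach = ["network", "attach-profile", name, "default", "eth0"]
    if bridge_cfg.get("mode") == "existing":
        return None, attach
    create = ["network", "create", name]
    if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
        create.append(
            "ipv4.address=%s/%s"
            % (bridge_cfg["ipv4_address"], bridge_cfg["ipv4_netmask"])
        )
    return create, attach

print(bridge_to_cmd_sketch({"mode": "new", "ipv4_address": "10.10.10.1", "ipv4_netmask": "24"}))
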
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 41ea4fc9..1b0158ec 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -56,18 +56,21 @@ import io
from configobj import ConfigObj
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
-SERVER_CFG = '/etc/mcollective/server.cfg'
+SERVER_CFG = "/etc/mcollective/server.cfg"
LOG = logging.getLogger(__name__)
-def configure(config, server_cfg=SERVER_CFG,
- pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
+def configure(
+ config,
+ server_cfg=SERVER_CFG,
+ pubcert_file=PUBCERT_FILE,
+ pricert_file=PRICERT_FILE,
+):
# Read server.cfg (if it exists) values from the
# original file in order to be able to mix the rest up.
try:
@@ -77,20 +80,20 @@ def configure(config, server_cfg=SERVER_CFG,
if e.errno != errno.ENOENT:
raise
else:
- LOG.debug("Did not find file %s (starting with an empty"
- " config)", server_cfg)
+ LOG.debug(
+ "Did not find file %s (starting with an empty config)",
+ server_cfg,
+ )
mcollective_config = ConfigObj()
for (cfg_name, cfg) in config.items():
- if cfg_name == 'public-cert':
+ if cfg_name == "public-cert":
util.write_file(pubcert_file, cfg, mode=0o644)
- mcollective_config[
- 'plugin.ssl_server_public'] = pubcert_file
- mcollective_config['securityprovider'] = 'ssl'
- elif cfg_name == 'private-cert':
+ mcollective_config["plugin.ssl_server_public"] = pubcert_file
+ mcollective_config["securityprovider"] = "ssl"
+ elif cfg_name == "private-cert":
util.write_file(pricert_file, cfg, mode=0o600)
- mcollective_config[
- 'plugin.ssl_server_private'] = pricert_file
- mcollective_config['securityprovider'] = 'ssl'
+ mcollective_config["plugin.ssl_server_private"] = pricert_file
+ mcollective_config["securityprovider"] = "ssl"
else:
if isinstance(cfg, str):
# Just set it in the 'main' section
@@ -126,21 +129,24 @@ def configure(config, server_cfg=SERVER_CFG,
def handle(name, cfg, cloud, log, _args):
# If there isn't a mcollective key in the configuration don't do anything
- if 'mcollective' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'mcollective' key in configuration"), name)
+ if "mcollective" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'mcollective' key in configuration",
+ name,
+ )
return
- mcollective_cfg = cfg['mcollective']
+ mcollective_cfg = cfg["mcollective"]
# Start by installing the mcollective package ...
cloud.distro.install_packages(("mcollective",))
# ... and then update the mcollective configuration
- if 'conf' in mcollective_cfg:
- configure(config=mcollective_cfg['conf'])
+ if "conf" in mcollective_cfg:
+ configure(config=mcollective_cfg["conf"])
# restart mcollective to handle updated config
- subp.subp(['service', 'mcollective', 'restart'], capture=False)
+ subp.subp(["service", "mcollective", "restart"], capture=False)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index 3995704a..4fafb4af 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -17,7 +17,7 @@ false`` in config.
**Internal name:** ``cc_migrator``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -29,16 +29,14 @@ false`` in config.
import os
import shutil
-from cloudinit import helpers
-from cloudinit import util
-
+from cloudinit import helpers, util
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
def _migrate_canon_sems(cloud):
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+ paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
am_adjusted = 0
for sem_path in paths:
if not sem_path or not os.path.exists(sem_path):
@@ -57,12 +55,12 @@ def _migrate_canon_sems(cloud):
def _migrate_legacy_sems(cloud, log):
legacy_adjust = {
- 'apt-update-upgrade': [
- 'apt-configure',
- 'package-update-upgrade-install',
+ "apt-update-upgrade": [
+ "apt-configure",
+ "package-update-upgrade-install",
],
}
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+ paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
for sem_path in paths:
if not sem_path or not os.path.exists(sem_path):
continue
@@ -78,8 +76,9 @@ def _migrate_legacy_sems(cloud, log):
util.del_file(os.path.join(sem_path, p))
(_name, freq) = os.path.splitext(p)
for m in migrate_to:
- log.debug("Migrating %s => %s with the same frequency",
- p, m)
+ log.debug(
+ "Migrating %s => %s with the same frequency", p, m
+ )
with sem_helper.lock(m, freq):
pass
@@ -90,8 +89,10 @@ def handle(name, cfg, cloud, log, _args):
log.debug("Skipping module named %s, migration disabled", name)
return
sems_moved = _migrate_canon_sems(cloud)
- log.debug("Migrated %s semaphore files to there canonicalized names",
- sems_moved)
+ log.debug(
+        "Migrated %s semaphore files to their canonicalized names", sems_moved
+ )
_migrate_legacy_sems(cloud, log)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index c22d1698..83eb5b1b 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -62,15 +62,12 @@ swap file is created.
maxsize: <size in bytes>
"""
-from string import whitespace
-
import logging
import os
import re
+from string import whitespace
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, type_utils, util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
@@ -105,25 +102,29 @@ def is_network_device(name):
def _get_nth_partition_for_device(device_path, partition_number):
- potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
- '-part%s' % (partition_number,)]
+ potential_suffixes = [
+ str(partition_number),
+ "p%s" % (partition_number,),
+ "-part%s" % (partition_number,),
+ ]
for suffix in potential_suffixes:
- potential_partition_device = '%s%s' % (device_path, suffix)
+ potential_partition_device = "%s%s" % (device_path, suffix)
if os.path.exists(potential_partition_device):
return potential_partition_device
return None
def _is_block_device(device_path, partition_path=None):
- device_name = os.path.realpath(device_path).split('/')[-1]
- sys_path = os.path.join('/sys/block/', device_name)
+ device_name = os.path.realpath(device_path).split("/")[-1]
+ sys_path = os.path.join("/sys/block/", device_name)
if partition_path is not None:
sys_path = os.path.join(
- sys_path, os.path.realpath(partition_path).split('/')[-1])
+ sys_path, os.path.realpath(partition_path).split("/")[-1]
+ )
return os.path.exists(sys_path)
-def sanitize_devname(startname, transformer, log):
+def sanitize_devname(startname, transformer, log, aliases=None):
log.debug("Attempting to determine the real name of %s", startname)
# workaround, allow user to specify 'ephemeral'
@@ -137,9 +138,14 @@ def sanitize_devname(startname, transformer, log):
return startname
device_path, partition_number = util.expand_dotted_devname(devname)
+ orig = device_path
+
+ if aliases:
+ device_path = aliases.get(device_path, device_path)
+ if orig != device_path:
+ log.debug("Mapped device alias %s to %s", orig, device_path)
if is_meta_device_name(device_path):
- orig = device_path
device_path = transformer(device_path)
if not device_path:
return None
@@ -154,8 +160,9 @@ def sanitize_devname(startname, transformer, log):
if partition_number is None:
partition_path = _get_nth_partition_for_device(device_path, 1)
else:
- partition_path = _get_nth_partition_for_device(device_path,
- partition_number)
+ partition_path = _get_nth_partition_for_device(
+ device_path, partition_number
+ )
if partition_path is None:
return None
@@ -169,12 +176,12 @@ def sanitize_devname(startname, transformer, log):
def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
# make a suggestion on the size of swap for this system.
if memsize is None:
- memsize = util.read_meminfo()['total']
+ memsize = util.read_meminfo()["total"]
GB = 2 ** 30
sugg_max = 8 * GB
- info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize}
+ info = {"avail": "na", "max_in": maxsize, "mem": memsize}
if fsys is None and maxsize is None:
# set max to 8GB default if no filesystem given
@@ -182,18 +189,18 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
elif fsys:
statvfs = os.statvfs(fsys)
avail = statvfs.f_frsize * statvfs.f_bfree
- info['avail'] = avail
+ info["avail"] = avail
if maxsize is None:
# set to 25% of filesystem space
maxsize = min(int(avail / 4), sugg_max)
- elif maxsize > ((avail * .9)):
+ elif maxsize > ((avail * 0.9)):
# set to 90% of available disk space
- maxsize = int(avail * .9)
+ maxsize = int(avail * 0.9)
elif maxsize is None:
maxsize = sugg_max
- info['max'] = maxsize
+ info["max"] = maxsize
formulas = [
# < 1G: swap = double memory
@@ -221,7 +228,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
if size is not None:
size = maxsize
- info['size'] = size
+ info["size"] = size
MB = 2 ** 20
pinfo = {}
@@ -231,9 +238,14 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
else:
pinfo[k] = v
- LOG.debug("suggest %s swap for %s memory with '%s'"
- " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'],
- pinfo['avail'], pinfo['max_in'], pinfo['max'])
+ LOG.debug(
+ "suggest %s swap for %s memory with '%s' disk given max=%s [max=%s]'",
+ pinfo["size"],
+ pinfo["mem"],
+ pinfo["avail"],
+ pinfo["max_in"],
+ pinfo["max"],
+ )
return size
@@ -243,14 +255,23 @@ def create_swapfile(fname: str, size: str) -> None:
errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s"
def create_swap(fname, size, method):
- LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
- fname, fstype, method)
+ LOG.debug(
+ "Creating swapfile in '%s' on fstype '%s' using '%s'",
+ fname,
+ fstype,
+ method,
+ )
if method == "fallocate":
- cmd = ['fallocate', '-l', '%sM' % size, fname]
+ cmd = ["fallocate", "-l", "%sM" % size, fname]
elif method == "dd":
- cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
- 'count=%s' % size]
+ cmd = [
+ "dd",
+ "if=/dev/zero",
+ "of=%s" % fname,
+ "bs=1M",
+ "count=%s" % size,
+ ]
try:
subp.subp(cmd, capture=True)
@@ -264,8 +285,9 @@ def create_swapfile(fname: str, size: str) -> None:
fstype = util.get_mount_info(swap_dir)[1]
- if (fstype == "xfs" and
- util.kernel_version() < (4, 18)) or fstype == "btrfs":
+ if (
+ fstype == "xfs" and util.kernel_version() < (4, 18)
+ ) or fstype == "btrfs":
create_swap(fname, size, "dd")
else:
try:
@@ -277,7 +299,7 @@ def create_swapfile(fname: str, size: str) -> None:
if os.path.exists(fname):
util.chmod(fname, 0o600)
try:
- subp.subp(['mkswap', fname])
+ subp.subp(["mkswap", fname])
except subp.ProcessExecutionError:
util.del_file(fname)
raise
@@ -292,37 +314,42 @@ def setup_swapfile(fname, size=None, maxsize=None):
swap_dir = os.path.dirname(fname)
if str(size).lower() == "auto":
try:
- memsize = util.read_meminfo()['total']
+ memsize = util.read_meminfo()["total"]
except IOError:
LOG.debug("Not creating swap: failed to read meminfo")
return
util.ensure_dir(swap_dir)
- size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
- memsize=memsize)
+ size = suggested_swapsize(
+ fsys=swap_dir, maxsize=maxsize, memsize=memsize
+ )
mibsize = str(int(size / (2 ** 20)))
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
- util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile,
- args=[fname, mibsize])
+ util.log_time(
+ LOG.debug,
+ msg="Setting up swap file",
+ func=create_swapfile,
+ args=[fname, mibsize],
+ )
return fname
def handle_swapcfg(swapcfg):
"""handle the swap config, calling setup_swap if necessary.
- return None or (filename, size)
+ return None or (filename, size)
"""
if not isinstance(swapcfg, dict):
LOG.warning("input for swap config was not a dict.")
return None
- fname = swapcfg.get('filename', '/swap.img')
- size = swapcfg.get('size', 0)
- maxsize = swapcfg.get('maxsize', None)
+ fname = swapcfg.get("filename", "/swap.img")
+ size = swapcfg.get("size", 0)
+ maxsize = swapcfg.get("maxsize", None)
if not (size and fname):
LOG.debug("no need to setup swap")
@@ -330,8 +357,10 @@ def handle_swapcfg(swapcfg):
if os.path.exists(fname):
if not os.path.exists("/proc/swaps"):
- LOG.debug("swap file %s exists, but no /proc/swaps exists, "
- "being safe", fname)
+ LOG.debug(
+ "swap file %s exists, but no /proc/swaps exists, being safe",
+ fname,
+ )
return fname
try:
for line in util.load_file("/proc/swaps").splitlines():
@@ -340,8 +369,9 @@ def handle_swapcfg(swapcfg):
return fname
LOG.debug("swap file %s exists, but not in /proc/swaps", fname)
except Exception:
- LOG.warning("swap file %s exists. Error reading /proc/swaps",
- fname)
+ LOG.warning(
+ "swap file %s exists. Error reading /proc/swaps", fname
+ )
return fname
try:
@@ -362,14 +392,18 @@ def handle(_name, cfg, cloud, log, _args):
def_mnt_opts = "defaults,nobootwait"
uses_systemd = cloud.distro.uses_systemd()
if uses_systemd:
- def_mnt_opts = "defaults,nofail,x-systemd.requires=cloud-init.service"
+ def_mnt_opts = (
+ "defaults,nofail,x-systemd.requires=cloud-init.service,_netdev"
+ )
defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
defvals = cfg.get("mount_default_fields", defvals)
# these are our default set of mounts
- defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
- ["swap", "none", "swap", "sw", "0", "0"]]
+ defmnts = [
+ ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
+ ["swap", "none", "swap", "sw", "0", "0"],
+ ]
cfgmnt = []
if "mounts" in cfg:
@@ -394,15 +428,22 @@ def handle(_name, cfg, cloud, log, _args):
fstab_devs[toks[0]] = line
fstab_lines.append(line)
+ device_aliases = cfg.get("device_aliases", {})
+
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
- log.warning("Mount option %s not a list, got a %s instead",
- (i + 1), type_utils.obj_name(cfgmnt[i]))
+ log.warning(
+ "Mount option %s not a list, got a %s instead",
+ (i + 1),
+ type_utils.obj_name(cfgmnt[i]),
+ )
continue
start = str(cfgmnt[i][0])
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ sanitized = sanitize_devname(
+ start, cloud.device_name_to_device, log, aliases=device_aliases
+ )
if sanitized != start:
log.debug("changed %s => %s" % (start, sanitized))
@@ -410,8 +451,11 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Ignoring nonexistent named mount %s", start)
continue
elif sanitized in fstab_devs:
- log.info("Device %s already defined in fstab: %s",
- sanitized, fstab_devs[sanitized])
+ log.info(
+ "Device %s already defined in fstab: %s",
+ sanitized,
+ fstab_devs[sanitized],
+ )
continue
cfgmnt[i][0] = sanitized
@@ -444,7 +488,9 @@ def handle(_name, cfg, cloud, log, _args):
# entry has the same device name
for defmnt in defmnts:
start = defmnt[0]
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ sanitized = sanitize_devname(
+ start, cloud.device_name_to_device, log, aliases=device_aliases
+ )
if sanitized != start:
log.debug("changed default device %s => %s" % (start, sanitized))
@@ -452,8 +498,11 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Ignoring nonexistent default named mount %s", start)
continue
elif sanitized in fstab_devs:
- log.debug("Device %s already defined in fstab: %s",
- sanitized, fstab_devs[sanitized])
+ log.debug(
+ "Device %s already defined in fstab: %s",
+ sanitized,
+ fstab_devs[sanitized],
+ )
continue
defmnt[0] = sanitized
@@ -465,8 +514,7 @@ def handle(_name, cfg, cloud, log, _args):
break
if cfgmnt_has:
- log.debug(("Not including %s, already"
- " previously included"), start)
+ log.debug("Not including %s, already previously included", start)
continue
cfgmnt.append(defmnt)
@@ -479,7 +527,7 @@ def handle(_name, cfg, cloud, log, _args):
else:
actlist.append(x)
- swapret = handle_swapcfg(cfg.get('swap', {}))
+ swapret = handle_swapcfg(cfg.get("swap", {}))
if swapret:
actlist.append([swapret, "none", "swap", "sw", "0", "0"])
@@ -498,10 +546,11 @@ def handle(_name, cfg, cloud, log, _args):
needswap = True
if line[1].startswith("/"):
dirs.append(line[1])
- cc_lines.append('\t'.join(line))
+ cc_lines.append("\t".join(line))
- mount_points = [v['mountpoint'] for k, v in util.mounts().items()
- if 'mountpoint' in v]
+ mount_points = [
+ v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v
+ ]
for d in dirs:
try:
util.ensure_dir(d)
@@ -516,11 +565,12 @@ def handle(_name, cfg, cloud, log, _args):
sadds = [WS.sub(" ", n) for n in cc_lines]
sdrops = [WS.sub(" ", n) for n in fstab_removed]
- sops = (["- " + drop for drop in sdrops if drop not in sadds] +
- ["+ " + add for add in sadds if add not in sdrops])
+ sops = ["- " + drop for drop in sdrops if drop not in sadds] + [
+ "+ " + add for add in sadds if add not in sdrops
+ ]
fstab_lines.extend(cc_lines)
- contents = "%s\n" % ('\n'.join(fstab_lines))
+ contents = "%s\n" % "\n".join(fstab_lines)
util.write_file(FSTAB_PATH, contents)
activate_cmds = []
@@ -540,7 +590,7 @@ def handle(_name, cfg, cloud, log, _args):
fmt = "Activating swap and mounts with: %s"
for cmd in activate_cmds:
- fmt = "Activate mounts: %s:" + ' '.join(cmd)
+ fmt = "Activate mounts: %s:" + " ".join(cmd)
try:
subp.subp(cmd)
log.debug(fmt, "PASS")
@@ -548,4 +598,5 @@ def handle(_name, cfg, cloud, log, _args):
log.warning(fmt, "FAIL")
util.logexc(log, fmt, "FAIL")
+
# vi: ts=4 expandtab
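
Note on the cc_mounts hunks above: an optional ``aliases`` mapping (taken from the new ``device_aliases`` config key) is now threaded into ``sanitize_devname`` so user-facing names are translated before the meta-device transformer runs. A minimal standalone sketch of just that lookup step, assuming a plain dict of aliases; the helper name and sample values are illustrative, not part of the module:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("mounts-sketch")

    def resolve_alias(device_path, aliases=None):
        """Map a user-facing name to a real device, as sanitize_devname now
        does before any further transformation or partition handling."""
        aliases = aliases or {}
        resolved = aliases.get(device_path, device_path)
        if resolved != device_path:
            log.debug("Mapped device alias %s to %s", device_path, resolved)
        return resolved

    # Hypothetical cloud-config fragment: device_aliases: {my_data: /dev/vdb}
    print(resolve_alias("my_data", {"my_data": "/dev/vdb"}))     # -> /dev/vdb
    print(resolve_alias("ephemeral0", {"my_data": "/dev/vdb"}))  # unchanged
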
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index e183993f..25bba764 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -11,110 +11,136 @@ import os
from textwrap import dedent
from cloudinit import log as logging
-from cloudinit import temp_utils
-from cloudinit import templater
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit import subp, temp_utils, templater, type_utils, util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-NTP_CONF = '/etc/ntp.conf'
+NTP_CONF = "/etc/ntp.conf"
NR_POOL_SERVERS = 4
-distros = ['alpine', 'centos', 'debian', 'fedora', 'opensuse', 'rhel',
- 'sles', 'ubuntu']
+distros = [
+ "almalinux",
+ "alpine",
+ "centos",
+ "cloudlinux",
+ "debian",
+ "eurolinux",
+ "fedora",
+ "miraclelinux",
+ "openEuler",
+ "opensuse",
+ "photon",
+ "rhel",
+ "rocky",
+ "sles",
+ "ubuntu",
+ "virtuozzo",
+]
NTP_CLIENT_CONFIG = {
- 'chrony': {
- 'check_exe': 'chronyd',
- 'confpath': '/etc/chrony.conf',
- 'packages': ['chrony'],
- 'service_name': 'chrony',
- 'template_name': 'chrony.conf.{distro}',
- 'template': None,
+ "chrony": {
+ "check_exe": "chronyd",
+ "confpath": "/etc/chrony.conf",
+ "packages": ["chrony"],
+ "service_name": "chrony",
+ "template_name": "chrony.conf.{distro}",
+ "template": None,
},
- 'ntp': {
- 'check_exe': 'ntpd',
- 'confpath': NTP_CONF,
- 'packages': ['ntp'],
- 'service_name': 'ntp',
- 'template_name': 'ntp.conf.{distro}',
- 'template': None,
+ "ntp": {
+ "check_exe": "ntpd",
+ "confpath": NTP_CONF,
+ "packages": ["ntp"],
+ "service_name": "ntp",
+ "template_name": "ntp.conf.{distro}",
+ "template": None,
},
- 'ntpdate': {
- 'check_exe': 'ntpdate',
- 'confpath': NTP_CONF,
- 'packages': ['ntpdate'],
- 'service_name': 'ntpdate',
- 'template_name': 'ntp.conf.{distro}',
- 'template': None,
+ "ntpdate": {
+ "check_exe": "ntpdate",
+ "confpath": NTP_CONF,
+ "packages": ["ntpdate"],
+ "service_name": "ntpdate",
+ "template_name": "ntp.conf.{distro}",
+ "template": None,
},
- 'systemd-timesyncd': {
- 'check_exe': '/lib/systemd/systemd-timesyncd',
- 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf',
- 'packages': [],
- 'service_name': 'systemd-timesyncd',
- 'template_name': 'timesyncd.conf',
- 'template': None,
+ "systemd-timesyncd": {
+ "check_exe": "/lib/systemd/systemd-timesyncd",
+ "confpath": "/etc/systemd/timesyncd.conf.d/cloud-init.conf",
+ "packages": [],
+ "service_name": "systemd-timesyncd",
+ "template_name": "timesyncd.conf",
+ "template": None,
},
}
# This is Distro-specific configuration overrides of the base config
DISTRO_CLIENT_CONFIG = {
- 'alpine': {
- 'chrony': {
- 'confpath': '/etc/chrony/chrony.conf',
- 'service_name': 'chronyd',
+ "alpine": {
+ "chrony": {
+ "confpath": "/etc/chrony/chrony.conf",
+ "service_name": "chronyd",
},
- 'ntp': {
- 'confpath': '/etc/ntp.conf',
- 'packages': [],
- 'service_name': 'ntpd',
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "packages": [],
+ "service_name": "ntpd",
},
},
- 'debian': {
- 'chrony': {
- 'confpath': '/etc/chrony/chrony.conf',
+ "debian": {
+ "chrony": {
+ "confpath": "/etc/chrony/chrony.conf",
},
},
- 'rhel': {
- 'ntp': {
- 'service_name': 'ntpd',
+ "opensuse": {
+ "chrony": {
+ "service_name": "chronyd",
},
- 'chrony': {
- 'service_name': 'chronyd',
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "service_name": "ntpd",
+ },
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
},
},
- 'opensuse': {
- 'chrony': {
- 'service_name': 'chronyd',
+ "photon": {
+ "chrony": {
+ "service_name": "chronyd",
+ },
+ "ntp": {"service_name": "ntpd", "confpath": "/etc/ntp.conf"},
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
+ "confpath": "/etc/systemd/timesyncd.conf",
},
- 'ntp': {
- 'confpath': '/etc/ntp.conf',
- 'service_name': 'ntpd',
+ },
+ "rhel": {
+ "ntp": {
+ "service_name": "ntpd",
},
- 'systemd-timesyncd': {
- 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ "chrony": {
+ "service_name": "chronyd",
},
},
- 'sles': {
- 'chrony': {
- 'service_name': 'chronyd',
+ "sles": {
+ "chrony": {
+ "service_name": "chronyd",
},
- 'ntp': {
- 'confpath': '/etc/ntp.conf',
- 'service_name': 'ntpd',
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "service_name": "ntpd",
},
- 'systemd-timesyncd': {
- 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
},
},
- 'ubuntu': {
- 'chrony': {
- 'confpath': '/etc/chrony/chrony.conf',
+ "ubuntu": {
+ "chrony": {
+ "confpath": "/etc/chrony/chrony.conf",
},
},
}
@@ -126,11 +152,12 @@ DISTRO_CLIENT_CONFIG = {
# configuration options before actually attempting to deploy with said
# configuration.
-schema = {
- 'id': 'cc_ntp',
- 'name': 'NTP',
- 'title': 'enable and configure ntp',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_ntp",
+ "name": "NTP",
+ "title": "enable and configure ntp",
+ "description": dedent(
+ """\
Handle ntp configuration. If ntp is not installed on the system and
ntp configuration is specified, ntp will be installed. If there is a
default ntp config file in the image or one is present in the
@@ -138,16 +165,20 @@ schema = {
appended to the filename before any changes are made. A list of ntp
pools and ntp servers can be provided under the ``ntp`` config key.
If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used
- in the format ``{0-3}.{distro}.pool.ntp.org``."""),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ in the format ``{0-3}.{distro}.pool.ntp.org``."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Override ntp with chrony configuration on Ubuntu
ntp:
enabled: true
ntp_client: chrony # Uses cloud-init default chrony configuration
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Provide a custom ntp client configuration
ntp:
enabled: true
@@ -174,122 +205,140 @@ schema = {
servers:
- ntp.server.local
- ntp.ubuntu.com
- - 192.168.23.2""")],
- 'frequency': PER_INSTANCE,
- 'type': 'object',
- 'properties': {
- 'ntp': {
- 'type': ['object', 'null'],
- 'properties': {
- 'pools': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
- 'format': 'hostname'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
+ - 192.168.23.2"""
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "ntp": {
+ "type": ["object", "null"],
+ "properties": {
+ "pools": {
+ "type": "array",
+ "items": {"type": "string", "format": "hostname"},
+ "uniqueItems": True,
+ "description": dedent(
+ """\
List of ntp pools. If both pools and servers are
empty, 4 default pool servers will be provided of
the format ``{0-3}.{distro}.pool.ntp.org``. NOTE:
for Alpine Linux when using the Busybox NTP client
this setting will be ignored due to the limited
- functionality of Busybox's ntpd.""")
+ functionality of Busybox's ntpd."""
+ ),
},
- 'servers': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
- 'format': 'hostname'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
+ "servers": {
+ "type": "array",
+ "items": {"type": "string", "format": "hostname"},
+ "uniqueItems": True,
+ "description": dedent(
+ """\
List of ntp servers. If both pools and servers are
empty, 4 default pool servers will be provided with
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ the format ``{0-3}.{distro}.pool.ntp.org``."""
+ ),
},
- 'ntp_client': {
- 'type': 'string',
- 'default': 'auto',
- 'description': dedent("""\
+ "ntp_client": {
+ "type": "string",
+ "default": "auto",
+ "description": dedent(
+ """\
Name of an NTP client to use to configure system NTP.
When unprovided or 'auto' the default client preferred
by the distribution will be used. The following
built-in client names can be used to override existing
configuration defaults: chrony, ntp, ntpdate,
- systemd-timesyncd."""),
+ systemd-timesyncd."""
+ ),
},
- 'enabled': {
- 'type': 'boolean',
- 'default': True,
- 'description': dedent("""\
+ "enabled": {
+ "type": "boolean",
+ "default": True,
+ "description": dedent(
+ """\
Attempt to enable ntp clients if set to True. If set
to False, ntp client will not be configured or
- installed"""),
+ installed"""
+ ),
},
- 'config': {
- 'description': dedent("""\
+ "config": {
+ "description": dedent(
+ """\
Configuration settings or overrides for the
- ``ntp_client`` specified."""),
- 'type': ['object'],
- 'properties': {
- 'confpath': {
- 'type': 'string',
- 'description': dedent("""\
+ ``ntp_client`` specified."""
+ ),
+ "type": ["object"],
+ "properties": {
+ "confpath": {
+ "type": "string",
+ "description": dedent(
+ """\
The path to where the ``ntp_client``
- configuration is written."""),
+ configuration is written."""
+ ),
},
- 'check_exe': {
- 'type': 'string',
- 'description': dedent("""\
+ "check_exe": {
+ "type": "string",
+ "description": dedent(
+ """\
The executable name for the ``ntp_client``.
For example, ntp service ``check_exe`` is
- 'ntpd' because it runs the ntpd binary."""),
+ 'ntpd' because it runs the ntpd binary."""
+ ),
},
- 'packages': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
+ "packages": {
+ "type": "array",
+ "items": {
+ "type": "string",
},
- 'uniqueItems': True,
- 'description': dedent("""\
+ "uniqueItems": True,
+ "description": dedent(
+ """\
List of packages needed to be installed for the
- selected ``ntp_client``."""),
+ selected ``ntp_client``."""
+ ),
},
- 'service_name': {
- 'type': 'string',
- 'description': dedent("""\
+ "service_name": {
+ "type": "string",
+ "description": dedent(
+ """\
The systemd or sysvinit service name used to
start and stop the ``ntp_client``
- service."""),
+ service."""
+ ),
},
- 'template': {
- 'type': 'string',
- 'description': dedent("""\
+ "template": {
+ "type": "string",
+ "description": dedent(
+ """\
Inline template allowing users to define their
own ``ntp_client`` configuration template.
The value must start with '## template:jinja'
to enable use of templating support.
- """),
+ """
+ ),
},
},
# Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override
# of builtin client values.
- 'required': [],
- 'minProperties': 1, # If we have config, define something
- 'additionalProperties': False
+ "minProperties": 1, # If we have config, define something
+ "additionalProperties": False,
},
},
- 'required': [],
- 'additionalProperties': False
+ "additionalProperties": False,
}
- }
+ },
}
-REQUIRED_NTP_CONFIG_KEYS = frozenset([
- 'check_exe', 'confpath', 'packages', 'service_name'])
+REQUIRED_NTP_CONFIG_KEYS = frozenset(
+ ["check_exe", "confpath", "packages", "service_name"]
+)
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def distro_ntp_client_configs(distro):
@@ -319,21 +368,23 @@ def select_ntp_client(ntp_client, distro):
distro_cfg = distro_ntp_client_configs(distro.name)
# user specified client, return its config
- if ntp_client and ntp_client != 'auto':
- LOG.debug('Selected NTP client "%s" via user-data configuration',
- ntp_client)
+ if ntp_client and ntp_client != "auto":
+ LOG.debug(
+ 'Selected NTP client "%s" via user-data configuration', ntp_client
+ )
return distro_cfg.get(ntp_client, {})
# default to auto if unset in distro
- distro_ntp_client = distro.get_option('ntp_client', 'auto')
+ distro_ntp_client = distro.get_option("ntp_client", "auto")
clientcfg = {}
if distro_ntp_client == "auto":
for client in distro.preferred_ntp_clients:
cfg = distro_cfg.get(client)
- if subp.which(cfg.get('check_exe')):
- LOG.debug('Selected NTP client "%s", already installed',
- client)
+ if subp.which(cfg.get("check_exe")):
+ LOG.debug(
+ 'Selected NTP client "%s", already installed', client
+ )
clientcfg = cfg
break
@@ -341,11 +392,14 @@ def select_ntp_client(ntp_client, distro):
client = distro.preferred_ntp_clients[0]
LOG.debug(
'Selected distro preferred NTP client "%s", not yet installed',
- client)
+ client,
+ )
clientcfg = distro_cfg.get(client)
else:
- LOG.debug('Selected NTP client "%s" via distro system config',
- distro_ntp_client)
+ LOG.debug(
+ 'Selected NTP client "%s" via distro system config',
+ distro_ntp_client,
+ )
clientcfg = distro_cfg.get(distro_ntp_client, {})
return clientcfg
@@ -363,7 +417,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
if subp.which(check_exe):
return
if packages is None:
- packages = ['ntp']
+ packages = ["ntp"]
install_func(packages)
@@ -388,25 +442,34 @@ def generate_server_names(distro):
names = []
pool_distro = distro
- if distro == 'sles':
+ if distro == "sles":
# For legal reasons x.pool.sles.ntp.org does not exist,
# use the opensuse pool
- pool_distro = 'opensuse'
- elif distro == 'alpine':
+ pool_distro = "opensuse"
+ elif distro == "alpine" or distro == "eurolinux":
# Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist
- # so use general x.pool.ntp.org instead.
- pool_distro = ''
+ # so use general x.pool.ntp.org instead. The same applies to EuroLinux
+ pool_distro = ""
for x in range(0, NR_POOL_SERVERS):
- names.append(".".join(
- [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n]))
+ names.append(
+ ".".join(
+ [n for n in [str(x)] + [pool_distro] + ["pool.ntp.org"] if n]
+ )
+ )
return names
-def write_ntp_config_template(distro_name, service_name=None, servers=None,
- pools=None, path=None, template_fn=None,
- template=None):
+def write_ntp_config_template(
+ distro_name,
+ service_name=None,
+ servers=None,
+ pools=None,
+ path=None,
+ template_fn=None,
+ template=None,
+):
"""Render a ntp client configuration for the specified client.
@param distro_name: string. The distro class name.
@@ -429,27 +492,30 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None,
if not pools:
pools = []
- if (len(servers) == 0 and distro_name == 'alpine' and
- service_name == 'ntpd'):
+ if (
+ len(servers) == 0
+ and distro_name == "alpine"
+ and service_name == "ntpd"
+ ):
# Alpine's Busybox ntpd only understands "servers" configuration
# and not "pool" configuration.
servers = generate_server_names(distro_name)
- LOG.debug(
- 'Adding distro default ntp servers: %s', ','.join(servers))
+ LOG.debug("Adding distro default ntp servers: %s", ",".join(servers))
elif len(servers) == 0 and len(pools) == 0:
pools = generate_server_names(distro_name)
LOG.debug(
- 'Adding distro default ntp pool servers: %s', ','.join(pools))
+ "Adding distro default ntp pool servers: %s", ",".join(pools)
+ )
if not path:
- raise ValueError('Invalid value for path parameter')
+ raise ValueError("Invalid value for path parameter")
if not template_fn and not template:
- raise ValueError('Not template_fn or template provided')
+ raise ValueError("Not template_fn or template provided")
- params = {'servers': servers, 'pools': pools}
+ params = {"servers": servers, "pools": pools}
if template:
- tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl")
template_fn = tfile[1] # filepath is second item in tuple
util.write_file(template_fn, content=template)
@@ -459,21 +525,6 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None,
util.del_file(template_fn)
-def reload_ntp(service, systemd=False):
- """Restart or reload an ntp system service.
-
- @param service: A string specifying the name of the service to be affected.
- @param systemd: A boolean indicating if the distro uses systemd, defaults
- to False.
- @returns: A tuple of stdout, stderr results from executing the action.
- """
- if systemd:
- cmd = ['systemctl', 'reload-or-restart', service]
- else:
- cmd = ['service', service, 'restart']
- subp.subp(cmd, capture=True)
-
-
def supplemental_schema_validation(ntp_config):
"""Validate user-provided ntp:config option values.
@@ -487,50 +538,62 @@ def supplemental_schema_validation(ntp_config):
errors = []
missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys()))
if missing:
- keys = ', '.join(sorted(missing))
+ keys = ", ".join(sorted(missing))
errors.append(
- 'Missing required ntp:config keys: {keys}'.format(keys=keys))
- elif not any([ntp_config.get('template'),
- ntp_config.get('template_name')]):
+ "Missing required ntp:config keys: {keys}".format(keys=keys)
+ )
+ elif not any(
+ [ntp_config.get("template"), ntp_config.get("template_name")]
+ ):
errors.append(
- 'Either ntp:config:template or ntp:config:template_name values'
- ' are required')
+ "Either ntp:config:template or ntp:config:template_name values"
+ " are required"
+ )
for key, value in sorted(ntp_config.items()):
- keypath = 'ntp:config:' + key
- if key == 'confpath':
+ keypath = "ntp:config:" + key
+ if key == "confpath":
if not all([value, isinstance(value, str)]):
errors.append(
- 'Expected a config file path {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
- elif key == 'packages':
+ "Expected a config file path {keypath}."
+ " Found ({value})".format(keypath=keypath, value=value)
+ )
+ elif key == "packages":
if not isinstance(value, list):
errors.append(
- 'Expected a list of required package names for {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
- elif key in ('template', 'template_name'):
+ "Expected a list of required package names for {keypath}."
+ " Found ({value})".format(keypath=keypath, value=value)
+ )
+ elif key in ("template", "template_name"):
if value is None: # Either template or template_name can be none
continue
if not isinstance(value, str):
errors.append(
- 'Expected a string type for {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
+ "Expected a string type for {keypath}."
+ " Found ({value})".format(keypath=keypath, value=value)
+ )
elif not isinstance(value, str):
errors.append(
- 'Expected a string type for {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
+ "Expected a string type for {keypath}. Found ({value})".format(
+ keypath=keypath, value=value
+ )
+ )
if errors:
- raise ValueError(r'Invalid ntp configuration:\n{errors}'.format(
- errors='\n'.join(errors)))
+ raise ValueError(
+ r"Invalid ntp configuration:\n{errors}".format(
+ errors="\n".join(errors)
+ )
+ )
def handle(name, cfg, cloud, log, _args):
"""Enable and configure ntp."""
- if 'ntp' not in cfg:
+ if "ntp" not in cfg:
LOG.debug(
- "Skipping module named %s, not present or disabled by cfg", name)
+ "Skipping module named %s, not present or disabled by cfg", name
+ )
return
- ntp_cfg = cfg['ntp']
+ ntp_cfg = cfg["ntp"]
if ntp_cfg is None:
ntp_cfg = {} # Allow empty config which will install the package
@@ -538,55 +601,64 @@ def handle(name, cfg, cloud, log, _args):
if not isinstance(ntp_cfg, (dict)):
raise RuntimeError(
"'ntp' key existed in config, but not a dictionary type,"
- " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
+ " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))
+ )
validate_cloudconfig_schema(cfg, schema)
# Allow users to explicitly enable/disable
- enabled = ntp_cfg.get('enabled', True)
+ enabled = ntp_cfg.get("enabled", True)
if util.is_false(enabled):
LOG.debug("Skipping module named %s, disabled by cfg", name)
return
# Select which client is going to be used and get the configuration
- ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
- cloud.distro)
-
+ ntp_client_config = select_ntp_client(
+ ntp_cfg.get("ntp_client"), cloud.distro
+ )
# Allow user ntp config to override distro configurations
ntp_client_config = util.mergemanydict(
- [ntp_client_config, ntp_cfg.get('config', {})], reverse=True)
+ [ntp_client_config, ntp_cfg.get("config", {})], reverse=True
+ )
supplemental_schema_validation(ntp_client_config)
- rename_ntp_conf(confpath=ntp_client_config.get('confpath'))
+ rename_ntp_conf(confpath=ntp_client_config.get("confpath"))
template_fn = None
- if not ntp_client_config.get('template'):
- template_name = (
- ntp_client_config.get('template_name').replace('{distro}',
- cloud.distro.name))
+ if not ntp_client_config.get("template"):
+ template_name = ntp_client_config.get("template_name").replace(
+ "{distro}", cloud.distro.name
+ )
template_fn = cloud.get_template_filename(template_name)
if not template_fn:
- msg = ('No template found, not rendering %s' %
- ntp_client_config.get('template_name'))
+ msg = (
+ "No template found, not rendering %s"
+ % ntp_client_config.get("template_name")
+ )
raise RuntimeError(msg)
- write_ntp_config_template(cloud.distro.name,
- service_name=ntp_client_config.get(
- 'service_name'),
- servers=ntp_cfg.get('servers', []),
- pools=ntp_cfg.get('pools', []),
- path=ntp_client_config.get('confpath'),
- template_fn=template_fn,
- template=ntp_client_config.get('template'))
-
- install_ntp_client(cloud.distro.install_packages,
- packages=ntp_client_config['packages'],
- check_exe=ntp_client_config['check_exe'])
+ write_ntp_config_template(
+ cloud.distro.name,
+ service_name=ntp_client_config.get("service_name"),
+ servers=ntp_cfg.get("servers", []),
+ pools=ntp_cfg.get("pools", []),
+ path=ntp_client_config.get("confpath"),
+ template_fn=template_fn,
+ template=ntp_client_config.get("template"),
+ )
+
+ install_ntp_client(
+ cloud.distro.install_packages,
+ packages=ntp_client_config["packages"],
+ check_exe=ntp_client_config["check_exe"],
+ )
try:
- reload_ntp(ntp_client_config['service_name'],
- systemd=cloud.distro.uses_systemd())
+ cloud.distro.manage_service(
+ "reload", ntp_client_config.get("service_name")
+ )
except subp.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
+
# vi: ts=4 expandtab
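
Note on the cc_ntp hunks above: ``generate_server_names`` derives the default pool hostnames, mapping sles to the opensuse pool and letting alpine/eurolinux fall back to the generic pool. A self-contained sketch of that naming rule, assuming the module's ``NR_POOL_SERVERS = 4`` default (the function name here is illustrative):

    NR_POOL_SERVERS = 4  # same default as cc_ntp

    def default_pool_names(distro):
        """Build the {0-3}.{distro}.pool.ntp.org names used when no servers
        or pools are configured."""
        if distro == "sles":
            pool_distro = "opensuse"   # x.pool.sles.ntp.org does not exist
        elif distro in ("alpine", "eurolinux"):
            pool_distro = ""           # use the generic x.pool.ntp.org
        else:
            pool_distro = distro
        return [
            ".".join(p for p in (str(i), pool_distro, "pool.ntp.org") if p)
            for i in range(NR_POOL_SERVERS)
        ]

    print(default_pool_names("ubuntu"))  # ['0.ubuntu.pool.ntp.org', ...]
    print(default_pool_names("alpine"))  # ['0.pool.ntp.org', ...]
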
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 036baf85..14cdfab8 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -43,8 +43,7 @@ import os
import time
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
REBOOT_FILE = "/var/run/reboot-required"
REBOOT_CMD = ["/sbin/reboot"]
@@ -68,17 +67,19 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
log.debug("Rebooted, but still running after %s seconds", int(elapsed))
# If we got here, not good
elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
+ raise RuntimeError(
+ "Reboot did not happen after %s seconds!" % (int(elapsed))
+ )
def handle(_name, cfg, cloud, log, _args):
# Handle the old style + new config names
- update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update')
- upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade')
- reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required',
- 'package_reboot_if_required')
- pkglist = util.get_cfg_option_list(cfg, 'packages', [])
+ update = _multi_cfg_bool_get(cfg, "apt_update", "package_update")
+ upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade")
+ reboot_if_required = _multi_cfg_bool_get(
+ cfg, "apt_reboot_if_required", "package_reboot_if_required"
+ )
+ pkglist = util.get_cfg_option_list(cfg, "packages", [])
errors = []
if update or len(pkglist) or upgrade:
@@ -109,8 +110,9 @@ def handle(_name, cfg, cloud, log, _args):
reboot_fn_exists = os.path.isfile(REBOOT_FILE)
if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
try:
- log.warning("Rebooting after upgrade or install per "
- "%s", REBOOT_FILE)
+ log.warning(
+ "Rebooting after upgrade or install per %s", REBOOT_FILE
+ )
# Flush the above warning + anything else out...
logging.flushLoggers(log)
_fire_reboot(log)
@@ -119,8 +121,10 @@ def handle(_name, cfg, cloud, log, _args):
errors.append(e)
if len(errors):
- log.warning("%s failed with exceptions, re-raising the last one",
- len(errors))
+ log.warning(
+ "%s failed with exceptions, re-raising the last one", len(errors)
+ )
raise errors[-1]
+
# vi: ts=4 expandtab
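
Note on the cc_package_update_upgrade_install hunk above: ``_fire_reboot`` requests a reboot and then sleeps in growing steps; if execution ever continues past its attempts, the reboot clearly never happened and a RuntimeError is raised. A simplified sketch of that wait loop using the defaults from the signature shown, with the reboot command itself omitted:

    import time

    def wait_for_reboot(wait_attempts=6, initial_sleep=1, backoff=2):
        """Sleep 1, 2, 4, ... seconds after asking for a reboot; if we are
        still running afterwards, raise as the module does."""
        start = time.time()
        nap = initial_sleep
        for _ in range(wait_attempts):
            time.sleep(nap)
            nap = nap * backoff
        elapsed = time.time() - start
        raise RuntimeError(
            "Reboot did not happen after %s seconds!" % int(elapsed)
        )
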
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 733c3910..a0e1da78 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -24,6 +24,19 @@ keys to post. Available keys are:
- ``hostname``
- ``fdqn``
+Data is sent as ``x-www-form-urlencoded`` arguments.
+
+**Example HTTP POST**::
+
+ POST / HTTP/1.1
+ Content-Length: 1337
+ User-Agent: Cloud-Init/21.4
+ Accept-Encoding: gzip, deflate
+ Accept: */*
+ Content-Type: application/x-www-form-urlencoded
+
+ pub_key_dsa=dsa_contents&pub_key_rsa=rsa_contents&pub_key_ecdsa=ecdsa_contents&pub_key_ed25519=ed25519_contents&instance_id=i-87018aed&hostname=myhost&fqdn=myhost.internal
+
**Internal name:** ``cc_phone_home``
**Module frequency:** per instance
@@ -41,22 +54,19 @@ keys to post. Available keys are:
tries: 10
"""
-from cloudinit import templater
-from cloudinit import url_helper
-from cloudinit import util
-
+from cloudinit import templater, url_helper, util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
POST_LIST_ALL = [
- 'pub_key_dsa',
- 'pub_key_rsa',
- 'pub_key_ecdsa',
- 'pub_key_ed25519',
- 'instance_id',
- 'hostname',
- 'fqdn'
+ "pub_key_dsa",
+ "pub_key_rsa",
+ "pub_key_ecdsa",
+ "pub_key_ed25519",
+ "instance_id",
+ "hostname",
+ "fqdn",
]
@@ -74,48 +84,58 @@ def handle(name, cfg, cloud, log, args):
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
else:
- if 'phone_home' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'phone_home' configuration found"), name)
+ if "phone_home" not in cfg:
+ log.debug(
+ "Skipping module named %s, "
+ "no 'phone_home' configuration found",
+ name,
+ )
return
- ph_cfg = cfg['phone_home']
-
- if 'url' not in ph_cfg:
- log.warning(("Skipping module named %s, "
- "no 'url' found in 'phone_home' configuration"), name)
+ ph_cfg = cfg["phone_home"]
+
+ if "url" not in ph_cfg:
+ log.warning(
+ "Skipping module named %s, "
+ "no 'url' found in 'phone_home' configuration",
+ name,
+ )
return
- url = ph_cfg['url']
- post_list = ph_cfg.get('post', 'all')
- tries = ph_cfg.get('tries')
+ url = ph_cfg["url"]
+ post_list = ph_cfg.get("post", "all")
+ tries = ph_cfg.get("tries")
try:
tries = int(tries)
except Exception:
tries = 10
- util.logexc(log, "Configuration entry 'tries' is not an integer, "
- "using %s instead", tries)
+ util.logexc(
+ log,
+ "Configuration entry 'tries' is not an integer, using %s instead",
+ tries,
+ )
if post_list == "all":
post_list = POST_LIST_ALL
all_keys = {}
- all_keys['instance_id'] = cloud.get_instance_id()
- all_keys['hostname'] = cloud.get_hostname()
- all_keys['fqdn'] = cloud.get_hostname(fqdn=True)
+ all_keys["instance_id"] = cloud.get_instance_id()
+ all_keys["hostname"] = cloud.get_hostname()
+ all_keys["fqdn"] = cloud.get_hostname(fqdn=True)
pubkeys = {
- 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
- 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
- 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
- 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub',
+ "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub",
+ "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub",
+ "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub",
+ "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub",
}
for (n, path) in pubkeys.items():
try:
all_keys[n] = util.load_file(path)
except Exception:
- util.logexc(log, "%s: failed to open, can not phone home that "
- "data!", path)
+ util.logexc(
+ log, "%s: failed to open, can not phone home that data!", path
+ )
submit_keys = {}
for k in post_list:
@@ -123,28 +143,37 @@ def handle(name, cfg, cloud, log, args):
submit_keys[k] = all_keys[k]
else:
submit_keys[k] = None
- log.warning(("Requested key %s from 'post'"
- " configuration list not available"), k)
+ log.warning(
+ "Requested key %s from 'post'"
+ " configuration list not available",
+ k,
+ )
# Get them read to be posted
real_submit_keys = {}
for (k, v) in submit_keys.items():
if v is None:
- real_submit_keys[k] = 'N/A'
+ real_submit_keys[k] = "N/A"
else:
real_submit_keys[k] = str(v)
# Incase the url is parameterized
url_params = {
- 'INSTANCE_ID': all_keys['instance_id'],
+ "INSTANCE_ID": all_keys["instance_id"],
}
url = templater.render_string(url, url_params)
try:
url_helper.read_file_or_url(
- url, data=real_submit_keys, retries=tries, sec_between=3,
- ssl_details=util.fetch_ssl_details(cloud.paths))
+ url,
+ data=real_submit_keys,
+ retries=tries,
+ sec_between=3,
+ ssl_details=util.fetch_ssl_details(cloud.paths),
+ )
except Exception:
- util.logexc(log, "Failed to post phone home data to %s in %s tries",
- url, tries)
+ util.logexc(
+ log, "Failed to post phone home data to %s in %s tries", url, tries
+ )
+
# vi: ts=4 expandtab
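
Note on the cc_phone_home docstring above: it now documents the exact ``x-www-form-urlencoded`` POST the module makes. A minimal standard-library sketch of an equivalent request; the endpoint is a placeholder and the values mirror the docstring example rather than anything a real instance would send:

    from urllib.parse import urlencode
    from urllib.request import Request, urlopen

    payload = {
        "instance_id": "i-87018aed",   # example values from the docstring
        "hostname": "myhost",
        "fqdn": "myhost.internal",
        "pub_key_rsa": "N/A",          # unreadable keys are posted as 'N/A'
    }
    req = Request(
        "http://phone-home.example/",  # placeholder URL, not a real endpoint
        data=urlencode(payload).encode("utf-8"),
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    # urlopen(req)  # left commented out since the endpoint above is fictional
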
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 5780a7e9..d4eb68c0 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -58,9 +58,8 @@ import re
import subprocess
import time
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
frequency = PER_INSTANCE
@@ -75,9 +74,9 @@ def givecmdline(pid):
# PID COMM ARGS
# 1 init /bin/init --
if util.is_FreeBSD():
- (output, _err) = subp.subp(['procstat', '-c', str(pid)])
+ (output, _err) = subp.subp(["procstat", "-c", str(pid)])
line = output.splitlines()[1]
- m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
+ m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line)
return m.group(2)
else:
return util.load_file("/proc/%s/cmdline" % pid)
@@ -106,8 +105,9 @@ def check_condition(cond, log=None):
return False
else:
if log:
- log.warning(pre + "unexpected exit %s. " % ret +
- "do not apply change.")
+ log.warning(
+ pre + "unexpected exit %s. " % ret + "do not apply change."
+ )
return False
except Exception as e:
if log:
@@ -138,16 +138,24 @@ def handle(_name, cfg, cloud, log, _args):
devnull_fp = open(os.devnull, "w")
- log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
+ log.debug("After pid %s ends, will execute: %s" % (mypid, " ".join(args)))
- util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
- condition, execmd, [args, devnull_fp])
+ util.fork_cb(
+ run_after_pid_gone,
+ mypid,
+ cmdline,
+ timeout,
+ log,
+ condition,
+ execmd,
+ [args, devnull_fp],
+ )
def load_power_state(cfg, distro):
# returns a tuple of shutdown_command, timeout
# shutdown_command is None if no config found
- pstate = cfg.get('power_state')
+ pstate = cfg.get("power_state")
if pstate is None:
return (None, None, None)
@@ -155,22 +163,25 @@ def load_power_state(cfg, distro):
if not isinstance(pstate, dict):
raise TypeError("power_state is not a dict.")
- modes_ok = ['halt', 'poweroff', 'reboot']
+ modes_ok = ["halt", "poweroff", "reboot"]
mode = pstate.get("mode")
if mode not in distro.shutdown_options_map:
raise TypeError(
- "power_state[mode] required, must be one of: %s. found: '%s'." %
- (','.join(modes_ok), mode))
+ "power_state[mode] required, must be one of: %s. found: '%s'."
+ % (",".join(modes_ok), mode)
+ )
- args = distro.shutdown_command(mode=mode,
- delay=pstate.get("delay", "now"),
- message=pstate.get("message"))
+ args = distro.shutdown_command(
+ mode=mode,
+ delay=pstate.get("delay", "now"),
+ message=pstate.get("message"),
+ )
try:
- timeout = float(pstate.get('timeout', 30.0))
+ timeout = float(pstate.get("timeout", 30.0))
except ValueError as e:
raise ValueError(
- "failed to convert timeout '%s' to float." % pstate['timeout']
+ "failed to convert timeout '%s' to float." % pstate["timeout"]
) from e
condition = pstate.get("condition", True)
@@ -186,8 +197,12 @@ def doexit(sysexit):
def execmd(exe_args, output=None, data_in=None):
ret = 1
try:
- proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
- stdout=output, stderr=subprocess.STDOUT)
+ proc = subprocess.Popen(
+ exe_args,
+ stdin=subprocess.PIPE,
+ stdout=output,
+ stderr=subprocess.STDOUT,
+ )
proc.communicate(data_in)
ret = proc.returncode
except Exception:
@@ -230,7 +245,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
except Exception as e:
fatal("Unexpected Exception: %s" % e)
- time.sleep(.25)
+ time.sleep(0.25)
if not msg:
fatal("Unexpected error in run_after_pid_gone")
@@ -246,4 +261,5 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
func(*args)
+
# vi: ts=4 expandtab
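
Note on the cc_power_state_change hunks above: ``check_condition`` keeps the contract that a condition command exiting 0 means "apply the power state change", exit 1 means "skip cleanly", and any other status is unexpected and also skips. A standalone sketch of that contract with the subprocess handling simplified:

    import subprocess

    def check_condition(cond):
        """Exit 0 -> apply, exit 1 -> skip cleanly, anything else -> warn and
        skip; boolean conditions pass straight through."""
        if isinstance(cond, bool):
            return cond
        ret = subprocess.call(cond, shell=isinstance(cond, str))
        if ret == 0:
            return True
        if ret == 1:
            return False
        print("unexpected exit %s, do not apply change" % ret)
        return False

    print(check_condition(True))       # True
    print(check_condition(["true"]))   # exits 0 -> True
    print(check_condition(["false"]))  # exits 1 -> False
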
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index bc981cf4..f51f49bc 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -29,22 +29,44 @@ The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
ones that work with puppet 3.x and with distributions that ship modified
puppet 4.x that uses the old paths.
+Agent packages from the puppetlabs repositories can be installed by setting
+``install_type`` to ``aio``. Based on this setting, the default config/SSL/CSR
+paths will be adjusted accordingly. To maintain backwards compatibility this
+setting defaults to ``packages`` which will install puppet from the distro
+packages.
+
+If installing ``aio`` packages, ``collection`` can also be set to one of
+``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly
+counterparts) in order to install specific release streams. By default, the
+puppetlabs repository will be purged after installation finishes; set
+``cleanup`` to ``false`` to prevent this. AIO packages are installed through a
+shell script which is downloaded on the machine and then executed; the path to
+this script can be overridden using the ``aio_install_url`` key.
+
Puppet configuration can be specified under the ``conf`` key. The
configuration is specified as a dictionary containing high-level ``<section>``
keys and lists of ``<key>=<value>`` pairs within each section. Each section
name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As
-such, section names should be one of: ``main``, ``master``, ``agent`` or
+such, section names should be one of: ``main``, ``server``, ``agent`` or
``user`` and keys should be valid puppet configuration options. The
``certname`` key supports string substitutions for ``%i`` and ``%f``,
corresponding to the instance id and fqdn of the machine respectively.
If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
-instead will be used as the puppermaster certificate. It should be specified
+instead will be used as the puppetserver certificate. It should be specified
in pem format as a multi-line string (using the ``|`` yaml notation).
-Additionally it's possible to create a csr_attributes.yaml for
-CSR attributes and certificate extension requests.
+Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR
+attributes and certificate extension requests.
See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
+By default, the puppet service will be automatically enabled after installation
+and set to automatically start on boot. To override this in favor of manual
+puppet execution set ``start_service`` to ``false``.
+
+A single manual run can be triggered by setting ``exec`` to ``true``, and
+additional arguments can be passed to ``puppet agent`` via the ``exec_args``
+key (by default the agent will execute with the ``--test`` flag).
+
**Internal name:** ``cc_puppet``
**Module frequency:** per instance
@@ -56,13 +78,20 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
puppet:
install: <true/false>
version: <version>
+ collection: <aio collection>
+ install_type: <packages/aio>
+ aio_install_url: 'https://git.io/JBhoQ'
+ cleanup: <true/false>
conf_file: '/etc/puppet/puppet.conf'
ssl_dir: '/var/lib/puppet/ssl'
csr_attributes_path: '/etc/puppet/csr_attributes.yaml'
package_name: 'puppet'
+ exec: <true/false>
+ exec_args: ['--test']
+ start_service: <true/false>
conf:
agent:
- server: "puppetmaster.example.org"
+ server: "puppetserver.example.org"
certname: "%i.%f"
ca_cert: |
-------BEGIN CERTIFICATE-------
@@ -79,23 +108,20 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
import os
import socket
-import yaml
from io import StringIO
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
+import yaml
-PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
-PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
-PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml'
-PUPPET_PACKAGE_NAME = 'puppet'
+from cloudinit import helpers, subp, temp_utils, url_helper, util
+AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
+PUPPET_AGENT_DEFAULT_ARGS = ["--test"]
-class PuppetConstants(object):
- def __init__(self, puppet_conf_file, puppet_ssl_dir,
- csr_attributes_path, log):
+class PuppetConstants(object):
+ def __init__(
+ self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log
+ ):
self.conf_path = puppet_conf_file
self.ssl_dir = puppet_ssl_dir
self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs")
@@ -105,51 +131,140 @@ class PuppetConstants(object):
def _autostart_puppet(log):
# Set puppet to automatically start
- if os.path.exists('/etc/default/puppet'):
- subp.subp(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)
- elif os.path.exists('/bin/systemctl'):
- subp.subp(['/bin/systemctl', 'enable', 'puppet.service'],
- capture=False)
- elif os.path.exists('/sbin/chkconfig'):
- subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+ if os.path.exists("/etc/default/puppet"):
+ subp.subp(
+ [
+ "sed",
+ "-i",
+ "-e",
+ "s/^START=.*/START=yes/",
+ "/etc/default/puppet",
+ ],
+ capture=False,
+ )
+ elif os.path.exists("/bin/systemctl"):
+ subp.subp(
+ ["/bin/systemctl", "enable", "puppet.service"], capture=False
+ )
+ elif os.path.exists("/sbin/chkconfig"):
+ subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False)
else:
- log.warning(("Sorry we do not know how to enable"
- " puppet services on this system"))
+ log.warning(
+ "Sorry we do not know how to enable puppet services on this system"
+ )
+
+
+def get_config_value(puppet_bin, setting):
+ """Get the config value for a given setting using `puppet config print`
+ :param puppet_bin: path to puppet binary
+ :param setting: setting to query
+ """
+ out, _ = subp.subp([puppet_bin, "config", "print", setting])
+ return out.rstrip()
+
+
+def install_puppet_aio(
+ url=AIO_INSTALL_URL, version=None, collection=None, cleanup=True
+):
+ """Install puppet-agent from the puppetlabs repositories using the one-shot
+ shell script
+
+ :param url: URL from where to download the install script
+ :param version: version to install, blank defaults to latest
+ :param collection: collection to install, blank defaults to latest
+ :param cleanup: whether to purge the puppetlabs repo after installation
+ """
+ args = []
+ if version is not None:
+ args = ["-v", version]
+ if collection is not None:
+ args += ["-c", collection]
+
+ # Purge puppetlabs repos after installation
+ if cleanup:
+ args += ["--cleanup"]
+ content = url_helper.readurl(url=url, retries=5).contents
+
+ # Use tmpdir over tmpfile to avoid 'text file busy' on execute
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmpf = os.path.join(tmpd, "puppet-install")
+ util.write_file(tmpf, content, mode=0o700)
+ return subp.subp([tmpf] + args, capture=False)
def handle(name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything
- if 'puppet' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'puppet' configuration found"), name)
+ if "puppet" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'puppet' configuration found", name
+ )
return
- puppet_cfg = cfg['puppet']
+ puppet_cfg = cfg["puppet"]
# Start by installing the puppet package if necessary...
- install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
- version = util.get_cfg_option_str(puppet_cfg, 'version', None)
+ install = util.get_cfg_option_bool(puppet_cfg, "install", True)
+ version = util.get_cfg_option_str(puppet_cfg, "version", None)
+ collection = util.get_cfg_option_str(puppet_cfg, "collection", None)
+ install_type = util.get_cfg_option_str(
+ puppet_cfg, "install_type", "packages"
+ )
+ cleanup = util.get_cfg_option_bool(puppet_cfg, "cleanup", True)
+ run = util.get_cfg_option_bool(puppet_cfg, "exec", default=False)
+ start_puppetd = util.get_cfg_option_bool(
+ puppet_cfg, "start_service", default=True
+ )
+ aio_install_url = util.get_cfg_option_str(
+ puppet_cfg, "aio_install_url", default=AIO_INSTALL_URL
+ )
+
+ # AIO and distro packages use different paths
+ if install_type == "aio":
+ puppet_user = "root"
+ puppet_bin = "/opt/puppetlabs/bin/puppet"
+ puppet_package = "puppet-agent"
+ else: # default to 'packages'
+ puppet_user = "puppet"
+ puppet_bin = "puppet"
+ puppet_package = "puppet"
+
package_name = util.get_cfg_option_str(
- puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME)
+ puppet_cfg, "package_name", puppet_package
+ )
+ if not install and version:
+ log.warning(
+ "Puppet install set to false but version supplied, doing nothing."
+ )
+ elif install:
+ log.debug(
+ "Attempting to install puppet %s from %s",
+ version if version else "latest",
+ install_type,
+ )
+
+ if install_type == "packages":
+ cloud.distro.install_packages((package_name, version))
+ elif install_type == "aio":
+ install_puppet_aio(aio_install_url, version, collection, cleanup)
+ else:
+ log.warning("Unknown puppet install type '%s'", install_type)
+ run = False
+
conf_file = util.get_cfg_option_str(
- puppet_cfg, 'conf_file', PUPPET_CONF_PATH)
- ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR)
+ puppet_cfg, "conf_file", get_config_value(puppet_bin, "config")
+ )
+ ssl_dir = util.get_cfg_option_str(
+ puppet_cfg, "ssl_dir", get_config_value(puppet_bin, "ssldir")
+ )
csr_attributes_path = util.get_cfg_option_str(
- puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH)
+ puppet_cfg,
+ "csr_attributes_path",
+ get_config_value(puppet_bin, "csr_attributes"),
+ )
p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
- if not install and version:
- log.warning(("Puppet install set false but version supplied,"
- " doing nothing."))
- elif install:
- log.debug(("Attempting to install puppet %s,"),
- version if version else 'latest')
-
- cloud.distro.install_packages((package_name, version))
# ... and then update the puppet configuration
- if 'conf' in puppet_cfg:
+ if "conf" in puppet_cfg:
# Add all sections from the conf object to puppet.conf
contents = util.load_file(p_constants.conf_path)
# Create object for reading puppet.conf values
@@ -158,29 +273,31 @@ def handle(name, cfg, cloud, log, _args):
# mix the rest up. First clean them up
# (TODO(harlowja) is this really needed??)
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
- cleaned_contents = '\n'.join(cleaned_lines)
+ cleaned_contents = "\n".join(cleaned_lines)
# Move to puppet_config.read_file when dropping py2.7
puppet_config.read_file(
- StringIO(cleaned_contents),
- source=p_constants.conf_path)
- for (cfg_name, cfg) in puppet_cfg['conf'].items():
+ StringIO(cleaned_contents), source=p_constants.conf_path
+ )
+ for (cfg_name, cfg) in puppet_cfg["conf"].items():
# Cert configuration is a special case
- # Dump the puppet master ca certificate in the correct place
- if cfg_name == 'ca_cert':
+ # Dump the puppetserver ca certificate in the correct place
+ if cfg_name == "ca_cert":
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
util.ensure_dir(p_constants.ssl_dir, 0o771)
- util.chownbyname(p_constants.ssl_dir, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_dir, puppet_user, "root")
util.ensure_dir(p_constants.ssl_cert_dir)
- util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root')
+ util.chownbyname(p_constants.ssl_cert_dir, puppet_user, "root")
util.write_file(p_constants.ssl_cert_path, cfg)
- util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root')
+ util.chownbyname(
+ p_constants.ssl_cert_path, puppet_user, "root"
+ )
else:
# Iterate through the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
for (o, v) in cfg.items():
- if o == 'certname':
+ if o == "certname":
# Expand %f as the fqdn
# TODO(harlowja) should this use the cloud fqdn??
v = v.replace("%f", socket.getfqdn())
@@ -191,19 +308,46 @@ def handle(name, cfg, cloud, log, _args):
puppet_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename
# the previous puppet.conf and create our new one
- util.rename(p_constants.conf_path, "%s.old"
- % (p_constants.conf_path))
+ util.rename(
+ p_constants.conf_path, "%s.old" % (p_constants.conf_path)
+ )
util.write_file(p_constants.conf_path, puppet_config.stringify())
- if 'csr_attributes' in puppet_cfg:
- util.write_file(p_constants.csr_attributes_path,
- yaml.dump(puppet_cfg['csr_attributes'],
- default_flow_style=False))
+ if "csr_attributes" in puppet_cfg:
+ util.write_file(
+ p_constants.csr_attributes_path,
+ yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False),
+ )
# Set it up so it autostarts
- _autostart_puppet(log)
+ if start_puppetd:
+ _autostart_puppet(log)
+
+ # Run the agent if needed
+ if run:
+ log.debug("Running puppet-agent")
+ cmd = [puppet_bin, "agent"]
+ if "exec_args" in puppet_cfg:
+ cmd_args = puppet_cfg["exec_args"]
+ if isinstance(cmd_args, (list, tuple)):
+ cmd.extend(cmd_args)
+ elif isinstance(cmd_args, str):
+ cmd.extend(cmd_args.split())
+ else:
+ log.warning(
+ "Unknown type %s provided for puppet"
+ " 'exec_args' expected list, tuple,"
+ " or string",
+ type(cmd_args),
+ )
+ cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
+ else:
+ cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
+ subp.subp(cmd, capture=False)
+
+ if start_puppetd:
+ # Start puppetd
+ subp.subp(["service", "puppet", "start"], capture=False)
- # Start puppetd
- subp.subp(['service', 'puppet', 'start'], capture=False)
# vi: ts=4 expandtab
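For reference, a minimal cloud-config sketch that exercises the new puppet keys introduced above (install_type, collection, aio_install_url, cleanup, exec, exec_args, start_service); the version, collection, and URL values are illustrative placeholders, not defaults taken from this patch:

    #cloud-config
    puppet:
      install: true
      install_type: aio              # 'packages' (default) or 'aio'
      version: '7.12.0'              # placeholder version string
      collection: puppet7            # placeholder AIO collection name
      aio_install_url: 'https://example.com/install.sh'  # placeholder; AIO_INSTALL_URL is used when omitted
      cleanup: true                  # passed through to the AIO installer helper
      start_service: true            # enable and start the puppet service after configuration
      exec: true                     # run 'puppet agent' once after writing puppet.conf
      exec_args: ['--test']          # placeholder args; PUPPET_AGENT_DEFAULT_ARGS is used when omitted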
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index 146758ad..87be5348 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -28,26 +28,24 @@ This module handles
**Internal name:** ``cc_refresh_rmc_and_interface``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** RHEL
"""
+import errno
+
from cloudinit import log as logging
+from cloudinit import netinfo, subp, util
from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-from cloudinit import subp
-from cloudinit import netinfo
-
-import errno
frequency = PER_ALWAYS
LOG = logging.getLogger(__name__)
# Ensure that /opt/rsct/bin has been added to standard PATH of the
 # distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl.
-RMCCTRL = 'rmcctrl'
+RMCCTRL = "rmcctrl"
def handle(name, _cfg, _cloud, _log, _args):
@@ -56,10 +54,11 @@ def handle(name, _cfg, _cloud, _log, _args):
return
LOG.debug(
- 'Making the IPv6 up explicitly. '
- 'Ensuring IPv6 interface is not being handled by NetworkManager '
- 'and it is restarted to re-establish the communication with '
- 'the hypervisor')
+ "Making the IPv6 up explicitly. "
+ "Ensuring IPv6 interface is not being handled by NetworkManager "
+ "and it is restarted to re-establish the communication with "
+ "the hypervisor"
+ )
ifaces = find_ipv6_ifaces()
@@ -80,7 +79,7 @@ def find_ipv6_ifaces():
ifaces = []
for iface, data in info.items():
if iface == "lo":
- LOG.debug('Skipping localhost interface')
+ LOG.debug("Skipping localhost interface")
if len(data.get("ipv4", [])) != 0:
# skip this interface, as it has ipv4 addrs
continue
@@ -92,16 +91,16 @@ def refresh_ipv6(interface):
# IPv6 interface is explicitly brought up, subsequent to which the
# RMC services are restarted to re-establish the communication with
# the hypervisor.
- subp.subp(['ip', 'link', 'set', interface, 'down'])
- subp.subp(['ip', 'link', 'set', interface, 'up'])
+ subp.subp(["ip", "link", "set", interface, "down"])
+ subp.subp(["ip", "link", "set", interface, "up"])
def sysconfig_path(iface):
- return '/etc/sysconfig/network-scripts/ifcfg-' + iface
+ return "/etc/sysconfig/network-scripts/ifcfg-" + iface
def restart_network_manager():
- subp.subp(['systemctl', 'restart', 'NetworkManager'])
+ subp.subp(["systemctl", "restart", "NetworkManager"])
def disable_ipv6(iface_file):
@@ -113,12 +112,11 @@ def disable_ipv6(iface_file):
contents = util.load_file(iface_file)
except IOError as e:
if e.errno == errno.ENOENT:
- LOG.debug("IPv6 interface file %s does not exist\n",
- iface_file)
+ LOG.debug("IPv6 interface file %s does not exist\n", iface_file)
else:
raise e
- if 'IPV6INIT' not in contents:
+ if "IPV6INIT" not in contents:
LOG.debug("Interface file %s did not have IPV6INIT", iface_file)
return
@@ -135,11 +133,12 @@ def disable_ipv6(iface_file):
def search(contents):
# Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file.
- return(
- contents.startswith("IPV6ADDR") or
- contents.startswith("IPADDR6") or
- contents.startswith("IPV6INIT") or
- contents.startswith("NM_CONTROLLED"))
+ return (
+ contents.startswith("IPV6ADDR")
+ or contents.startswith("IPADDR6")
+ or contents.startswith("IPV6INIT")
+ or contents.startswith("NM_CONTROLLED")
+ )
def refresh_rmc():
@@ -152,8 +151,8 @@ def refresh_rmc():
# until the subsystem and all resource managers are stopped.
# -s : start Resource Monitoring & Control subsystem.
try:
- subp.subp([RMCCTRL, '-z'])
- subp.subp([RMCCTRL, '-s'])
+ subp.subp([RMCCTRL, "-z"])
+ subp.subp([RMCCTRL, "-s"])
except Exception:
- util.logexc(LOG, 'Failed to refresh the RMC subsystem.')
+ util.logexc(LOG, "Failed to refresh the RMC subsystem.")
raise
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
index 1cd72774..3b929903 100644
--- a/cloudinit/config/cc_reset_rmc.py
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -39,9 +39,8 @@ Prerequisite of using this module is to install RSCT packages.
import os
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-from cloudinit import subp
frequency = PER_INSTANCE
@@ -49,34 +48,34 @@ frequency = PER_INSTANCE
# The symlink for RMCCTRL and RECFGCT are
# /usr/sbin/rsct/bin/rmcctrl and
# /usr/sbin/rsct/install/bin/recfgct respectively.
-RSCT_PATH = '/opt/rsct/install/bin'
-RMCCTRL = 'rmcctrl'
-RECFGCT = 'recfgct'
+RSCT_PATH = "/opt/rsct/install/bin"
+RMCCTRL = "rmcctrl"
+RECFGCT = "recfgct"
LOG = logging.getLogger(__name__)
-NODE_ID_FILE = '/etc/ct_node_id'
+NODE_ID_FILE = "/etc/ct_node_id"
def handle(name, _cfg, cloud, _log, _args):
# Ensuring node id has to be generated only once during first boot
- if cloud.datasource.platform_type == 'none':
- LOG.debug('Skipping creation of new ct_node_id node')
+ if cloud.datasource.platform_type == "none":
+ LOG.debug("Skipping creation of new ct_node_id node")
return
if not os.path.isdir(RSCT_PATH):
LOG.debug("module disabled, RSCT_PATH not present")
return
- orig_path = os.environ.get('PATH')
+ orig_path = os.environ.get("PATH")
try:
add_path(orig_path)
reset_rmc()
finally:
if orig_path:
- os.environ['PATH'] = orig_path
+ os.environ["PATH"] = orig_path
else:
- del os.environ['PATH']
+ del os.environ["PATH"]
def reconfigure_rsct_subsystems():
@@ -88,17 +87,17 @@ def reconfigure_rsct_subsystems():
LOG.debug(out.strip())
return out
except subp.ProcessExecutionError:
- util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.')
+ util.logexc(LOG, "Failed to reconfigure the RSCT subsystems.")
raise
def get_node_id():
try:
fp = util.load_file(NODE_ID_FILE)
- node_id = fp.split('\n')[0]
+ node_id = fp.split("\n")[0]
return node_id
except Exception:
- util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE)
+ util.logexc(LOG, "Failed to get node ID from file %s." % NODE_ID_FILE)
raise
@@ -107,25 +106,25 @@ def add_path(orig_path):
    # So that cloud init automatically finds and
    # runs RECFGCT to create new node_id.
suff = ":" + orig_path if orig_path else ""
- os.environ['PATH'] = RSCT_PATH + suff
- return os.environ['PATH']
+ os.environ["PATH"] = RSCT_PATH + suff
+ return os.environ["PATH"]
def rmcctrl():
# Stop the RMC subsystem and all resource managers so that we can make
# some changes to it
try:
- return subp.subp([RMCCTRL, '-z'])
+ return subp.subp([RMCCTRL, "-z"])
except Exception:
- util.logexc(LOG, 'Failed to stop the RMC subsystem.')
+ util.logexc(LOG, "Failed to stop the RMC subsystem.")
raise
def reset_rmc():
- LOG.debug('Attempting to reset RMC.')
+ LOG.debug("Attempting to reset RMC.")
node_id_before = get_node_id()
- LOG.debug('Node ID at beginning of module: %s', node_id_before)
+ LOG.debug("Node ID at beginning of module: %s", node_id_before)
# Stop the RMC subsystem and all resource managers so that we can make
# some changes to it
@@ -133,11 +132,11 @@ def reset_rmc():
reconfigure_rsct_subsystems()
node_id_after = get_node_id()
- LOG.debug('Node ID at end of module: %s', node_id_after)
+ LOG.debug("Node ID at end of module: %s", node_id_after)
# Check if new node ID is generated or not
# by comparing old and new node ID
if node_id_after == node_id_before:
- msg = 'New node ID did not get generated.'
+ msg = "New node ID did not get generated."
LOG.error(msg)
raise Exception(msg)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 9afbb847..19b923a8 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -13,22 +13,25 @@ import os
import stat
from textwrap import dedent
+from cloudinit import subp, util
from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util
NOBLOCK = "noblock"
frequency = PER_ALWAYS
-distros = ['all']
-
-schema = {
- 'id': 'cc_resizefs',
- 'name': 'Resizefs',
- 'title': 'Resize filesystem',
- 'description': dedent("""\
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_resizefs",
+ "name": "Resizefs",
+ "title": "Resize filesystem",
+ "description": dedent(
+ """\
 Resize a filesystem to use all available space on the partition. This
module is useful along with ``cc_growpart`` and will ensure that if the
root partition has been resized the root filesystem will be resized
@@ -37,22 +40,29 @@ schema = {
running. Optionally, the resize operation can be performed in the
background while cloud-init continues running modules. This can be
 enabled by setting ``resize_rootfs`` to ``noblock``. This module can be
- disabled altogether by setting ``resize_rootfs`` to ``false``."""),
- 'distros': distros,
- 'examples': [
- 'resize_rootfs: false # disable root filesystem resize operation'],
- 'frequency': PER_ALWAYS,
- 'type': 'object',
- 'properties': {
- 'resize_rootfs': {
- 'enum': [True, False, NOBLOCK],
- 'description': dedent("""\
- Whether to resize the root partition. Default: 'true'""")
+ disabled altogether by setting ``resize_rootfs`` to ``false``."""
+ ),
+ "distros": distros,
+ "examples": [
+ "resize_rootfs: false # disable root filesystem resize operation"
+ ],
+ "frequency": PER_ALWAYS,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "resize_rootfs": {
+ "enum": [True, False, NOBLOCK],
+ "description": dedent(
+ """\
+ Whether to resize the root partition. Default: 'true'"""
+ ),
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def _resize_btrfs(mount_point, devpth):
@@ -61,28 +71,38 @@ def _resize_btrfs(mount_point, devpth):
# Use a subvolume that is not ro to trick the resize operation to do the
# "right" thing. The use of ".snapshot" is specific to "snapper" a generic
# solution would be walk the subvolumes and find a rw mounted subvolume.
- if (not util.mount_is_read_write(mount_point) and
- os.path.isdir("%s/.snapshots" % mount_point)):
- return ('btrfs', 'filesystem', 'resize', 'max',
- '%s/.snapshots' % mount_point)
+ if not util.mount_is_read_write(mount_point) and os.path.isdir(
+ "%s/.snapshots" % mount_point
+ ):
+ return (
+ "btrfs",
+ "filesystem",
+ "resize",
+ "max",
+ "%s/.snapshots" % mount_point,
+ )
else:
- return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
+ return ("btrfs", "filesystem", "resize", "max", mount_point)
def _resize_ext(mount_point, devpth):
- return ('resize2fs', devpth)
+ return ("resize2fs", devpth)
def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', mount_point)
+ return ("xfs_growfs", mount_point)
def _resize_ufs(mount_point, devpth):
- return ('growfs', '-y', mount_point)
+ return ("growfs", "-y", mount_point)
def _resize_zfs(mount_point, devpth):
- return ('zpool', 'online', '-e', mount_point, devpth)
+ return ("zpool", "online", "-e", mount_point, devpth)
+
+
+def _resize_hammer2(mount_point, devpth):
+ return ("hammer2", "growfs", mount_point)
def _can_skip_resize_ufs(mount_point, devpth):
@@ -94,7 +114,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
# growfs exits with 1 for almost all cases up to this one.
# This means we can't just use rcs=[0, 1] as subp parameter:
try:
- subp.subp(['growfs', '-N', devpth])
+ subp.subp(["growfs", "-N", devpth])
except subp.ProcessExecutionError as e:
if e.stderr.startswith(skip_start) and skip_contain in e.stderr:
# This FS is already at the desired size
@@ -108,16 +128,15 @@ def _can_skip_resize_ufs(mount_point, devpth):
# for multiple filesystem types if possible, e.g. one command for
# ext2, ext3 and ext4.
RESIZE_FS_PREFIXES_CMDS = [
- ('btrfs', _resize_btrfs),
- ('ext', _resize_ext),
- ('xfs', _resize_xfs),
- ('ufs', _resize_ufs),
- ('zfs', _resize_zfs),
+ ("btrfs", _resize_btrfs),
+ ("ext", _resize_ext),
+ ("xfs", _resize_xfs),
+ ("ufs", _resize_ufs),
+ ("zfs", _resize_zfs),
+ ("hammer2", _resize_hammer2),
]
-RESIZE_FS_PRECHECK_CMDS = {
- 'ufs': _can_skip_resize_ufs
-}
+RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs}
def can_skip_resize(fs_type, resize_what, devpth):
@@ -141,52 +160,66 @@ def maybe_get_writable_device_path(devpath, info, log):
container = util.is_container()
# Ensure the path is a block device.
- if (devpath == "/dev/root" and not os.path.exists(devpath) and
- not container):
+ if (
+ devpath == "/dev/root"
+ and not os.path.exists(devpath)
+ and not container
+ ):
devpath = util.rootdev_from_cmdline(util.get_cmdline())
if devpath is None:
log.warning("Unable to find device '/dev/root'")
return None
log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
- if devpath == 'overlayroot':
+ if devpath == "overlayroot":
log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
return None
# FreeBSD zpool can also just use gpt/<label>
# with that in mind we can not do an os.stat on "gpt/whatever"
# therefore return the devpath already here.
- if devpath.startswith('gpt/'):
- log.debug('We have a gpt label - just go ahead')
+ if devpath.startswith("gpt/"):
+ log.debug("We have a gpt label - just go ahead")
return devpath
# Alternatively, our device could simply be a name as returned by gpart,
# such as da0p3
- if not devpath.startswith('/dev/') and not os.path.exists(devpath):
- fulldevpath = '/dev/' + devpath.lstrip('/')
- log.debug("'%s' doesn't appear to be a valid device path. Trying '%s'",
- devpath, fulldevpath)
+ if not devpath.startswith("/dev/") and not os.path.exists(devpath):
+ fulldevpath = "/dev/" + devpath.lstrip("/")
+ log.debug(
+ "'%s' doesn't appear to be a valid device path. Trying '%s'",
+ devpath,
+ fulldevpath,
+ )
devpath = fulldevpath
try:
statret = os.stat(devpath)
except OSError as exc:
if container and exc.errno == errno.ENOENT:
- log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpath, info)
+ log.debug(
+ "Device '%s' did not exist in container. cannot resize: %s",
+ devpath,
+ info,
+ )
elif exc.errno == errno.ENOENT:
- log.warning("Device '%s' did not exist. cannot resize: %s",
- devpath, info)
+ log.warning(
+ "Device '%s' did not exist. cannot resize: %s", devpath, info
+ )
else:
raise exc
return None
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if container:
- log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpath, info))
+ log.debug(
+ "device '%s' not a block device in container."
+ " cannot resize: %s" % (devpath, info)
+ )
else:
- log.warning("device '%s' not a block device. cannot resize: %s" %
- (devpath, info))
+ log.warning(
+ "device '%s' not a block device. cannot resize: %s"
+ % (devpath, info)
+ )
return None
return devpath # The writable block devpath
@@ -215,8 +248,8 @@ def handle(name, cfg, _cloud, log, args):
# we will have to get the zpool name out of this
# and set the resize_what variable to the zpool
# so the _resize_zfs function gets the right attribute.
- if fs_type == 'zfs':
- zpool = devpth.split('/')[0]
+ if fs_type == "zfs":
+ zpool = devpth.split("/")[0]
devpth = util.get_device_info_from_zpool(zpool)
if not devpth:
return # could not find device from zpool
@@ -231,8 +264,9 @@ def handle(name, cfg, _cloud, log, args):
resizer = None
if can_skip_resize(fs_type, resize_what, devpth):
- log.debug("Skip resize filesystem type %s for %s",
- fs_type, resize_what)
+ log.debug(
+ "Skip resize filesystem type %s for %s", fs_type, resize_what
+ )
return
fstype_lc = fs_type.lower()
@@ -242,29 +276,42 @@ def handle(name, cfg, _cloud, log, args):
break
if not resizer:
- log.warning("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_what)
+ log.warning(
+ "Not resizing unknown filesystem type %s for %s",
+ fs_type,
+ resize_what,
+ )
return
resize_cmd = resizer(resize_what, devpth)
- log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
- ' '.join(resize_cmd))
+ log.debug(
+ "Resizing %s (%s) using %s", resize_what, fs_type, " ".join(resize_cmd)
+ )
if resize_root == NOBLOCK:
# Fork to a child that will run
# the resize command
util.fork_cb(
- util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
- func=do_resize, args=(resize_cmd, log))
+ util.log_time,
+ logfunc=log.debug,
+ msg="backgrounded Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
else:
- util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
-
- action = 'Resized'
+ util.log_time(
+ logfunc=log.debug,
+ msg="Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
+
+ action = "Resized"
if resize_root == NOBLOCK:
- action = 'Resizing (via forking)'
- log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
- resize_root)
+ action = "Resizing (via forking)"
+ log.debug(
+ "%s root filesystem (type=%s, val=%s)", action, fs_type, resize_root
+ )
def do_resize(resize_cmd, log):
@@ -276,4 +323,5 @@ def do_resize(resize_cmd, log):
# TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
+
# vi: ts=4 expandtab
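As a usage note for the module above, ``resize_rootfs`` accepts true, false, or "noblock"; a minimal cloud-config sketch for the non-blocking mode, which forks the resize command so boot is not delayed:

    #cloud-config
    resize_rootfs: noblock   # resize in the background; use false to disable entirely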
diff --git a/cloudinit/config/cc_resizefs_vyos.py b/cloudinit/config/cc_resizefs_vyos.py
index f8eb84fe..b54f2e27 100644
--- a/cloudinit/config/cc_resizefs_vyos.py
+++ b/cloudinit/config/cc_resizefs_vyos.py
@@ -6,31 +6,33 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""Resizefs: cloud-config module which resizes the filesystem"""
+"""Resizefs_vyos: cloud-config module which resizes filesystems"""
import errno
import os
import stat
from textwrap import dedent
+from cloudinit import subp, util
from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util
NOBLOCK = "noblock"
RESIZEFS_LIST_DEFAULT = ['/']
frequency = PER_ALWAYS
-distros = ['all']
-
-# Renamed to schema_vyos to pass build tests without modifying upstream sources
-schema_vyos = {
- 'id': 'cc_resizefs_vyos',
- 'name': 'Resizefs',
- 'title': 'Resize filesystem',
- 'description': dedent("""\
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_resizefs_vyos",
+ "name": "Resizefs_vyos",
+ "title": "Resize filesystems",
+ "description": dedent(
+ """\
 Resize filesystems to use all available space on their partitions. This
module is useful along with ``cc_growpart`` and will ensure that if a
partition has been resized the filesystem will be resized
@@ -38,34 +40,40 @@ schema_vyos = {
partition and will block the boot process while the resize command is
running. Optionally, the resize operation can be performed in the
background while cloud-init continues running modules. This can be
- enabled by setting ``resizefs_enabled`` to ``noblock``. This module can
- be disabled altogether by setting ``resizefs_enabled`` to ``false``.
- """),
- 'distros': distros,
- 'examples': [
- 'resizefs_enabled: false # disable filesystems resize operation'
- 'resize_fs: ["/", "/dev/vda1"]'],
- 'frequency': PER_ALWAYS,
- 'type': 'object',
- 'properties': {
- 'resizefs_enabled': {
- 'enum': [True, False, NOBLOCK],
- 'description': dedent("""\
- Whether to resize the partitions. Default: 'true'""")
+ enabled by setting ``resizefs_enabled`` to ``noblock``. This module can
+ be disabled altogether by setting ``resizefs_enabled`` to ``false``."""
+ ),
+ "distros": distros,
+ "examples": [
+ "resizefs_enabled: false # disable filesystems resize operation",
+ "resizefs_list: [\"/\", \"/dev/vda1\"]"],
+ "frequency": PER_ALWAYS,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "resizefs_enabled": {
+ "enum": [True, False, NOBLOCK],
+ "description": dedent(
+ """\
+ Whether to resize the partitions. Default: 'true'"""
+ ),
},
- 'resizefs_list': {
- 'type': 'array',
- 'items': {'type': 'string'},
- 'additionalItems': False, # Reject items non-string
- 'description': dedent("""\
+ "resizefs_list": {
+ "type": "array",
+ "items": {"type": "string"},
+ "additionalItems": False, # Reject items non-string
+ "description": dedent(
+ """\
 List of partitions whose filesystems should be resized.
- Default: '/'""")
+ Default: '/'"""
+ )
}
- }
+ },
}
-# Renamed to schema_vyos to pass build tests without modifying upstream sources
-__doc__ = get_schema_doc(schema_vyos) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def _resize_btrfs(mount_point, devpth):
@@ -74,28 +82,38 @@ def _resize_btrfs(mount_point, devpth):
# Use a subvolume that is not ro to trick the resize operation to do the
# "right" thing. The use of ".snapshot" is specific to "snapper" a generic
# solution would be walk the subvolumes and find a rw mounted subvolume.
- if (not util.mount_is_read_write(mount_point) and
- os.path.isdir("%s/.snapshots" % mount_point)):
- return ('btrfs', 'filesystem', 'resize', 'max',
- '%s/.snapshots' % mount_point)
+ if not util.mount_is_read_write(mount_point) and os.path.isdir(
+ "%s/.snapshots" % mount_point
+ ):
+ return (
+ "btrfs",
+ "filesystem",
+ "resize",
+ "max",
+ "%s/.snapshots" % mount_point,
+ )
else:
- return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
+ return ("btrfs", "filesystem", "resize", "max", mount_point)
def _resize_ext(mount_point, devpth):
- return ('resize2fs', devpth)
+ return ("resize2fs", devpth)
def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', mount_point)
+ return ("xfs_growfs", mount_point)
def _resize_ufs(mount_point, devpth):
- return ('growfs', '-y', mount_point)
+ return ("growfs", "-y", mount_point)
def _resize_zfs(mount_point, devpth):
- return ('zpool', 'online', '-e', mount_point, devpth)
+ return ("zpool", "online", "-e", mount_point, devpth)
+
+
+def _resize_hammer2(mount_point, devpth):
+ return ("hammer2", "growfs", mount_point)
def _can_skip_resize_ufs(mount_point, devpth):
@@ -107,7 +125,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
# growfs exits with 1 for almost all cases up to this one.
# This means we can't just use rcs=[0, 1] as subp parameter:
try:
- subp.subp(['growfs', '-N', devpth])
+ subp.subp(["growfs", "-N", devpth])
except subp.ProcessExecutionError as e:
if e.stderr.startswith(skip_start) and skip_contain in e.stderr:
# This FS is already at the desired size
@@ -121,23 +139,22 @@ def _can_skip_resize_ufs(mount_point, devpth):
# for multiple filesystem types if possible, e.g. one command for
# ext2, ext3 and ext4.
RESIZE_FS_PREFIXES_CMDS = [
- ('btrfs', _resize_btrfs),
- ('ext', _resize_ext),
- ('xfs', _resize_xfs),
- ('ufs', _resize_ufs),
- ('zfs', _resize_zfs),
+ ("btrfs", _resize_btrfs),
+ ("ext", _resize_ext),
+ ("xfs", _resize_xfs),
+ ("ufs", _resize_ufs),
+ ("zfs", _resize_zfs),
+ ("hammer2", _resize_hammer2),
]
-RESIZE_FS_PRECHECK_CMDS = {
- 'ufs': _can_skip_resize_ufs
-}
+RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs}
-def can_skip_resize(fs_type, resize_item, devpth):
+def can_skip_resize(fs_type, resize_what, devpth):
fstype_lc = fs_type.lower()
for i, func in RESIZE_FS_PRECHECK_CMDS.items():
if fstype_lc.startswith(i):
- return func(resize_item, devpth)
+ return func(resize_what, devpth)
return False
@@ -154,56 +171,150 @@ def maybe_get_writable_device_path(devpath, info, log):
container = util.is_container()
# Ensure the path is a block device.
- if (devpath == "/dev/root" and not os.path.exists(devpath) and
- not container):
+ if (
+ devpath == "/dev/root"
+ and not os.path.exists(devpath)
+ and not container
+ ):
devpath = util.rootdev_from_cmdline(util.get_cmdline())
if devpath is None:
log.warning("Unable to find device '/dev/root'")
return None
log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
- if devpath == 'overlayroot':
+ if devpath == "overlayroot":
log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
return None
# FreeBSD zpool can also just use gpt/<label>
# with that in mind we can not do an os.stat on "gpt/whatever"
# therefore return the devpath already here.
- if devpath.startswith('gpt/'):
- log.debug('We have a gpt label - just go ahead')
+ if devpath.startswith("gpt/"):
+ log.debug("We have a gpt label - just go ahead")
return devpath
# Alternatively, our device could simply be a name as returned by gpart,
# such as da0p3
- if not devpath.startswith('/dev/') and not os.path.exists(devpath):
- fulldevpath = '/dev/' + devpath.lstrip('/')
- log.debug("'%s' doesn't appear to be a valid device path. Trying '%s'",
- devpath, fulldevpath)
+ if not devpath.startswith("/dev/") and not os.path.exists(devpath):
+ fulldevpath = "/dev/" + devpath.lstrip("/")
+ log.debug(
+ "'%s' doesn't appear to be a valid device path. Trying '%s'",
+ devpath,
+ fulldevpath,
+ )
devpath = fulldevpath
try:
statret = os.stat(devpath)
except OSError as exc:
if container and exc.errno == errno.ENOENT:
- log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpath, info)
+ log.debug(
+ "Device '%s' did not exist in container. cannot resize: %s",
+ devpath,
+ info,
+ )
elif exc.errno == errno.ENOENT:
- log.warning("Device '%s' did not exist. cannot resize: %s",
- devpath, info)
+ log.warning(
+ "Device '%s' did not exist. cannot resize: %s", devpath, info
+ )
else:
raise exc
return None
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if container:
- log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpath, info))
+ log.debug(
+ "device '%s' not a block device in container."
+ " cannot resize: %s" % (devpath, info)
+ )
else:
- log.warning("device '%s' not a block device. cannot resize: %s" %
- (devpath, info))
+ log.warning(
+ "device '%s' not a block device. cannot resize: %s"
+ % (devpath, info)
+ )
return None
return devpath # The writable block devpath
+def resize_fs(resize_what, log, resize_enabled):
+ result = util.get_mount_info(resize_what, log)
+ if not result:
+ log.warning("Could not determine filesystem type of %s", resize_what)
+ return
+
+ (devpth, fs_type, mount_point) = result
+
+ # if we have a zfs then our device path at this point
+ # is the zfs label. For example: vmzroot/ROOT/freebsd
+ # we will have to get the zpool name out of this
+ # and set the resize_what variable to the zpool
+ # so the _resize_zfs function gets the right attribute.
+ if fs_type == "zfs":
+ zpool = devpth.split("/")[0]
+ devpth = util.get_device_info_from_zpool(zpool)
+ if not devpth:
+ return # could not find device from zpool
+ resize_what = zpool
+
+ info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
+ log.debug("resize_info: %s" % info)
+
+ devpth = maybe_get_writable_device_path(devpth, info, log)
+ if not devpth:
+ return # devpath was not a writable block device
+
+ resizer = None
+ if can_skip_resize(fs_type, resize_what, devpth):
+ log.debug(
+ "Skip resize filesystem type %s for %s", fs_type, resize_what
+ )
+ return
+
+ fstype_lc = fs_type.lower()
+ for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
+ if fstype_lc.startswith(pfix):
+ resizer = root_cmd
+ break
+
+ if not resizer:
+ log.warning(
+ "Not resizing unknown filesystem type %s for %s",
+ fs_type,
+ resize_what,
+ )
+ return
+
+ resize_cmd = resizer(resize_what, devpth)
+ log.debug(
+ "Resizing %s (%s) using %s", resize_what, fs_type, " ".join(resize_cmd)
+ )
+
+ if resize_enabled == NOBLOCK:
+ # Fork to a child that will run
+ # the resize command
+ util.fork_cb(
+ util.log_time,
+ logfunc=log.debug,
+ msg="backgrounded Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
+ else:
+ util.log_time(
+ logfunc=log.debug,
+ msg="Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
+
+ action = "Resized"
+ if resize_enabled == NOBLOCK:
+ action = "Resizing (via forking)"
+ log.debug(
+ "%s filesystem on %s (type=%s, val=%s)", action, resize_what,
+ fs_type, resize_enabled
+ )
+
+
def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
resize_enabled = args[0]
@@ -217,84 +328,19 @@ def handle(name, cfg, _cloud, log, args):
resizefs_enabled instead!""")
resize_enabled = resize_rootfs_option
- # Renamed to schema_vyos to pass build tests without modifying upstream
- validate_cloudconfig_schema(cfg, schema_vyos)
+ validate_cloudconfig_schema(cfg, schema)
if not util.translate_bool(resize_enabled, addons=[NOBLOCK]):
log.debug("Skipping module named %s, resizing disabled", name)
return
# Get list of partitions to resize
- resize_what = util.get_cfg_option_list(cfg, "resizefs_list",
+ resize_list = util.get_cfg_option_list(cfg, "resizefs_list",
RESIZEFS_LIST_DEFAULT)
- log.debug("Filesystems to resize: %s", resize_what)
-
- # Resize all filesystems from resize_what
- for resize_item in resize_what:
-
- result = util.get_mount_info(resize_item, log)
- if not result:
- log.warning("Could not determine filesystem type of %s",
- resize_item)
- return
-
- (devpth, fs_type, mount_point) = result
-
- # if we have a zfs then our device path at this point
- # is the zfs label. For example: vmzroot/ROOT/freebsd
- # we will have to get the zpool name out of this
- # and set the resize_item variable to the zpool
- # so the _resize_zfs function gets the right attribute.
- if fs_type == 'zfs':
- zpool = devpth.split('/')[0]
- devpth = util.get_device_info_from_zpool(zpool)
- if not devpth:
- return # could not find device from zpool
- resize_item = zpool
-
- info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point,
- resize_item)
- log.debug("resize_info: %s" % info)
-
- devpth = maybe_get_writable_device_path(devpth, info, log)
- if not devpth:
- return # devpath was not a writable block device
-
- resizer = None
- if can_skip_resize(fs_type, resize_item, devpth):
- log.debug("Skip resize filesystem type %s for %s",
- fs_type, resize_item)
- return
-
- fstype_lc = fs_type.lower()
- for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
- if fstype_lc.startswith(pfix):
- resizer = root_cmd
- break
-
- if not resizer:
- log.warning("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_item)
- return
-
- resize_cmd = resizer(resize_item, devpth)
- log.debug("Resizing %s (%s) using %s", resize_item, fs_type,
- ' '.join(resize_cmd))
-
- if resize_enabled == NOBLOCK:
- # Fork to a child that will run
- # the resize command
- util.fork_cb(
- util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
- func=do_resize, args=(resize_cmd, log))
- else:
- util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
+ log.debug("Filesystems to resize: %s", resize_list)
- action = 'Resized'
- if resize_enabled == NOBLOCK:
- action = 'Resizing (via forking)'
- log.debug("%s filesystem on %s (type=%s, val=%s)", action, resize_item,
- fs_type, resize_enabled)
+ # Resize all filesystems from resize_list
+ for resize_what in resize_list:
+ resize_fs(resize_what, log, resize_enabled)
def do_resize(resize_cmd, log):
@@ -306,4 +352,5 @@ def do_resize(resize_cmd, log):
# TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
+
# vi: ts=4 expandtab
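A minimal cloud-config sketch for the VyOS-specific module above, using the resizefs_enabled and resizefs_list keys from its schema (the extra device path is a placeholder):

    #cloud-config
    resizefs_enabled: true    # true, false, or 'noblock' to resize in the background
    resizefs_list:
      - /                     # the default when the list is omitted
      - /dev/vda1             # placeholder additional filesystem to resize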
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 7beb11ca..b2970d51 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -14,12 +14,12 @@ Resolv Conf
This module is intended to manage resolv.conf in environments where early
configuration of resolv.conf is necessary for further bootstrapping and/or
where configuration management such as puppet or chef own dns configuration.
-As Debian/Ubuntu will, by default, utilize resolvconf, and similarly RedHat
+As Debian/Ubuntu will, by default, utilize resolvconf, and similarly Red Hat
will use sysconfig, this module is likely to be of little use unless those
are configured correctly.
.. note::
- For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
+ For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP
enabled NICs.
.. note::
@@ -30,7 +30,7 @@ are configured correctly.
**Module frequency:** per instance
-**Supported distros:** alpine, fedora, rhel, sles
+**Supported distros:** alpine, fedora, photon, rhel, sles
**Config keys**::
@@ -47,23 +47,27 @@ are configured correctly.
"""
from cloudinit import log as logging
+from cloudinit import templater, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import templater
-from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles']
+distros = ["alpine", "fedora", "opensuse", "photon", "rhel", "sles"]
+RESOLVE_CONFIG_TEMPLATE_MAP = {
+ "/etc/resolv.conf": "resolv.conf",
+ "/etc/systemd/resolved.conf": "systemd.resolved.conf",
+}
-def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
+
+def generate_resolv_conf(template_fn, params, target_fname):
flags = []
false_flags = []
- if 'options' in params:
- for key, val in params['options'].items():
+ if "options" in params:
+ for key, val in params["options"].items():
if isinstance(val, bool):
if val:
flags.append(key)
@@ -71,12 +75,12 @@ def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
false_flags.append(key)
for flag in flags + false_flags:
- del params['options'][flag]
+ del params["options"][flag]
- if not params.get('options'):
- params['options'] = {}
+ if not params.get("options"):
+ params["options"] = {}
- params['flags'] = flags
+ params["flags"] = flags
LOG.debug("Writing resolv.conf from template %s", template_fn)
templater.render_to_file(template_fn, target_fname, params)
@@ -92,24 +96,39 @@ def handle(name, cfg, cloud, log, _args):
@param args: Any module arguments from cloud.cfg
"""
if "manage_resolv_conf" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'manage_resolv_conf' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s,"
+ " no 'manage_resolv_conf' key in configuration",
+ name,
+ )
return
if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
- log.debug(("Skipping module named %s,"
- " 'manage_resolv_conf' present but set to False"), name)
+ log.debug(
+ "Skipping module named %s,"
+ " 'manage_resolv_conf' present but set to False",
+ name,
+ )
return
if "resolv_conf" not in cfg:
log.warning("manage_resolv_conf True but no parameters provided!")
+ return
- template_fn = cloud.get_template_filename('resolv.conf')
- if not template_fn:
- log.warning("No template found, not rendering /etc/resolv.conf")
+ try:
+ template_fn = cloud.get_template_filename(
+ RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn]
+ )
+ except KeyError:
+ log.warning("No template found, not rendering resolve configs")
return
- generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
+ generate_resolv_conf(
+ template_fn=template_fn,
+ params=cfg["resolv_conf"],
+ target_fname=cloud.distro.resolve_conf_fn,
+ )
return
+
# vi: ts=4 expandtab
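A hedged cloud-config sketch for the module above: manage_resolv_conf gates the handler and the options mapping is consumed by generate_resolv_conf (boolean true values become bare flags); the nameservers key and the addresses shown are assumptions for illustration:

    #cloud-config
    manage_resolv_conf: true
    resolv_conf:
      nameservers: ['192.0.2.1', '192.0.2.2']   # placeholder addresses
      options:
        rotate: true      # rendered as a bare flag
        timeout: 1        # kept as a key:value option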
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 28d62e9d..b81a7a9b 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -5,15 +5,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""
-RedHat Subscription
--------------------
+Red Hat Subscription
+--------------------
**Summary:** register red hat enterprise linux based system
-Register a RedHat system either by username and password *or* activation and
+Register a Red Hat system either by username and password *or* activation and
org. Following a successful registration, you can auto-attach subscriptions, set
the service level, add subscriptions based on pool id, enable/disable yum
repositories based on repo id, and alter the rhsm_baseurl and server-hostname
-in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register RedHat
+in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register Red Hat
Subscription`` example config.
**Internal name:** ``cc_rh_subscription``
@@ -39,12 +39,11 @@ Subscription`` example config.
"""
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
-distros = ['fedora', 'rhel']
+distros = ["fedora", "rhel"]
def handle(name, cfg, _cloud, log, _args):
@@ -60,8 +59,9 @@ def handle(name, cfg, _cloud, log, _args):
raise SubscriptionError(verify_msg)
cont = sm.rhn_register()
if not cont:
- raise SubscriptionError("Registration failed or did not "
- "run completely")
+ raise SubscriptionError(
+ "Registration failed or did not run completely"
+ )
# Splitting up the registration, auto-attach, and servicelevel
# commands because the error codes, messages from subman are not
@@ -70,8 +70,7 @@ def handle(name, cfg, _cloud, log, _args):
# Attempt to change the service level
if sm.auto_attach and sm.servicelevel is not None:
if not sm._set_service_level():
- raise SubscriptionError("Setting of service-level "
- "failed")
+ raise SubscriptionError("Setting of service-level failed")
else:
sm.log.debug("Completed auto-attach with service level")
elif sm.auto_attach:
@@ -87,8 +86,9 @@ def handle(name, cfg, _cloud, log, _args):
return_stat = sm.addPool(sm.pools)
if not return_stat:
- raise SubscriptionError("Unable to attach pools {0}"
- .format(sm.pools))
+ raise SubscriptionError(
+ "Unable to attach pools {0}".format(sm.pools)
+ )
return_stat = sm.update_repos()
if not return_stat:
raise SubscriptionError("Unable to add or remove repos")
@@ -105,72 +105,87 @@ class SubscriptionError(Exception):
class SubscriptionManager(object):
- valid_rh_keys = ['org', 'activation-key', 'username', 'password',
- 'disable-repo', 'enable-repo', 'add-pool',
- 'rhsm-baseurl', 'server-hostname',
- 'auto-attach', 'service-level']
+ valid_rh_keys = [
+ "org",
+ "activation-key",
+ "username",
+ "password",
+ "disable-repo",
+ "enable-repo",
+ "add-pool",
+ "rhsm-baseurl",
+ "server-hostname",
+ "auto-attach",
+ "service-level",
+ ]
def __init__(self, cfg, log=None):
if log is None:
log = LOG
self.log = log
self.cfg = cfg
- self.rhel_cfg = self.cfg.get('rh_subscription', {})
- self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
- self.server_hostname = self.rhel_cfg.get('server-hostname')
- self.pools = self.rhel_cfg.get('add-pool')
- self.activation_key = self.rhel_cfg.get('activation-key')
- self.org = self.rhel_cfg.get('org')
- self.userid = self.rhel_cfg.get('username')
- self.password = self.rhel_cfg.get('password')
- self.auto_attach = self.rhel_cfg.get('auto-attach')
- self.enable_repo = self.rhel_cfg.get('enable-repo')
- self.disable_repo = self.rhel_cfg.get('disable-repo')
- self.servicelevel = self.rhel_cfg.get('service-level')
+ self.rhel_cfg = self.cfg.get("rh_subscription", {})
+ self.rhsm_baseurl = self.rhel_cfg.get("rhsm-baseurl")
+ self.server_hostname = self.rhel_cfg.get("server-hostname")
+ self.pools = self.rhel_cfg.get("add-pool")
+ self.activation_key = self.rhel_cfg.get("activation-key")
+ self.org = self.rhel_cfg.get("org")
+ self.userid = self.rhel_cfg.get("username")
+ self.password = self.rhel_cfg.get("password")
+ self.auto_attach = self.rhel_cfg.get("auto-attach")
+ self.enable_repo = self.rhel_cfg.get("enable-repo")
+ self.disable_repo = self.rhel_cfg.get("disable-repo")
+ self.servicelevel = self.rhel_cfg.get("service-level")
def log_success(self, msg):
- '''Simple wrapper for logging info messages. Useful for unittests'''
+ """Simple wrapper for logging info messages. Useful for unittests"""
self.log.info(msg)
def log_warn(self, msg):
- '''Simple wrapper for logging warning messages. Useful for unittests'''
+ """Simple wrapper for logging warning messages. Useful for unittests"""
self.log.warning(msg)
def _verify_keys(self):
- '''
+ """
Checks that the keys in the rh_subscription dict from the user-data
are what we expect.
- '''
+ """
for k in self.rhel_cfg:
if k not in self.valid_rh_keys:
- bad_key = "{0} is not a valid key for rh_subscription. "\
- "Valid keys are: "\
- "{1}".format(k, ', '.join(self.valid_rh_keys))
+ bad_key = (
+ "{0} is not a valid key for rh_subscription. "
+ "Valid keys are: "
+ "{1}".format(k, ", ".join(self.valid_rh_keys))
+ )
return False, bad_key
# Check for bad auto-attach value
- if (self.auto_attach is not None) and \
- not (util.is_true(self.auto_attach) or
- util.is_false(self.auto_attach)):
- not_bool = "The key auto-attach must be a boolean value "\
- "(True/False "
+ if (self.auto_attach is not None) and not (
+ util.is_true(self.auto_attach) or util.is_false(self.auto_attach)
+ ):
+ not_bool = (
+ "The key auto-attach must be a boolean value (True/False "
+ )
return False, not_bool
- if (self.servicelevel is not None) and ((not self.auto_attach) or
- (util.is_false(str(self.auto_attach)))):
- no_auto = ("The service-level key must be used in conjunction "
- "with the auto-attach key. Please re-run with "
- "auto-attach: True")
+ if (self.servicelevel is not None) and (
+ (not self.auto_attach) or (util.is_false(str(self.auto_attach)))
+ ):
+ no_auto = (
+ "The service-level key must be used in conjunction "
+ "with the auto-attach key. Please re-run with "
+ "auto-attach: True"
+ )
return False, no_auto
return True, None
def is_registered(self):
- '''
+ """
Checks if the system is already registered and returns
True if so, else False
- '''
- cmd = ['identity']
+ """
+ cmd = ["identity"]
try:
_sub_man_cli(cmd)
@@ -180,15 +195,18 @@ class SubscriptionManager(object):
return True
def rhn_register(self):
- '''
+ """
Registers the system by userid and password or activation key
and org. Returns True when successful False when not.
- '''
+ """
if (self.activation_key is not None) and (self.org is not None):
# register by activation key
- cmd = ['register', '--activationkey={0}'.
- format(self.activation_key), '--org={0}'.format(self.org)]
+ cmd = [
+ "register",
+ "--activationkey={0}".format(self.activation_key),
+ "--org={0}".format(self.org),
+ ]
# If the baseurl and/or server url are passed in, we register
# with them.
@@ -203,14 +221,18 @@ class SubscriptionManager(object):
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except subp.ProcessExecutionError as e:
if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
+ self.log_warn(
+ "Registration failed due to: {0}".format(e.stderr)
+ )
return False
elif (self.userid is not None) and (self.password is not None):
# register by username and password
- cmd = ['register', '--username={0}'.format(self.userid),
- '--password={0}'.format(self.password)]
+ cmd = [
+ "register",
+ "--username={0}".format(self.userid),
+ "--password={0}".format(self.password),
+ ]
# If the baseurl and/or server url are passed in, we register
# with them.
@@ -226,15 +248,18 @@ class SubscriptionManager(object):
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except subp.ProcessExecutionError as e:
if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
+ self.log_warn(
+ "Registration failed due to: {0}".format(e.stderr)
+ )
return False
else:
- self.log_warn("Unable to register system due to incomplete "
- "information.")
- self.log_warn("Use either activationkey and org *or* userid "
- "and password")
+ self.log_warn(
+ "Unable to register system due to incomplete information."
+ )
+ self.log_warn(
+ "Use either activationkey and org *or* userid and password"
+ )
return False
reg_id = return_out.split("ID: ")[1].rstrip()
@@ -242,19 +267,25 @@ class SubscriptionManager(object):
return True
def _set_service_level(self):
- cmd = ['attach', '--auto', '--servicelevel={0}'
- .format(self.servicelevel)]
+ cmd = [
+ "attach",
+ "--auto",
+ "--servicelevel={0}".format(self.servicelevel),
+ ]
try:
return_out = _sub_man_cli(cmd)[0]
except subp.ProcessExecutionError as e:
- if e.stdout.rstrip() != '':
+ if e.stdout.rstrip() != "":
for line in e.stdout.split("\n"):
- if line != '':
+ if line != "":
self.log_warn(line)
else:
- self.log_warn("Setting the service level failed with: "
- "{0}".format(e.stderr.strip()))
+ self.log_warn(
+ "Setting the service level failed with: {0}".format(
+ e.stderr.strip()
+ )
+ )
return False
for line in return_out.split("\n"):
if line != "":
@@ -262,7 +293,7 @@ class SubscriptionManager(object):
return True
def _set_auto_attach(self):
- cmd = ['attach', '--auto']
+ cmd = ["attach", "--auto"]
try:
return_out = _sub_man_cli(cmd)[0]
except subp.ProcessExecutionError as e:
@@ -274,52 +305,52 @@ class SubscriptionManager(object):
return True
def _getPools(self):
- '''
+ """
Gets the list pools for the active subscription and returns them
in list form.
- '''
+ """
available = []
consumed = []
# Get all available pools
- cmd = ['list', '--available', '--pool-only']
+ cmd = ["list", "--available", "--pool-only"]
results = _sub_man_cli(cmd)[0]
available = (results.rstrip()).split("\n")
# Get all consumed pools
- cmd = ['list', '--consumed', '--pool-only']
+ cmd = ["list", "--consumed", "--pool-only"]
results = _sub_man_cli(cmd)[0]
consumed = (results.rstrip()).split("\n")
return available, consumed
def _getRepos(self):
- '''
+ """
Obtains the current list of active yum repositories and returns
them in list form.
- '''
+ """
- cmd = ['repos', '--list-enabled']
+ cmd = ["repos", "--list-enabled"]
return_out = _sub_man_cli(cmd)[0]
active_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
- active_repos.append((repo.split(':')[1]).strip())
+ active_repos.append((repo.split(":")[1]).strip())
- cmd = ['repos', '--list-disabled']
+ cmd = ["repos", "--list-disabled"]
return_out = _sub_man_cli(cmd)[0]
inactive_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
- inactive_repos.append((repo.split(':')[1]).strip())
+ inactive_repos.append((repo.split(":")[1]).strip())
return active_repos, inactive_repos
def addPool(self, pools):
- '''
+ """
Takes a list of subscription pools and "attaches" them to the
current subscription
- '''
+ """
# An empty list was passed
if len(pools) == 0:
@@ -328,31 +359,33 @@ class SubscriptionManager(object):
pool_available, pool_consumed = self._getPools()
pool_list = []
- cmd = ['attach']
+ cmd = ["attach"]
for pool in pools:
if (pool not in pool_consumed) and (pool in pool_available):
- pool_list.append('--pool={0}'.format(pool))
+ pool_list.append("--pool={0}".format(pool))
else:
self.log_warn("Pool {0} is not available".format(pool))
if len(pool_list) > 0:
cmd.extend(pool_list)
try:
_sub_man_cli(cmd)
- self.log.debug("Attached the following pools to your "
- "system: %s", (", ".join(pool_list))
- .replace('--pool=', ''))
+ self.log.debug(
+ "Attached the following pools to your system: %s",
+ (", ".join(pool_list)).replace("--pool=", ""),
+ )
return True
except subp.ProcessExecutionError as e:
- self.log_warn("Unable to attach pool {0} "
- "due to {1}".format(pool, e))
+ self.log_warn(
+ "Unable to attach pool {0} due to {1}".format(pool, e)
+ )
return False
def update_repos(self):
- '''
+ """
Takes a list of yum repo ids that need to be disabled or enabled; then
it verifies if they are already enabled or disabled and finally
executes the action to disable or enable
- '''
+ """
erepos = self.enable_repo
drepos = self.disable_repo
@@ -378,7 +411,7 @@ class SubscriptionManager(object):
enable_list = []
enable_list_fail = []
for repoid in erepos:
- if (repoid in inactive_repos):
+ if repoid in inactive_repos:
enable_list.append("--enable={0}".format(repoid))
else:
enable_list_fail.append(repoid)
@@ -399,14 +432,16 @@ class SubscriptionManager(object):
if fail in active_repos:
self.log.debug("Repo %s is already enabled", fail)
else:
- self.log_warn("Repo {0} does not appear to "
- "exist".format(fail))
+ self.log_warn(
+ "Repo {0} does not appear to exist".format(fail)
+ )
if len(disable_list_fail) > 0:
for fail in disable_list_fail:
- self.log.debug("Repo %s not disabled "
- "because it is not enabled", fail)
+ self.log.debug(
+ "Repo %s not disabled because it is not enabled", fail
+ )
- cmd = ['repos']
+ cmd = ["repos"]
if len(disable_list) > 0:
cmd.extend(disable_list)
@@ -420,11 +455,15 @@ class SubscriptionManager(object):
return False
if len(enable_list) > 0:
- self.log.debug("Enabled the following repos: %s",
- (", ".join(enable_list)).replace('--enable=', ''))
+ self.log.debug(
+ "Enabled the following repos: %s",
+ (", ".join(enable_list)).replace("--enable=", ""),
+ )
if len(disable_list) > 0:
- self.log.debug("Disabled the following repos: %s",
- (", ".join(disable_list)).replace('--disable=', ''))
+ self.log.debug(
+ "Disabled the following repos: %s",
+ (", ".join(disable_list)).replace("--disable=", ""),
+ )
return True
def is_configured(self):
@@ -432,13 +471,12 @@ class SubscriptionManager(object):
def _sub_man_cli(cmd, logstring_val=False):
- '''
+ """
 Uses the preferred cloud-init subprocess def of subp.subp
and runs subscription-manager. Breaking this to a
separate function for later use in mocking and unittests
- '''
- return subp.subp(['subscription-manager'] + cmd,
- logstring=logstring_val)
+ """
+ return subp.subp(["subscription-manager"] + cmd, logstring=logstring_val)
# vi: ts=4 expandtab
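A minimal cloud-config sketch using keys from valid_rh_keys in the class above; credentials, pool id, and repo ids are placeholders (use either username/password or activation-key/org, not both):

    #cloud-config
    rh_subscription:
      username: 'user@example.com'       # placeholder
      password: 'REPLACE_ME'             # placeholder
      auto-attach: true
      service-level: self-support        # only valid together with auto-attach: true
      add-pool: ['0123456789abcdef']     # placeholder pool id
      enable-repo: ['rhel-8-for-x86_64-baseos-rpms']   # placeholder repo id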
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index a5aca038..36a009a2 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -44,7 +44,7 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
# - read the blob of data from raw user data, and parse it as key/value
# - for each key that is found, download the content to
# the local instance/scripts directory and set them executable.
-# - the files in that directory will be run by the user-scripts module
+# - the files in that directory will be run by the scripts-user module
# Therefore, this must run before that.
#
#
@@ -52,14 +52,14 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
import os
from urllib.parse import parse_qs
-from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
MY_NAME = "cc_rightscale_userdata"
-MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK'
+MY_HOOKNAME = "CLOUD_INIT_REMOTE_HOOK"
def handle(name, _cfg, cloud, log, _args):
@@ -72,13 +72,16 @@ def handle(name, _cfg, cloud, log, _args):
try:
mdict = parse_qs(ud)
if not mdict or MY_HOOKNAME not in mdict:
- log.debug(("Skipping module %s, "
- "did not find %s in parsed"
- " raw userdata"), name, MY_HOOKNAME)
+ log.debug(
+ "Skipping module %s, did not find %s in parsed raw userdata",
+ name,
+ MY_HOOKNAME,
+ )
return
except Exception:
- util.logexc(log, "Failed to parse query string %s into a dictionary",
- ud)
+ util.logexc(
+ log, "Failed to parse query string %s into a dictionary", ud
+ )
raise
wrote_fns = []
@@ -87,7 +90,7 @@ def handle(name, _cfg, cloud, log, _args):
    # These will eventually then be run by the cc_scripts_user
# TODO(harlowja): maybe this should just be a new user data handler??
# Instead of a late module that acts like a user data handler?
- scripts_d = cloud.get_ipath_cur('scripts')
+ scripts_d = cloud.get_ipath_cur("scripts")
urls = mdict[MY_HOOKNAME]
for (i, url) in enumerate(urls):
fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
@@ -99,8 +102,9 @@ def handle(name, _cfg, cloud, log, _args):
wrote_fns.append(fname)
except Exception as e:
captured_excps.append(e)
- util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url,
- fname)
+ util.logexc(
+ log, "%s failed to read %s and write %s", MY_NAME, url, fname
+ )
if wrote_fns:
log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
@@ -110,8 +114,11 @@ def handle(name, _cfg, cloud, log, _args):
log.debug("%s urls were skipped or failed", skipped)
if captured_excps:
- log.warning("%s failed with exceptions, re-raising the last one",
- len(captured_excps))
+ log.warning(
+ "%s failed with exceptions, re-raising the last one",
+ len(captured_excps),
+ )
raise captured_excps[-1]
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 2a2bc931..db2a3c79 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -11,7 +11,7 @@
Rsyslog
-------
-**Summary:** configure system loggig via rsyslog
+**Summary:** configure system logging via rsyslog
This module configures remote system logging using rsyslog.
@@ -182,50 +182,45 @@ import os
import re
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
DEF_RELOAD = "auto"
DEF_REMOTES = {}
-KEYNAME_CONFIGS = 'configs'
-KEYNAME_FILENAME = 'config_filename'
-KEYNAME_DIR = 'config_dir'
-KEYNAME_RELOAD = 'service_reload_command'
-KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
-KEYNAME_LEGACY_DIR = 'rsyslog_dir'
-KEYNAME_REMOTES = 'remotes'
+KEYNAME_CONFIGS = "configs"
+KEYNAME_FILENAME = "config_filename"
+KEYNAME_DIR = "config_dir"
+KEYNAME_RELOAD = "service_reload_command"
+KEYNAME_LEGACY_FILENAME = "rsyslog_filename"
+KEYNAME_LEGACY_DIR = "rsyslog_dir"
+KEYNAME_REMOTES = "remotes"
LOG = logging.getLogger(__name__)
-COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
+COMMENT_RE = re.compile(r"[ ]*[#]+[ ]*")
HOST_PORT_RE = re.compile(
- r'^(?P<proto>[@]{0,2})'
- r'(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
- r'([:](?P<port>[0-9]+))?$')
+ r"^(?P<proto>[@]{0,2})"
+ r"(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))"
+ r"([:](?P<port>[0-9]+))?$"
+)
-def reload_syslog(command=DEF_RELOAD, systemd=False):
- service = 'rsyslog'
+def reload_syslog(distro, command=DEF_RELOAD):
if command == DEF_RELOAD:
- if systemd:
- cmd = ['systemctl', 'reload-or-try-restart', service]
- else:
- cmd = ['service', service, 'restart']
- else:
- cmd = command
- subp.subp(cmd, capture=True)
+ service = distro.get_option("rsyslog_svcname", "rsyslog")
+ return distro.manage_service("try-reload", service)
+ return subp.subp(command, capture=True)
def load_config(cfg):
# return an updated config with entries of the correct type
# support converting the old top level format into new format
- mycfg = cfg.get('rsyslog', {})
+ mycfg = cfg.get("rsyslog", {})
- if isinstance(cfg.get('rsyslog'), list):
- mycfg = {KEYNAME_CONFIGS: cfg.get('rsyslog')}
+ if isinstance(cfg.get("rsyslog"), list):
+ mycfg = {KEYNAME_CONFIGS: cfg.get("rsyslog")}
if KEYNAME_LEGACY_FILENAME in cfg:
mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
if KEYNAME_LEGACY_DIR in cfg:
@@ -236,7 +231,8 @@ def load_config(cfg):
(KEYNAME_DIR, DEF_DIR, str),
(KEYNAME_FILENAME, DEF_FILENAME, str),
(KEYNAME_RELOAD, DEF_RELOAD, (str, list)),
- (KEYNAME_REMOTES, DEF_REMOTES, dict))
+ (KEYNAME_REMOTES, DEF_REMOTES, dict),
+ )
for key, default, vtypes in fillup:
if key not in mycfg or not isinstance(mycfg[key], vtypes):
@@ -252,10 +248,11 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
for cur_pos, ent in enumerate(configs):
if isinstance(ent, dict):
if "content" not in ent:
- LOG.warning("No 'content' entry in config entry %s",
- cur_pos + 1)
+ LOG.warning(
+ "No 'content' entry in config entry %s", cur_pos + 1
+ )
continue
- content = ent['content']
+ content = ent["content"]
filename = ent.get("filename", def_fname)
else:
content = ent
@@ -306,9 +303,9 @@ def parse_remotes_line(line, name=None):
if not toks:
raise ValueError("Invalid host specification '%s'" % host_port)
- proto = toks.group('proto')
- addr = toks.group('addr') or toks.group('bracket_addr')
- port = toks.group('port')
+ proto = toks.group("proto")
+ addr = toks.group("addr") or toks.group("bracket_addr")
+ port = toks.group("port")
if addr.startswith("[") and not addr.endswith("]"):
raise ValueError("host spec had invalid brackets: %s" % addr)
@@ -316,15 +313,17 @@ def parse_remotes_line(line, name=None):
if comment and not name:
name = comment
- t = SyslogRemotesLine(name=name, match=match, proto=proto,
- addr=addr, port=port)
+ t = SyslogRemotesLine(
+ name=name, match=match, proto=proto, addr=addr, port=port
+ )
t.validate()
return t
class SyslogRemotesLine(object):
- def __init__(self, name=None, match=None, proto=None, addr=None,
- port=None):
+ def __init__(
+ self, name=None, match=None, proto=None, addr=None, port=None
+ ):
if not match:
match = "*.*"
self.name = name
@@ -357,7 +356,11 @@ class SyslogRemotesLine(object):
def __repr__(self):
return "[name=%s match=%s proto=%s address=%s port=%s]" % (
- self.name, self.match, self.proto, self.addr, self.port
+ self.name,
+ self.match,
+ self.proto,
+ self.addr,
+ self.port,
)
def __str__(self):
@@ -395,13 +398,14 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
LOG.warning("failed loading remote %s: %s [%s]", name, line, e)
if footer is not None:
lines.append(footer)
- return '\n'.join(lines) + "\n"
+ return "\n".join(lines) + "\n"
def handle(name, cfg, cloud, log, _args):
- if 'rsyslog' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'rsyslog' key in configuration"), name)
+ if "rsyslog" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'rsyslog' key in configuration", name
+ )
return
mycfg = load_config(cfg)
@@ -413,25 +417,25 @@ def handle(name, cfg, cloud, log, _args):
mycfg[KEYNAME_REMOTES],
header="# begin remotes",
footer="# end remotes",
- ))
+ )
+ )
- if not mycfg['configs']:
+ if not mycfg["configs"]:
log.debug("Empty config rsyslog['configs'], nothing to do")
return
changes = apply_rsyslog_changes(
configs=mycfg[KEYNAME_CONFIGS],
def_fname=mycfg[KEYNAME_FILENAME],
- cfg_dir=mycfg[KEYNAME_DIR])
+ cfg_dir=mycfg[KEYNAME_DIR],
+ )
if not changes:
log.debug("restart of syslog not necessary, no changes made")
return
try:
- restarted = reload_syslog(
- command=mycfg[KEYNAME_RELOAD],
- systemd=cloud.distro.uses_systemd()),
+ restarted = reload_syslog(cloud.distro, command=mycfg[KEYNAME_RELOAD])
except subp.ProcessExecutionError as e:
restarted = False
log.warning("Failed to reload syslog", e)
@@ -444,4 +448,5 @@ def handle(name, cfg, cloud, log, _args):
# the logging was setup to use it...
log.debug("%s configured %s files", name, changes)
+
# vi: ts=4 expandtab syntax=python
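A rough sketch of how the reworked reload_syslog() is now driven by the distro object; the stub class below exists only to make the control flow visible and is not part of cloud-init:

    DEF_RELOAD = "auto"

    class FakeDistro:
        """Stand-in for a cloudinit.distros.Distro instance (illustrative only)."""

        def get_option(self, name, default=None):
            return default

        def manage_service(self, action, service):
            print("manage_service(%r, %r)" % (action, service))
            return True

    def reload_syslog(distro, command=DEF_RELOAD):
        # Mirrors the new logic: delegate to the distro unless the user
        # supplied an explicit reload command.
        if command == DEF_RELOAD:
            service = distro.get_option("rsyslog_svcname", "rsyslog")
            return distro.manage_service("try-reload", service)
        print("would run %r via subp.subp(command, capture=True)" % (command,))

    reload_syslog(FakeDistro())                                        # distro service layer
    reload_syslog(FakeDistro(), ["systemctl", "restart", "rsyslog"])   # explicit command path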
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 1f75d6c5..c5206003 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -8,15 +8,17 @@
"""Runcmd: run arbitrary commands at rc.local with output to the console"""
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
-from cloudinit.distros import ALL_DISTROS
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
import os
from textwrap import dedent
+from cloudinit import util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
# The schema definition for each cloud-config module is a strict contract for
# describing supported configuration parameters for each cloud-config section.
@@ -26,17 +28,21 @@ from textwrap import dedent
distros = [ALL_DISTROS]
-schema = {
- 'id': 'cc_runcmd',
- 'name': 'Runcmd',
- 'title': 'Run arbitrary commands',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_runcmd",
+ "name": "Runcmd",
+ "title": "Run arbitrary commands",
+ "description": dedent(
+ """\
Run arbitrary commands at a rc.local like level with output to the
console. Each item can be either a list or a string. If the item is a
- list, it will be properly executed as if passed to ``execve()`` (with
- the first arg as the command). If the item is a string, it will be
- written to a file and interpreted
- using ``sh``.
+ list, it will be properly quoted. Each item is written to
+ ``/var/lib/cloud/instance/runcmd`` to be later interpreted using
+ ``sh``.
+
+ Note that the ``runcmd`` module only writes the script to be run
+ later. The module that actually runs the script is ``scripts-user``
+ in the :ref:`Final` boot stage.
.. note::
@@ -47,50 +53,61 @@ schema = {
when writing files, do not use /tmp dir as it races with
systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
runcmd:
- [ ls, -l, / ]
- [ sh, -xc, "echo $(date) ': hello world!'" ]
- [ sh, -c, echo "=========hello world'=========" ]
- ls -l /root
- [ wget, "http://example.org", -O, /tmp/index.html ]
- """)],
- 'frequency': PER_INSTANCE,
- 'type': 'object',
- 'properties': {
- 'runcmd': {
- 'type': 'array',
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
+ """
+ )
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "runcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"},
+ {"type": "null"},
+ ]
},
- 'additionalItems': False, # Reject items of non-string non-list
- 'additionalProperties': False,
- 'minItems': 1,
- 'required': [],
+ "additionalItems": False, # Reject items of non-string non-list
+ "additionalProperties": False,
+ "minItems": 1,
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def handle(name, cfg, cloud, log, _args):
if "runcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'runcmd' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s, no 'runcmd' key in configuration", name
+ )
return
validate_cloudconfig_schema(cfg, schema)
- out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
+ out_fn = os.path.join(cloud.get_ipath("scripts"), "runcmd")
cmd = cfg["runcmd"]
try:
content = util.shellify(cmd)
util.write_file(out_fn, content, 0o700)
- except Exception:
- util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
+ except Exception as e:
+ raise type(e)("Failed to shellify {} into file {}".format(cmd, out_fn))
+
# vi: ts=4 expandtab
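To see what the reshaped runcmd schema accepts, the fragment can be exercised directly with the third-party jsonschema package (an assumption here: cloud-init's validate_cloudconfig_schema is built on that same library):

    import jsonschema

    schema = {
        "type": "object",
        "properties": {
            "runcmd": {
                "type": "array",
                "items": {
                    "oneOf": [
                        {"type": "array", "items": {"type": "string"}},
                        {"type": "string"},
                        {"type": "null"},
                    ]
                },
                "minItems": 1,
            }
        },
    }

    # List items, plain strings and (newly) null entries all pass:
    jsonschema.validate({"runcmd": [["ls", "-l", "/"], "echo done", None]}, schema)

    # Anything else, such as a bare number, is rejected:
    try:
        jsonschema.validate({"runcmd": [42]}, schema)
    except jsonschema.ValidationError as e:
        print("rejected:", e.message)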
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index b61876aa..0eb46664 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -46,8 +46,7 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
import os
from cloudinit import safeyaml, subp, util
-from cloudinit.distros import rhel_util
-
+from cloudinit.distros import bsd_utils
# Note: see https://docs.saltstack.com/en/latest/topics/installation/
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
@@ -57,36 +56,40 @@ class SaltConstants(object):
"""
defines default distribution specific salt variables
"""
+
def __init__(self, cfg):
# constants tailored for FreeBSD
if util.is_FreeBSD():
- self.pkg_name = 'py36-salt'
- self.srv_name = 'salt_minion'
- self.conf_dir = '/usr/local/etc/salt'
+ self.pkg_name = "py-salt"
+ self.srv_name = "salt_minion"
+ self.conf_dir = "/usr/local/etc/salt"
# constants for any other OS
else:
- self.pkg_name = 'salt-minion'
- self.srv_name = 'salt-minion'
- self.conf_dir = '/etc/salt'
+ self.pkg_name = "salt-minion"
+ self.srv_name = "salt-minion"
+ self.conf_dir = "/etc/salt"
# if there are constants given in cloud config use those
- self.pkg_name = util.get_cfg_option_str(cfg, 'pkg_name',
- self.pkg_name)
- self.conf_dir = util.get_cfg_option_str(cfg, 'config_dir',
- self.conf_dir)
- self.srv_name = util.get_cfg_option_str(cfg, 'service_name',
- self.srv_name)
+ self.pkg_name = util.get_cfg_option_str(cfg, "pkg_name", self.pkg_name)
+ self.conf_dir = util.get_cfg_option_str(
+ cfg, "config_dir", self.conf_dir
+ )
+ self.srv_name = util.get_cfg_option_str(
+ cfg, "service_name", self.srv_name
+ )
def handle(name, cfg, cloud, log, _args):
# If there isn't a salt key in the configuration don't do anything
- if 'salt_minion' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'salt_minion' key in configuration"), name)
+ if "salt_minion" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'salt_minion' key in configuration",
+ name,
+ )
return
- s_cfg = cfg['salt_minion']
+ s_cfg = cfg["salt_minion"]
const = SaltConstants(cfg=s_cfg)
# Start by installing the salt package ...
@@ -96,40 +99,40 @@ def handle(name, cfg, cloud, log, _args):
util.ensure_dir(const.conf_dir)
# ... and then update the salt configuration
- if 'conf' in s_cfg:
+ if "conf" in s_cfg:
# Add all sections from the conf object to minion config file
- minion_config = os.path.join(const.conf_dir, 'minion')
- minion_data = safeyaml.dumps(s_cfg.get('conf'))
+ minion_config = os.path.join(const.conf_dir, "minion")
+ minion_data = safeyaml.dumps(s_cfg.get("conf"))
util.write_file(minion_config, minion_data)
- if 'grains' in s_cfg:
+ if "grains" in s_cfg:
# add grains to /etc/salt/grains
- grains_config = os.path.join(const.conf_dir, 'grains')
- grains_data = safeyaml.dumps(s_cfg.get('grains'))
+ grains_config = os.path.join(const.conf_dir, "grains")
+ grains_data = safeyaml.dumps(s_cfg.get("grains"))
util.write_file(grains_config, grains_data)
# ... copy the key pair if specified
- if 'public_key' in s_cfg and 'private_key' in s_cfg:
+ if "public_key" in s_cfg and "private_key" in s_cfg:
pki_dir_default = os.path.join(const.conf_dir, "pki/minion")
if not os.path.isdir(pki_dir_default):
pki_dir_default = os.path.join(const.conf_dir, "pki")
- pki_dir = s_cfg.get('pki_dir', pki_dir_default)
+ pki_dir = s_cfg.get("pki_dir", pki_dir_default)
with util.umask(0o77):
util.ensure_dir(pki_dir)
- pub_name = os.path.join(pki_dir, 'minion.pub')
- pem_name = os.path.join(pki_dir, 'minion.pem')
- util.write_file(pub_name, s_cfg['public_key'])
- util.write_file(pem_name, s_cfg['private_key'])
+ pub_name = os.path.join(pki_dir, "minion.pub")
+ pem_name = os.path.join(pki_dir, "minion.pem")
+ util.write_file(pub_name, s_cfg["public_key"])
+ util.write_file(pem_name, s_cfg["private_key"])
# we need to have the salt minion service enabled in rc in order to be
# able to start the service. this does only apply on FreeBSD servers.
- if cloud.distro.osfamily == 'freebsd':
- rhel_util.update_sysconfig_file(
- '/etc/rc.conf', {'salt_minion_enable': 'YES'})
+ if cloud.distro.osfamily == "freebsd":
+ bsd_utils.set_rc_config_value("salt_minion_enable", "YES")
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
- subp.subp(['service', const.srv_name, 'restart'], capture=False)
+ subp.subp(["service", const.srv_name, "restart"], capture=False)
+
# vi: ts=4 expandtab
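A trimmed-down, stand-alone version of the constants selection above; the FreeBSD check is replaced by a plain flag so the sketch runs anywhere, and the package/service names are the ones from the hunk:

    def salt_constants(cfg, is_freebsd=False):
        # Distribution defaults, as in the diff above.
        if is_freebsd:
            defaults = {"pkg_name": "py-salt", "service_name": "salt_minion",
                        "config_dir": "/usr/local/etc/salt"}
        else:
            defaults = {"pkg_name": "salt-minion", "service_name": "salt-minion",
                        "config_dir": "/etc/salt"}
        # Any of the three keys can be overridden from the salt_minion config.
        return {k: cfg.get(k, v) for k, v in defaults.items()}

    print(salt_constants({}, is_freebsd=True))
    print(salt_constants({"config_dir": "/srv/salt-etc"}))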
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 1e3f419e..b7bfb7aa 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -17,7 +17,7 @@ module does not accept any config keys.
**Internal name:** ``cc_scripts_per_boot``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
"""
@@ -25,23 +25,27 @@ module does not accept any config keys.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-SCRIPT_SUBDIR = 'per-boot'
+SCRIPT_SUBDIR = "per-boot"
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
+ runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 5966fb9a..ef102b1c 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -28,23 +28,27 @@ the system. As a result per-instance scripts will run again.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-SCRIPT_SUBDIR = 'per-instance'
+SCRIPT_SUBDIR = "per-instance"
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
+ runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index bcca859e..bf4231e7 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -26,23 +26,27 @@ be run in alphabetical order. This module does not accept any config keys.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_ONCE
frequency = PER_ONCE
-SCRIPT_SUBDIR = 'per-once'
+SCRIPT_SUBDIR = "per-once"
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
+ runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index 215703ef..e0d6c560 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -28,12 +28,11 @@ This module does not accept any config keys.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-SCRIPT_SUBDIR = 'scripts'
+SCRIPT_SUBDIR = "scripts"
def handle(name, _cfg, cloud, log, _args):
@@ -44,8 +43,13 @@ def handle(name, _cfg, cloud, log, _args):
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index e0a4bfff..1b30fa1b 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -28,29 +28,33 @@ entry under the ``vendor_data`` config key.
import os
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-SCRIPT_SUBDIR = 'vendor'
+SCRIPT_SUBDIR = "vendor"
def handle(name, cfg, cloud, log, _args):
# This is written to by the vendor data handlers
# any vendor data shell scripts get placed in runparts_path
- runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
- SCRIPT_SUBDIR)
+ runparts_path = os.path.join(
+ cloud.get_ipath_cur(), "scripts", SCRIPT_SUBDIR
+ )
- prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
+ prefix = util.get_cfg_by_path(cfg, ("vendor_data", "prefix"), [])
try:
subp.runparts(runparts_path, exe_prefix=prefix)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 4fb9b44e..67ba8ef5 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -24,15 +24,19 @@ Configuration for this module is under the ``random_seed`` config key. The
optionally be specified in encoded form, with the encoding specified in
``encoding``.
+If the cloud provides its own random seed data, it will be appended to ``data``
+before it is written to ``file``.
+
.. note::
when using a multiline value for ``data`` or specifying binary data, be
sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format
specifiers when appropriate
-Instead of specifying a data string, a command can be run to generate/collect
-the data to be written. The command should be specified as a list of args in
-the ``command`` key. If a command is specified that cannot be run, no error
-will be reported unless ``command_required`` is set to true.
+If the ``command`` key is specified, the given command will be executed. This
+will happen after ``file`` has been populated. That command's environment will
+contain the value of the ``file`` key as ``RANDOM_SEED_FILE``. If a command is
+specified that cannot be run, no error will be reported unless
+``command_required`` is set to true.
For example, to use ``pollinate`` to gather data from a
remote entropy server and write it to ``/dev/urandom``, the following could be
@@ -64,9 +68,8 @@ import os
from io import BytesIO
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
frequency = PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -74,12 +77,12 @@ LOG = logging.getLogger(__name__)
def _decode(data, encoding=None):
if not data:
- return b''
- if not encoding or encoding.lower() in ['raw']:
+ return b""
+ if not encoding or encoding.lower() in ["raw"]:
return util.encode_text(data)
- elif encoding.lower() in ['base64', 'b64']:
+ elif encoding.lower() in ["base64", "b64"]:
return base64.b64decode(data)
- elif encoding.lower() in ['gzip', 'gz']:
+ elif encoding.lower() in ["gzip", "gz"]:
return util.decomp_gzip(data, quiet=False, decode=None)
else:
raise IOError("Unknown random_seed encoding: %s" % (encoding))
@@ -96,7 +99,8 @@ def handle_random_seed_command(command, required, env=None):
if not subp.which(cmd):
if required:
raise ValueError(
- "command '{cmd}' not found but required=true".format(cmd=cmd))
+ "command '{cmd}' not found but required=true".format(cmd=cmd)
+ )
else:
LOG.debug("command '%s' not found for seed_command", cmd)
return
@@ -104,34 +108,39 @@ def handle_random_seed_command(command, required, env=None):
def handle(name, cfg, cloud, log, _args):
- mycfg = cfg.get('random_seed', {})
- seed_path = mycfg.get('file', '/dev/urandom')
- seed_data = mycfg.get('data', b'')
+ mycfg = cfg.get("random_seed", {})
+ seed_path = mycfg.get("file", "/dev/urandom")
+ seed_data = mycfg.get("data", b"")
seed_buf = BytesIO()
if seed_data:
- seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding')))
+ seed_buf.write(_decode(seed_data, encoding=mycfg.get("encoding")))
# 'random_seed' is set up by Azure datasource, and comes already in
# openstack meta_data.json
metadata = cloud.datasource.metadata
- if metadata and 'random_seed' in metadata:
- seed_buf.write(util.encode_text(metadata['random_seed']))
+ if metadata and "random_seed" in metadata:
+ seed_buf.write(util.encode_text(metadata["random_seed"]))
seed_data = seed_buf.getvalue()
if len(seed_data):
- log.debug("%s: adding %s bytes of random seed entropy to %s", name,
- len(seed_data), seed_path)
+ log.debug(
+ "%s: adding %s bytes of random seed entropy to %s",
+ name,
+ len(seed_data),
+ seed_path,
+ )
util.append_file(seed_path, seed_data)
- command = mycfg.get('command', None)
- req = mycfg.get('command_required', False)
+ command = mycfg.get("command", None)
+ req = mycfg.get("command_required", False)
try:
env = os.environ.copy()
- env['RANDOM_SEED_FILE'] = seed_path
+ env["RANDOM_SEED_FILE"] = seed_path
handle_random_seed_command(command=command, required=req, env=env)
except ValueError as e:
log.warning("handling random command [%s] failed: %s", command, e)
raise e
+
# vi: ts=4 expandtab
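A small stand-alone illustration of the raw and base64 branches of _decode() above (the gzip branch goes through util.decomp_gzip upstream and is omitted here):

    import base64

    def decode_seed(data, encoding=None):
        # Mirrors the raw and base64 branches of cc_seed_random._decode.
        if not data:
            return b""
        if not encoding or encoding.lower() == "raw":
            return data.encode() if isinstance(data, str) else data
        if encoding.lower() in ("base64", "b64"):
            return base64.b64decode(data)
        raise IOError("Unknown random_seed encoding: %s" % encoding)

    print(decode_seed("entropy-bytes"))                                     # raw passthrough
    print(decode_seed(base64.b64encode(b"entropy-bytes").decode(), "b64"))  # decoded bytes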
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 1d23d80d..eb0ca328 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -18,8 +18,11 @@ A hostname and fqdn can be provided by specifying a full domain name under the
``fqdn`` key. Alternatively, a hostname can be specified using the ``hostname``
key, and the fqdn of the cloud will be used. If a fqdn is specified with the
``hostname`` key, it will be handled properly, although it is better to use
-the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn``
-will be used.
+the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, which
+value is used is distro dependent unless the ``prefer_fqdn_over_hostname``
+option is set: if it is true, the FQDN is used on all distros; if it is
+false, the hostname is used.
This module will run in the init-local stage before networking is configured
if the hostname is set by metadata or user data on the local system.
@@ -31,22 +34,22 @@ based on initial hostname.
**Internal name:** ``cc_set_hostname``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
**Config keys**::
preserve_hostname: <true/false>
+ prefer_fqdn_over_hostname: <true/false>
fqdn: <fqdn>
hostname: <fqdn/hostname>
"""
import os
-
-from cloudinit.atomic_helper import write_json
from cloudinit import util
+from cloudinit.atomic_helper import write_json
class SetHostnameError(Exception):
@@ -59,9 +62,20 @@ class SetHostnameError(Exception):
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not setting the hostname in module %s"), name)
+ log.debug(
+ "Configuration option 'preserve_hostname' is set,"
+ " not setting the hostname in module %s",
+ name,
+ )
return
+
+ # Set prefer_fqdn_over_hostname value in distro
+ hostname_fqdn = util.get_cfg_option_bool(
+ cfg, "prefer_fqdn_over_hostname", None
+ )
+ if hostname_fqdn is not None:
+ cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)
+
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
# Check for previous successful invocation of set-hostname
@@ -70,14 +84,15 @@ def handle(name, cfg, cloud, log, _args):
# previous-hostname file which only contains the base hostname.
# TODO consolidate previous-hostname and set-hostname artifact files and
# distro._read_hostname implementation so we only validate one artifact.
- prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname")
+ prev_fn = os.path.join(cloud.get_cpath("data"), "set-hostname")
prev_hostname = {}
if os.path.exists(prev_fn):
prev_hostname = util.load_json(util.load_file(prev_fn))
- hostname_changed = (hostname != prev_hostname.get('hostname') or
- fqdn != prev_hostname.get('fqdn'))
+ hostname_changed = hostname != prev_hostname.get(
+ "hostname"
+ ) or fqdn != prev_hostname.get("fqdn")
if not hostname_changed:
- log.debug('No hostname changes. Skipping set-hostname')
+ log.debug("No hostname changes. Skipping set-hostname")
return
log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
try:
@@ -86,6 +101,7 @@ def handle(name, cfg, cloud, log, _args):
msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
util.logexc(log, msg)
raise SetHostnameError("%s: %s" % (msg, e)) from e
- write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
+ write_json(prev_fn, {"hostname": hostname, "fqdn": fqdn})
+
# vi: ts=4 expandtab
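The selection rule that the new prefer_fqdn_over_hostname plumbing enables, reduced to a plain helper; upstream passes the value to the distro via set_option(), so the function below is illustrative only:

    def pick_hostname(hostname, fqdn, prefer_fqdn_over_hostname=None,
                      distro_prefers_fqdn=False):
        # None means "not set in user-data": fall back to the distro default.
        if prefer_fqdn_over_hostname is None:
            prefer_fqdn_over_hostname = distro_prefers_fqdn
        return fqdn if prefer_fqdn_over_hostname else hostname

    print(pick_hostname("node1", "node1.example.com",
                        prefer_fqdn_over_hostname=True))   # -> node1.example.com
    print(pick_hostname("node1", "node1.example.com"))     # -> node1 (distro default)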
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index d6b5682d..d8df8e23 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -78,43 +78,36 @@ password.
"""
import re
-import sys
+from string import ascii_letters, digits
-from cloudinit.distros import ug_util
from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.distros import ug_util
from cloudinit.ssh_util import update_ssh_config
-from cloudinit import subp
-from cloudinit import util
-
-from string import ascii_letters, digits
LOG = logging.getLogger(__name__)
# We are removing certain 'painful' letters/numbers
-PW_SET = (''.join([x for x in ascii_letters + digits
- if x not in 'loLOI01']))
+PW_SET = "".join([x for x in ascii_letters + digits if x not in "loLOI01"])
-def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
+def handle_ssh_pwauth(pw_auth, distro):
"""Apply sshd PasswordAuthentication changes.
@param pw_auth: config setting from 'pw_auth'.
Best given as True, False, or "unchanged".
- @param service_cmd: The service command list (['service'])
- @param service_name: The name of the sshd service for the system.
+ @param distro: an instance of the distro class for the target distribution
@return: None"""
cfg_name = "PasswordAuthentication"
- if service_cmd is None:
- service_cmd = ["service"]
if util.is_true(pw_auth):
- cfg_val = 'yes'
+ cfg_val = "yes"
elif util.is_false(pw_auth):
- cfg_val = 'no'
+ cfg_val = "no"
else:
bmsg = "Leaving SSH config '%s' unchanged." % cfg_name
- if pw_auth is None or pw_auth.lower() == 'unchanged':
+ if pw_auth is None or pw_auth.lower() == "unchanged":
LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
else:
LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
@@ -125,39 +118,35 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
- if 'systemctl' in service_cmd:
- cmd = list(service_cmd) + ["restart", service_name]
- else:
- cmd = list(service_cmd) + [service_name, "restart"]
- subp.subp(cmd)
+ distro.manage_service("restart", distro.get_option("ssh_svcname", "ssh"))
LOG.debug("Restarted the SSH daemon.")
def handle(_name, cfg, cloud, log, args):
- if len(args) != 0:
+ if args:
# if run from command line, and give args, wipe the chpasswd['list']
password = args[0]
- if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
- del cfg['chpasswd']['list']
+ if "chpasswd" in cfg and "list" in cfg["chpasswd"]:
+ del cfg["chpasswd"]["list"]
else:
password = util.get_cfg_option_str(cfg, "password", None)
expire = True
plist = None
- if 'chpasswd' in cfg:
- chfg = cfg['chpasswd']
- if 'list' in chfg and chfg['list']:
- if isinstance(chfg['list'], list):
+ if "chpasswd" in cfg:
+ chfg = cfg["chpasswd"]
+ if "list" in chfg and chfg["list"]:
+ if isinstance(chfg["list"], list):
log.debug("Handling input for chpasswd as list.")
- plist = util.get_cfg_option_list(chfg, 'list', plist)
+ plist = util.get_cfg_option_list(chfg, "list", plist)
else:
log.debug("Handling input for chpasswd as multiline string.")
- plist = util.get_cfg_option_str(chfg, 'list', plist)
+ plist = util.get_cfg_option_str(chfg, "list", plist)
if plist:
plist = plist.splitlines()
- expire = util.get_cfg_option_bool(chfg, 'expire', expire)
+ expire = util.get_cfg_option_bool(chfg, "expire", expire)
if not plist and password:
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
@@ -176,9 +165,9 @@ def handle(_name, cfg, cloud, log, args):
users = []
# N.B. This regex is included in the documentation (i.e. the module
# docstring), so any changes to it should be reflected there.
- prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')
+ prog = re.compile(r"\$(1|2a|2y|5|6)(\$.+){2}")
for line in plist:
- u, p = line.split(':', 1)
+ u, p = line.split(":", 1)
if prog.match(p) is not None and ":" not in p:
hashed_plist_in.append(line)
hashed_users.append(u)
@@ -190,7 +179,7 @@ def handle(_name, cfg, cloud, log, args):
randlist.append("%s:%s" % (u, p))
plist_in.append("%s:%s" % (u, p))
users.append(u)
- ch_in = '\n'.join(plist_in) + '\n'
+ ch_in = "\n".join(plist_in) + "\n"
if users:
try:
log.debug("Changing password for %s:", users)
@@ -198,9 +187,10 @@ def handle(_name, cfg, cloud, log, args):
except Exception as e:
errors.append(e)
util.logexc(
- log, "Failed to set passwords with chpasswd for %s", users)
+ log, "Failed to set passwords with chpasswd for %s", users
+ )
- hashed_ch_in = '\n'.join(hashed_plist_in) + '\n'
+ hashed_ch_in = "\n".join(hashed_plist_in) + "\n"
if hashed_users:
try:
log.debug("Setting hashed password for %s:", hashed_users)
@@ -208,13 +198,19 @@ def handle(_name, cfg, cloud, log, args):
except Exception as e:
errors.append(e)
util.logexc(
- log, "Failed to set hashed passwords with chpasswd for %s",
- hashed_users)
+ log,
+ "Failed to set hashed passwords with chpasswd for %s",
+ hashed_users,
+ )
if len(randlist):
- blurb = ("Set the following 'random' passwords\n",
- '\n'.join(randlist))
- sys.stderr.write("%s\n%s\n" % blurb)
+ blurb = (
+ "Set the following 'random' passwords\n",
+ "\n".join(randlist),
+ )
+ util.multi_log(
+ "%s\n%s\n" % blurb, stderr=False, fallback_to_stdout=False
+ )
if expire:
expired_users = []
@@ -228,9 +224,7 @@ def handle(_name, cfg, cloud, log, args):
if expired_users:
log.debug("Expired passwords for: %s users", expired_users)
- handle_ssh_pwauth(
- cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd,
- service_name=cloud.distro.get_option('ssh_svcname', 'ssh'))
+ handle_ssh_pwauth(cfg.get("ssh_pwauth"), cloud.distro)
if len(errors):
log.debug("%s errors occured, re-raising the last one", len(errors))
@@ -247,7 +241,8 @@ def chpasswd(distro, plist_in, hashed=False):
u, p = pentry.split(":")
distro.set_passwd(u, p, hashed=hashed)
else:
- cmd = ['chpasswd'] + (['-e'] if hashed else [])
+ cmd = ["chpasswd"] + (["-e"] if hashed else [])
subp.subp(cmd, plist_in)
+
# vi: ts=4 expandtab
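For reference, the PW_SET expression reformatted above can be checked in isolation; it simply drops the easily-confused characters from the alphanumeric set:

    from string import ascii_letters, digits

    PW_SET = "".join(x for x in ascii_letters + digits if x not in "loLOI01")

    # 62 alphanumerics minus the 7 'painful' ones leaves 55 candidates.
    print(len(PW_SET), PW_SET)
    assert not set("loLOI01") & set(PW_SET)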
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 20ed7d2f..9f343df0 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -8,24 +8,26 @@ import sys
from textwrap import dedent
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
-from cloudinit import subp
-from cloudinit import util
-
-distros = ['ubuntu']
+distros = ["ubuntu"]
frequency = PER_INSTANCE
LOG = logging.getLogger(__name__)
-schema = {
- 'id': 'cc_snap',
- 'name': 'Snap',
- 'title': 'Install, configure and manage snapd and snap packages',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_snap",
+ "name": "Snap",
+ "title": "Install, configure and manage snapd and snap packages",
+ "description": dedent(
+ """\
This module provides a simple configuration namespace in cloud-init to
both setup snapd and install snaps.
@@ -56,9 +58,12 @@ schema = {
**Development only**: The ``squashfuse_in_container`` boolean can be
set true to install squashfuse package when in a container to enable
snap installs. Default is false.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
snap:
assertions:
00: |
@@ -69,14 +74,20 @@ schema = {
00: snap create-user --sudoer --known <snap-user>@mydomain.com
01: snap install canonical-livepatch
02: canonical-livepatch enable <AUTH_TOKEN>
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# LXC-based containers require squashfuse before snaps can be installed
snap:
commands:
00: apt-get install squashfuse -y
11: snap install emoj
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Convenience: the snap command can be omitted when specifying commands
# as a list and 'snap' will automatically be prepended.
# The following commands are equivalent:
@@ -86,7 +97,10 @@ schema = {
01: ['snap', 'install', 'vlc']
02: snap install vlc
03: 'snap install vlc'
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# You can use a list of commands
snap:
commands:
@@ -94,58 +108,64 @@ schema = {
- ['snap', 'install', 'vlc']
- snap install vlc
- 'snap install vlc'
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# You can use a list of assertions
snap:
assertions:
- signed_assertion_blob_here
- |
signed_assertion_blob_here
- """)],
- 'frequency': PER_INSTANCE,
- 'type': 'object',
- 'properties': {
- 'snap': {
- 'type': 'object',
- 'properties': {
- 'assertions': {
- 'type': ['object', 'array'], # Array of strings or dict
- 'items': {'type': 'string'},
- 'additionalItems': False, # Reject items non-string
- 'minItems': 1,
- 'minProperties': 1,
- 'uniqueItems': True,
- 'additionalProperties': {'type': 'string'},
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "snap": {
+ "type": "object",
+ "properties": {
+ "assertions": {
+ "type": ["object", "array"], # Array of strings or dict
+ "items": {"type": "string"},
+ "additionalItems": False, # Reject items non-string
+ "minItems": 1,
+ "minProperties": 1,
+ "uniqueItems": True,
+ "additionalProperties": {"type": "string"},
},
- 'commands': {
- 'type': ['object', 'array'], # Array of strings or dict
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
+ "commands": {
+ "type": ["object", "array"], # Array of strings or dict
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"},
+ ]
},
- 'additionalItems': False, # Reject non-string & non-list
- 'minItems': 1,
- 'minProperties': 1,
- 'additionalProperties': {
- 'oneOf': [
- {'type': 'string'},
- {'type': 'array', 'items': {'type': 'string'}},
+ "additionalItems": False, # Reject non-string & non-list
+ "minItems": 1,
+ "minProperties": 1,
+ "additionalProperties": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}},
],
},
},
- 'squashfuse_in_container': {
- 'type': 'boolean'
- }
+ "squashfuse_in_container": {"type": "boolean"},
},
- 'additionalProperties': False, # Reject keys not in schema
- 'required': [],
- 'minProperties': 1
+ "additionalProperties": False, # Reject keys not in schema
+ "minProperties": 1,
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
SNAP_CMD = "snap"
ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
@@ -161,45 +181,49 @@ def add_assertions(assertions):
"""
if not assertions:
return
- LOG.debug('Importing user-provided snap assertions')
+ LOG.debug("Importing user-provided snap assertions")
if isinstance(assertions, dict):
assertions = assertions.values()
elif not isinstance(assertions, list):
raise TypeError(
- 'assertion parameter was not a list or dict: {assertions}'.format(
- assertions=assertions))
+ "assertion parameter was not a list or dict: {assertions}".format(
+ assertions=assertions
+ )
+ )
- snap_cmd = [SNAP_CMD, 'ack']
+ snap_cmd = [SNAP_CMD, "ack"]
combined = "\n".join(assertions)
for asrt in assertions:
- LOG.debug('Snap acking: %s', asrt.split('\n')[0:2])
+ LOG.debug("Snap acking: %s", asrt.split("\n")[0:2])
- util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
+ util.write_file(ASSERTIONS_FILE, combined.encode("utf-8"))
subp.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
def run_commands(commands):
"""Run the provided commands provided in snap:commands configuration.
- Commands are run individually. Any errors are collected and reported
- after attempting all commands.
+ Commands are run individually. Any errors are collected and reported
+ after attempting all commands.
- @param commands: A list or dict containing commands to run. Keys of a
- dict will be used to order the commands provided as dict values.
- """
+ @param commands: A list or dict containing commands to run. Keys of a
+ dict will be used to order the commands provided as dict values.
+ """
if not commands:
return
- LOG.debug('Running user-provided snap commands')
+ LOG.debug("Running user-provided snap commands")
if isinstance(commands, dict):
# Sort commands based on dictionary key
commands = [v for _, v in sorted(commands.items())]
elif not isinstance(commands, list):
raise TypeError(
- 'commands parameter was not a list or dict: {commands}'.format(
- commands=commands))
+ "commands parameter was not a list or dict: {commands}".format(
+ commands=commands
+ )
+ )
- fixed_snap_commands = prepend_base_command('snap', commands)
+ fixed_snap_commands = prepend_base_command("snap", commands)
cmd_failures = []
for command in fixed_snap_commands:
@@ -209,8 +233,9 @@ def run_commands(commands):
except subp.ProcessExecutionError as e:
cmd_failures.append(str(e))
if cmd_failures:
- msg = 'Failures running snap commands:\n{cmd_failures}'.format(
- cmd_failures=cmd_failures)
+ msg = "Failures running snap commands:\n{cmd_failures}".format(
+ cmd_failures=cmd_failures
+ )
util.logexc(LOG, msg)
raise RuntimeError(msg)
@@ -226,23 +251,25 @@ def maybe_install_squashfuse(cloud):
util.logexc(LOG, "Package update failed")
raise
try:
- cloud.distro.install_packages(['squashfuse'])
+ cloud.distro.install_packages(["squashfuse"])
except Exception:
util.logexc(LOG, "Failed to install squashfuse")
raise
def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snap', {})
+ cfgin = cfg.get("snap", {})
if not cfgin:
- LOG.debug(("Skipping module named %s,"
- " no 'snap' key in configuration"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'snap' key in configuration", name
+ )
return
validate_cloudconfig_schema(cfg, schema)
- if util.is_true(cfgin.get('squashfuse_in_container', False)):
+ if util.is_true(cfgin.get("squashfuse_in_container", False)):
maybe_install_squashfuse(cloud)
- add_assertions(cfgin.get('assertions', []))
- run_commands(cfgin.get('commands', []))
+ add_assertions(cfgin.get("assertions", []))
+ run_commands(cfgin.get("commands", []))
+
# vi: ts=4 expandtab
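The ordering and prefixing behaviour used by run_commands() above, reduced to two stand-alone helpers (upstream's prepend_base_command additionally warns about non-snap string commands, which is left out here):

    def order_commands(commands):
        # Dict values are run in the order of their sorted keys; lists run as-is.
        if isinstance(commands, dict):
            commands = [v for _, v in sorted(commands.items())]
        return commands

    def prepend_snap(commands):
        # Bare list commands get the base 'snap' command prepended.
        fixed = []
        for cmd in commands:
            if isinstance(cmd, list) and cmd and cmd[0] != "snap":
                cmd = ["snap"] + cmd
            fixed.append(cmd)
        return fixed

    cmds = {"02": "snap install vlc", "00": ["install", "emoj"], "01": ["snap", "refresh"]}
    print(prepend_snap(order_commands(cmds)))
    # -> [['snap', 'install', 'emoj'], ['snap', 'refresh'], 'snap install vlc']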
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 95083607..3fa6c388 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -29,9 +29,8 @@ For more information about spacewalk see: https://fedorahosted.org/spacewalk/
from cloudinit import subp
-
-distros = ['redhat', 'fedora']
-required_packages = ['rhn-setup']
+distros = ["redhat", "fedora"]
+required_packages = ["rhn-setup"]
def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT"
@@ -41,7 +40,7 @@ def is_registered():
# assume we aren't registered; which is sorta ghetto...
already_registered = False
try:
- subp.subp(['rhn-profile-sync', '--verbose'], capture=False)
+ subp.subp(["rhn-profile-sync", "--verbose"], capture=False)
already_registered = True
except subp.ProcessExecutionError as e:
if e.exit_code != 1:
@@ -49,42 +48,58 @@ def is_registered():
return already_registered
-def do_register(server, profile_name,
- ca_cert_path=def_ca_cert_path,
- proxy=None, log=None,
- activation_key=None):
+def do_register(
+ server,
+ profile_name,
+ ca_cert_path=def_ca_cert_path,
+ proxy=None,
+ log=None,
+ activation_key=None,
+):
if log is not None:
- log.info("Registering using `rhnreg_ks` profile '%s'"
- " into server '%s'", profile_name, server)
- cmd = ['rhnreg_ks']
- cmd.extend(['--serverUrl', 'https://%s/XMLRPC' % server])
- cmd.extend(['--profilename', str(profile_name)])
+ log.info(
+ "Registering using `rhnreg_ks` profile '%s' into server '%s'",
+ profile_name,
+ server,
+ )
+ cmd = ["rhnreg_ks"]
+ cmd.extend(["--serverUrl", "https://%s/XMLRPC" % server])
+ cmd.extend(["--profilename", str(profile_name)])
if proxy:
cmd.extend(["--proxy", str(proxy)])
if ca_cert_path:
- cmd.extend(['--sslCACert', str(ca_cert_path)])
+ cmd.extend(["--sslCACert", str(ca_cert_path)])
if activation_key:
- cmd.extend(['--activationkey', str(activation_key)])
+ cmd.extend(["--activationkey", str(activation_key)])
subp.subp(cmd, capture=False)
def handle(name, cfg, cloud, log, _args):
- if 'spacewalk' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'spacewalk' key in configuration"), name)
+ if "spacewalk" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'spacewalk' key in configuration",
+ name,
+ )
return
- cfg = cfg['spacewalk']
- spacewalk_server = cfg.get('server')
+ cfg = cfg["spacewalk"]
+ spacewalk_server = cfg.get("server")
if spacewalk_server:
# Need to have this installed before further things will work.
cloud.distro.install_packages(required_packages)
if not is_registered():
- do_register(spacewalk_server,
- cloud.datasource.get_hostname(fqdn=True),
- proxy=cfg.get("proxy"), log=log,
- activation_key=cfg.get('activation_key'))
+ do_register(
+ spacewalk_server,
+ cloud.datasource.get_hostname(fqdn=True),
+ proxy=cfg.get("proxy"),
+ log=log,
+ activation_key=cfg.get("activation_key"),
+ )
else:
- log.debug("Skipping module named %s, 'spacewalk/server' key"
- " was not found in configuration", name)
+ log.debug(
+ "Skipping module named %s, 'spacewalk/server' key"
+ " was not found in configuration",
+ name,
+ )
+
# vi: ts=4 expandtab
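The rhnreg_ks invocation assembled by do_register() can be previewed without a real Spacewalk server; every value below is a placeholder:

    def build_rhnreg_cmd(server, profile_name, ca_cert_path=None, proxy=None,
                         activation_key=None):
        # Mirrors the argument assembly in cc_spacewalk.do_register().
        cmd = ["rhnreg_ks",
               "--serverUrl", "https://%s/XMLRPC" % server,
               "--profilename", str(profile_name)]
        if proxy:
            cmd.extend(["--proxy", str(proxy)])
        if ca_cert_path:
            cmd.extend(["--sslCACert", str(ca_cert_path)])
        if activation_key:
            cmd.extend(["--activationkey", str(activation_key)])
        return cmd

    print(build_rhnreg_cmd("spacewalk.example.com", "node1.example.com",
                           activation_key="1-EXAMPLEKEY"))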
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 05a16dbc..64486b9c 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -17,7 +17,7 @@ keys.
Authorized Keys
^^^^^^^^^^^^^^^
-Authorized keys are a list of public SSH keys that are allowed to connect to a
+Authorized keys are a list of public SSH keys that are allowed to connect to
a user account on a system. They are stored in `.ssh/authorized_keys` in that
account's home directory. Authorized keys for the default user defined in
``users`` can be specified using ``ssh_authorized_keys``. Keys
@@ -89,6 +89,10 @@ optionally, ``<key type>_certificate``, e.g. ``rsa_private: <key>``,
key types. Not all key types have to be specified, ones left unspecified will
not be used. If this config option is used, then no keys will be generated.
+When host keys are generated, the ssh-keygen output can be suppressed on the
+console by setting the ``ssh_quiet_keygen`` configuration key to true. This
+setting defaults to false, which displays the keygen output.
+
.. note::
when specifying private host keys in cloud-config, care should be taken to
ensure that the communication between the data source and the instance is
@@ -151,33 +155,33 @@ config flags are:
ssh_publish_hostkeys:
enabled: <true/false> (Defaults to true)
blacklist: <list of key types> (Defaults to [dsa])
+ ssh_quiet_keygen: <true/false>
"""
import glob
import os
import sys
+from cloudinit import ssh_util, subp, util
from cloudinit.distros import ug_util
-from cloudinit import ssh_util
-from cloudinit import subp
-from cloudinit import util
-
-GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
-KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+GENERATE_KEY_NAMES = ["rsa", "dsa", "ecdsa", "ed25519"]
+KEY_FILE_TPL = "/etc/ssh/ssh_host_%s_key"
PUBLISH_HOST_KEYS = True
# Don't publish the dsa hostkey by default since OpenSSH recommends not using
# it.
-HOST_KEY_PUBLISH_BLACKLIST = ['dsa']
+HOST_KEY_PUBLISH_BLACKLIST = ["dsa"]
CONFIG_KEY_TO_FILE = {}
PRIV_TO_PUB = {}
for k in GENERATE_KEY_NAMES:
CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
CONFIG_KEY_TO_FILE.update(
- {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
+ {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)}
+ )
CONFIG_KEY_TO_FILE.update(
- {"%s_certificate" % k: (KEY_FILE_TPL % k + "-cert.pub", 0o600)})
+ {"%s_certificate" % k: (KEY_FILE_TPL % k + "-cert.pub", 0o600)}
+ )
PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
@@ -204,57 +208,86 @@ def handle(_name, cfg, cloud, log, _args):
tgt_perms = CONFIG_KEY_TO_FILE[key][1]
util.write_file(tgt_fn, val, tgt_perms)
# set server to present the most recently identified certificate
- if '_certificate' in key:
- cert_config = {'HostCertificate': tgt_fn}
+ if "_certificate" in key:
+ cert_config = {"HostCertificate": tgt_fn}
ssh_util.update_ssh_config(cert_config)
- for (priv, pub) in PRIV_TO_PUB.items():
- if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
+ for private_type, public_type in PRIV_TO_PUB.items():
+ if (
+ public_type in cfg["ssh_keys"]
+ or private_type not in cfg["ssh_keys"]
+ ):
continue
- pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
- cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
+ private_file, public_file = (
+ CONFIG_KEY_TO_FILE[private_type][0],
+ CONFIG_KEY_TO_FILE[public_type][0],
+ )
+ cmd = ["sh", "-xc", KEY_GEN_TPL % (private_file, public_file)]
try:
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
subp.subp(cmd, capture=False)
- log.debug("Generated a key for %s from %s", pair[0], pair[1])
+ log.debug(
+ f"Generated a key for {public_file} from {private_file}"
+ )
except Exception:
- util.logexc(log, "Failed generated a key for %s from %s",
- pair[0], pair[1])
+ util.logexc(
+ log,
+ "Failed generating a key for "
+ f"{public_file} from {private_file}",
+ )
else:
# if not, generate them
- genkeys = util.get_cfg_option_list(cfg,
- 'ssh_genkeytypes',
- GENERATE_KEY_NAMES)
+ genkeys = util.get_cfg_option_list(
+ cfg, "ssh_genkeytypes", GENERATE_KEY_NAMES
+ )
lang_c = os.environ.copy()
- lang_c['LANG'] = 'C'
+ lang_c["LANG"] = "C"
for keytype in genkeys:
keyfile = KEY_FILE_TPL % (keytype)
if os.path.exists(keyfile):
continue
util.ensure_dir(os.path.dirname(keyfile))
- cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+ cmd = ["ssh-keygen", "-t", keytype, "-N", "", "-f", keyfile]
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
try:
out, err = subp.subp(cmd, capture=True, env=lang_c)
- sys.stdout.write(util.decode_binary(out))
+ if not util.get_cfg_option_bool(
+ cfg, "ssh_quiet_keygen", False
+ ):
+ sys.stdout.write(util.decode_binary(out))
+
+ gid = util.get_group_id("ssh_keys")
+ if gid != -1:
+ # perform same "sanitize permissions" as sshd-keygen
+ os.chown(keyfile, -1, gid)
+ os.chmod(keyfile, 0o640)
+ os.chmod(keyfile + ".pub", 0o644)
except subp.ProcessExecutionError as e:
err = util.decode_binary(e.stderr).lower()
- if (e.exit_code == 1 and
- err.lower().startswith("unknown key")):
+ if e.exit_code == 1 and err.lower().startswith(
+ "unknown key"
+ ):
log.debug("ssh-keygen: unknown key type '%s'", keytype)
else:
- util.logexc(log, "Failed generating key type %s to "
- "file %s", keytype, keyfile)
+ util.logexc(
+ log,
+ "Failed generating key type %s to file %s",
+ keytype,
+ keyfile,
+ )
if "ssh_publish_hostkeys" in cfg:
host_key_blacklist = util.get_cfg_option_list(
- cfg["ssh_publish_hostkeys"], "blacklist",
- HOST_KEY_PUBLISH_BLACKLIST)
+ cfg["ssh_publish_hostkeys"],
+ "blacklist",
+ HOST_KEY_PUBLISH_BLACKLIST,
+ )
publish_hostkeys = util.get_cfg_option_bool(
- cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
+ cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS
+ )
else:
host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
publish_hostkeys = PUBLISH_HOST_KEYS
@@ -270,15 +303,18 @@ def handle(_name, cfg, cloud, log, _args):
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
- disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
- ssh_util.DISABLE_USER_OPTS)
+ disable_root_opts = util.get_cfg_option_str(
+ cfg, "disable_root_opts", ssh_util.DISABLE_USER_OPTS
+ )
keys = []
- if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True):
+ if util.get_cfg_option_bool(cfg, "allow_public_ssh_keys", True):
keys = cloud.get_public_ssh_keys() or []
else:
- log.debug('Skipping import of publish SSH keys per '
- 'config setting: allow_public_ssh_keys=False')
+ log.debug(
+ "Skipping import of publish SSH keys per "
+ "config setting: allow_public_ssh_keys=False"
+ )
if "ssh_authorized_keys" in cfg:
cfgkeys = cfg["ssh_authorized_keys"]
@@ -298,12 +334,12 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
if disable_root:
if not user:
user = "NONE"
- key_prefix = disable_root_opts.replace('$USER', user)
- key_prefix = key_prefix.replace('$DISABLE_USER', 'root')
+ key_prefix = disable_root_opts.replace("$USER", user)
+ key_prefix = key_prefix.replace("$DISABLE_USER", "root")
else:
- key_prefix = ''
+ key_prefix = ""
- ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
+ ssh_util.setup_user_keys(keys, "root", options=key_prefix)
def get_public_host_keys(blacklist=None):
@@ -313,18 +349,21 @@ def get_public_host_keys(blacklist=None):
@returns: List of keys, each formatted as a two-element tuple.
e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')]
"""
- public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,)
+ public_key_file_tmpl = "%s.pub" % (KEY_FILE_TPL,)
key_list = []
blacklist_files = []
if blacklist:
# Convert blacklist to filenames:
# 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub'
- blacklist_files = [public_key_file_tmpl % (key_type,)
- for key_type in blacklist]
+ blacklist_files = [
+ public_key_file_tmpl % (key_type,) for key_type in blacklist
+ ]
# Get list of public key files and filter out blacklisted files.
- file_list = [hostfile for hostfile
- in glob.glob(public_key_file_tmpl % ('*',))
- if hostfile not in blacklist_files]
+ file_list = [
+ hostfile
+ for hostfile in glob.glob(public_key_file_tmpl % ("*",))
+ if hostfile not in blacklist_files
+ ]
# Read host key files, retrieve first two fields as a tuple and
# append that tuple to key_list.
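What the reformatted CONFIG_KEY_TO_FILE / PRIV_TO_PUB loop produces, rebuilt stand-alone with the exact paths from the hunk:

    GENERATE_KEY_NAMES = ["rsa", "dsa", "ecdsa", "ed25519"]
    KEY_FILE_TPL = "/etc/ssh/ssh_host_%s_key"

    CONFIG_KEY_TO_FILE = {}
    PRIV_TO_PUB = {}
    for k in GENERATE_KEY_NAMES:
        CONFIG_KEY_TO_FILE["%s_private" % k] = (KEY_FILE_TPL % k, 0o600)
        CONFIG_KEY_TO_FILE["%s_public" % k] = (KEY_FILE_TPL % k + ".pub", 0o600)
        CONFIG_KEY_TO_FILE["%s_certificate" % k] = (KEY_FILE_TPL % k + "-cert.pub", 0o600)
        PRIV_TO_PUB["%s_private" % k] = "%s_public" % k

    # e.g. the rsa entries:
    print(CONFIG_KEY_TO_FILE["rsa_private"])   # ('/etc/ssh/ssh_host_rsa_key', 384)
    print(PRIV_TO_PUB["rsa_private"])          # 'rsa_public'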
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 05d30ad1..020c3469 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -28,23 +28,21 @@ the keys can be specified, but defaults to ``sha256``.
import base64
import hashlib
-from cloudinit.simpletable import SimpleTable
-
+from cloudinit import ssh_util, util
from cloudinit.distros import ug_util
-from cloudinit import ssh_util
-from cloudinit import util
+from cloudinit.simpletable import SimpleTable
def _split_hash(bin_hash):
split_up = []
for i in range(0, len(bin_hash), 2):
- split_up.append(bin_hash[i:i + 2])
+ split_up.append(bin_hash[i : i + 2])
return split_up
-def _gen_fingerprint(b64_text, hash_meth='sha256'):
+def _gen_fingerprint(b64_text, hash_meth="sha256"):
if not b64_text:
- return ''
+ return ""
# TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
try:
hasher = hashlib.new(hash_meth)
@@ -54,58 +52,75 @@ def _gen_fingerprint(b64_text, hash_meth='sha256'):
# Raised when b64 not really b64...
# or when the hash type is not really
# a known/supported hash type...
- return '?'
+ return "?"
def _is_printable_key(entry):
if any([entry.keytype, entry.base64, entry.comment, entry.options]):
- if (entry.keytype and entry.keytype.lower().strip()
- in ssh_util.VALID_KEY_TYPES):
+ if (
+ entry.keytype
+ and entry.keytype.lower().strip() in ssh_util.VALID_KEY_TYPES
+ ):
return True
return False
-def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
- prefix='ci-info: '):
+def _pprint_key_entries(
+ user, key_fn, key_entries, hash_meth="sha256", prefix="ci-info: "
+):
if not key_entries:
- message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
- % (prefix, user))
- util.multi_log(message)
+ message = (
+ "%sno authorized SSH keys fingerprints found for user %s.\n"
+ % (prefix, user)
+ )
+ util.multi_log(message, console=True, stderr=False)
return
- tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
- 'Comment']
+ tbl_fields = [
+ "Keytype",
+ "Fingerprint (%s)" % (hash_meth),
+ "Options",
+ "Comment",
+ ]
tbl = SimpleTable(tbl_fields)
for entry in key_entries:
if _is_printable_key(entry):
- row = [entry.keytype or '-',
- _gen_fingerprint(entry.base64, hash_meth) or '-',
- entry.options or '-',
- entry.comment or '-']
+ row = [
+ entry.keytype or "-",
+ _gen_fingerprint(entry.base64, hash_meth) or "-",
+ entry.options or "-",
+ entry.comment or "-",
+ ]
tbl.add_row(row)
authtbl_s = tbl.get_string()
authtbl_lines = authtbl_s.splitlines()
max_len = len(max(authtbl_lines, key=len))
lines = [
- util.center("Authorized keys from %s for user %s" %
- (key_fn, user), "+", max_len),
+ util.center(
+ "Authorized keys from %s for user %s" % (key_fn, user),
+ "+",
+ max_len,
+ ),
]
lines.extend(authtbl_lines)
for line in lines:
- util.multi_log(text="%s%s\n" % (prefix, line),
- stderr=False, console=True)
+ util.multi_log(
+ text="%s%s\n" % (prefix, line), stderr=False, console=True
+ )
def handle(name, cfg, cloud, log, _args):
- if util.is_true(cfg.get('no_ssh_fingerprints', False)):
- log.debug(("Skipping module named %s, "
- "logging of SSH fingerprints disabled"), name)
+ if util.is_true(cfg.get("no_ssh_fingerprints", False)):
+ log.debug(
+ "Skipping module named %s, logging of SSH fingerprints disabled",
+ name,
+ )
return
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
- _pprint_key_entries(user_name, key_fn,
- key_entries, hash_meth)
+ _pprint_key_entries(user_name, key_fn, key_entries, hash_meth)
+
# vi: ts=4 expandtab
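A stand-alone version of the fingerprint helpers touched above; the colon-joined hex-pair output is the conventional fingerprint form and is an assumption here (the diff does not show that part of the helper), and the key material is a throwaway example:

    import base64
    import hashlib

    def split_hash(hex_hash):
        # Two hex characters per byte.
        return [hex_hash[i : i + 2] for i in range(0, len(hex_hash), 2)]

    def gen_fingerprint(b64_text, hash_meth="sha256"):
        if not b64_text:
            return ""
        try:
            hasher = hashlib.new(hash_meth)
            hasher.update(base64.b64decode(b64_text))
            return ":".join(split_hash(hasher.hexdigest()))
        except (TypeError, ValueError):
            # Not valid base64, or an unknown hash type.
            return "?"

    fake_key_b64 = base64.b64encode(b"not-a-real-key").decode()
    print(gen_fingerprint(fake_key_b64))            # sha256 fingerprint of the fake key
    print(gen_fingerprint("%%%not-base64%%%"))      # -> '?'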
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 856e5a9e..a9575c59 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -30,13 +30,13 @@ either ``lp:`` for launchpad or ``gh:`` for github to the username.
- lp:user
"""
-from cloudinit.distros import ug_util
-from cloudinit import subp
-from cloudinit import util
import pwd
+from cloudinit import subp, util
+from cloudinit.distros import ug_util
+
# https://launchpad.net/ssh-import-id
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
def handle(_name, cfg, cloud, log, args):
@@ -56,11 +56,11 @@ def handle(_name, cfg, cloud, log, args):
elist = []
for (user, user_cfg) in users.items():
import_ids = []
- if user_cfg['default']:
+ if user_cfg["default"]:
import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
else:
try:
- import_ids = user_cfg['ssh_import_id']
+ import_ids = user_cfg["ssh_import_id"]
except Exception:
log.debug("User %s is not configured for ssh_import_id", user)
continue
@@ -69,8 +69,9 @@ def handle(_name, cfg, cloud, log, args):
import_ids = util.uniq_merge(import_ids)
import_ids = [str(i) for i in import_ids]
except Exception:
- log.debug("User %s is not correctly configured for ssh_import_id",
- user)
+ log.debug(
+ "User %s is not correctly configured for ssh_import_id", user
+ )
continue
if not len(import_ids):
@@ -79,8 +80,9 @@ def handle(_name, cfg, cloud, log, args):
try:
import_ssh_ids(import_ids, user, log)
except Exception as exc:
- util.logexc(log, "ssh-import-id failed for: %s %s", user,
- import_ids)
+ util.logexc(
+ log, "ssh-import-id failed for: %s %s", user, import_ids
+ )
elist.append(exc)
if len(elist):
@@ -107,4 +109,5 @@ def import_ssh_ids(ids, user, log):
util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
+
# vi: ts=4 expandtab
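For orientation, a hedged sketch of handing ids such as gh:alice or lp:bob to the ssh-import-id CLI for one user; the helper name and the sudo wrapping below are assumptions, not necessarily what import_ssh_ids() runs:

    # Assumed invocation pattern; plain subprocess stands in for cloudinit.subp.
    import subprocess


    def import_ids_for_user(user, ids):
        if not ids:
            return
        cmd = ["sudo", "-H", "-u", user, "ssh-import-id"] + [str(i) for i in ids]
        subprocess.run(cmd, check=True)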
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
index a9de8fac..24e6099e 100644
--- a/cloudinit/config/cc_timezone.py
+++ b/cloudinit/config/cc_timezone.py
@@ -27,7 +27,6 @@ the timezone from cloud config.
"""
from cloudinit import util
-
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
@@ -46,4 +45,5 @@ def handle(name, cfg, cloud, log, args):
# Let the distro handle settings its timezone
cloud.distro.set_timezone(timezone)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index d61dc655..e469bb22 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -4,23 +4,25 @@
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
-
-UA_URL = 'https://ubuntu.com/advantage'
+UA_URL = "https://ubuntu.com/advantage"
-distros = ['ubuntu']
+distros = ["ubuntu"]
-schema = {
- 'id': 'cc_ubuntu_advantage',
- 'name': 'Ubuntu Advantage',
- 'title': 'Configure Ubuntu Advantage support services',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_ubuntu_advantage",
+ "name": "Ubuntu Advantage",
+ "title": "Configure Ubuntu Advantage support services",
+ "description": dedent(
+ """\
Attach machine to an existing Ubuntu Advantage support contract and
enable or disable support services such as Livepatch, ESM,
FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage,
@@ -32,14 +34,21 @@ schema = {
a reboot to ensure the machine is running the FIPS-compliant kernel.
See :ref:`Power State Change` for information on how to configure
cloud-init to perform this reboot.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Attach the machine to an Ubuntu Advantage support contract with a
# UA contract token obtained from %s.
ubuntu_advantage:
token: <ua_contract_token>
- """ % UA_URL), dedent("""\
+ """
+ % UA_URL
+ ),
+ dedent(
+ """\
# Attach the machine to an Ubuntu Advantage support contract enabling
# only fips and esm services. Services will only be enabled if
# the environment supports said service. Otherwise warnings will
@@ -49,7 +58,10 @@ schema = {
enable:
- fips
- esm
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Attach the machine to an Ubuntu Advantage support contract and enable
# the FIPS service. Perform a reboot once cloud-init has
# completed.
@@ -59,30 +71,35 @@ schema = {
token: <ua_contract_token>
enable:
- fips
- """)],
- 'frequency': PER_INSTANCE,
- 'type': 'object',
- 'properties': {
- 'ubuntu_advantage': {
- 'type': 'object',
- 'properties': {
- 'enable': {
- 'type': 'array',
- 'items': {'type': 'string'},
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "ubuntu_advantage": {
+ "type": "object",
+ "properties": {
+ "enable": {
+ "type": "array",
+ "items": {"type": "string"},
+ },
+ "token": {
+ "type": "string",
+ "description": "A contract token obtained from %s."
+ % UA_URL,
},
- 'token': {
- 'type': 'string',
- 'description': (
- 'A contract token obtained from %s.' % UA_URL)
- }
},
- 'required': ['token'],
- 'additionalProperties': False
+ "required": ["token"],
+ "additionalProperties": False,
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
LOG = logging.getLogger(__name__)
@@ -91,52 +108,61 @@ def configure_ua(token=None, enable=None):
"""Call ua commandline client to attach or enable services."""
error = None
if not token:
- error = ('ubuntu_advantage: token must be provided')
+ error = "ubuntu_advantage: token must be provided"
LOG.error(error)
raise RuntimeError(error)
if enable is None:
enable = []
elif isinstance(enable, str):
- LOG.warning('ubuntu_advantage: enable should be a list, not'
- ' a string; treating as a single enable')
+ LOG.warning(
+ "ubuntu_advantage: enable should be a list, not"
+ " a string; treating as a single enable"
+ )
enable = [enable]
elif not isinstance(enable, list):
- LOG.warning('ubuntu_advantage: enable should be a list, not'
- ' a %s; skipping enabling services',
- type(enable).__name__)
+ LOG.warning(
+ "ubuntu_advantage: enable should be a list, not"
+ " a %s; skipping enabling services",
+ type(enable).__name__,
+ )
enable = []
- attach_cmd = ['ua', 'attach', token]
- LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
+ attach_cmd = ["ua", "attach", token]
+ LOG.debug("Attaching to Ubuntu Advantage. %s", " ".join(attach_cmd))
try:
subp.subp(attach_cmd)
except subp.ProcessExecutionError as e:
- msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
- error=str(e))
+ msg = "Failure attaching Ubuntu Advantage:\n{error}".format(
+ error=str(e)
+ )
util.logexc(LOG, msg)
raise RuntimeError(msg) from e
enable_errors = []
for service in enable:
try:
- cmd = ['ua', 'enable', service]
+ cmd = ["ua", "enable", "--assume-yes", service]
subp.subp(cmd, capture=True)
except subp.ProcessExecutionError as e:
enable_errors.append((service, e))
if enable_errors:
for service, error in enable_errors:
msg = 'Failure enabling "{service}":\n{error}'.format(
- service=service, error=str(error))
+ service=service, error=str(error)
+ )
util.logexc(LOG, msg)
raise RuntimeError(
- 'Failure enabling Ubuntu Advantage service(s): {}'.format(
- ', '.join('"{}"'.format(service)
- for service, _ in enable_errors)))
+ "Failure enabling Ubuntu Advantage service(s): {}".format(
+ ", ".join(
+ '"{}"'.format(service) for service, _ in enable_errors
+ )
+ )
+ )
def maybe_install_ua_tools(cloud):
"""Install ubuntu-advantage-tools if not present."""
- if subp.which('ua'):
+ if subp.which("ua"):
return
try:
cloud.distro.update_package_sources()
@@ -144,7 +170,7 @@ def maybe_install_ua_tools(cloud):
util.logexc(LOG, "Package update failed")
raise
try:
- cloud.distro.install_packages(['ubuntu-advantage-tools'])
+ cloud.distro.install_packages(["ubuntu-advantage-tools"])
except Exception:
util.logexc(LOG, "Failed to install ubuntu-advantage-tools")
raise
@@ -152,27 +178,35 @@ def maybe_install_ua_tools(cloud):
def handle(name, cfg, cloud, log, args):
ua_section = None
- if 'ubuntu-advantage' in cfg:
- LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.'
- ' Expected underscore delimited "ubuntu_advantage"; will'
- ' attempt to continue.')
- ua_section = cfg['ubuntu-advantage']
- if 'ubuntu_advantage' in cfg:
- ua_section = cfg['ubuntu_advantage']
+ if "ubuntu-advantage" in cfg:
+ LOG.warning(
+ 'Deprecated configuration key "ubuntu-advantage" provided.'
+ ' Expected underscore delimited "ubuntu_advantage"; will'
+ " attempt to continue."
+ )
+ ua_section = cfg["ubuntu-advantage"]
+ if "ubuntu_advantage" in cfg:
+ ua_section = cfg["ubuntu_advantage"]
if ua_section is None:
- LOG.debug("Skipping module named %s,"
- " no 'ubuntu_advantage' configuration found", name)
+ LOG.debug(
+ "Skipping module named %s,"
+ " no 'ubuntu_advantage' configuration found",
+ name,
+ )
return
validate_cloudconfig_schema(cfg, schema)
- if 'commands' in ua_section:
+ if "commands" in ua_section:
msg = (
'Deprecated configuration "ubuntu-advantage: commands" provided.'
- ' Expected "token"')
+ ' Expected "token"'
+ )
LOG.error(msg)
raise RuntimeError(msg)
maybe_install_ua_tools(cloud)
- configure_ua(token=ua_section.get('token'),
- enable=ua_section.get('enable'))
+ configure_ua(
+ token=ua_section.get("token"), enable=ua_section.get("enable")
+ )
+
# vi: ts=4 expandtab
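A simplified restatement of the attach/enable flow configure_ua() drives through the ua CLI, using plain subprocess instead of cloudinit.subp and collecting failures instead of raising one combined RuntimeError:

    # Sketch only; the real module logs and re-raises via util.logexc/RuntimeError.
    import subprocess


    def attach_and_enable(token, services=()):
        subprocess.run(["ua", "attach", token], check=True)
        failures = []
        for service in services:
            try:
                subprocess.run(
                    ["ua", "enable", "--assume-yes", service], check=True
                )
            except subprocess.CalledProcessError as exc:
                failures.append((service, exc))
        return failures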
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 2d1d2b32..44a3bdb4 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -5,55 +5,66 @@
import os
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
+from cloudinit import subp, temp_utils, type_utils, util
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import temp_utils
-from cloudinit import type_utils
-from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['ubuntu']
-schema = {
- 'id': 'cc_ubuntu_drivers',
- 'name': 'Ubuntu Drivers',
- 'title': 'Interact with third party drivers in Ubuntu.',
- 'description': dedent("""\
+distros = ["ubuntu"]
+meta: MetaSchema = {
+ "id": "cc_ubuntu_drivers",
+ "name": "Ubuntu Drivers",
+ "title": "Interact with third party drivers in Ubuntu.",
+ "description": dedent(
+ """\
This module interacts with the 'ubuntu-drivers' command to install
- third party driver packages."""),
- 'distros': distros,
- 'examples': [dedent("""\
+ third party driver packages."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
drivers:
nvidia:
license-accepted: true
- """)],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'drivers': {
- 'type': 'object',
- 'additionalProperties': False,
- 'properties': {
- 'nvidia': {
- 'type': 'object',
- 'additionalProperties': False,
- 'required': ['license-accepted'],
- 'properties': {
- 'license-accepted': {
- 'type': 'boolean',
- 'description': ("Do you accept the NVIDIA driver"
- " license?"),
+ """
+ )
+ ],
+ "frequency": frequency,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "drivers": {
+ "type": "object",
+ "additionalProperties": False,
+ "properties": {
+ "nvidia": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ["license-accepted"],
+ "properties": {
+ "license-accepted": {
+ "type": "boolean",
+ "description": (
+ "Do you accept the NVIDIA driver license?"
+ ),
},
- 'version': {
- 'type': 'string',
- 'description': (
- 'The version of the driver to install (e.g.'
+ "version": {
+ "type": "string",
+ "description": (
+ "The version of the driver to install (e.g."
' "390", "410"). Defaults to the latest'
- ' version.'),
+ " version."
+ ),
},
},
},
@@ -62,9 +73,10 @@ schema = {
},
}
OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
- "ubuntu-drivers: error: argument <command>: invalid choice: 'install'")
+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install'"
+)
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
# Use a debconf template to configure a global debconf variable
@@ -97,10 +109,11 @@ db_x_loadtemplatefile "$1" cloud-init
def install_drivers(cfg, pkg_install_func):
if not isinstance(cfg, dict):
raise TypeError(
- "'drivers' config expected dict, found '%s': %s" %
- (type_utils.obj_name(cfg), cfg))
+ "'drivers' config expected dict, found '%s': %s"
+ % (type_utils.obj_name(cfg), cfg)
+ )
- cfgpath = 'nvidia/license-accepted'
+ cfgpath = "nvidia/license-accepted"
# Call translate_bool to ensure that we treat string values like "yes" as
# acceptance and _don't_ treat string values like "nah" as acceptance
# because they're True-ish
@@ -109,46 +122,56 @@ def install_drivers(cfg, pkg_install_func):
LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
return
- if not subp.which('ubuntu-drivers'):
- LOG.debug("'ubuntu-drivers' command not available. "
- "Installing ubuntu-drivers-common")
- pkg_install_func(['ubuntu-drivers-common'])
+ if not subp.which("ubuntu-drivers"):
+ LOG.debug(
+ "'ubuntu-drivers' command not available. "
+ "Installing ubuntu-drivers-common"
+ )
+ pkg_install_func(["ubuntu-drivers-common"])
- driver_arg = 'nvidia'
- version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version')
+ driver_arg = "nvidia"
+ version_cfg = util.get_cfg_by_path(cfg, "nvidia/version")
if version_cfg:
- driver_arg += ':{}'.format(version_cfg)
+ driver_arg += ":{}".format(version_cfg)
- LOG.debug("Installing and activating NVIDIA drivers (%s=%s, version=%s)",
- cfgpath, nv_acc, version_cfg if version_cfg else 'latest')
+ LOG.debug(
+ "Installing and activating NVIDIA drivers (%s=%s, version=%s)",
+ cfgpath,
+ nv_acc,
+ version_cfg if version_cfg else "latest",
+ )
# Register and set debconf selection linux/nvidia/latelink = true
tdir = temp_utils.mkdtemp(needs_exe=True)
- debconf_file = os.path.join(tdir, 'nvidia.template')
- debconf_script = os.path.join(tdir, 'nvidia-debconf.sh')
+ debconf_file = os.path.join(tdir, "nvidia.template")
+ debconf_script = os.path.join(tdir, "nvidia-debconf.sh")
try:
util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT)
util.write_file(
debconf_script,
util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
- mode=0o755)
+ mode=0o755,
+ )
subp.subp([debconf_script, debconf_file])
except Exception as e:
util.logexc(
- LOG, "Failed to register NVIDIA debconf template: %s", str(e))
+ LOG, "Failed to register NVIDIA debconf template: %s", str(e)
+ )
raise
finally:
if os.path.isdir(tdir):
util.del_dir(tdir)
try:
- subp.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
+ subp.subp(["ubuntu-drivers", "install", "--gpgpu", driver_arg])
except subp.ProcessExecutionError as exc:
if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
- LOG.warning('the available version of ubuntu-drivers is'
- ' too old to perform requested driver installation')
- elif 'No drivers found for installation.' in exc.stdout:
- LOG.warning('ubuntu-drivers found no drivers for installation')
+ LOG.warning(
+ "the available version of ubuntu-drivers is"
+ " too old to perform requested driver installation"
+ )
+ elif "No drivers found for installation." in exc.stdout:
+ LOG.warning("ubuntu-drivers found no drivers for installation")
raise
@@ -158,4 +181,4 @@ def handle(name, cfg, cloud, log, _args):
return
validate_cloudconfig_schema(cfg, schema)
- install_drivers(cfg['drivers'], cloud.distro.install_packages)
+ install_drivers(cfg["drivers"], cloud.distro.install_packages)
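A sketch of the command install_drivers() ends up assembling: an optional version from the config turns the nvidia argument into, for example, nvidia:390 before the ubuntu-drivers call; plain subprocess stands in for cloudinit.subp here:

    # Sketch of the final invocation; debconf template handling is omitted.
    import subprocess


    def run_ubuntu_drivers(version=None):
        driver_arg = "nvidia"
        if version:
            driver_arg += ":{}".format(version)
        subprocess.run(
            ["ubuntu-drivers", "install", "--gpgpu", driver_arg], check=True
        )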
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 03fffb96..f0aa9b0f 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -9,27 +9,28 @@
"""
Update Etc Hosts
----------------
-**Summary:** update ``/etc/hosts``
+**Summary:** update the hosts file (usually ``/etc/hosts``)
-This module will update the contents of ``/etc/hosts`` based on the
-hostname/fqdn specified in config. Management of ``/etc/hosts`` is controlled
-using ``manage_etc_hosts``. If this is set to false, cloud-init will not manage
-``/etc/hosts`` at all. This is the default behavior.
+This module will update the contents of the local hosts database (hosts file;
+usually ``/etc/hosts``) based on the hostname/fqdn specified in config.
+Management of the hosts file is controlled using ``manage_etc_hosts``. If this
+is set to false, cloud-init will not manage the hosts file at all. This is the
+default behavior.
-If set to ``true`` or ``template``, cloud-init will generate ``/etc/hosts``
+If set to ``true`` or ``template``, cloud-init will generate the hosts file
using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the
``/etc/cloud/templates/hosts.tmpl`` template, the strings ``$hostname`` and
``$fqdn`` will be replaced with the hostname and fqdn respectively.
If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will not
-rewrite ``/etc/hosts`` entirely, but rather will ensure that a entry for the
-fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
-``ping <hostname>`` will ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
+rewrite the hosts file entirely, but rather will ensure that an entry for the
+fqdn with a distribution dependent ip is present (i.e. ``ping <hostname>`` will
+ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
.. note::
if ``manage_etc_hosts`` is set ``true`` or ``template``, the contents
- of ``/etc/hosts`` will be updated every boot. to make any changes to
- ``/etc/hosts`` persistant they must be made in
+ of the hosts file will be updated every boot. To make any changes to
+ the hosts file persistent they must be made in
``/etc/cloud/templates/hosts.tmpl``
.. note::
@@ -38,7 +39,7 @@ fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
**Internal name:** ``cc_update_etc_hosts``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -49,9 +50,7 @@ fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
hostname: <fqdn/hostname>
"""
-from cloudinit import templater
-from cloudinit import util
-
+from cloudinit import templater, util
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
@@ -59,35 +58,48 @@ frequency = PER_ALWAYS
def handle(name, cfg, cloud, log, _args):
manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
- if util.translate_bool(manage_hosts, addons=['template']):
+
+ hosts_fn = cloud.distro.hosts_fn
+
+ if util.translate_bool(manage_hosts, addons=["template"]):
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warning(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(
+ "Option 'manage_etc_hosts' was set, but no hostname was found"
+ )
return
# Render from a template file
- tpl_fn_name = cloud.get_template_filename("hosts.%s" %
- (cloud.distro.osfamily))
+ tpl_fn_name = cloud.get_template_filename(
+ "hosts.%s" % (cloud.distro.osfamily)
+ )
if not tpl_fn_name:
- raise RuntimeError(("No hosts template could be"
- " found for distro %s") %
- (cloud.distro.osfamily))
+ raise RuntimeError(
+ "No hosts template could be found for distro %s"
+ % (cloud.distro.osfamily)
+ )
- templater.render_to_file(tpl_fn_name, '/etc/hosts',
- {'hostname': hostname, 'fqdn': fqdn})
+ templater.render_to_file(
+ tpl_fn_name, hosts_fn, {"hostname": hostname, "fqdn": fqdn}
+ )
elif manage_hosts == "localhost":
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warning(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(
+ "Option 'manage_etc_hosts' was set, but no hostname was found"
+ )
return
- log.debug("Managing localhost in /etc/hosts")
+ log.debug("Managing localhost in %s", hosts_fn)
cloud.distro.update_etc_hosts(hostname, fqdn)
else:
- log.debug(("Configuration option 'manage_etc_hosts' is not set,"
- " not managing /etc/hosts in module %s"), name)
+ log.debug(
+ "Configuration option 'manage_etc_hosts' is not set,"
+ " not managing %s in module %s",
+ hosts_fn,
+ name,
+ )
+
# vi: ts=4 expandtab
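The hosts.tmpl template referenced above substitutes $hostname and $fqdn. A minimal illustration of that substitution using string.Template rather than cloud-init's own templater; the template content is an invented example:

    # Invented template content; real systems use /etc/cloud/templates/hosts.tmpl.
    from string import Template

    HOSTS_TMPL = "127.0.1.1 $fqdn $hostname\n127.0.0.1 localhost\n"


    def render_hosts(hostname, fqdn):
        return Template(HOSTS_TMPL).substitute(hostname=hostname, fqdn=fqdn)


    print(render_hosts("node1", "node1.example.com"))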
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index d5f4eb5a..09f6f6da 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -20,39 +20,52 @@ is set, then the hostname will not be altered.
**Internal name:** ``cc_update_hostname``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
**Config keys**::
preserve_hostname: <true/false>
+ prefer_fqdn_over_hostname: <true/false>
fqdn: <fqdn>
hostname: <fqdn/hostname>
"""
import os
-from cloudinit.settings import PER_ALWAYS
from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not updating the hostname in module %s"), name)
+ log.debug(
+ "Configuration option 'preserve_hostname' is set,"
+ " not updating the hostname in module %s",
+ name,
+ )
return
+ # Set prefer_fqdn_over_hostname value in distro
+ hostname_fqdn = util.get_cfg_option_bool(
+ cfg, "prefer_fqdn_over_hostname", None
+ )
+ if hostname_fqdn is not None:
+ cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)
+
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
- prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
+ prev_fn = os.path.join(cloud.get_cpath("data"), "previous-hostname")
log.debug("Updating hostname to %s (%s)", fqdn, hostname)
cloud.distro.update_hostname(hostname, fqdn, prev_fn)
except Exception:
- util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn,
- hostname)
+ util.logexc(
+ log, "Failed to update the hostname to %s (%s)", fqdn, hostname
+ )
raise
+
# vi: ts=4 expandtab
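A hypothetical helper showing the decision that prefer_fqdn_over_hostname feeds into, namely which of the two names should be applied; this is an illustration, not the distro code itself:

    # Hypothetical helper; the distro performs the equivalent choice internally.
    def pick_hostname(hostname, fqdn, prefer_fqdn_over_hostname=False):
        if prefer_fqdn_over_hostname and fqdn:
            return fqdn
        return hostname or fqdn


    assert pick_hostname("node1", "node1.example.com", True) == "node1.example.com"
    assert pick_hostname("node1", "node1.example.com", False) == "node1"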
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index ac4a4410..ef77a799 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -127,12 +127,12 @@ config keys for an entry in ``users`` are as follows:
uid: <user id>
"""
+from cloudinit import log as logging
+
# Ensure this is aliased to a name not 'distros'
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
from cloudinit.distros import ug_util
-from cloudinit import log as logging
-
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -149,26 +149,31 @@ def handle(name, cfg, cloud, _log, _args):
for (user, config) in users.items():
ssh_redirect_user = config.pop("ssh_redirect_user", False)
if ssh_redirect_user:
- if 'ssh_authorized_keys' in config or 'ssh_import_id' in config:
+ if "ssh_authorized_keys" in config or "ssh_import_id" in config:
raise ValueError(
- 'Not creating user %s. ssh_redirect_user cannot be'
- ' provided with ssh_import_id or ssh_authorized_keys' %
- user)
- if ssh_redirect_user not in (True, 'default'):
+ "Not creating user %s. ssh_redirect_user cannot be"
+ " provided with ssh_import_id or ssh_authorized_keys"
+ % user
+ )
+ if ssh_redirect_user not in (True, "default"):
raise ValueError(
- 'Not creating user %s. Invalid value of'
- ' ssh_redirect_user: %s. Expected values: true, default'
- ' or false.' % (user, ssh_redirect_user))
+ "Not creating user %s. Invalid value of"
+ " ssh_redirect_user: %s. Expected values: true, default"
+ " or false." % (user, ssh_redirect_user)
+ )
if default_user is None:
LOG.warning(
- 'Ignoring ssh_redirect_user: %s for %s.'
- ' No default_user defined.'
- ' Perhaps missing cloud configuration users: '
- ' [default, ..].',
- ssh_redirect_user, user)
+ "Ignoring ssh_redirect_user: %s for %s."
+ " No default_user defined."
+ " Perhaps missing cloud configuration users: "
+ " [default, ..].",
+ ssh_redirect_user,
+ user,
+ )
else:
- config['ssh_redirect_user'] = default_user
- config['cloud_public_ssh_keys'] = cloud_keys
+ config["ssh_redirect_user"] = default_user
+ config["cloud_public_ssh_keys"] = cloud_keys
cloud.distro.create_user(user, **config)
+
# vi: ts=4 expandtab
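A stand-alone restatement of the ssh_redirect_user checks above: the option cannot be combined with per-user key settings and only accepts true, "default" or false; the helper name is hypothetical:

    # Restates the validation in handle(); not the module's own function.
    def validate_ssh_redirect(user, config):
        value = config.get("ssh_redirect_user", False)
        if not value:
            return False
        if "ssh_authorized_keys" in config or "ssh_import_id" in config:
            raise ValueError(
                "ssh_redirect_user cannot be provided with ssh_import_id or"
                " ssh_authorized_keys for user %s" % user
            )
        if value not in (True, "default"):
            raise ValueError(
                "Invalid value of ssh_redirect_user for %s: %s" % (user, value)
            )
        return True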
diff --git a/cloudinit/config/cc_vyos.py b/cloudinit/config/cc_vyos.py
index c19ecfe8..a7f75316 100644
--- a/cloudinit/config/cc_vyos.py
+++ b/cloudinit/config/cc_vyos.py
@@ -33,7 +33,10 @@ from cloudinit.sources import INSTANCE_JSON_FILE
from cloudinit.stages import Init
from cloudinit.util import load_file, load_json, get_hostname_fqdn
from cloudinit.sources.DataSourceOVF import get_properties as ovf_get_properties
-from vyos.configtree import ConfigTree
+try:
+ from vyos.configtree import ConfigTree
+except ImportError as err:
+ print(f'The module cannot be imported: {err}')
# configure logging
logger = logging.getLogger(__name__)
diff --git a/cloudinit/config/cc_vyos_userdata.py b/cloudinit/config/cc_vyos_userdata.py
index 95ba82de..5ad27b31 100644
--- a/cloudinit/config/cc_vyos_userdata.py
+++ b/cloudinit/config/cc_vyos_userdata.py
@@ -18,7 +18,10 @@ import re
from pathlib import Path
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
-from vyos.configtree import ConfigTree
+try:
+ from vyos.configtree import ConfigTree
+except ImportError as err:
+ print(f'The module cannot be imported: {err}')
# configure logging
logger = logging.getLogger(__name__)
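Both VyOS modules now use the same guarded-import pattern. Shown standalone below; the fallback to None, so later code can test for it, is an assumption added here, since the patch itself only logs the failure:

    # Guarded optional import; the None fallback is an assumption, not in the patch.
    try:
        from vyos.configtree import ConfigTree
    except ImportError as err:
        ConfigTree = None
        print(f"The module cannot be imported: {err}")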
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 8601e707..37dae392 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -10,22 +10,25 @@ import base64
import os
from textwrap import dedent
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
from cloudinit import util
-
+from cloudinit.config.schema import (
+ MetaSchema,
+ get_meta_doc,
+ validate_cloudconfig_schema,
+)
+from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
DEFAULT_OWNER = "root:root"
DEFAULT_PERMS = 0o644
-UNKNOWN_ENC = 'text/plain'
+DEFAULT_DEFER = False
+UNKNOWN_ENC = "text/plain"
LOG = logging.getLogger(__name__)
-distros = ['all']
+distros = ["all"]
# The schema definition for each cloud-config module is a strict contract for
# describing supported configuration parameters for each cloud-config section.
@@ -34,14 +37,22 @@ distros = ['all']
# configuration.
supported_encoding_types = [
- 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64',
- 'base64']
+ "gz",
+ "gzip",
+ "gz+base64",
+ "gzip+base64",
+ "gz+b64",
+ "gzip+b64",
+ "b64",
+ "base64",
+]
-schema = {
- 'id': 'cc_write_files',
- 'name': 'Write Files',
- 'title': 'write arbitrary files',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_write_files",
+ "name": "Write Files",
+ "title": "write arbitrary files",
+ "description": dedent(
+ """\
Write out arbitrary content to files, optionally setting permissions.
Parent folders in the path are created if absent.
Content can be specified in plain text or binary. Data encoded with
@@ -57,10 +68,12 @@ schema = {
Do not write files under /tmp during boot because of a race with
systemd-tmpfiles-clean that can cause temp files to get cleaned during
the early boot process. Use /run/somedir instead to avoid race
- LP:1707222."""),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ LP:1707222."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Write out base64 encoded content to /etc/sysconfig/selinux
write_files:
- encoding: b64
@@ -68,16 +81,20 @@ schema = {
owner: root:root
path: /etc/sysconfig/selinux
permissions: '0644'
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Appending content to an existing file
write_files:
- content: |
15 * * * * root ship_logs
path: /etc/crontab
append: true
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Provide gziped binary content
write_files:
- encoding: gzip
@@ -85,110 +102,177 @@ schema = {
H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
path: /usr/bin/hello
permissions: '0755'
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Create an empty file on the system
write_files:
- path: /root/CLOUD_INIT_WAS_HERE
- """)],
- 'frequency': frequency,
- 'type': 'object',
- 'properties': {
- 'write_files': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'path': {
- 'type': 'string',
- 'description': dedent("""\
+ """
+ ),
+ dedent(
+ """\
+ # Defer writing the file until after the package (Nginx) is
+ # installed and its user is created alongside
+ write_files:
+ - path: /etc/nginx/conf.d/example.com.conf
+ content: |
+ server {
+ server_name example.com;
+ listen 80;
+ root /var/www;
+ location / {
+ try_files $uri $uri/ $uri.html =404;
+ }
+ }
+ owner: 'nginx:nginx'
+ permissions: '0640'
+ defer: true
+ """
+ ),
+ ],
+ "frequency": frequency,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "write_files": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "string",
+ "description": dedent(
+ """\
Path of the file to which ``content`` is decoded
and written
- """),
+ """
+ ),
},
- 'content': {
- 'type': 'string',
- 'default': '',
- 'description': dedent("""\
+ "content": {
+ "type": "string",
+ "default": "",
+ "description": dedent(
+ """\
Optional content to write to the provided ``path``.
When content is present and encoding is not '%s',
decode the content prior to writing. Default:
**''**
- """ % UNKNOWN_ENC),
+ """
+ % UNKNOWN_ENC
+ ),
},
- 'owner': {
- 'type': 'string',
- 'default': DEFAULT_OWNER,
- 'description': dedent("""\
+ "owner": {
+ "type": "string",
+ "default": DEFAULT_OWNER,
+ "description": dedent(
+ """\
Optional owner:group to chown on the file. Default:
**{owner}**
- """.format(owner=DEFAULT_OWNER)),
+ """.format(
+ owner=DEFAULT_OWNER
+ )
+ ),
},
- 'permissions': {
- 'type': 'string',
- 'default': oct(DEFAULT_PERMS).replace('o', ''),
- 'description': dedent("""\
+ "permissions": {
+ "type": "string",
+ "default": oct(DEFAULT_PERMS).replace("o", ""),
+ "description": dedent(
+ """\
Optional file permissions to set on ``path``
represented as an octal string '0###'. Default:
**'{perms}'**
- """.format(perms=oct(DEFAULT_PERMS).replace('o', ''))),
+ """.format(
+ perms=oct(DEFAULT_PERMS).replace("o", "")
+ )
+ ),
},
- 'encoding': {
- 'type': 'string',
- 'default': UNKNOWN_ENC,
- 'enum': supported_encoding_types,
- 'description': dedent("""\
+ "encoding": {
+ "type": "string",
+ "default": UNKNOWN_ENC,
+ "enum": supported_encoding_types,
+ "description": dedent(
+ """\
Optional encoding type of the content. Default is
**text/plain** and no content decoding is
performed. Supported encoding types are:
- %s.""" % ", ".join(supported_encoding_types)),
+ %s."""
+ % ", ".join(supported_encoding_types)
+ ),
},
- 'append': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
+ "append": {
+ "type": "boolean",
+ "default": False,
+ "description": dedent(
+ """\
Whether to append ``content`` to existing file if
``path`` exists. Default: **false**.
- """),
+ """
+ ),
+ },
+ "defer": {
+ "type": "boolean",
+ "default": DEFAULT_DEFER,
+ "description": dedent(
+ """\
+ Defer writing the file until 'final' stage, after
+ users were created, and packages were installed.
+ Default: **{defer}**.
+ """.format(
+ defer=DEFAULT_DEFER
+ )
+ ),
},
},
- 'required': ['path'],
- 'additionalProperties': False
+ "required": ["path"],
+ "additionalProperties": False,
},
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
def handle(name, cfg, _cloud, log, _args):
- files = cfg.get('write_files')
- if not files:
- log.debug(("Skipping module named %s,"
- " no/empty 'write_files' key in configuration"), name)
- return
validate_cloudconfig_schema(cfg, schema)
- write_files(name, files)
+ file_list = cfg.get("write_files", [])
+ filtered_files = [
+ f
+ for f in file_list
+ if not util.get_cfg_option_bool(f, "defer", DEFAULT_DEFER)
+ ]
+ if not filtered_files:
+ log.debug(
+ "Skipping module named %s,"
+ " no/empty 'write_files' key in configuration",
+ name,
+ )
+ return
+ write_files(name, filtered_files)
def canonicalize_extraction(encoding_type):
if not encoding_type:
- encoding_type = ''
+ encoding_type = ""
encoding_type = encoding_type.lower().strip()
- if encoding_type in ['gz', 'gzip']:
- return ['application/x-gzip']
- if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']:
- return ['application/base64', 'application/x-gzip']
+ if encoding_type in ["gz", "gzip"]:
+ return ["application/x-gzip"]
+ if encoding_type in ["gz+base64", "gzip+base64", "gz+b64", "gzip+b64"]:
+ return ["application/base64", "application/x-gzip"]
# Yaml already encodes binary data as base64 if it is given to the
# yaml file as binary, so those will be automatically decoded for you.
# But the above b64 is just for people that are more 'comfortable'
     # specifying it manually (which might be a possibility)
- if encoding_type in ['b64', 'base64']:
- return ['application/base64']
+ if encoding_type in ["b64", "base64"]:
+ return ["application/base64"]
if encoding_type:
- LOG.warning("Unknown encoding type %s, assuming %s",
- encoding_type, UNKNOWN_ENC)
+ LOG.warning(
+ "Unknown encoding type %s, assuming %s", encoding_type, UNKNOWN_ENC
+ )
return [UNKNOWN_ENC]
@@ -197,17 +281,20 @@ def write_files(name, files):
return
for (i, f_info) in enumerate(files):
- path = f_info.get('path')
+ path = f_info.get("path")
if not path:
- LOG.warning("No path provided to write for entry %s in module %s",
- i + 1, name)
+ LOG.warning(
+ "No path provided to write for entry %s in module %s",
+ i + 1,
+ name,
+ )
continue
path = os.path.abspath(path)
- extractions = canonicalize_extraction(f_info.get('encoding'))
- contents = extract_contents(f_info.get('content', ''), extractions)
- (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
- perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS)
- omode = 'ab' if util.get_cfg_option_bool(f_info, 'append') else 'wb'
+ extractions = canonicalize_extraction(f_info.get("encoding"))
+ contents = extract_contents(f_info.get("content", ""), extractions)
+ (u, g) = util.extract_usergroup(f_info.get("owner", DEFAULT_OWNER))
+ perms = decode_perms(f_info.get("permissions"), DEFAULT_PERMS)
+ omode = "ab" if util.get_cfg_option_bool(f_info, "append") else "wb"
util.write_file(path, contents, omode=omode, mode=perms)
util.chownbyname(path, u, g)
@@ -229,20 +316,20 @@ def decode_perms(perm, default):
reps.append("%o" % r)
except TypeError:
reps.append("%r" % r)
- LOG.warning(
- "Undecodable permissions %s, returning default %s", *reps)
+ LOG.warning("Undecodable permissions %s, returning default %s", *reps)
return default
def extract_contents(contents, extraction_types):
result = contents
for t in extraction_types:
- if t == 'application/x-gzip':
+ if t == "application/x-gzip":
result = util.decomp_gzip(result, quiet=False, decode=False)
- elif t == 'application/base64':
+ elif t == "application/base64":
result = base64.b64decode(result)
elif t == UNKNOWN_ENC:
pass
return result
+
# vi: ts=4 expandtab
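A round-trip sketch of the gz+b64 encoding accepted by write_files and unwound by canonicalize_extraction() and extract_contents(): gzip the payload, base64 it for the YAML, then reverse both steps:

    # Demonstrates the encoding/decoding pair; the payload is an invented example.
    import base64
    import gzip

    payload = b"#!/bin/sh\necho hello\n"
    encoded = base64.b64encode(gzip.compress(payload)).decode()

    decoded = gzip.decompress(base64.b64decode(encoded))
    assert decoded == payload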
diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py
new file mode 100644
index 00000000..1294628c
--- /dev/null
+++ b/cloudinit/config/cc_write_files_deferred.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2021 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Defer writing certain files"""
+
+from cloudinit import util
+from cloudinit.config.cc_write_files import DEFAULT_DEFER
+from cloudinit.config.cc_write_files import schema as write_files_schema
+from cloudinit.config.cc_write_files import write_files
+from cloudinit.config.schema import validate_cloudconfig_schema
+
+# meta is not used in this module, but it remains as code documentation
+#
+# id: 'cc_write_files_deferred'
+# name: 'Write Deferred Files'
+# distros: ['all'],
+# frequency: PER_INSTANCE,
+# title:
+#   write certain files, whose creation has been deferred, during
+# final stage
+# description:
+#   This module is based on `Write Files <write-files>`__, and
+#   will handle all files from the write_files list that have been
+#   marked as deferred and thus are not being processed by the
+# write-files module.
+#
+#   *Please note that this module is not exposed to the user through
+# its own dedicated top-level directive.*
+
+schema = write_files_schema
+
+
+# Not exposed, because related modules should document this behaviour
+__doc__ = None
+
+
+def handle(name, cfg, _cloud, log, _args):
+ validate_cloudconfig_schema(cfg, schema)
+ file_list = cfg.get("write_files", [])
+ filtered_files = [
+ f
+ for f in file_list
+ if util.get_cfg_option_bool(f, "defer", DEFAULT_DEFER)
+ ]
+ if not filtered_files:
+ log.debug(
+ "Skipping module named %s,"
+ " no deferred file defined in configuration",
+ name,
+ )
+ return
+ write_files(name, filtered_files)
+
+
+# vi: ts=4 expandtab
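A stand-alone illustration of how one write_files list is split between cc_write_files (defer false or absent) and this deferred module (defer true); the example entries are invented:

    # Mirrors the filtering in both handle() functions above.
    files = [
        {"path": "/run/a.txt", "content": "written early"},
        {"path": "/etc/nginx/conf.d/site.conf", "content": "x", "defer": True},
    ]

    early = [f for f in files if not f.get("defer", False)]
    late = [f for f in files if f.get("defer", False)]

    assert [f["path"] for f in early] == ["/run/a.txt"]
    assert [f["path"] for f in late] == ["/etc/nginx/conf.d/site.conf"]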
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 01fe683c..7a232689 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -16,9 +16,10 @@ entry, the config entry will be skipped.
**Internal name:** ``cc_yum_add_repo``
-**Module frequency:** per always
+**Module frequency:** always
-**Supported distros:** centos, fedora, rhel
+**Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora,
+ miraclelinux, openEuler, photon, rhel, rocky, virtuozzo
**Config keys**::
@@ -36,7 +37,18 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['centos', 'fedora', 'rhel']
+distros = [
+ "almalinux",
+ "centos",
+ "cloudlinux",
+ "eurolinux",
+ "fedora",
+ "openEuler",
+ "photon",
+ "rhel",
+ "rocky",
+ "virtuozzo",
+]
def _canonicalize_id(repo_id):
@@ -77,25 +89,34 @@ def _format_repository_config(repo_id, repo_config):
def handle(name, cfg, _cloud, log, _args):
- repos = cfg.get('yum_repos')
+ repos = cfg.get("yum_repos")
if not repos:
- log.debug(("Skipping module named %s,"
- " no 'yum_repos' configuration found"), name)
+ log.debug(
+ "Skipping module named %s, no 'yum_repos' configuration found",
+ name,
+ )
return
- repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
- '/etc/yum.repos.d/')
+ repo_base_path = util.get_cfg_option_str(
+ cfg, "yum_repo_dir", "/etc/yum.repos.d/"
+ )
repo_locations = {}
repo_configs = {}
for (repo_id, repo_config) in repos.items():
canon_repo_id = _canonicalize_id(repo_id)
repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
if os.path.exists(repo_fn_pth):
- log.info("Skipping repo %s, file %s already exists!",
- repo_id, repo_fn_pth)
+ log.info(
+ "Skipping repo %s, file %s already exists!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
elif canon_repo_id in repo_locations:
- log.info("Skipping repo %s, file %s already pending!",
- repo_id, repo_fn_pth)
+ log.info(
+ "Skipping repo %s, file %s already pending!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
if not repo_config:
repo_config = {}
@@ -107,21 +128,29 @@ def handle(name, cfg, _cloud, log, _args):
n_repo_config[k] = v
repo_config = n_repo_config
missing_required = 0
- for req_field in ['baseurl']:
+ for req_field in ["baseurl"]:
if req_field not in repo_config:
- log.warning(("Repository %s does not contain a %s"
- " configuration 'required' entry"),
- repo_id, req_field)
+ log.warning(
+ "Repository %s does not contain a %s"
+ " configuration 'required' entry",
+ repo_id,
+ req_field,
+ )
missing_required += 1
if not missing_required:
repo_configs[canon_repo_id] = repo_config
repo_locations[canon_repo_id] = repo_fn_pth
else:
- log.warning("Repository %s is missing %s required fields, "
- "skipping!", repo_id, missing_required)
+ log.warning(
+ "Repository %s is missing %s required fields, skipping!",
+ repo_id,
+ missing_required,
+ )
for (c_repo_id, path) in repo_locations.items():
- repo_blob = _format_repository_config(c_repo_id,
- repo_configs.get(c_repo_id))
+ repo_blob = _format_repository_config(
+ c_repo_id, repo_configs.get(c_repo_id)
+ )
util.write_file(path, repo_blob)
+
# vi: ts=4 expandtab
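A rough equivalent of what _format_repository_config() produces for one canonicalized repo id, an INI section destined for /etc/yum.repos.d/<id>.repo; ConfigParser stands in for the module's own formatting details:

    # Rough equivalent only; example repo id and values are invented.
    import io
    from configparser import ConfigParser


    def format_repo(repo_id, repo_config):
        parser = ConfigParser()
        parser.add_section(repo_id)
        for key, value in repo_config.items():
            parser.set(repo_id, key, str(value))
        buf = io.StringIO()
        parser.write(buf)
        return buf.getvalue()


    print(format_repo("epel-testing", {"baseurl": "https://example.com/repo",
                                       "enabled": 1}))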
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index 05855b0c..be444cce 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -5,22 +5,24 @@
"""zypper_add_repo: Add zyper repositories to the system"""
-import configobj
import os
from textwrap import dedent
-from cloudinit.config.schema import get_schema_doc
+import configobj
+
from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_ALWAYS
-distros = ['opensuse', 'sles']
+distros = ["opensuse", "sles"]
-schema = {
- 'id': 'cc_zypper_add_repo',
- 'name': 'ZypperAddRepo',
- 'title': 'Configure zypper behavior and add zypper repositories',
- 'description': dedent("""\
+meta: MetaSchema = {
+ "id": "cc_zypper_add_repo",
+ "name": "ZypperAddRepo",
+ "title": "Configure zypper behavior and add zypper repositories",
+ "description": dedent(
+ """\
Configure zypper behavior by modifying /etc/zypp/zypp.conf. The
configuration writer is "dumb" and will simply append the provided
configuration options to the configuration file. Option settings
@@ -28,9 +30,12 @@ schema = {
is parsed. The file is in INI format.
Add repositories to the system. No validation is performed on the
repository file entries, it is assumed the user is familiar with
- the zypper repository file format."""),
- 'distros': distros,
- 'examples': [dedent("""\
+ the zypper repository file format."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
zypper:
repos:
- id: opensuse-oss
@@ -49,51 +54,59 @@ schema = {
servicesdir: /etc/zypp/services.d
download.use_deltarpm: true
# any setting in /etc/zypp/zypp.conf
- """)],
- 'frequency': PER_ALWAYS,
- 'type': 'object',
- 'properties': {
- 'zypper': {
- 'type': 'object',
- 'properties': {
- 'repos': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'id': {
- 'type': 'string',
- 'description': dedent("""\
+ """
+ )
+ ],
+ "frequency": PER_ALWAYS,
+}
+
+schema = {
+ "type": "object",
+ "properties": {
+ "zypper": {
+ "type": "object",
+ "properties": {
+ "repos": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": dedent(
+ """\
The unique id of the repo, used when
writing
- /etc/zypp/repos.d/<id>.repo.""")
+ /etc/zypp/repos.d/<id>.repo."""
+ ),
+ },
+ "baseurl": {
+ "type": "string",
+ "format": "uri", # built-in format type
+                                "description": "The base repository URL",
},
- 'baseurl': {
- 'type': 'string',
- 'format': 'uri', # built-in format type
- 'description': 'The base repositoy URL'
- }
},
- 'required': ['id', 'baseurl'],
- 'additionalProperties': True
+ "required": ["id", "baseurl"],
+ "additionalProperties": True,
},
- 'minItems': 1
+ "minItems": 1,
},
- 'config': {
- 'type': 'object',
- 'description': dedent("""\
+ "config": {
+ "type": "object",
+ "description": dedent(
+ """\
                         Any supported zypp.conf key is written to
- /etc/zypp/zypp.conf'""")
- }
+ /etc/zypp/zypp.conf'"""
+ ),
+ },
},
- 'required': [],
- 'minProperties': 1, # Either config or repo must be provided
- 'additionalProperties': False, # only repos and config allowed
+ "minProperties": 1, # Either config or repo must be provided
+ "additionalProperties": False, # only repos and config allowed
}
- }
+ },
}
-__doc__ = get_schema_doc(schema) # Supplement python help()
+__doc__ = get_meta_doc(meta, schema) # Supplement python help()
LOG = logging.getLogger(__name__)
@@ -139,34 +152,43 @@ def _write_repos(repos, repo_base_path):
valid_repos = {}
for index, user_repo_config in enumerate(repos):
# Skip on absent required keys
- missing_keys = set(['id', 'baseurl']).difference(set(user_repo_config))
+ missing_keys = set(["id", "baseurl"]).difference(set(user_repo_config))
if missing_keys:
LOG.warning(
"Repo config at index %d is missing required config keys: %s",
- index, ",".join(missing_keys))
+ index,
+ ",".join(missing_keys),
+ )
continue
- repo_id = user_repo_config.get('id')
+ repo_id = user_repo_config.get("id")
canon_repo_id = _canonicalize_id(repo_id)
repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
if os.path.exists(repo_fn_pth):
- LOG.info("Skipping repo %s, file %s already exists!",
- repo_id, repo_fn_pth)
+ LOG.info(
+ "Skipping repo %s, file %s already exists!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
elif repo_id in valid_repos:
- LOG.info("Skipping repo %s, file %s already pending!",
- repo_id, repo_fn_pth)
+ LOG.info(
+ "Skipping repo %s, file %s already pending!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
# Do some basic key formatting
repo_config = dict(
(k.lower().strip().replace("-", "_"), v)
for k, v in user_repo_config.items()
- if k and k != 'id')
+ if k and k != "id"
+ )
# Set defaults if not present
- for field in ['enabled', 'autorefresh']:
+ for field in ["enabled", "autorefresh"]:
if field not in repo_config:
- repo_config[field] = '1'
+ repo_config[field] = "1"
valid_repos[repo_id] = (repo_fn_pth, repo_config)
@@ -179,39 +201,44 @@ def _write_zypp_config(zypper_config):
"""Write to the default zypp configuration file /etc/zypp/zypp.conf"""
if not zypper_config:
return
- zypp_config = '/etc/zypp/zypp.conf'
+ zypp_config = "/etc/zypp/zypp.conf"
zypp_conf_content = util.load_file(zypp_config)
- new_settings = ['# Added via cloud.cfg']
+ new_settings = ["# Added via cloud.cfg"]
for setting, value in zypper_config.items():
- if setting == 'configdir':
- msg = 'Changing the location of the zypper configuration is '
+ if setting == "configdir":
+ msg = "Changing the location of the zypper configuration is "
msg += 'not supported, skipping "configdir" setting'
LOG.warning(msg)
continue
if value:
- new_settings.append('%s=%s' % (setting, value))
+ new_settings.append("%s=%s" % (setting, value))
if len(new_settings) > 1:
- new_config = zypp_conf_content + '\n'.join(new_settings)
+ new_config = zypp_conf_content + "\n".join(new_settings)
else:
new_config = zypp_conf_content
util.write_file(zypp_config, new_config)
def handle(name, cfg, _cloud, log, _args):
- zypper_section = cfg.get('zypper')
+ zypper_section = cfg.get("zypper")
if not zypper_section:
- LOG.debug(("Skipping module named %s,"
- " no 'zypper' relevant configuration found"), name)
+ LOG.debug(
+ "Skipping module named %s,"
+ " no 'zypper' relevant configuration found",
+ name,
+ )
return
- repos = zypper_section.get('repos')
+ repos = zypper_section.get("repos")
if not repos:
- LOG.debug(("Skipping module named %s,"
- " no 'repos' configuration found"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'repos' configuration found", name
+ )
return
- zypper_config = zypper_section.get('config', {})
- repo_base_path = zypper_config.get('reposdir', '/etc/zypp/repos.d/')
+ zypper_config = zypper_section.get("config", {})
+ repo_base_path = zypper_config.get("reposdir", "/etc/zypp/repos.d/")
_write_zypp_config(zypper_config)
_write_repos(repos, repo_base_path)
+
# vi: ts=4 expandtab
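A simplified version of the zypp.conf handling in _write_zypp_config(): settings become key=value lines appended after a marker comment, and the unsupported configdir key is skipped, as above:

    # Simplified sketch; the real function reads and writes /etc/zypp/zypp.conf.
    def append_zypp_settings(existing_conf, zypper_config):
        new_settings = ["# Added via cloud.cfg"]
        for setting, value in zypper_config.items():
            if setting == "configdir" or not value:
                continue
            new_settings.append("%s=%s" % (setting, value))
        if len(new_settings) == 1:
            return existing_conf
        return existing_conf + "\n".join(new_settings)


    print(append_zypp_settings("## zypp.conf\n", {"download.use_deltarpm": "true"}))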
diff --git a/cloudinit/config/cloud-init-schema.json b/cloudinit/config/cloud-init-schema.json
new file mode 100644
index 00000000..2d43d06a
--- /dev/null
+++ b/cloudinit/config/cloud-init-schema.json
@@ -0,0 +1,560 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$defs": {
+ "apt_configure.mirror": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["arches"],
+ "properties": {
+ "arches": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ },
+ "uri": {"type": "string", "format": "uri"},
+ "search": {
+ "type": "array",
+ "items": {"type": "string", "format": "uri"},
+ "minItems": 1
+ },
+ "search_dns": {
+ "type": "boolean"
+ },
+ "keyid": {"type": "string"},
+ "key": {"type": "string"},
+ "keyserver": {"type": "string"}
+ }
+ },
+ "minItems": 1
+ },
+ "ca_certs.properties": {
+ "type": "object",
+ "properties": {
+ "remove-defaults": {
+ "description": "Deprecated key name. Use remove_defaults instead.",
+ "type": "boolean",
+ "default": false
+ },
+ "remove_defaults": {
+ "description": "Remove default CA certificates if true. Default: false",
+ "type": "boolean",
+ "default": false
+ },
+ "trusted": {
+ "description": "List of trusted CA certificates to add.",
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ }
+ },
+ "additionalProperties": false,
+ "minProperties": 1
+ },
+ "cc_apk_configure": {
+ "type": "object",
+ "properties": {
+ "apk_repos": {
+ "type": "object",
+ "properties": {
+ "preserve_repositories": {
+ "type": "boolean",
+ "default": false,
+              "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within an apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\n\n The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``."
+ },
+ "alpine_repo": {
+ "type": ["object", "null"],
+ "properties": {
+ "base_url": {
+ "type": "string",
+ "default": "https://alpine.global.ssl.fastly.net/alpine",
+ "description": "The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine``"
+ },
+ "community_enabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to add the Community repo to the repositories file. By default the Community repo is not included."
+ },
+ "testing_enabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended to use the Testing repo on a machine running the ``Edge`` version of Alpine as packages installed from Testing may have dependencies that conflict with those in non-Edge Main or Community repos."
+ },
+ "version": {
+ "type": "string",
+ "description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)"
+ }
+ },
+ "required": ["version"],
+ "minProperties": 1,
+ "additionalProperties": false
+ },
+ "local_repo_base_url": {
+ "type": "string",
+ "description": "The base URL of an Alpine repository containing unofficial packages"
+ }
+ },
+ "minProperties": 1,
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_apt_configure": {
+ "properties": {
+ "apt": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "preserve_sources_list": {
+ "type": "boolean",
+ "default": false,
+ "description": "By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this behavior and preserve the sources list from the pristine image, set ``preserve_sources_list`` to ``true``.\n\nThe ``preserve_sources_list`` option overrides all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to ``sources.list.d``."
+ },
+ "disable_suites": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "uniqueItems": true,
+              "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list``, it will be ignored. For convenience, several aliases are provided for ``disable_suites``:\n\n - ``updates`` => ``$RELEASE-updates``\n - ``backports`` => ``$RELEASE-backports``\n - ``security`` => ``$RELEASE-security``\n - ``proposed`` => ``$RELEASE-proposed``\n - ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out."
+ },
+ "primary": {
+ "$ref": "#/$defs/apt_configure.mirror",
+              "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environments, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n\n - fqdn of this host per cloud metadata,\n - localdomain,\n - domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the ``primary`` key is defined, but not the ``security`` key, then the configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either, the following defaults are used:\n\n - ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n - ``security`` => ``http://security.ubuntu.com/ubuntu``"
+ },
+ "security": {
+ "$ref": "#/$defs/apt_configure.mirror",
+ "description": "Please refer to the primary config documentation"
+ },
+ "add_apt_repo_match": {
+ "type": "string",
+ "default": "^[\\w-]+:\\w",
+ "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``"
+ },
+ "debconf_selections": {
+ "type": "object",
+ "minProperties": 1,
+ "patternProperties": {
+ "^.+$": {
+ "type": "string"
+ }
+ },
+              "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess four distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n\n - ``pkgname`` is the name of the package.\n - ``question`` is the name of the question.\n - ``type`` is the type of question.\n - ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.0.1``"
+ },
+ "sources_list": {
+ "type": "string",
+              "description": "Specifies a custom template for rendering ``sources.list``. If no ``sources_list`` template is given, cloud-init will use a sane default. Within this template, the following strings will be replaced with the appropriate values:\n\n - ``$MIRROR``\n - ``$RELEASE``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$KEY_FILE``"
+ },
+ "conf": {
+ "type": "string",
+ "description": "Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline APT configuration, make sure to follow yaml syntax."
+ },
+ "https_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify https APT proxy. https proxy url is specified in the format ``https://[[user][:pass]@]host[:port]/``."
+ },
+ "http_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify http APT proxy. http proxy url is specified in the format ``http://[[user][:pass]@]host[:port]/``."
+ },
+ "proxy": {
+ "type": "string",
+ "description": "Alias for defining a http APT proxy."
+ },
+ "ftp_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify ftp APT proxy. ftp proxy url is specified in the format ``ftp://[[user][:pass]@]host[:port]/``."
+ },
+ "sources": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "type": "object",
+ "properties": {
+ "source": {
+ "type": "string"
+ },
+ "keyid": {
+ "type": "string"
+ },
+ "key": {
+ "type": "string"
+ },
+ "keyserver": {
+ "type": "string"
+ },
+ "filename": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false,
+ "minProperties": 1
+ }
+ },
+ "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``"
+ }
+ }
+ }
+ }
+ },
+ "cc_apt_pipelining": {
+ "type": "object",
+ "properties": {
+ "apt_pipelining": {
+ "oneOf": [
+ {"type": "integer"},
+ {"type": "boolean"},
+ {"type": "string", "enum": ["none", "unchanged", "os"]}
+ ]
+ }
+ }
+ },
+ "cc_bootcmd": {
+ "type": "object",
+ "properties": {
+ "bootcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"}
+ ]
+ },
+ "additionalItems": false,
+ "minItems": 1
+ }
+ }
+ },
+ "cc_byobu": {
+ "type": "object",
+ "properties": {
+ "byobu_by_default": {
+ "type": "string",
+ "enum": [
+ "enable-system",
+ "enable-user",
+ "disable-system",
+ "disable-user",
+ "enable",
+ "disable",
+ "user",
+ "system"
+ ]
+ }
+ }
+ },
+ "cc_ca_certs": {
+ "type": "object",
+ "properties": {
+ "ca_certs": {
+ "$ref": "#/$defs/ca_certs.properties"
+ },
+ "ca-certs": {
+ "$ref": "#/$defs/ca_certs.properties"
+ }
+ }
+ },
+ "cc_chef": {
+ "type": "object",
+ "properties": {
+ "chef": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "directories": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "uniqueItems": true,
+ "description": "Create the necessary directories for chef to run. By default, it creates the following directories:\n\n - ``/etc/chef``\n - ``/var/log/chef``\n - ``/var/lib/chef``\n - ``/var/cache/chef``\n - ``/var/backups/chef``\n - ``/var/run/chef``"
+ },
+ "validation_cert": {
+ "type": "string",
+ "description": "Optional string to be written to file validation_key. Special value ``system`` means set use existing file."
+ },
+ "validation_key": {
+ "type": "string",
+ "default": "/etc/chef/validation.pem",
+ "description": "Optional path for validation_cert. default to ``/etc/chef/validation.pem``"
+ },
+ "firstboot_path": {
+ "type": "string",
+ "default": "/etc/chef/firstboot.json",
+ "description": "Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults to ``/etc/chef/firstboot.json``"
+ },
+ "exec": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set true if we should run or not run chef (defaults to false, unless a gem installed is requested where this will then default to true)."
+ },
+ "client_key": {
+ "type": "string",
+ "default": "/etc/chef/client.pem",
+ "description": "Optional path for client_cert. Default to ``/etc/chef/client.pem``."
+ },
+ "encrypted_data_bag_secret": {
+ "type": "string",
+ "default": null,
+ "description": "Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to null, meaning that chef will have to look at the path ``/etc/chef/encrypted_data_bag_secret`` for it."
+ },
+ "environment": {
+ "type": "string",
+ "default": "_default",
+ "description": "Specifies which environment chef will use. By default, it will use the ``_default`` configuration."
+ },
+ "file_backup_path": {
+ "type": "string",
+ "default": "/var/backups/chef",
+ "description": "Specifies the location in which backup files are stored. By default, it uses the ``/var/backups/chef`` location."
+ },
+ "file_cache_path": {
+ "type": "string",
+ "default": "/var/cache/chef",
+ "description": "Specifies the location in which chef cache files will be saved. By default, it uses the ``/var/cache/chef`` location."
+ },
+ "json_attribs": {
+ "type": "string",
+ "default": "/etc/chef/firstboot.json",
+ "description": "Specifies the location in which some chef json data is stored. By default, it uses the ``/etc/chef/firstboot.json`` location."
+ },
+ "log_level": {
+ "type": "string",
+ "default": ":info",
+ "description": "Defines the level of logging to be stored in the log file. By default this value is set to ``:info``."
+ },
+ "log_location": {
+ "type": "string",
+ "default": "/var/log/chef/client.log",
+ "description": "Specifies the location of the chef lof file. By default, the location is specified at ``/var/log/chef/client.log``."
+ },
+ "node_name": {
+ "type": "string",
+ "description": "The name of the node to run. By default, we will use th instance id as the node name."
+ },
+ "omnibus_url": {
+ "type": "string",
+ "default": "https://www.chef.io/chef/install.sh",
+ "description": "Omnibus URL if chef should be installed through Omnibus. By default, it uses the ``https://www.chef.io/chef/install.sh``."
+ },
+ "omnibus_url_retries": {
+ "type": "integer",
+ "default": 5,
+ "description": "The number of retries that will be attempted to reach the Omnibus URL. Default is 5."
+ },
+ "omnibus_version": {
+ "type": "string",
+ "description": "Optional version string to require for omnibus install."
+ },
+ "pid_file": {
+ "type": "string",
+ "default": "/var/run/chef/client.pid",
+ "description": "The location in which a process identification number (pid) is saved. By default, it saves in the ``/var/run/chef/client.pid`` location."
+ },
+ "server_url": {
+ "type": "string",
+ "description": "The URL for the chef server"
+ },
+ "show_time": {
+ "type": "boolean",
+ "default": true,
+ "description": "Show time in chef logs"
+ },
+ "ssl_verify_mode": {
+ "type": "string",
+ "default": ":verify_none",
+ "description": "Set the verify mode for HTTPS requests. We can have two possible values for this parameter:\n\n - ``:verify_none``: No validation of SSL certificates.\n - ``:verify_peer``: Validate all SSL certificates.\n\nBy default, the parameter is set as ``:verify_none``."
+ },
+ "validation_name": {
+ "type": "string",
+ "description": "The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during the initial Chef Infra Client run."
+ },
+ "force_install": {
+ "type": "boolean",
+ "default": false,
+ "description": "If set to ``true``, forces chef installation, even if it is already installed."
+ },
+ "initial_attributes": {
+ "type": "object",
+ "items": {"type": "string"},
+ "description": "Specify a list of initial attributes used by the cookbooks."
+ },
+ "install_type": {
+ "type": "string",
+ "default": "packages",
+ "enum": [
+ "packages",
+ "gems",
+ "omnibus"
+ ],
+ "description": "The type of installation for chef. It can be one of the following values:\n\n - ``packages``\n - ``gems``\n - ``omnibus``"
+ },
+ "run_list": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "A run list for a first boot json."
+ },
+ "chef_license": {
+ "type": "string",
+ "description": "string that indicates if user accepts or not license related to some of chef products"
+ }
+ }
+ }
+ }
+ },
+ "cc_debug": {
+ "type": "object",
+ "properties": {
+ "debug": {
+ "additionalProperties": false,
+ "minProperties": 1,
+ "type": "object",
+ "properties": {
+ "verbose": {
+ "description": "Should always be true for this module",
+ "type": "boolean"
+ },
+ "output": {
+ "description": "Location to write output. Defaults to console + log",
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "cc_disable_ec2_metadata": {
+ "type": "object",
+ "properties": {
+ "disable_ec2_metadata": {
+ "default": false,
+ "description": "Set true to disable IPv4 routes to EC2 metadata. Default: false.",
+ "type": "boolean"
+ }
+ }
+ },
+ "cc_disk_setup": {
+ "type": "object",
+ "properties": {
+ "device_aliases": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<alias_name>",
+ "type": "string",
+ "description": "Path to disk to be aliased by this name."
+ }
+ }
+ },
+ "disk_setup": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<alias name/path>",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "table_type": {
+ "type": "string",
+ "default": "mbr",
+ "enum": ["mbr", "gpt"],
+ "description": "Specifies the partition table type, either ``mbr`` or ``gpt``. Default: ``mbr``."
+ },
+ "layout": {
+ "type": ["string", "boolean", "array"],
+ "default": false,
+ "oneOf": [
+ {"type": "string", "enum": ["remove"]},
+ {"type": "boolean"},
+ {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type": "array",
+ "items": {"type": "integer"},
+ "minItems": 2,
+ "maxItems": 2
+ }
+ ]
+ }
+ }
+ ],
+ "description": "If set to ``true``, a single partition using all the space on the device will be created. If set to ``false``, no partitions will be created. If set to ``remove``, any existing partition table will be purged. Partitions can be specified by providing a list to ``layout``, where each entry in the list is either a size or a list containing a size and the numerical value for a partition type. The size for partitions is specified in **percentage** of disk space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space). Default: ``false``."
+ },
+ "overwrite": {
+ "type": "boolean",
+ "default": false,
+ "description": "Controls whether this module tries to be safe about writing partition tables or not. If ``overwrite: false`` is set, the device will be checked for a partition table and for a file system and if either is found, the operation will be skipped. If ``overwrite: true`` is set, no checks will be performed. Using ``overwrite: true`` is **dangerous** and can lead to data loss, so double check that the correct device has been specified if using this option. Default: ``false``"
+ }
+ }
+ }
+ }
+ },
+ "fs_setup": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "label": {
+ "type": "string",
+ "description": "Label for the filesystem."
+ },
+ "filesystem": {
+ "type": "string",
+ "description": "Filesystem type to create. E.g., ``ext4`` or ``btrfs``"
+ },
+ "device": {
+ "type": "string",
+ "description": "Specified either as a path or as an alias in the format ``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device. If specifying device using the ``<device name>.<partition number>`` format, the value of ``partition`` will be overwritten."
+ },
+ "partition": {
+ "type": ["string", "integer"],
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": ["auto", "any", "none"]
+ },
+ {"type": "integer"}
+ ],
+ "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``."
+ },
+ "overwrite": {
+ "type": "boolean",
+ "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``"
+ },
+ "replace_fs": {
+ "type": "string",
+ "description": "Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``."
+ },
+ "extra_opts": {
+ "type": ["array", "string"],
+ "items": {"type": "string"},
+ "description": "Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` directly."
+ },
+ "cmd": {
+ "type": ["array", "string"],
+ "items": {"type": "string"},
+ "description": "Optional command to run to create the filesystem. Can include string substitutions of the other ``fs_setup`` config keys. This is only necessary if you need to override the default command."
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "allOf": [
+ { "$ref": "#/$defs/cc_apk_configure" },
+ { "$ref": "#/$defs/cc_apt_configure" },
+ { "$ref": "#/$defs/cc_apt_pipelining" },
+ { "$ref": "#/$defs/cc_bootcmd" },
+ { "$ref": "#/$defs/cc_byobu" },
+ { "$ref": "#/$defs/cc_ca_certs" },
+ { "$ref": "#/$defs/cc_chef" },
+ { "$ref": "#/$defs/cc_debug" },
+ { "$ref": "#/$defs/cc_disable_ec2_metadata" },
+ { "$ref": "#/$defs/cc_disk_setup" }
+ ]
+}
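
The base schema added above keeps one subschema per config module under ``$defs`` and composes them through ``allOf``, so a single user-data document is validated against every module at once. A minimal sketch of that composition, using a hand-copied ``cc_apt_pipelining`` fragment rather than the real cloud-init-schema.json shipped with the package (jsonschema's Draft4Validator resolves the ``#/$defs/...`` references as plain JSON pointers):

    from jsonschema import Draft4Validator

    # Hand-copied fragment for illustration only; the real composite schema
    # is loaded from cloud-init-schema.json by get_schema() in schema.py below.
    schema = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "$defs": {
            "cc_apt_pipelining": {
                "type": "object",
                "properties": {
                    "apt_pipelining": {
                        "oneOf": [
                            {"type": "integer"},
                            {"type": "boolean"},
                            {"type": "string", "enum": ["none", "unchanged", "os"]},
                        ]
                    }
                },
            }
        },
        "allOf": [{"$ref": "#/$defs/cc_apt_pipelining"}],
    }

    validator = Draft4Validator(schema)
    print(list(validator.iter_errors({"apt_pipelining": "os"})))  # [] -> valid
    for err in validator.iter_errors({"apt_pipelining": "sometimes"}):
        print(err.message)  # e.g. "'sometimes' is not valid under any of the given schemas"
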
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 456bab2c..1f969c97 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,22 +1,28 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""schema.py: Set of module functions for processing cloud-config schema."""
-from cloudinit.cmd.devel import read_cfg_paths
-from cloudinit import importer
-from cloudinit.util import find_modules, load_file
-
import argparse
-from collections import defaultdict
-from copy import deepcopy
+import json
import logging
import os
import re
import sys
+from collections import defaultdict
+from copy import deepcopy
+from functools import partial
+
import yaml
-_YAML_MAP = {True: 'true', False: 'false', None: 'null'}
-SCHEMA_UNDEFINED = b'UNDEFINED'
-CLOUD_CONFIG_HEADER = b'#cloud-config'
+from cloudinit import importer
+from cloudinit.cmd.devel import read_cfg_paths
+from cloudinit.importer import MetaSchema
+from cloudinit.util import error, find_modules, load_file
+
+error = partial(error, sys_exit=True)
+LOG = logging.getLogger(__name__)
+
+_YAML_MAP = {True: "true", False: "false", None: "null"}
+CLOUD_CONFIG_HEADER = b"#cloud-config"
SCHEMA_DOC_TMPL = """
{name}
{title_underbar}
@@ -34,11 +40,12 @@ SCHEMA_DOC_TMPL = """
{property_doc}
{examples}
"""
-SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}) {description}"
SCHEMA_LIST_ITEM_TMPL = (
- '{prefix}Each item in **{prop_name}** list supports the following keys:')
-SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
-SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
+ "{prefix}Each item in **{prop_name}** list supports the following keys:"
+)
+SCHEMA_EXAMPLES_HEADER = "\n**Examples**::\n\n"
+SCHEMA_EXAMPLES_SPACER_TEMPLATE = "\n # --- Example{0} ---"
class SchemaValidationError(ValueError):
@@ -52,10 +59,12 @@ class SchemaValidationError(ValueError):
"""
self.schema_errors = schema_errors
error_messages = [
- '{0}: {1}'.format(config_key, message)
- for config_key, message in schema_errors]
+ "{0}: {1}".format(config_key, message)
+ for config_key, message in schema_errors
+ ]
message = "Cloud config schema errors: {0}".format(
- ', '.join(error_messages))
+ ", ".join(error_messages)
+ )
super(SchemaValidationError, self).__init__(message)
@@ -68,60 +77,142 @@ def is_schema_byte_string(checker, instance):
from jsonschema import Draft4Validator
except ImportError:
return False
- return (Draft4Validator.TYPE_CHECKER.is_type(instance, "string") or
- isinstance(instance, (bytes,)))
+ return Draft4Validator.TYPE_CHECKER.is_type(
+ instance, "string"
+ ) or isinstance(instance, (bytes,))
+
+
+def get_jsonschema_validator():
+ """Get metaschema validator and format checker
+
+ Older versions of jsonschema require some compatibility changes.
+
+ @returns: Tuple: (jsonschema.Validator, FormatChecker)
+ @raises: ImportError when jsonschema is not present
+ """
+ from jsonschema import Draft4Validator, FormatChecker
+ from jsonschema.validators import create
+
+ # Allow for bytes to be presented as an acceptable valid value for string
+ # type jsonschema attributes in cloud-init's schema.
+ # This allows #cloud-config to provide valid yaml "content: !!binary | ..."
+
+ strict_metaschema = deepcopy(Draft4Validator.META_SCHEMA)
+ strict_metaschema["additionalProperties"] = False
+ # This additional label allows us to specify a different name
+ # than the property key when generating docs.
+ # This is especially useful when using a "patternProperties" regex,
+ # otherwise the property label in the generated docs will be a
+ # regular expression.
+ # http://json-schema.org/understanding-json-schema/reference/object.html#pattern-properties
+ strict_metaschema["properties"]["label"] = {"type": "string"}
-def validate_cloudconfig_schema(config, schema, strict=False):
+ if hasattr(Draft4Validator, "TYPE_CHECKER"): # jsonschema 3.0+
+ type_checker = Draft4Validator.TYPE_CHECKER.redefine(
+ "string", is_schema_byte_string
+ )
+ cloudinitValidator = create(
+ meta_schema=strict_metaschema,
+ validators=Draft4Validator.VALIDATORS,
+ version="draft4",
+ type_checker=type_checker,
+ )
+ else: # jsonschema 2.6 workaround
+ types = Draft4Validator.DEFAULT_TYPES # pylint: disable=E1101
+ # Allow bytes as well as string (and disable a spurious unsupported
+ # assignment-operation pylint warning which appears because this
+ # code path isn't written against the latest jsonschema).
+ types["string"] = (str, bytes) # pylint: disable=E1137
+ cloudinitValidator = create( # pylint: disable=E1123
+ meta_schema=strict_metaschema,
+ validators=Draft4Validator.VALIDATORS,
+ version="draft4",
+ default_types=types,
+ )
+ return (cloudinitValidator, FormatChecker)
+
+
+def validate_cloudconfig_metaschema(validator, schema: dict, throw=True):
+ """Validate provided schema meets the metaschema definition. Return strict
+ Validator and FormatChecker for use in validation
+ @param validator: Draft4Validator instance used to validate the schema
+ @param schema: schema to validate
+ @param throw: Sometimes the validator and checker are required, even if
+ the schema is invalid. Toggle for whether to raise
+ SchemaValidationError or log warnings.
+
+ @raises: ImportError when jsonschema is not present
+ @raises: SchemaValidationError when the schema is invalid
+ """
+
+ from jsonschema.exceptions import SchemaError
+
+ try:
+ validator.check_schema(schema)
+ except SchemaError as err:
+ # Raise SchemaValidationError to avoid jsonschema imports at call
+ # sites
+ if throw:
+ raise SchemaValidationError(
+ schema_errors=(
+ (".".join([str(p) for p in err.path]), err.message),
+ )
+ ) from err
+ LOG.warning(
+ "Meta-schema validation failed, attempting to validate config "
+ "anyway: %s",
+ err,
+ )
+
+
+def validate_cloudconfig_schema(
+ config: dict,
+ schema: dict = None,
+ strict: bool = False,
+ strict_metaschema: bool = False,
+):
"""Validate provided config meets the schema definition.
@param config: Dict of cloud configuration settings validated against
- schema.
+ schema. Ignored if strict_metaschema=True
@param schema: jsonschema dict describing the supported schema definition
- for the cloud config module (config.cc_*).
+ for the cloud config module (config.cc_*). If None, validate against
+ global schema.
@param strict: Boolean, when True raise SchemaValidationErrors instead of
logging warnings.
+ @param strict_metaschema: Boolean, when True validates schema using strict
+ metaschema definition at runtime (currently unused)
@raises: SchemaValidationError when provided config does not validate
against the provided schema.
+ @raises: RuntimeError when provided config sourced from YAML is not a dict.
"""
+ if schema is None:
+ schema = get_schema()
try:
- from jsonschema import Draft4Validator, FormatChecker
- from jsonschema.validators import create, extend
+ (cloudinitValidator, FormatChecker) = get_jsonschema_validator()
+ if strict_metaschema:
+ validate_cloudconfig_metaschema(
+ cloudinitValidator, schema, throw=False
+ )
except ImportError:
- logging.debug(
- 'Ignoring schema validation. python-jsonschema is not present')
+ LOG.debug("Ignoring schema validation. jsonschema is not present")
return
- # Allow for bytes to be presented as an acceptable valid value for string
- # type jsonschema attributes in cloud-init's schema.
- # This allows #cloud-config to provide valid yaml "content: !!binary | ..."
- if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+
- type_checker = Draft4Validator.TYPE_CHECKER.redefine(
- 'string', is_schema_byte_string)
- cloudinitValidator = extend(Draft4Validator, type_checker=type_checker)
- else: # jsonschema 2.6 workaround
- types = Draft4Validator.DEFAULT_TYPES
- # Allow bytes as well as string (and disable a spurious
- # unsupported-assignment-operation pylint warning which appears because
- # this code path isn't written against the latest jsonschema).
- types['string'] = (str, bytes) # pylint: disable=E1137
- cloudinitValidator = create(
- meta_schema=Draft4Validator.META_SCHEMA,
- validators=Draft4Validator.VALIDATORS,
- version="draft4",
- default_types=types)
validator = cloudinitValidator(schema, format_checker=FormatChecker())
errors = ()
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
- path = '.'.join([str(p) for p in error.path])
+ path = ".".join([str(p) for p in error.path])
errors += ((path, error.message),)
if errors:
if strict:
raise SchemaValidationError(errors)
else:
- messages = ['{0}: {1}'.format(k, msg) for k, msg in errors]
- logging.warning('Invalid config:\n%s', '\n'.join(messages))
+ messages = ["{0}: {1}".format(k, msg) for k, msg in errors]
+ LOG.warning(
+ "Invalid cloud-config provided:\n%s", "\n".join(messages)
+ )
def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
@@ -136,14 +227,23 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
if not schema_errors:
return original_content
schemapaths = {}
- if cloudconfig:
- schemapaths = _schemapath_for_cloudconfig(
- cloudconfig, original_content)
errors_by_line = defaultdict(list)
error_footer = []
+ error_header = "# Errors: -------------\n{0}\n\n"
annotated_content = []
+ lines = original_content.decode().split("\n")
+ if not isinstance(cloudconfig, dict):
+ # Return a meaningful message on empty cloud-config
+ return "\n".join(
+ lines
+ + [error_header.format("# E1: Cloud-config is not a YAML dict.")]
+ )
+ if cloudconfig:
+ schemapaths = _schemapath_for_cloudconfig(
+ cloudconfig, original_content
+ )
for path, msg in schema_errors:
- match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path)
+ match = re.match(r"format-l(?P<line>\d+)\.c(?P<col>\d+).*", path)
if match:
line, col = match.groups()
errors_by_line[int(line)].append(msg)
@@ -151,24 +251,24 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
col = None
errors_by_line[schemapaths[path]].append(msg)
if col is not None:
- msg = 'Line {line} column {col}: {msg}'.format(
- line=line, col=col, msg=msg)
- lines = original_content.decode().split('\n')
+ msg = "Line {line} column {col}: {msg}".format(
+ line=line, col=col, msg=msg
+ )
error_index = 1
for line_number, line in enumerate(lines, 1):
errors = errors_by_line[line_number]
if errors:
error_label = []
for error in errors:
- error_label.append('E{0}'.format(error_index))
- error_footer.append('# E{0}: {1}'.format(error_index, error))
+ error_label.append("E{0}".format(error_index))
+ error_footer.append("# E{0}: {1}".format(error_index, error))
error_index += 1
- annotated_content.append(line + '\t\t# ' + ','.join(error_label))
+ annotated_content.append(line + "\t\t# " + ",".join(error_label))
+
else:
annotated_content.append(line)
- annotated_content.append(
- '# Errors: -------------\n{0}\n\n'.format('\n'.join(error_footer)))
- return '\n'.join(annotated_content)
+ annotated_content.append(error_header.format("\n".join(error_footer)))
+ return "\n".join(annotated_content)
def validate_cloudconfig_file(config_path, schema, annotate=False):
@@ -196,15 +296,18 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
else:
if not os.path.exists(config_path):
raise RuntimeError(
- 'Configfile {0} does not exist'.format(
- config_path
- )
+ "Configfile {0} does not exist".format(config_path)
)
content = load_file(config_path, decode=False)
if not content.startswith(CLOUD_CONFIG_HEADER):
errors = (
- ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
- config_path, CLOUD_CONFIG_HEADER.decode())),)
+ (
+ "format-l1.c1",
+ 'File {0} needs to begin with "{1}"'.format(
+ config_path, CLOUD_CONFIG_HEADER.decode()
+ ),
+ ),
+ )
error = SchemaValidationError(errors)
if annotate:
print(annotated_cloudconfig_file({}, content, error.schema_errors))
@@ -214,27 +317,36 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
except (yaml.YAMLError) as e:
line = column = 1
mark = None
- if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
- mark = getattr(e, 'context_mark')
- elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
- mark = getattr(e, 'problem_mark')
+ if hasattr(e, "context_mark") and getattr(e, "context_mark"):
+ mark = getattr(e, "context_mark")
+ elif hasattr(e, "problem_mark") and getattr(e, "problem_mark"):
+ mark = getattr(e, "problem_mark")
if mark:
line = mark.line + 1
column = mark.column + 1
- errors = (('format-l{line}.c{col}'.format(line=line, col=column),
- 'File {0} is not valid yaml. {1}'.format(
- config_path, str(e))),)
+ errors = (
+ (
+ "format-l{line}.c{col}".format(line=line, col=column),
+ "File {0} is not valid yaml. {1}".format(config_path, str(e)),
+ ),
+ )
error = SchemaValidationError(errors)
if annotate:
print(annotated_cloudconfig_file({}, content, error.schema_errors))
raise error from e
+ if not isinstance(cloudconfig, dict):
+ # Return a meaningful message on empty cloud-config
+ if not annotate:
+ raise RuntimeError("Cloud-config is not a YAML dict.")
try:
- validate_cloudconfig_schema(
- cloudconfig, schema, strict=True)
+ validate_cloudconfig_schema(cloudconfig, schema, strict=True)
except SchemaValidationError as e:
if annotate:
- print(annotated_cloudconfig_file(
- cloudconfig, content, e.schema_errors))
+ print(
+ annotated_cloudconfig_file(
+ cloudconfig, content, e.schema_errors
+ )
+ )
raise
@@ -244,29 +356,30 @@ def _schemapath_for_cloudconfig(config, original_content):
@param config: The yaml.loaded config dictionary of a cloud-config file.
@param original_content: The simple file content of the cloud-config file
"""
- # FIXME Doesn't handle multi-line lists or multi-line strings
- content_lines = original_content.decode().split('\n')
+ # TODO( handle multi-line lists or multi-line strings, inline dicts)
+ content_lines = original_content.decode().split("\n")
schema_line_numbers = {}
list_index = 0
- RE_YAML_INDENT = r'^(\s*)'
+ RE_YAML_INDENT = r"^(\s*)"
scopes = []
+ if not config:
+ return {} # No YAML config dict, no schemapaths to annotate
for line_number, line in enumerate(content_lines, 1):
indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
line = line.strip()
- if not line or line.startswith('#'):
+ if not line or line.startswith("#"):
continue
if scopes:
previous_depth, path_prefix = scopes[-1]
else:
previous_depth = -1
- path_prefix = ''
- if line.startswith('- '):
+ path_prefix = ""
+ if line.startswith("- "):
# Process list items adding a list_index to the path prefix
- previous_list_idx = '.%d' % (list_index - 1)
+ previous_list_idx = ".%d" % (list_index - 1)
if path_prefix and path_prefix.endswith(previous_list_idx):
- path_prefix = path_prefix[:-len(previous_list_idx)]
+ path_prefix = path_prefix[: -len(previous_list_idx)]
key = str(list_index)
- schema_line_numbers[key] = line_number
item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0])
item_indent += 1 # For the leading '-' character
previous_depth = indent_depth
@@ -276,53 +389,63 @@ def _schemapath_for_cloudconfig(config, original_content):
else:
# Process non-list lines setting value if present
list_index = 0
- key, value = line.split(':', 1)
- if path_prefix:
+ key, value = line.split(":", 1)
+ if path_prefix and indent_depth > previous_depth:
# Append any existing path_prefix for a fully-pathed key
- key = path_prefix + '.' + key
+ key = path_prefix + "." + key
while indent_depth <= previous_depth:
if scopes:
previous_depth, path_prefix = scopes.pop()
if list_index > 0 and indent_depth == previous_depth:
- path_prefix = '.'.join(path_prefix.split('.')[:-1])
+ path_prefix = ".".join(path_prefix.split(".")[:-1])
break
else:
previous_depth = -1
- path_prefix = ''
+ path_prefix = ""
scopes.append((indent_depth, key))
if value:
value = value.strip()
- if value.startswith('['):
- scopes.append((indent_depth + 2, key + '.0'))
+ if value.startswith("["):
+ scopes.append((indent_depth + 2, key + ".0"))
for inner_list_index in range(0, len(yaml.safe_load(value))):
- list_key = key + '.' + str(inner_list_index)
+ list_key = key + "." + str(inner_list_index)
schema_line_numbers[list_key] = line_number
schema_line_numbers[key] = line_number
return schema_line_numbers
-def _get_property_type(property_dict):
- """Return a string representing a property type from a given jsonschema."""
- property_type = property_dict.get('type', SCHEMA_UNDEFINED)
- if property_type == SCHEMA_UNDEFINED and property_dict.get('enum'):
- property_type = [
- str(_YAML_MAP.get(k, k)) for k in property_dict['enum']]
+def _get_property_type(property_dict: dict) -> str:
+ """Return a string representing a property type from a given
+ jsonschema.
+ """
+ property_type = property_dict.get("type")
+ if property_type is None:
+ if property_dict.get("enum"):
+ property_type = [
+ str(_YAML_MAP.get(k, k)) for k in property_dict["enum"]
+ ]
+ elif property_dict.get("oneOf"):
+ property_type = [
+ subschema["type"]
+ for subschema in property_dict.get("oneOf")
+ if subschema.get("type")
+ ]
if isinstance(property_type, list):
- property_type = '/'.join(property_type)
- items = property_dict.get('items', {})
- sub_property_type = items.get('type', '')
+ property_type = "/".join(property_type)
+ items = property_dict.get("items", {})
+ sub_property_type = items.get("type", "")
# Collect each item type
- for sub_item in items.get('oneOf', {}):
+ for sub_item in items.get("oneOf", {}):
if sub_property_type:
- sub_property_type += '/'
- sub_property_type += '(' + _get_property_type(sub_item) + ')'
+ sub_property_type += "/"
+ sub_property_type += "(" + _get_property_type(sub_item) + ")"
if sub_property_type:
- return '{0} of {1}'.format(property_type, sub_property_type)
- return property_type
+ return "{0} of {1}".format(property_type, sub_property_type)
+ return property_type or "UNDEFINED"
-def _parse_description(description, prefix):
- """Parse description from the schema in a format that we can better
+def _parse_description(description, prefix) -> str:
+ """Parse description from the meta in a format that we can better
display in our docs. This parser does three things:
- Guarantee that a paragraph will be in a single line
@@ -330,125 +453,269 @@ def _parse_description(description, prefix):
the first paragraph
- Proper align lists of items
- @param description: The original description in the schema.
+ @param description: The original description in the meta.
@param prefix: The number of spaces used to align the current description
"""
list_paragraph = prefix * 3
description = re.sub(r"(\S)\n(\S)", r"\1 \2", description)
+ description = re.sub(r"\n\n", r"\n\n{}".format(prefix), description)
description = re.sub(
- r"\n\n", r"\n\n{}".format(prefix), description)
- description = re.sub(
- r"\n( +)-", r"\n{}-".format(list_paragraph), description)
+ r"\n( +)-", r"\n{}-".format(list_paragraph), description
+ )
return description
-def _get_property_doc(schema, prefix=' '):
+def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
"""Return restructured text describing the supported schema properties."""
- new_prefix = prefix + ' '
+ new_prefix = prefix + " "
properties = []
- for prop_key, prop_config in schema.get('properties', {}).items():
- # Define prop_name and dscription for SCHEMA_PROPERTY_TMPL
- description = prop_config.get('description', '')
-
- properties.append(SCHEMA_PROPERTY_TMPL.format(
- prefix=prefix,
- prop_name=prop_key,
- type=_get_property_type(prop_config),
- description=_parse_description(description, prefix)))
- items = prop_config.get('items')
- if items:
- if isinstance(items, list):
- for item in items:
- properties.append(
- _get_property_doc(item, prefix=new_prefix))
- elif isinstance(items, dict) and items.get('properties'):
- properties.append(SCHEMA_LIST_ITEM_TMPL.format(
- prefix=new_prefix, prop_name=prop_key))
- new_prefix += ' '
- properties.append(_get_property_doc(items, prefix=new_prefix))
- if 'properties' in prop_config:
+ property_keys = [
+ schema.get("properties", {}),
+ schema.get("patternProperties", {}),
+ ]
+
+ for props in property_keys:
+ for prop_key, prop_config in props.items():
+ if "$ref" in prop_config:
+ # Update the defined references in subschema for doc rendering
+ ref = defs[prop_config["$ref"].replace("#/$defs/", "")]
+ prop_config.update(ref)
+ # Define prop_name and description for SCHEMA_PROPERTY_TMPL
+ description = prop_config.get("description", "")
+
+ # Define prop_name and description for SCHEMA_PROPERTY_TMPL
+ label = prop_config.get("label", prop_key)
properties.append(
- _get_property_doc(prop_config, prefix=new_prefix))
- return '\n\n'.join(properties)
+ SCHEMA_PROPERTY_TMPL.format(
+ prefix=prefix,
+ prop_name=label,
+ description=_parse_description(description, prefix),
+ prop_type=_get_property_type(prop_config),
+ )
+ )
+ items = prop_config.get("items")
+ if items:
+ if isinstance(items, list):
+ for item in items:
+ properties.append(
+ _get_property_doc(
+ item, defs=defs, prefix=new_prefix
+ )
+ )
+ elif isinstance(items, dict) and (
+ items.get("properties") or items.get("patternProperties")
+ ):
+ properties.append(
+ SCHEMA_LIST_ITEM_TMPL.format(
+ prefix=new_prefix, prop_name=label
+ )
+ )
+ new_prefix += " "
+ properties.append(
+ _get_property_doc(items, defs=defs, prefix=new_prefix)
+ )
+ if (
+ "properties" in prop_config
+ or "patternProperties" in prop_config
+ ):
+ properties.append(
+ _get_property_doc(
+ prop_config, defs=defs, prefix=new_prefix
+ )
+ )
+ return "\n\n".join(properties)
-def _get_schema_examples(schema, prefix=''):
- """Return restructured text describing the schema examples if present."""
- examples = schema.get('examples')
+def _get_examples(meta: MetaSchema) -> str:
+ """Return restructured text describing the meta examples if present."""
+ examples = meta.get("examples")
if not examples:
- return ''
+ return ""
rst_content = SCHEMA_EXAMPLES_HEADER
for count, example in enumerate(examples):
# Python2.6 is missing textwrapper.indent
- lines = example.split('\n')
- indented_lines = [' {0}'.format(line) for line in lines]
+ lines = example.split("\n")
+ indented_lines = [" {0}".format(line) for line in lines]
if rst_content != SCHEMA_EXAMPLES_HEADER:
indented_lines.insert(
- 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1))
- rst_content += '\n'.join(indented_lines)
+ 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1)
+ )
+ rst_content += "\n".join(indented_lines)
return rst_content
-def get_schema_doc(schema):
- """Return reStructured text rendering the provided jsonschema.
+def get_meta_doc(meta: MetaSchema, schema: dict = None) -> str:
+ """Return reStructured text rendering the provided metadata.
- @param schema: Dict of jsonschema to render.
- @raise KeyError: If schema lacks an expected key.
+ @param meta: Dict of metadata to render.
+ @param schema: Optional module schema, if absent, read global schema.
+ @raise KeyError: If metadata lacks an expected key.
"""
- schema_copy = deepcopy(schema)
- schema_copy['property_doc'] = _get_property_doc(schema)
- schema_copy['examples'] = _get_schema_examples(schema)
- schema_copy['distros'] = ', '.join(schema['distros'])
+
+ if schema is None:
+ schema = get_schema()
+ if not meta or not schema:
+ raise ValueError("Expected non-empty meta and schema")
+ keys = set(meta.keys())
+ expected = set(
+ {
+ "id",
+ "title",
+ "examples",
+ "frequency",
+ "distros",
+ "description",
+ "name",
+ }
+ )
+ error_message = ""
+ if expected - keys:
+ error_message = "Missing expected keys in module meta: {}".format(
+ expected - keys
+ )
+ elif keys - expected:
+ error_message = (
+ "Additional unexpected keys found in module meta: {}".format(
+ keys - expected
+ )
+ )
+ if error_message:
+ raise KeyError(error_message)
+
+ # cast away type annotation
+ meta_copy = dict(deepcopy(meta))
+ defs = schema.get("$defs", {})
+ if defs.get(meta["id"]):
+ schema = defs.get(meta["id"])
+ try:
+ meta_copy["property_doc"] = _get_property_doc(schema, defs=defs)
+ except AttributeError:
+ LOG.warning("Unable to render property_doc due to invalid schema")
+ meta_copy["property_doc"] = ""
+ meta_copy["examples"] = _get_examples(meta)
+ meta_copy["distros"] = ", ".join(meta["distros"])
# Need an underbar of the same length as the name
- schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name'])
- return SCHEMA_DOC_TMPL.format(**schema_copy)
+ meta_copy["title_underbar"] = re.sub(r".", "-", meta["name"])
+ template = SCHEMA_DOC_TMPL.format(**meta_copy)
+ return template
-FULL_SCHEMA = None
+def get_modules() -> dict:
+ configs_dir = os.path.dirname(os.path.abspath(__file__))
+ return find_modules(configs_dir)
-def get_schema():
- """Return jsonschema coalesced from all cc_* cloud-config module."""
- global FULL_SCHEMA
- if FULL_SCHEMA:
- return FULL_SCHEMA
- full_schema = {
- '$schema': 'http://json-schema.org/draft-04/schema#',
- 'id': 'cloud-config-schema', 'allOf': []}
+def load_doc(requested_modules: list) -> str:
+ """Load module docstrings
- configs_dir = os.path.dirname(os.path.abspath(__file__))
- potential_handlers = find_modules(configs_dir)
- for (_fname, mod_name) in potential_handlers.items():
- mod_locs, _looked_locs = importer.find_module(
- mod_name, ['cloudinit.config'], ['schema'])
+ Docstrings are generated on module load. Reduce, reuse, recycle.
+ """
+ docs = ""
+ all_modules = list(get_modules().values()) + ["all"]
+ invalid_docs = set(requested_modules).difference(set(all_modules))
+ if invalid_docs:
+ error(
+ "Invalid --docs value {}. Must be one of: {}".format(
+ list(invalid_docs),
+ ", ".join(all_modules),
+ )
+ )
+ for mod_name in all_modules:
+ if "all" in requested_modules or mod_name in requested_modules:
+ (mod_locs, _) = importer.find_module(
+ mod_name, ["cloudinit.config"], ["meta"]
+ )
+ if mod_locs:
+ mod = importer.import_module(mod_locs[0])
+ docs += mod.__doc__ or ""
+ return docs
+
+
+def get_schema() -> dict:
+ """Return jsonschema coalesced from all cc_* cloud-config modules."""
+ schema_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "cloud-init-schema.json"
+ )
+ full_schema = None
+ try:
+ full_schema = json.loads(load_file(schema_file))
+ except Exception as e:
+ LOG.warning("Cannot parse JSON schema file %s. %s", schema_file, e)
+ if not full_schema:
+ LOG.warning(
+ "No base JSON schema files found at %s."
+ " Setting default empty schema",
+ schema_file,
+ )
+ full_schema = {
+ "$defs": {},
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "allOf": [],
+ }
+
+ # TODO( Drop the get_modules loop when all legacy cc_* schema migrates )
+ # Supplement base_schema with any legacy modules which still contain a
+ # "schema" attribute. Legacy cc_* modules will be migrated to use the
+ # store module schema in the composite cloud-init-schema-<version>.json
+ # and will drop "schema" at that point.
+ for (_, mod_name) in get_modules().items():
+ # All cc_* modules need a "meta" attribute to represent schema defs
+ (mod_locs, _) = importer.find_module(
+ mod_name, ["cloudinit.config"], ["schema"]
+ )
if mod_locs:
mod = importer.import_module(mod_locs[0])
- full_schema['allOf'].append(mod.schema)
- FULL_SCHEMA = full_schema
+ full_schema["allOf"].append(mod.schema)
return full_schema
-def error(message):
- print(message, file=sys.stderr)
- sys.exit(1)
+def get_meta() -> dict:
+ """Return metadata coalesced from all cc_* cloud-config module."""
+ full_meta = dict()
+ for (_, mod_name) in get_modules().items():
+ mod_locs, _ = importer.find_module(
+ mod_name, ["cloudinit.config"], ["meta"]
+ )
+ if mod_locs:
+ mod = importer.import_module(mod_locs[0])
+ full_meta[mod.meta["id"]] = mod.meta
+ return full_meta
def get_parser(parser=None):
"""Return a parser for supported cmdline arguments."""
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudconfig-schema',
- description='Validate cloud-config files or document schema')
- parser.add_argument('-c', '--config-file',
- help='Path of the cloud-config yaml file to validate')
- parser.add_argument('--system', action='store_true', default=False,
- help='Validate the system cloud-config userdata')
- parser.add_argument('-d', '--docs', nargs='+',
- help=('Print schema module docs. Choices: all or'
- ' space-delimited cc_names.'))
- parser.add_argument('--annotate', action="store_true", default=False,
- help='Annotate existing cloud-config file with errors')
+ prog="cloudconfig-schema",
+ description="Validate cloud-config files or document schema",
+ )
+ parser.add_argument(
+ "-c",
+ "--config-file",
+ help="Path of the cloud-config yaml file to validate",
+ )
+ parser.add_argument(
+ "--system",
+ action="store_true",
+ default=False,
+ help="Validate the system cloud-config userdata",
+ )
+ parser.add_argument(
+ "-d",
+ "--docs",
+ nargs="+",
+ help=(
+ "Print schema module docs. Choices: all or"
+ " space-delimited cc_names."
+ ),
+ )
+ parser.add_argument(
+ "--annotate",
+ action="store_true",
+ default=False,
+ help="Annotate existing cloud-config file with errors",
+ )
return parser
@@ -456,12 +723,15 @@ def handle_schema_args(name, args):
"""Handle provided schema args and perform the appropriate actions."""
exclusive_args = [args.config_file, args.docs, args.system]
if len([arg for arg in exclusive_args if arg]) != 1:
- error('Expected one of --config-file, --system or --docs arguments')
+ error("Expected one of --config-file, --system or --docs arguments")
+ if args.annotate and args.docs:
+ error("Invalid flag combination. Cannot use --annotate with --docs")
full_schema = get_schema()
if args.config_file or args.system:
try:
validate_cloudconfig_file(
- args.config_file, full_schema, args.annotate)
+ args.config_file, full_schema, args.annotate
+ )
except SchemaValidationError as e:
if not args.annotate:
error(str(e))
@@ -474,25 +744,17 @@ def handle_schema_args(name, args):
cfg_name = args.config_file
print("Valid cloud-config:", cfg_name)
elif args.docs:
- schema_ids = [subschema['id'] for subschema in full_schema['allOf']]
- schema_ids += ['all']
- invalid_docs = set(args.docs).difference(set(schema_ids))
- if invalid_docs:
- error('Invalid --docs value {0}. Must be one of: {1}'.format(
- list(invalid_docs), ', '.join(schema_ids)))
- for subschema in full_schema['allOf']:
- if 'all' in args.docs or subschema['id'] in args.docs:
- print(get_schema_doc(subschema))
+ print(load_doc(args.docs))
def main():
"""Tool to validate schema of a cloud-config file or print schema docs."""
parser = get_parser()
- handle_schema_args('cloudconfig-schema', parser.parse_args())
+ handle_schema_args("cloudconfig-schema", parser.parse_args())
return 0
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
# vi: ts=4 expandtab
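
With the refactor above, ``get_schema()`` loads the versioned base JSON schema and only falls back to per-module ``schema`` attributes for legacy modules, while ``validate_cloudconfig_schema()`` validates against that global schema when no schema is passed. A minimal sketch of driving the refactored API from Python (assumes cloud-init 22.1 and python-jsonschema are installed; exact error text may differ):

    from cloudinit.config.schema import (
        SchemaValidationError,
        get_schema,
        validate_cloudconfig_schema,
    )

    schema = get_schema()  # base cloud-init-schema.json plus legacy module schemas

    try:
        # bootcmd must be a non-empty list, so a bare string should fail
        validate_cloudconfig_schema({"bootcmd": "echo hi"}, schema, strict=True)
    except SchemaValidationError as err:
        # e.g. "Cloud config schema errors: bootcmd: 'echo hi' is not of type 'array'"
        print(err)

    validate_cloudconfig_schema({"bootcmd": ["echo hi"]}, schema, strict=True)  # passes
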
diff --git a/cloudinit/config/tests/test_apt_pipelining.py b/cloudinit/config/tests/test_apt_pipelining.py
deleted file mode 100644
index 2a6bb10b..00000000
--- a/cloudinit/config/tests/test_apt_pipelining.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests cc_apt_pipelining handler"""
-
-import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
-
-class TestAptPipelining(CiTestCase):
-
- @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
- def test_not_disabled_by_default(self, m_write_file):
- """ensure that default behaviour is to not disable pipelining"""
- cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None)
- self.assertEqual(0, m_write_file.call_count)
-
- @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
- def test_false_disables_pipelining(self, m_write_file):
- """ensure that pipelining can be disabled with correct config"""
- cc_apt_pipelining.handle(
- 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None)
- self.assertEqual(1, m_write_file.call_count)
- args, _ = m_write_file.call_args
- self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0])
- self.assertIn('Pipeline-Depth "0"', args[1])
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
deleted file mode 100644
index b00f2083..00000000
--- a/cloudinit/config/tests/test_disable_ec2_metadata.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests cc_disable_ec2_metadata handler"""
-
-import cloudinit.config.cc_disable_ec2_metadata as ec2_meta
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
-import logging
-
-LOG = logging.getLogger(__name__)
-
-DISABLE_CFG = {'disable_ec2_metadata': 'true'}
-
-
-class TestEC2MetadataRoute(CiTestCase):
-
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
- def test_disable_ifconfig(self, m_subp, m_which):
- """Set the route if ifconfig command is available"""
- m_which.side_effect = lambda x: x if x == 'ifconfig' else None
- ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
- m_subp.assert_called_with(
- ['route', 'add', '-host', '169.254.169.254', 'reject'],
- capture=False)
-
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
- def test_disable_ip(self, m_subp, m_which):
- """Set the route if ip command is available"""
- m_which.side_effect = lambda x: x if x == 'ip' else None
- ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
- m_subp.assert_called_with(
- ['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
- capture=False)
-
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
- def test_disable_no_tool(self, m_subp, m_which):
- """Log error when neither route nor ip commands are available"""
- m_which.return_value = None # Find neither ifconfig nor ip
- ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
- self.assertEqual(
- [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list)
- m_subp.assert_not_called()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_final_message.py b/cloudinit/config/tests/test_final_message.py
deleted file mode 100644
index 46ba99b2..00000000
--- a/cloudinit/config/tests/test_final_message.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-import logging
-from unittest import mock
-
-import pytest
-
-from cloudinit.config.cc_final_message import handle
-
-
-class TestHandle:
- # TODO: Expand these tests to cover full functionality; currently they only
- # cover the logic around how the boot-finished file is written (and not its
- # contents).
-
- @pytest.mark.parametrize(
- "instance_dir_exists,file_is_written,expected_log_substring",
- [
- (True, True, None),
- (False, False, "Failed to write boot finished file "),
- ],
- )
- def test_boot_finished_written(
- self,
- instance_dir_exists,
- file_is_written,
- expected_log_substring,
- caplog,
- tmpdir,
- ):
- instance_dir = tmpdir.join("var/lib/cloud/instance")
- if instance_dir_exists:
- instance_dir.ensure_dir()
- boot_finished = instance_dir.join("boot-finished")
-
- m_cloud = mock.Mock(
- paths=mock.Mock(boot_finished=boot_finished.strpath)
- )
-
- handle(None, {}, m_cloud, logging.getLogger(), [])
-
- # We should not change the status of the instance directory
- assert instance_dir_exists == instance_dir.exists()
- assert file_is_written == boot_finished.exists()
-
- if expected_log_substring:
- assert expected_log_substring in caplog.text
diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/cloudinit/config/tests/test_grub_dpkg.py
deleted file mode 100644
index 99c05bb5..00000000
--- a/cloudinit/config/tests/test_grub_dpkg.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import pytest
-
-from unittest import mock
-from logging import Logger
-from cloudinit.subp import ProcessExecutionError
-from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle
-
-
-class TestFetchIdevs:
- """Tests cc_grub_dpkg.fetch_idevs()"""
-
- # Note: udevadm info returns devices in a large single line string
- @pytest.mark.parametrize(
- "grub_output,path_exists,expected_log_call,udevadm_output"
- ",expected_idevs",
- [
- # Inside a container, grub not installed
- (
- ProcessExecutionError(reason=FileNotFoundError()),
- False,
- mock.call("'grub-probe' not found in $PATH"),
- '',
- '',
- ),
- # Inside a container, grub installed
- (
- ProcessExecutionError(stderr="failed to get canonical path"),
- False,
- mock.call("grub-probe 'failed to get canonical path'"),
- '',
- '',
- ),
- # KVM Instance
- (
- ['/dev/vda'],
- True,
- None,
- (
- '/dev/disk/by-path/pci-0000:00:00.0 ',
- '/dev/disk/by-path/virtio-pci-0000:00:00.0 '
- ),
- '/dev/vda',
- ),
- # Xen Instance
- (
- ['/dev/xvda'],
- True,
- None,
- '',
- '/dev/xvda',
- ),
- # NVMe Hardware Instance
- (
- ['/dev/nvme1n1'],
- True,
- None,
- (
- '/dev/disk/by-id/nvme-Company_hash000 ',
- '/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ',
- '/dev/disk/by-path/pci-0000:00:00.0-nvme-0 '
- ),
- '/dev/disk/by-id/nvme-Company_hash000',
- ),
- # SCSI Hardware Instance
- (
- ['/dev/sda'],
- True,
- None,
- (
- '/dev/disk/by-id/company-user-1 ',
- '/dev/disk/by-id/scsi-0Company_user-1 ',
- '/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 '
- ),
- '/dev/disk/by-id/company-user-1',
- ),
- ],
- )
- @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
- @mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
- @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
- def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output,
- path_exists, expected_log_call, udevadm_output,
- expected_idevs):
- """Tests outputs from grub-probe and udevadm info against grub-dpkg"""
- m_subp.side_effect = [
- grub_output,
- ["".join(udevadm_output)]
- ]
- m_exists.return_value = path_exists
- log = mock.Mock(spec=Logger)
- idevs = fetch_idevs(log)
- assert expected_idevs == idevs
- if expected_log_call is not None:
- assert expected_log_call in log.debug.call_args_list
-
-
-class TestHandle:
- """Tests cc_grub_dpkg.handle()"""
-
- @pytest.mark.parametrize(
- "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output",
- [
- (
- # No configuration
- None,
- None,
- '/dev/disk/by-id/nvme-Company_hash000',
- (
- "Setting grub debconf-set-selections with ",
- "'/dev/disk/by-id/nvme-Company_hash000','false'"
- ),
- ),
- (
- # idevs set, idevs_empty unset
- '/dev/sda',
- None,
- '/dev/sda',
- (
- "Setting grub debconf-set-selections with ",
- "'/dev/sda','false'"
- ),
- ),
- (
- # idevs unset, idevs_empty set
- None,
- 'true',
- '/dev/xvda',
- (
- "Setting grub debconf-set-selections with ",
- "'/dev/xvda','true'"
- ),
- ),
- (
- # idevs set, idevs_empty set
- '/dev/vda',
- 'false',
- '/dev/disk/by-id/company-user-1',
- (
- "Setting grub debconf-set-selections with ",
- "'/dev/vda','false'"
- ),
- ),
- (
- # idevs set, idevs_empty set
- # Respect what the user defines, even if its logically wrong
- '/dev/nvme0n1',
- 'true',
- '',
- (
- "Setting grub debconf-set-selections with ",
- "'/dev/nvme0n1','true'"
- ),
- )
- ],
- )
- @mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
- @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str")
- @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
- @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
- def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs,
- cfg_idevs, cfg_idevs_empty, fetch_idevs_output,
- expected_log_output):
- """Test setting of correct debconf database entries"""
- m_get_cfg_str.side_effect = [
- cfg_idevs,
- cfg_idevs_empty
- ]
- m_fetch_idevs.return_value = fetch_idevs_output
- log = mock.Mock(spec=Logger)
- handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock())
- log.debug.assert_called_with("".join(expected_log_output))
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py
deleted file mode 100644
index 56510fd6..00000000
--- a/cloudinit/config/tests/test_mounts.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-from unittest import mock
-
-import pytest
-
-from cloudinit.config.cc_mounts import create_swapfile
-from cloudinit.subp import ProcessExecutionError
-
-
-M_PATH = 'cloudinit.config.cc_mounts.'
-
-
-class TestCreateSwapfile:
-
- @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other'))
- @mock.patch(M_PATH + 'util.get_mount_info')
- @mock.patch(M_PATH + 'subp.subp')
- def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
- swap_file = tmpdir.join("swap-file")
- fname = str(swap_file)
-
- # Some of the calls to subp.subp should create the swap file; this
- # roughly approximates that
- m_subp.side_effect = lambda *args, **kwargs: swap_file.write('')
-
- m_get_mount_info.return_value = (mock.ANY, fstype)
-
- create_swapfile(fname, '')
- assert mock.call(['mkswap', fname]) in m_subp.call_args_list
-
- @mock.patch(M_PATH + "util.get_mount_info")
- @mock.patch(M_PATH + "subp.subp")
- def test_fallback_from_fallocate_to_dd(
- self, m_subp, m_get_mount_info, caplog, tmpdir
- ):
- swap_file = tmpdir.join("swap-file")
- fname = str(swap_file)
-
- def subp_side_effect(cmd, *args, **kwargs):
- # Mock fallocate failing, to initiate fallback
- if cmd[0] == "fallocate":
- raise ProcessExecutionError()
-
- m_subp.side_effect = subp_side_effect
- # Use ext4 so both fallocate and dd are valid swap creation methods
- m_get_mount_info.return_value = (mock.ANY, "ext4")
-
- create_swapfile(fname, "")
-
- cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list]
- assert "fallocate" in cmds, "fallocate was not called"
- assert "dd" in cmds, "fallocate failure did not fallback to dd"
-
- assert cmds.index("dd") > cmds.index(
- "fallocate"
- ), "dd ran before fallocate"
-
- assert mock.call(["mkswap", fname]) in m_subp.call_args_list
-
- msg = "fallocate swap creation failed, will attempt with dd"
- assert msg in caplog.text
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
deleted file mode 100644
index 6546a0b5..00000000
--- a/cloudinit/config/tests/test_resolv_conf.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from unittest import mock
-
-import pytest
-
-from cloudinit.config.cc_resolv_conf import generate_resolv_conf
-
-
-EXPECTED_HEADER = """\
-# Your system has been configured with 'manage-resolv-conf' set to true.
-# As a result, cloud-init has written this file with configuration data
-# that it has been provided. Cloud-init, by default, will write this file
-# a single time (PER_ONCE).
-#\n\n"""
-
-
-class TestGenerateResolvConf:
- @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
- def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file):
- generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock())
-
- assert [
- mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
- ] == m_render_to_file.call_args_list
-
- @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
- def test_target_fname_is_used_if_passed(self, m_render_to_file):
- generate_resolv_conf(
- "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path"
- )
-
- assert [
- mock.call(mock.ANY, "/use/this/path", mock.ANY)
- ] == m_render_to_file.call_args_list
-
- # Patch in templater so we can assert on the actual generated content
- @mock.patch("cloudinit.templater.util.write_file")
- # Parameterise with the value to be passed to generate_resolv_conf as the
- # params parameter, and the expected line after the header as
- # expected_extra_line.
- @pytest.mark.parametrize(
- "params,expected_extra_line",
- [
- # No options
- ({}, None),
- # Just a true flag
- ({"options": {"foo": True}}, "options foo"),
- # Just a false flag
- ({"options": {"foo": False}}, None),
- # Just an option
- ({"options": {"foo": "some_value"}}, "options foo:some_value"),
- # A true flag and an option
- (
- {"options": {"foo": "some_value", "bar": True}},
- "options bar foo:some_value",
- ),
- # Two options
- (
- {"options": {"foo": "some_value", "bar": "other_value"}},
- "options bar:other_value foo:some_value",
- ),
- # Everything
- (
- {
- "options": {
- "foo": "some_value",
- "bar": "other_value",
- "baz": False,
- "spam": True,
- }
- },
- "options spam bar:other_value foo:some_value",
- ),
- ],
- )
- def test_flags_and_options(
- self, m_write_file, params, expected_extra_line
- ):
- generate_resolv_conf("templates/resolv.conf.tmpl", params)
-
- expected_content = EXPECTED_HEADER
- if expected_extra_line is not None:
- # If we have any extra lines, expect a trailing newline
- expected_content += "\n".join([expected_extra_line, ""])
- assert [
- mock.call(mock.ANY, expected_content, mode=mock.ANY)
- ] == m_write_file.call_args_list
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
deleted file mode 100644
index daa1ef51..00000000
--- a/cloudinit/config/tests/test_set_passwords.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from unittest import mock
-
-from cloudinit.config import cc_set_passwords as setpass
-from cloudinit.tests.helpers import CiTestCase
-from cloudinit import util
-
-MODPATH = "cloudinit.config.cc_set_passwords."
-
-
-class TestHandleSshPwauth(CiTestCase):
- """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
-
- with_logs = True
-
- @mock.patch(MODPATH + "subp.subp")
- def test_unknown_value_logs_warning(self, m_subp):
- setpass.handle_ssh_pwauth("floo")
- self.assertIn("Unrecognized value: ssh_pwauth=floo",
- self.logs.getvalue())
- m_subp.assert_not_called()
-
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
- def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
- """If systemctl in service cmd: systemctl restart name."""
- setpass.handle_ssh_pwauth(
- True, service_cmd=["systemctl"], service_name="myssh")
- self.assertEqual(mock.call(["systemctl", "restart", "myssh"]),
- m_subp.call_args)
-
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
- def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
- """If systemctl in service cmd: systemctl restart name."""
- setpass.handle_ssh_pwauth(
- True, service_cmd=["service"], service_name="myssh")
- self.assertEqual(mock.call(["service", "myssh", "restart"]),
- m_subp.call_args)
-
- @mock.patch(MODPATH + "update_ssh_config", return_value=False)
- @mock.patch(MODPATH + "subp.subp")
- def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
- """If config is not updated, then no system restart should be done."""
- setpass.handle_ssh_pwauth(True)
- m_subp.assert_not_called()
- self.assertIn("No need to restart SSH", self.logs.getvalue())
-
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
- def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
- """If 'unchanged', then no updates to config and no restart."""
- setpass.handle_ssh_pwauth(
- "unchanged", service_cmd=["systemctl"], service_name="myssh")
- m_update_ssh_config.assert_not_called()
- m_subp.assert_not_called()
-
- @mock.patch(MODPATH + "subp.subp")
- def test_valid_change_values(self, m_subp):
- """If value is a valid changen value, then update should be called."""
- upname = MODPATH + "update_ssh_config"
- optname = "PasswordAuthentication"
- for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
- optval = "yes" if value in util.TRUE_STRINGS else "no"
- with mock.patch(upname, return_value=False) as m_update:
- setpass.handle_ssh_pwauth(value)
- m_update.assert_called_with({optname: optval})
- m_subp.assert_not_called()
-
-
-class TestSetPasswordsHandle(CiTestCase):
- """Test cc_set_passwords.handle"""
-
- with_logs = True
-
- def setUp(self):
- super(TestSetPasswordsHandle, self).setUp()
- self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err')
-
- def test_handle_on_empty_config(self, *args):
- """handle logs that no password has changed when config is empty."""
- cloud = self.tmp_cloud(distro='ubuntu')
- setpass.handle(
- 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[])
- self.assertEqual(
- "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
- 'ssh_pwauth=None\n',
- self.logs.getvalue())
-
- @mock.patch(MODPATH + "subp.subp")
- def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
- """handle parses command password hashes."""
- cloud = self.tmp_cloud(distro='ubuntu')
- valid_hashed_pwds = [
- 'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/'
- 'Dlew1Va',
- 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
- 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
- cfg = {'chpasswd': {'list': valid_hashed_pwds}}
- with mock.patch(MODPATH + 'subp.subp') as m_subp:
- setpass.handle(
- 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
- self.assertIn(
- 'DEBUG: Handling input for chpasswd as list.',
- self.logs.getvalue())
- self.assertIn(
- "DEBUG: Setting hashed password for ['root', 'ubuntu']",
- self.logs.getvalue())
- self.assertEqual(
- [mock.call(['chpasswd', '-e'],
- '\n'.join(valid_hashed_pwds) + '\n')],
- m_subp.call_args_list)
-
- @mock.patch(MODPATH + "util.is_BSD")
- @mock.patch(MODPATH + "subp.subp")
- def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
- self, m_subp, m_is_bsd):
- """BSD don't use chpasswd"""
- m_is_bsd.return_value = True
- cloud = self.tmp_cloud(distro='freebsd')
- valid_pwds = ['ubuntu:passw0rd']
- cfg = {'chpasswd': {'list': valid_pwds}}
- setpass.handle(
- 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
- self.assertEqual([
- mock.call(['pw', 'usermod', 'ubuntu', '-h', '0'], data='passw0rd',
- logstring="chpasswd for ubuntu"),
- mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
- m_subp.call_args_list)
-
- @mock.patch(MODPATH + "util.is_BSD")
- @mock.patch(MODPATH + "subp.subp")
- def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
- m_is_bsd):
- """handle parses command set random passwords."""
- m_is_bsd.return_value = False
- cloud = self.tmp_cloud(distro='ubuntu')
- valid_random_pwds = [
- 'root:R',
- 'ubuntu:RANDOM']
- cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
- with mock.patch(MODPATH + 'subp.subp') as m_subp:
- setpass.handle(
- 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
- self.assertIn(
- 'DEBUG: Handling input for chpasswd as list.',
- self.logs.getvalue())
- self.assertNotEqual(
- [mock.call(['chpasswd'],
- '\n'.join(valid_random_pwds) + '\n')],
- m_subp.call_args_list)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
deleted file mode 100644
index 6d4c014a..00000000
--- a/cloudinit/config/tests/test_snap.py
+++ /dev/null
@@ -1,564 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import re
-from io import StringIO
-
-from cloudinit.config.cc_snap import (
- ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse,
- run_commands, schema)
-from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import util
-from cloudinit.tests.helpers import (
- CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema)
-
-
-SYSTEM_USER_ASSERTION = """\
-type: system-user
-authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
-brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
-email: foo@bar.com
-password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt
-series:
-- 16
-since: 2016-09-10T16:34:00+03:00
-until: 2017-11-10T16:34:00+03:00
-username: baz
-sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj
-
-AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP
-Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI
-zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF
-s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj
-+to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP
-Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS
-d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q
-BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H
-f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V
-v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q=="""
-
-ACCOUNT_ASSERTION = """\
-type: account-key
-authority-id: canonical
-revision: 2
-public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0
-account-id: canonical
-name: store
-since: 2016-04-01T00:00:00.0Z
-body-length: 717
-sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH
-
-AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j
-qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482
-vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ
-UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK
-Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG
-o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl
-VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9
-2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an
-Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc
-vUvV7RjVzv17ut0AEQEAAQ==
-
-AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM
-WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b
-nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL
-3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL
-eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY
-inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1
-rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+
-rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE
-aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ
-6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO
-haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF
-yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9
-HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi
-skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK
-CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde
-ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF
-qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR
-IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t
-oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k"""
-
-
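-# Minimal stand-in for a cloud object; only the .distro attribute is used in these tests.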
-class FakeCloud(object):
- def __init__(self, distro):
- self.distro = distro
-
-
-class TestAddAssertions(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestAddAssertions, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
- def test_add_assertions_on_empty_list(self, m_subp):
- """When provided with an empty list, add_assertions does nothing."""
- add_assertions([])
- self.assertEqual('', self.logs.getvalue())
- m_subp.assert_not_called()
-
- def test_add_assertions_on_non_list_or_dict(self):
- """When provided an invalid type, add_assertions raises an error."""
- with self.assertRaises(TypeError) as context_manager:
- add_assertions(assertions="I'm Not Valid")
- self.assertEqual(
- "assertion parameter was not a list or dict: I'm Not Valid",
- str(context_manager.exception))
-
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
- def test_add_assertions_adds_assertions_as_list(self, m_subp):
- """When provided with a list, add_assertions adds all assertions."""
- self.assertEqual(
- ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]
- wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- add_assertions, assertions)
- self.assertIn(
- 'Importing user-provided snap assertions', self.logs.getvalue())
- self.assertIn(
- 'sertions', self.logs.getvalue())
- self.assertEqual(
- [mock.call(['snap', 'ack', assert_file], capture=True)],
- m_subp.call_args_list)
- compare_file = self.tmp_path('comparison', dir=self.tmp)
- util.write_file(compare_file, '\n'.join(assertions).encode('utf-8'))
- self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file))
-
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
- def test_add_assertions_adds_assertions_as_dict(self, m_subp):
- """When provided with a dict, add_assertions adds all assertions."""
- self.assertEqual(
- ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- assertions = {'00': SYSTEM_USER_ASSERTION, '01': ACCOUNT_ASSERTION}
- wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- add_assertions, assertions)
- self.assertIn(
- 'Importing user-provided snap assertions', self.logs.getvalue())
- self.assertIn(
- "DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv",
- self.logs.getvalue())
- self.assertIn(
- "DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic",
- self.logs.getvalue())
- self.assertEqual(
- [mock.call(['snap', 'ack', assert_file], capture=True)],
- m_subp.call_args_list)
- compare_file = self.tmp_path('comparison', dir=self.tmp)
- combined = '\n'.join(assertions.values())
- util.write_file(compare_file, combined.encode('utf-8'))
- self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file))
-
-
-class TestRunCommands(CiTestCase):
-
- with_logs = True
- allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
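- # Allow real shell subprocess calls so run_commands can execute the echo commands below.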
-
- def setUp(self):
- super(TestRunCommands, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
- def test_run_commands_on_empty_list(self, m_subp):
- """When provided with an empty list, run_commands does nothing."""
- run_commands([])
- self.assertEqual('', self.logs.getvalue())
- m_subp.assert_not_called()
-
- def test_run_commands_on_non_list_or_dict(self):
- """When provided an invalid type, run_commands raises an error."""
- with self.assertRaises(TypeError) as context_manager:
- run_commands(commands="I'm Not Valid")
- self.assertEqual(
- "commands parameter was not a list or dict: I'm Not Valid",
- str(context_manager.exception))
-
- def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
- """All exit codes are logged to stderr."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
-
- cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'bogus command'
- cmd3 = 'echo "MOM" >> %s' % outfile
- commands = [cmd1, cmd2, cmd3]
-
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
- with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
- with self.assertRaises(RuntimeError) as context_manager:
- run_commands(commands=commands)
-
- self.assertIsNotNone(
- re.search(r'bogus: (command )?not found',
- str(context_manager.exception)),
- msg='Expected bogus command not found')
- expected_stderr_log = '\n'.join([
- 'Begin run command: {cmd}'.format(cmd=cmd1),
- 'End run command: exit(0)',
- 'Begin run command: {cmd}'.format(cmd=cmd2),
- 'ERROR: End run command: exit(127)',
- 'Begin run command: {cmd}'.format(cmd=cmd3),
- 'End run command: exit(0)\n'])
- self.assertEqual(expected_stderr_log, m_stderr.getvalue())
-
- def test_run_command_as_lists(self):
- """When commands are specified as a list, run them in order."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
-
- cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'echo "MOM" >> %s' % outfile
- commands = [cmd1, cmd2]
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
- with mock.patch(mock_path, new_callable=StringIO):
- run_commands(commands=commands)
-
- self.assertIn(
- 'DEBUG: Running user-provided snap commands',
- self.logs.getvalue())
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
- self.assertIn(
- 'WARNING: Non-snap commands in snap config:', self.logs.getvalue())
-
- def test_run_command_dict_sorted_as_command_script(self):
- """When commands are a dict, sort them and run."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
- cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'echo "MOM" >> %s' % outfile
- commands = {'02': cmd1, '01': cmd2}
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
- with mock.patch(mock_path, new_callable=StringIO):
- run_commands(commands=commands)
-
- expected_messages = [
- 'DEBUG: Running user-provided snap commands']
- for message in expected_messages:
- self.assertIn(message, self.logs.getvalue())
- self.assertEqual('MOM\nHI\n', util.load_file(outfile))
-
-
-@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
-
- with_logs = True
- schema = schema
-
- def test_schema_warns_on_snap_not_as_dict(self):
- """If the snap configuration is not a dict, emit a warning."""
- validate_cloudconfig_schema({'snap': 'wrong type'}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nsnap: 'wrong type' is not of type"
- " 'object'\n",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- def test_schema_disallows_unknown_keys(self, _):
- """Unknown keys in the snap configuration emit warnings."""
- validate_cloudconfig_schema(
- {'snap': {'commands': ['ls'], 'invalid-key': ''}}, schema)
- self.assertIn(
- 'WARNING: Invalid config:\nsnap: Additional properties are not'
- " allowed ('invalid-key' was unexpected)",
- self.logs.getvalue())
-
- def test_warn_schema_requires_either_commands_or_assertions(self):
- """Warn when snap configuration lacks both commands and assertions."""
- validate_cloudconfig_schema(
- {'snap': {}}, schema)
- self.assertIn(
- 'WARNING: Invalid config:\nsnap: {} does not have enough'
- ' properties',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- def test_warn_schema_commands_is_not_list_or_dict(self, _):
- """Warn when snap:commands config is not a list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'commands': 'broken'}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nsnap.commands: 'broken' is not of type"
- " 'object', 'array'\n",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- def test_warn_schema_when_commands_is_empty(self, _):
- """Emit warnings when snap:commands is an empty list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'commands': []}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {}}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nsnap.commands: [] is too short\n"
- "WARNING: Invalid config:\nsnap.commands: {} does not have enough"
- " properties\n",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- def test_schema_when_commands_are_list_or_dict(self, _):
- """No warnings when snap:commands are either a list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'commands': ['valid']}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {'01': 'also valid'}}}, schema)
- self.assertEqual('', self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- def test_schema_when_commands_values_are_invalid_type(self, _):
- """Warnings when snap:commands values are invalid type (e.g. int)"""
- validate_cloudconfig_schema(
- {'snap': {'commands': [123]}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {'01': 123}}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\n"
- "snap.commands.0: 123 is not valid under any of the given"
- " schemas\n"
- "WARNING: Invalid config:\n"
- "snap.commands.01: 123 is not valid under any of the given"
- " schemas\n",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- def test_schema_when_commands_list_values_are_invalid_type(self, _):
- """Warnings when snap:commands list values are wrong type (e.g. int)"""
- validate_cloudconfig_schema(
- {'snap': {'commands': [["snap", "install", 123]]}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\n"
- "snap.commands.0: ['snap', 'install', 123] is not valid under any"
- " of the given schemas\n"
- "WARNING: Invalid config:\n"
- "snap.commands.01: ['snap', 'install', 123] is not valid under any"
- " of the given schemas\n",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- def test_schema_when_assertions_values_are_invalid_type(self, _):
- """Warnings when snap:assertions values are invalid type (e.g. int)"""
- validate_cloudconfig_schema(
- {'snap': {'assertions': [123]}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'assertions': {'01': 123}}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\n"
- "snap.assertions.0: 123 is not of type 'string'\n"
- "WARNING: Invalid config:\n"
- "snap.assertions.01: 123 is not of type 'string'\n",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
- def test_warn_schema_assertions_is_not_list_or_dict(self, _):
- """Warn when snap:assertions config is not a list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'assertions': 'broken'}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nsnap.assertions: 'broken' is not of"
- " type 'object', 'array'\n",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
- def test_warn_schema_when_assertions_is_empty(self, _):
- """Emit warnings when snap:assertions is an empty list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'assertions': []}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'assertions': {}}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nsnap.assertions: [] is too short\n"
- "WARNING: Invalid config:\nsnap.assertions: {} does not have"
- " enough properties\n",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
- def test_schema_when_assertions_are_list_or_dict(self, _):
- """No warnings when snap:assertions are a list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'assertions': ['valid']}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'assertions': {'01': 'also valid'}}}, schema)
- self.assertEqual('', self.logs.getvalue())
-
- def test_duplicates_are_fine_array_array(self):
- """Duplicated commands array/array entries are allowed."""
- self.assertSchemaValid(
- {'commands': [["echo", "bye"], ["echo", "bye"]]},
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_array_string(self):
- """Duplicated commands array/string entries are allowed."""
- self.assertSchemaValid(
- {'commands': ["echo bye", "echo bye"]},
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_dict_array(self):
- """Duplicated commands dict/array entries are allowed."""
- self.assertSchemaValid(
- {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_dict_string(self):
- """Duplicated commands dict/string entries are allowed."""
- self.assertSchemaValid(
- {'commands': {'00': "echo bye", '01': "echo bye"}},
- "command entries can be duplicate.")
-
-
-class TestHandle(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestHandle, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
- @mock.patch('cloudinit.config.cc_snap.validate_cloudconfig_schema')
- def test_handle_no_config(self, m_schema, m_add, m_run):
- """When no snap-related configuration is provided, nothing happens."""
- cfg = {}
- handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertIn(
- "DEBUG: Skipping module named snap, no 'snap' key in config",
- self.logs.getvalue())
- m_schema.assert_not_called()
- m_add.assert_not_called()
- m_run.assert_not_called()
-
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
- @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse')
- def test_handle_skips_squashfuse_when_unconfigured(self, m_squash, m_add,
- m_run):
- """When squashfuse_in_container is unset, don't attempt to install."""
- handle(
- 'snap', cfg={'snap': {}}, cloud=None, log=self.logger, args=None)
- handle(
- 'snap', cfg={'snap': {'squashfuse_in_container': None}},
- cloud=None, log=self.logger, args=None)
- handle(
- 'snap', cfg={'snap': {'squashfuse_in_container': False}},
- cloud=None, log=self.logger, args=None)
- self.assertEqual([], m_squash.call_args_list) # No calls
- # snap configuration missing assertions and commands will default to []
- self.assertIn(mock.call([]), m_add.call_args_list)
- self.assertIn(mock.call([]), m_run.call_args_list)
-
- @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse')
- def test_handle_tries_to_install_squashfuse(self, m_squash):
- """If squashfuse_in_container is True, try installing squashfuse."""
- cfg = {'snap': {'squashfuse_in_container': True}}
- mycloud = FakeCloud(None)
- handle('snap', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
- self.assertEqual(
- [mock.call(mycloud)], m_squash.call_args_list)
-
- def test_handle_runs_commands_provided(self):
- """If commands are specified as a list, run them."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
-
- cfg = {
- 'snap': {'commands': ['echo "HI" >> %s' % outfile,
- 'echo "MOM" >> %s' % outfile]}}
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
- with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
- with mock.patch(mock_path, new_callable=StringIO):
- handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
-
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
-
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
- def test_handle_adds_assertions(self, m_subp):
- """Any configured snap assertions are provided to add_assertions."""
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- compare_file = self.tmp_path('comparison', dir=self.tmp)
- cfg = {
- 'snap': {'assertions': [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}}
- wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None)
- content = '\n'.join(cfg['snap']['assertions'])
- util.write_file(compare_file, content.encode('utf-8'))
- self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file))
-
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
- @skipUnlessJsonSchema()
- def test_handle_validates_schema(self, m_subp):
- """Any provided configuration is runs validate_cloudconfig_schema."""
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- cfg = {'snap': {'invalid': ''}} # Generates schema warning
- wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- "WARNING: Invalid config:\nsnap: Additional properties are not"
- " allowed ('invalid' was unexpected)\n",
- self.logs.getvalue())
-
-
-class TestMaybeInstallSquashFuse(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestMaybeInstallSquashFuse, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
- def test_maybe_install_squashfuse_skips_non_containers(self, m_container):
- """maybe_install_squashfuse does nothing when not on a container."""
- m_container.return_value = False
- maybe_install_squashfuse(cloud=FakeCloud(None))
- self.assertEqual([mock.call()], m_container.call_args_list)
- self.assertEqual('', self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
- def test_maybe_install_squashfuse_raises_install_errors(self, m_container):
- """maybe_install_squashfuse logs and raises package install errors."""
- m_container.return_value = True
- distro = mock.MagicMock()
- distro.update_package_sources.return_value = None
- distro.install_packages.side_effect = RuntimeError(
- 'Some install error')
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual('Some install error', str(context_manager.exception))
- self.assertIn(
- 'Failed to install squashfuse\n', self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
- def test_maybe_install_squashfuse_raises_update_errors(self, m_container):
- """maybe_install_squashfuse logs and raises package update errors."""
- m_container.return_value = True
- distro = mock.MagicMock()
- distro.update_package_sources.side_effect = RuntimeError(
- 'Some apt error')
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual('Some apt error', str(context_manager.exception))
- self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
- def test_maybe_install_squashfuse_happy_path(self, m_container):
- """maybe_install_squashfuse logs and raises package install errors."""
- m_container.return_value = True
- distro = mock.MagicMock() # No errors raised
- maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual(
- [mock.call()], distro.update_package_sources.call_args_list)
- self.assertEqual(
- [mock.call(['squashfuse'])],
- distro.install_packages.call_args_list)
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
deleted file mode 100644
index 87ccdb60..00000000
--- a/cloudinit/config/tests/test_ssh.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os.path
-
-from cloudinit.config import cc_ssh
-from cloudinit import ssh_util
-from cloudinit.tests.helpers import CiTestCase, mock
-import logging
-
-LOG = logging.getLogger(__name__)
-
-MODPATH = "cloudinit.config.cc_ssh."
-KEY_NAMES_NO_DSA = [name for name in cc_ssh.GENERATE_KEY_NAMES
- if name not in 'dsa']
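-# Key types expected to be published by default; DSA keys are excluded from publishing.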
-
-
-@mock.patch(MODPATH + "ssh_util.setup_user_keys")
-class TestHandleSsh(CiTestCase):
- """Test cc_ssh handling of ssh config."""
-
- def _publish_hostkey_test_setup(self):
- self.test_hostkeys = {
- 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'),
- 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'),
- 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'),
- 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'),
- }
- self.test_hostkey_files = []
- hostkey_tmpdir = self.tmp_dir()
- for key_type in cc_ssh.GENERATE_KEY_NAMES:
- key_data = self.test_hostkeys[key_type]
- filename = 'ssh_host_%s_key.pub' % key_type
- filepath = os.path.join(hostkey_tmpdir, filename)
- self.test_hostkey_files.append(filepath)
- with open(filepath, 'w') as f:
- f.write(' '.join(key_data))
-
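- # Point cc_ssh.KEY_FILE_TPL at the temp dir so the tests read the fake host keys written above.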
- cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key')
-
- def test_apply_credentials_with_user(self, m_setup_keys):
- """Apply keys for the given user and root."""
- keys = ["key1"]
- user = "clouduser"
- cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list)
-
- def test_apply_credentials_with_no_user(self, m_setup_keys):
- """Apply keys for root only."""
- keys = ["key1"]
- user = None
- cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
- self.assertEqual([mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list)
-
- def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
- """Apply keys for the given user and disable root ssh."""
- keys = ["key1"]
- user = "clouduser"
- options = ssh_util.DISABLE_USER_OPTS
- cc_ssh.apply_credentials(keys, user, True, options)
- options = options.replace("$USER", user)
- options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
-
- def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
- """Apply keys no user and disable root ssh."""
- keys = ["key1"]
- user = None
- options = ssh_util.DISABLE_USER_OPTS
- cc_ssh.apply_credentials(keys, user, True, options)
- options = options.replace("$USER", "NONE")
- options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_no_cfg(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
- """Test handle with no config ignores generating existing keyfiles."""
- cfg = {}
- keys = ["key1"]
- m_glob.return_value = [] # Return no matching keys to prevent removal
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ([], {})
- cc_ssh.PUBLISH_HOST_KEYS = False
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
- options = options.replace("$DISABLE_USER", "root")
- m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
- self.assertIn(
- [mock.call('/etc/ssh/ssh_host_rsa_key'),
- mock.call('/etc/ssh/ssh_host_dsa_key'),
- mock.call('/etc/ssh/ssh_host_ecdsa_key'),
- mock.call('/etc/ssh/ssh_host_ed25519_key')],
- m_path_exists.call_args_list)
- self.assertEqual([mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
- """Test allow_public_ssh_keys=False ignores ssh public keys from
- platform.
- """
- cfg = {"allow_public_ssh_keys": False}
- keys = ["key1"]
- user = "clouduser"
- m_glob.return_value = [] # Return no matching keys to prevent removal
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, LOG, None)
-
- options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
- options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(), user),
- mock.call(set(), "root", options=options)],
- m_setup_keys.call_args_list)
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
- """Test handle with no config and a default distro user."""
- cfg = {}
- keys = ["key1"]
- user = "clouduser"
- m_glob.return_value = [] # Return no matching keys to prevent removal
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, LOG, None)
-
- options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
- options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
- """Test handle with explicit disable_root and a default distro user."""
- # This test is identical to test_handle_no_cfg_and_default_root,
- # except this uses an explicit cfg value
- cfg = {"disable_root": True}
- keys = ["key1"]
- user = "clouduser"
- m_glob.return_value = [] # Return no matching keys to prevent removal
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, LOG, None)
-
- options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
- options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
- """Test handle with disable_root == False."""
- # When disable_root == False, the ssh redirect for root is skipped
- cfg = {"disable_root": False}
- keys = ["key1"]
- user = "clouduser"
- m_glob.return_value = [] # Return no matching keys to prevent removal
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
- cc_ssh.handle("name", cfg, cloud, LOG, None)
-
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list)
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_default(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
- """Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
- cc_ssh.PUBLISH_HOST_KEYS = True
- keys = ["key1"]
- user = "clouduser"
- # Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cloud.datasource.publish_host_keys = mock.Mock()
-
- cfg = {}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in KEY_NAMES_NO_DSA]
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_config_enable(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
- """Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
- cc_ssh.PUBLISH_HOST_KEYS = False
- keys = ["key1"]
- user = "clouduser"
- # Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cloud.datasource.publish_host_keys = mock.Mock()
-
- cfg = {'ssh_publish_hostkeys': {'enabled': True}}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in KEY_NAMES_NO_DSA]
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_config_disable(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
- """Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
- cc_ssh.PUBLISH_HOST_KEYS = True
- keys = ["key1"]
- user = "clouduser"
- # Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cloud.datasource.publish_host_keys = mock.Mock()
-
- cfg = {'ssh_publish_hostkeys': {'enabled': False}}
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertFalse(cloud.datasource.publish_host_keys.call_args_list)
- cloud.datasource.publish_host_keys.assert_not_called()
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_config_blacklist(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
- """Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
- cc_ssh.PUBLISH_HOST_KEYS = True
- keys = ["key1"]
- user = "clouduser"
- # Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cloud.datasource.publish_host_keys = mock.Mock()
-
- cfg = {'ssh_publish_hostkeys': {'enabled': True,
- 'blacklist': ['dsa', 'rsa']}}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in ['ecdsa', 'ed25519']]
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_empty_blacklist(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
- """Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
- cc_ssh.PUBLISH_HOST_KEYS = True
- keys = ["key1"]
- user = "clouduser"
- # Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
- # Mock os.path.exists to return True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
- cloud.datasource.publish_host_keys = mock.Mock()
-
- cfg = {'ssh_publish_hostkeys': {'enabled': True,
- 'blacklist': []}}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in cc_ssh.GENERATE_KEY_NAMES]
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
-
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "util.write_file")
- def test_handle_ssh_keys_in_cfg(self, m_write_file, m_nug, m_setup_keys):
- """Test handle with ssh keys and certificate."""
- # Populate a config dictionary to pass to handle() as well
- # as the expected file-writing calls.
- cfg = {"ssh_keys": {}}
-
- expected_calls = []
- for key_type in cc_ssh.GENERATE_KEY_NAMES:
- private_name = "{}_private".format(key_type)
- public_name = "{}_public".format(key_type)
- cert_name = "{}_certificate".format(key_type)
-
- # Actual key contents don"t have to be realistic
- private_value = "{}_PRIVATE_KEY".format(key_type)
- public_value = "{}_PUBLIC_KEY".format(key_type)
- cert_value = "{}_CERT_KEY".format(key_type)
-
- cfg["ssh_keys"][private_name] = private_value
- cfg["ssh_keys"][public_name] = public_value
- cfg["ssh_keys"][cert_name] = cert_value
-
- expected_calls.extend([
- mock.call(
- '/etc/ssh/ssh_host_{}_key'.format(key_type),
- private_value,
- 384
- ),
- mock.call(
- '/etc/ssh/ssh_host_{}_key.pub'.format(key_type),
- public_value,
- 384
- ),
- mock.call(
- '/etc/ssh/ssh_host_{}_key-cert.pub'.format(key_type),
- cert_value,
- 384
- ),
- mock.call(
- '/etc/ssh/sshd_config',
- ('HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub'
- '\n'.format(key_type)),
- preserve_mode=True
- )
- ])
-
- # Run the handler.
- m_nug.return_value = ([], {})
- with mock.patch(MODPATH + 'ssh_util.parse_ssh_config',
- return_value=[]):
- cc_ssh.handle("name", cfg, self.tmp_cloud(distro='ubuntu'),
- LOG, None)
-
- # Check that all expected output has been done.
- for call_ in expected_calls:
- self.assertIn(call_, m_write_file.call_args_list)
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
deleted file mode 100644
index db7fb726..00000000
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ /dev/null
@@ -1,333 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_ubuntu_advantage import (
- configure_ua, handle, maybe_install_ua_tools, schema)
-from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import subp
-from cloudinit.tests.helpers import (
- CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
-
-
-# Module path used in mocks
-MPATH = 'cloudinit.config.cc_ubuntu_advantage'
-
-
-class FakeCloud(object):
- def __init__(self, distro):
- self.distro = distro
-
-
-class TestConfigureUA(CiTestCase):
-
- with_logs = True
- allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
-
- def setUp(self):
- super(TestConfigureUA, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_error(self, m_subp):
- """Errors from ua attach command are raised."""
- m_subp.side_effect = subp.ProcessExecutionError(
- 'Invalid token SomeToken')
- with self.assertRaises(RuntimeError) as context_manager:
- configure_ua(token='SomeToken')
- self.assertEqual(
- 'Failure attaching Ubuntu Advantage:\nUnexpected error while'
- ' running command.\nCommand: -\nExit code: -\nReason: -\n'
- 'Stdout: Invalid token SomeToken\nStderr: -',
- str(context_manager.exception))
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_token(self, m_subp):
- """When token is provided, attach the machine to ua using the token."""
- configure_ua(token='SomeToken')
- m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
- self.assertEqual(
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_on_service_error(self, m_subp):
- """all services should be enabled and then any failures raised"""
-
- def fake_subp(cmd, capture=None):
- fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
- if cmd in fail_cmds and capture:
- svc = cmd[-1]
- raise subp.ProcessExecutionError(
- 'Invalid {} credentials'.format(svc.upper()))
-
- m_subp.side_effect = fake_subp
-
- with self.assertRaises(RuntimeError) as context_manager:
- configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips'])
- self.assertEqual(
- m_subp.call_args_list,
- [mock.call(['ua', 'attach', 'SomeToken']),
- mock.call(['ua', 'enable', 'esm'], capture=True),
- mock.call(['ua', 'enable', 'cc'], capture=True),
- mock.call(['ua', 'enable', 'fips'], capture=True)])
- self.assertIn(
- 'WARNING: Failure enabling "esm":\nUnexpected error'
- ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
- 'Stdout: Invalid ESM credentials\nStderr: -\n',
- self.logs.getvalue())
- self.assertIn(
- 'WARNING: Failure enabling "cc":\nUnexpected error'
- ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
- 'Stdout: Invalid CC credentials\nStderr: -\n',
- self.logs.getvalue())
- self.assertEqual(
- 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
- str(context_manager.exception))
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_empty_services(self, m_subp):
- """When services is an empty list, do not auto-enable attach."""
- configure_ua(token='SomeToken', enable=[])
- m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
- self.assertEqual(
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_specific_services(self, m_subp):
- """When services a list, only enable specific services."""
- configure_ua(token='SomeToken', enable=['fips'])
- self.assertEqual(
- m_subp.call_args_list,
- [mock.call(['ua', 'attach', 'SomeToken']),
- mock.call(['ua', 'enable', 'fips'], capture=True)])
- self.assertEqual(
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_string_services(self, m_subp):
- """When services a string, treat as singleton list and warn"""
- configure_ua(token='SomeToken', enable='fips')
- self.assertEqual(
- m_subp.call_args_list,
- [mock.call(['ua', 'attach', 'SomeToken']),
- mock.call(['ua', 'enable', 'fips'], capture=True)])
- self.assertEqual(
- 'WARNING: ubuntu_advantage: enable should be a list, not a'
- ' string; treating as a single enable\n'
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
- @mock.patch('%s.subp.subp' % MPATH)
- def test_configure_ua_attach_with_weird_services(self, m_subp):
- """When services not string or list, warn but still attach"""
- configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
- self.assertEqual(
- m_subp.call_args_list,
- [mock.call(['ua', 'attach', 'SomeToken'])])
- self.assertEqual(
- 'WARNING: ubuntu_advantage: enable should be a list, not a'
- ' dict; skipping enabling services\n'
- 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
- self.logs.getvalue())
-
-
-@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
-
- with_logs = True
- schema = schema
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- @mock.patch('%s.configure_ua' % MPATH)
- def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
- """If ubuntu_advantage configuration is not a dict, emit a warning."""
- validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not"
- " of type 'object'\n",
- self.logs.getvalue())
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- @mock.patch('%s.configure_ua' % MPATH)
- def test_schema_disallows_unknown_keys(self, _cfg, _):
- """Unknown keys in ubuntu_advantage configuration emit warnings."""
- validate_cloudconfig_schema(
- {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}},
- schema)
- self.assertIn(
- 'WARNING: Invalid config:\nubuntu_advantage: Additional properties'
- " are not allowed ('invalid-key' was unexpected)",
- self.logs.getvalue())
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- @mock.patch('%s.configure_ua' % MPATH)
- def test_warn_schema_requires_token(self, _cfg, _):
- """Warn if ubuntu_advantage configuration lacks token."""
- validate_cloudconfig_schema(
- {'ubuntu_advantage': {'enable': ['esm']}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nubuntu_advantage:"
- " 'token' is a required property\n", self.logs.getvalue())
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- @mock.patch('%s.configure_ua' % MPATH)
- def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _):
- """Warn when ubuntu_advantage:enable config is not a list."""
- validate_cloudconfig_schema(
- {'ubuntu_advantage': {'enable': 'needslist'}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nubuntu_advantage: 'token' is a"
- " required property\nubuntu_advantage.enable: 'needslist'"
- " is not of type 'array'\n",
- self.logs.getvalue())
-
-
-class TestHandle(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestHandle, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('%s.validate_cloudconfig_schema' % MPATH)
- def test_handle_no_config(self, m_schema):
- """When no ua-related configuration is provided, nothing happens."""
- cfg = {}
- handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertIn(
- "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'"
- ' configuration found',
- self.logs.getvalue())
- m_schema.assert_not_called()
-
- @mock.patch('%s.configure_ua' % MPATH)
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- def test_handle_tries_to_install_ubuntu_advantage_tools(
- self, m_install, m_cfg):
- """If ubuntu_advantage is provided, try installing ua-tools package."""
- cfg = {'ubuntu_advantage': {'token': 'valid'}}
- mycloud = FakeCloud(None)
- handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
- m_install.assert_called_once_with(mycloud)
-
- @mock.patch('%s.configure_ua' % MPATH)
- @mock.patch('%s.maybe_install_ua_tools' % MPATH)
- def test_handle_passes_credentials_and_services_to_configure_ua(
- self, m_install, m_configure_ua):
- """All ubuntu_advantage config keys are passed to configure_ua."""
- cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}}
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- m_configure_ua.assert_called_once_with(
- token='token', enable=['esm'])
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.configure_ua' % MPATH)
- def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config(
- self, m_configure_ua):
- """Warning when ubuntu-advantage key is present with new config"""
- cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}}
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'WARNING: Deprecated configuration key "ubuntu-advantage"'
- ' provided. Expected underscore delimited "ubuntu_advantage";'
- ' will attempt to continue.',
- self.logs.getvalue().splitlines()[0])
- m_configure_ua.assert_called_once_with(
- token='token', enable=['esm'])
-
- def test_handle_error_on_deprecated_commands_key_dashed(self):
- """Error when commands is present in ubuntu-advantage key."""
- cfg = {'ubuntu-advantage': {'commands': 'nogo'}}
- with self.assertRaises(RuntimeError) as context_manager:
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'Deprecated configuration "ubuntu-advantage: commands" provided.'
- ' Expected "token"',
- str(context_manager.exception))
-
- def test_handle_error_on_deprecated_commands_key_underscored(self):
- """Error when commands is present in ubuntu_advantage key."""
- cfg = {'ubuntu_advantage': {'commands': 'nogo'}}
- with self.assertRaises(RuntimeError) as context_manager:
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'Deprecated configuration "ubuntu-advantage: commands" provided.'
- ' Expected "token"',
- str(context_manager.exception))
-
- @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.configure_ua' % MPATH)
- def test_handle_prefers_new_style_config(
- self, m_configure_ua):
- """ubuntu_advantage should be preferred over ubuntu-advantage"""
- cfg = {
- 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']},
- 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']},
- }
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'WARNING: Deprecated configuration key "ubuntu-advantage"'
- ' provided. Expected underscore delimited "ubuntu_advantage";'
- ' will attempt to continue.',
- self.logs.getvalue().splitlines()[0])
- m_configure_ua.assert_called_once_with(
- token='token', enable=['esm'])
-
-
-class TestMaybeInstallUATools(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestMaybeInstallUATools, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch('%s.subp.which' % MPATH)
- def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
- """Do nothing if ubuntu-advantage-tools already exists."""
- m_which.return_value = '/usr/bin/ua' # already installed
- distro = mock.MagicMock()
- distro.update_package_sources.side_effect = RuntimeError(
- 'Some apt error')
- maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError
-
- @mock.patch('%s.subp.which' % MPATH)
- def test_maybe_install_ua_tools_raises_update_errors(self, m_which):
- """maybe_install_ua_tools logs and raises apt update errors."""
- m_which.return_value = None
- distro = mock.MagicMock()
- distro.update_package_sources.side_effect = RuntimeError(
- 'Some apt error')
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_ua_tools(cloud=FakeCloud(distro))
- self.assertEqual('Some apt error', str(context_manager.exception))
- self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
-
- @mock.patch('%s.subp.which' % MPATH)
- def test_maybe_install_ua_raises_install_errors(self, m_which):
- """maybe_install_ua_tools logs and raises package install errors."""
- m_which.return_value = None
- distro = mock.MagicMock()
- distro.update_package_sources.return_value = None
- distro.install_packages.side_effect = RuntimeError(
- 'Some install error')
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_ua_tools(cloud=FakeCloud(distro))
- self.assertEqual('Some install error', str(context_manager.exception))
- self.assertIn(
- 'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue())
-
- @mock.patch('%s.subp.which' % MPATH)
- def test_maybe_install_ua_tools_happy_path(self, m_which):
- """maybe_install_ua_tools installs ubuntu-advantage-tools."""
- m_which.return_value = None
- distro = mock.MagicMock() # No errors raised
- maybe_install_ua_tools(cloud=FakeCloud(distro))
- distro.update_package_sources.assert_called_once_with()
- distro.install_packages.assert_called_once_with(
- ['ubuntu-advantage-tools'])
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
deleted file mode 100644
index 504ba356..00000000
--- a/cloudinit/config/tests/test_ubuntu_drivers.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-import os
-
-from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
-from cloudinit.config.schema import (
- SchemaValidationError, validate_cloudconfig_schema)
-from cloudinit.config import cc_ubuntu_drivers as drivers
-from cloudinit.subp import ProcessExecutionError
-
-MPATH = "cloudinit.config.cc_ubuntu_drivers."
-M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
-OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
- "ubuntu-drivers: error: argument <command>: invalid choice: 'install' "
- "(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
-
-
-# The tests in this module call helper methods which are decorated with
-# mock.patch. pylint doesn't understand that mock.patch passes parameters to
-# the decorated function, so it incorrectly reports that we aren't passing
-# values for all parameters. Instead of annotating every single call, we
-# disable it for the entire module:
-# pylint: disable=no-value-for-parameter
-
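As an aside on the pylint note above: stacked mock.patch decorators each pass one MagicMock into the decorated test, bottom decorator first, which is why the test methods in this file take extra m_* parameters that pylint cannot see. A minimal, self-contained sketch of that mechanism (illustrative only, not part of the deleted module):

# Illustrative example of mock.patch argument injection order.
import os
import unittest
from unittest import mock


class PatchOrderExample(unittest.TestCase):
    # The decorator closest to the function is applied first, so its mock
    # arrives as the first extra argument (m_isdir); the outer one second.
    @mock.patch("os.path.exists", return_value=True)
    @mock.patch("os.path.isdir", return_value=False)
    def test_patch_argument_order(self, m_isdir, m_exists):
        self.assertFalse(os.path.isdir("/anywhere"))
        self.assertTrue(os.path.exists("/anywhere"))
        m_isdir.assert_called_once_with("/anywhere")
        m_exists.assert_called_once_with("/anywhere")


if __name__ == "__main__":
    unittest.main()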
-class AnyTempScriptAndDebconfFile(object):
-
- def __init__(self, tmp_dir, debconf_file):
- self.tmp_dir = tmp_dir
- self.debconf_file = debconf_file
-
- def __eq__(self, cmd):
- if len(cmd) != 2:
- return False
- script, debconf_file = cmd
- if script.startswith(self.tmp_dir) and script.endswith('.sh'):
- return debconf_file == self.debconf_file
- return False
-
-
-class TestUbuntuDrivers(CiTestCase):
- cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}}
- install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia']
-
- with_logs = True
-
- @skipUnlessJsonSchema()
- def test_schema_requires_boolean_for_license_accepted(self):
- with self.assertRaisesRegex(
- SchemaValidationError, ".*license-accepted.*TRUE.*boolean"):
- validate_cloudconfig_schema(
- {'drivers': {'nvidia': {'license-accepted': "TRUE"}}},
- schema=drivers.schema, strict=True)
-
- @mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "subp.subp", return_value=('', ''))
- @mock.patch(MPATH + "subp.which", return_value=False)
- def _assert_happy_path_taken(
- self, config, m_which, m_subp, m_tmp):
- """Positive path test through handle. Package should be installed."""
- tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
- m_tmp.return_value = tdir
- myCloud = mock.MagicMock()
- drivers.handle('ubuntu_drivers', config, myCloud, None, None)
- self.assertEqual([mock.call(['ubuntu-drivers-common'])],
- myCloud.distro.install_packages.call_args_list)
- self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu)],
- m_subp.call_args_list)
-
- def test_handle_does_package_install(self):
- self._assert_happy_path_taken(self.cfg_accepted)
-
- def test_trueish_strings_are_considered_approval(self):
- for true_value in ['yes', 'true', 'on', '1']:
- new_config = copy.deepcopy(self.cfg_accepted)
- new_config['drivers']['nvidia']['license-accepted'] = true_value
- self._assert_happy_path_taken(new_config)
-
- @mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "subp.subp")
- @mock.patch(MPATH + "subp.which", return_value=False)
- def test_handle_raises_error_if_no_drivers_found(
- self, m_which, m_subp, m_tmp):
- """If ubuntu-drivers doesn't install any drivers, raise an error."""
- tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
- m_tmp.return_value = tdir
- myCloud = mock.MagicMock()
-
- def fake_subp(cmd):
- if cmd[0].startswith(tdir):
- return
- raise ProcessExecutionError(
- stdout='No drivers found for installation.\n', exit_code=1)
- m_subp.side_effect = fake_subp
-
- with self.assertRaises(Exception):
- drivers.handle(
- 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
- self.assertEqual([mock.call(['ubuntu-drivers-common'])],
- myCloud.distro.install_packages.call_args_list)
- self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu)],
- m_subp.call_args_list)
- self.assertIn('ubuntu-drivers found no drivers for installation',
- self.logs.getvalue())
-
- @mock.patch(MPATH + "subp.subp", return_value=('', ''))
- @mock.patch(MPATH + "subp.which", return_value=False)
- def _assert_inert_with_config(self, config, m_which, m_subp):
- """Helper to reduce repetition when testing negative cases"""
- myCloud = mock.MagicMock()
- drivers.handle('ubuntu_drivers', config, myCloud, None, None)
- self.assertEqual(0, myCloud.distro.install_packages.call_count)
- self.assertEqual(0, m_subp.call_count)
-
- def test_handle_inert_if_license_not_accepted(self):
- """Ensure we don't do anything if the license is rejected."""
- self._assert_inert_with_config(
- {'drivers': {'nvidia': {'license-accepted': False}}})
-
- def test_handle_inert_if_garbage_in_license_field(self):
- """Ensure we don't do anything if unknown text is in license field."""
- self._assert_inert_with_config(
- {'drivers': {'nvidia': {'license-accepted': 'garbage'}}})
-
- def test_handle_inert_if_no_license_key(self):
- """Ensure we don't do anything if no license key."""
- self._assert_inert_with_config({'drivers': {'nvidia': {}}})
-
- def test_handle_inert_if_no_nvidia_key(self):
- """Ensure we don't do anything if other license accepted."""
- self._assert_inert_with_config(
- {'drivers': {'acme': {'license-accepted': True}}})
-
- def test_handle_inert_if_string_given(self):
- """Ensure we don't do anything if string refusal given."""
- for false_value in ['no', 'false', 'off', '0']:
- self._assert_inert_with_config(
- {'drivers': {'nvidia': {'license-accepted': false_value}}})
-
- @mock.patch(MPATH + "install_drivers")
- def test_handle_no_drivers_does_nothing(self, m_install_drivers):
- """If no 'drivers' key in the config, nothing should be done."""
- myCloud = mock.MagicMock()
- myLog = mock.MagicMock()
- drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None)
- self.assertIn('Skipping module named',
- myLog.debug.call_args_list[0][0][0])
- self.assertEqual(0, m_install_drivers.call_count)
-
- @mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "subp.subp", return_value=('', ''))
- @mock.patch(MPATH + "subp.which", return_value=True)
- def test_install_drivers_no_install_if_present(
- self, m_which, m_subp, m_tmp):
- """If 'ubuntu-drivers' is present, no package install should occur."""
- tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
- m_tmp.return_value = tdir
- pkg_install = mock.MagicMock()
- drivers.install_drivers(self.cfg_accepted['drivers'],
- pkg_install_func=pkg_install)
- self.assertEqual(0, pkg_install.call_count)
- self.assertEqual([mock.call('ubuntu-drivers')],
- m_which.call_args_list)
- self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu)],
- m_subp.call_args_list)
-
- def test_install_drivers_rejects_invalid_config(self):
- """install_drivers should raise TypeError if not given a config dict"""
- pkg_install = mock.MagicMock()
- with self.assertRaisesRegex(TypeError, ".*expected dict.*"):
- drivers.install_drivers("mystring", pkg_install_func=pkg_install)
- self.assertEqual(0, pkg_install.call_count)
-
- @mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "subp.subp")
- @mock.patch(MPATH + "subp.which", return_value=False)
- def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
- self, m_which, m_subp, m_tmp):
- """Older ubuntu-drivers versions should emit message and raise error"""
- tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
- m_tmp.return_value = tdir
- myCloud = mock.MagicMock()
-
- def fake_subp(cmd):
- if cmd[0].startswith(tdir):
- return
- raise ProcessExecutionError(
- stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2)
- m_subp.side_effect = fake_subp
-
- with self.assertRaises(Exception):
- drivers.handle(
- 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
- self.assertEqual([mock.call(['ubuntu-drivers-common'])],
- myCloud.distro.install_packages.call_args_list)
- self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu)],
- m_subp.call_args_list)
- self.assertIn('WARNING: the available version of ubuntu-drivers is'
- ' too old to perform requested driver installation',
- self.logs.getvalue())
-
-
-# Sub-class TestUbuntuDrivers to run the same test cases, but with a version
-class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
- cfg_accepted = {
- 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}}
- install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
-
- @mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "subp.subp", return_value=('', ''))
- @mock.patch(MPATH + "subp.which", return_value=False)
- def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
- tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, 'nvidia.template')
- m_tmp.return_value = tdir
- myCloud = mock.MagicMock()
- version_none_cfg = {
- 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}}
- drivers.handle(
- 'ubuntu_drivers', version_none_cfg, myCloud, None, None)
- self.assertEqual(
- [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])],
- m_subp.call_args_list)
-
- def test_specifying_a_version_doesnt_override_license_acceptance(self):
- self._assert_inert_with_config({
- 'drivers': {'nvidia': {'license-accepted': False,
- 'version': '123'}}
- })
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
deleted file mode 100644
index df89ddb3..00000000
--- a/cloudinit/config/tests/test_users_groups.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-
-from cloudinit.config import cc_users_groups
-from cloudinit.tests.helpers import CiTestCase, mock
-
-MODPATH = "cloudinit.config.cc_users_groups"
-
-
-@mock.patch('cloudinit.distros.ubuntu.Distro.create_group')
-@mock.patch('cloudinit.distros.ubuntu.Distro.create_user')
-class TestHandleUsersGroups(CiTestCase):
- """Test cc_users_groups handling of config."""
-
- with_logs = True
-
- def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group):
- """Test handle with no config will not create users or groups."""
- cfg = {} # merged cloud-config
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- m_user.assert_not_called()
- m_group.assert_not_called()
-
- def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group):
- """When users in config, create users with distro.create_user."""
- cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_user.call_args_list,
- [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
- shell='/bin/bash'),
- mock.call('me2', default=False)])
- m_group.assert_not_called()
-
- @mock.patch('cloudinit.distros.freebsd.Distro.create_group')
- @mock.patch('cloudinit.distros.freebsd.Distro.create_user')
- def test_handle_users_in_cfg_calls_create_users_on_bsd(
- self,
- m_fbsd_user,
- m_fbsd_group,
- m_linux_user,
- m_linux_group,
- ):
- """When users in config, create users with freebsd.create_user."""
- cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'freebsd', 'lock_passwd': True,
- 'groups': ['wheel'],
- 'shell': '/bin/tcsh'}}
- metadata = {}
- cloud = self.tmp_cloud(
- distro='freebsd', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_fbsd_user.call_args_list,
- [mock.call('freebsd', groups='wheel', lock_passwd=True,
- shell='/bin/tcsh'),
- mock.call('me2', default=False)])
- m_fbsd_group.assert_not_called()
- m_linux_group.assert_not_called()
- m_linux_user.assert_not_called()
-
- def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
- """When ssh_redirect_user is True pass default user and cloud keys."""
- cfg = {
- 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {'public-keys': ['key1']}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_user.call_args_list,
- [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
- shell='/bin/bash'),
- mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
- ssh_redirect_user='ubuntu')])
- m_group.assert_not_called()
-
- def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group):
- """When ssh_redirect_user is 'default' pass default username."""
- cfg = {
- 'users': ['default', {'name': 'me2',
- 'ssh_redirect_user': 'default'}]}
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {'public-keys': ['key1']}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_user.call_args_list,
- [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
- shell='/bin/bash'),
- mock.call('me2', cloud_public_ssh_keys=['key1'], default=False,
- ssh_redirect_user='ubuntu')])
- m_group.assert_not_called()
-
- def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group):
- """Warn when ssh_redirect_user is not 'default'."""
- cfg = {
- 'users': ['default', {'name': 'me2',
- 'ssh_redirect_user': 'snowflake'}]}
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {'public-keys': ['key1']}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- with self.assertRaises(ValueError) as context_manager:
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- m_group.assert_not_called()
- self.assertEqual(
- 'Not creating user me2. Invalid value of ssh_redirect_user:'
- ' snowflake. Expected values: true, default or false.',
- str(context_manager.exception))
-
- def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group):
- """When unspecified ssh_redirect_user is false and not set up."""
- cfg = {'users': ['default', {'name': 'me2'}]}
- # System config defines a default user for the distro.
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True,
- 'groups': ['lxd', 'sudo'],
- 'shell': '/bin/bash'}}
- metadata = {'public-keys': ['key1']}
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertCountEqual(
- m_user.call_args_list,
- [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
- shell='/bin/bash'),
- mock.call('me2', default=False)])
- m_group.assert_not_called()
-
- def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group):
- """Warn when ssh_redirect_user is True and no default user present."""
- cfg = {
- 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]}
- # System config defines *no* default user for the distro.
- sys_cfg = {}
- metadata = {} # no public-keys defined
- cloud = self.tmp_cloud(
- distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
- cc_users_groups.handle('modulename', cfg, cloud, None, None)
- m_user.assert_called_once_with('me2', default=False)
- m_group.assert_not_called()
- self.assertEqual(
- 'WARNING: Ignoring ssh_redirect_user: True for me2. No'
- ' default_user defined. Perhaps missing'
- ' cloud configuration users: [default, ..].\n',
- self.logs.getvalue())