From a4236c375ddf78258a8f9252c1d79c665aa4f88b Mon Sep 17 00:00:00 2001 From: Lucendio Date: Mon, 25 Oct 2021 21:31:07 +0200 Subject: Add module 'write-files-deferred' executed in stage 'final' (#916) The main idea is to introduce a second module that takes care of writing files, but in the 'final' stage. While the introduction of a second module would allow for choosing the appropriate place within the order of modules (and stages), there is no additional top-level directive being added to the cloud configuration schema. Instead, the 'write-files' schema is being extended to include a 'defer' attribute used only by the 'write-files-deferred' module. The new module 'write-files-deferred' reuses as much as possible of the 'write-files' functionality. --- cloudinit/config/cc_write_files_deferred.py | 55 +++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 cloudinit/config/cc_write_files_deferred.py (limited to 'cloudinit/config/cc_write_files_deferred.py') diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py new file mode 100644 index 00000000..0c75aa22 --- /dev/null +++ b/cloudinit/config/cc_write_files_deferred.py @@ -0,0 +1,55 @@ +# Copyright (C) 2021 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Defer writing certain files""" + +from textwrap import dedent + +from cloudinit.config.schema import validate_cloudconfig_schema +from cloudinit import util +from cloudinit.config.cc_write_files import ( + schema as write_files_schema, write_files, DEFAULT_DEFER) + + +schema = util.mergemanydict([ + { + 'id': 'cc_write_files_deferred', + 'name': 'Write Deferred Files', + 'title': dedent("""\ + write certain files, whose creation as been deferred, during + final stage + """), + 'description': dedent("""\ + This module is based on `'Write Files' `__, and + will handle all files from the write_files list, that have been + marked as deferred and thus are not being processed by the + write-files module. + + *Please note that his module is not exposed to the user through + its own dedicated top-level directive.* + """) + }, + write_files_schema +]) + +# Not exposed, because related modules should document this behaviour +__doc__ = None + + +def handle(name, cfg, _cloud, log, _args): + validate_cloudconfig_schema(cfg, schema) + file_list = cfg.get('write_files', []) + filtered_files = [ + f for f in file_list if util.get_cfg_option_bool(f, + 'defer', + DEFAULT_DEFER) + ] + if not filtered_files: + log.debug(("Skipping module named %s," + " no deferred file defined in configuration"), name) + return + write_files(name, filtered_files) + + +# vi: ts=4 expandtab -- cgit v1.2.3 From bedac77e9348e7a54c0ec364fb61df90cd893972 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 6 Dec 2021 15:27:12 -0700 Subject: Add Strict Metaschema Validation (#1101) Improve schema validation. This adds strict validation of config module definitions at testing time, with plumbing included for future runtime validation. This eliminates a class of bugs resulting from schema definitions that are incorrect but get interpreted by jsonschema as "additionalProperties" and are therefore silently ignored. 
- Add strict meta-schema for jsonschema unit test validation - Separate schema from module metadata structure - Improve type annotations for various functions and data types Cleanup: - Remove unused jsonschema "required" elements - Eliminate manual memoization in schema.py:get_schema(), reference module.__doc__ directly --- cloudinit/cmd/clean.py | 8 +- cloudinit/cmd/cloud_id.py | 6 +- cloudinit/config/cc_apk_configure.py | 11 +- cloudinit/config/cc_apt_configure.py | 11 +- cloudinit/config/cc_bootcmd.py | 11 +- cloudinit/config/cc_chef.py | 11 +- cloudinit/config/cc_install_hotplug.py | 9 +- cloudinit/config/cc_locale.py | 9 +- cloudinit/config/cc_ntp.py | 11 +- cloudinit/config/cc_resizefs.py | 10 +- cloudinit/config/cc_runcmd.py | 11 +- cloudinit/config/cc_snap.py | 11 +- cloudinit/config/cc_ubuntu_advantage.py | 10 +- cloudinit/config/cc_ubuntu_drivers.py | 10 +- cloudinit/config/cc_write_files.py | 10 +- cloudinit/config/cc_write_files_deferred.py | 41 ++-- cloudinit/config/cc_zypper_add_repo.py | 10 +- cloudinit/config/schema.py | 288 ++++++++++++++++------- cloudinit/importer.py | 24 +- cloudinit/util.py | 17 +- doc/rtd/conf.py | 13 +- tests/unittests/cmd/test_clean.py | 2 +- tests/unittests/cmd/test_cloud_id.py | 4 +- tests/unittests/config/test_schema.py | 339 ++++++++++++++++++++-------- tests/unittests/test_cli.py | 105 ++++++++- 25 files changed, 701 insertions(+), 291 deletions(-) (limited to 'cloudinit/config/cc_write_files_deferred.py') diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 928a8eea..3502dd56 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -11,11 +11,9 @@ import sys from cloudinit.stages import Init from cloudinit.subp import (ProcessExecutionError, subp) -from cloudinit.util import (del_dir, del_file, get_config_logfiles, is_link) - - -def error(msg): - sys.stderr.write("ERROR: " + msg + "\n") +from cloudinit.util import ( + del_dir, del_file, get_config_logfiles, is_link, error +) def get_parser(parser=None): diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py index 97608921..0cdc9675 100755 --- a/cloudinit/cmd/cloud_id.py +++ b/cloudinit/cmd/cloud_id.py @@ -6,6 +6,7 @@ import argparse import json import sys +from cloudinit.util import error from cloudinit.sources import ( INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) @@ -40,11 +41,6 @@ def get_parser(parser=None): return parser -def error(msg): - sys.stderr.write('ERROR: %s\n' % msg) - return 1 - - def handle_args(name, args): """Handle calls to 'cloud-id' cli. 
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py index 84d7a0b6..d227a58d 100644 --- a/cloudinit/config/cc_apk_configure.py +++ b/cloudinit/config/cc_apk_configure.py @@ -12,8 +12,7 @@ from cloudinit import log as logging from cloudinit import temp_utils from cloudinit import templater from cloudinit import util -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -56,7 +55,7 @@ REPOSITORIES_TEMPLATE = """\ frequency = PER_INSTANCE distros = ['alpine'] -schema = { +meta = { 'id': 'cc_apk_configure', 'name': 'APK Configure', 'title': 'Configure apk repositories file', @@ -95,6 +94,9 @@ schema = { """), ], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'apk_repos': { @@ -171,14 +173,13 @@ schema = { """) } }, - 'required': [], 'minProperties': 1, # Either preserve_repositories or alpine_repo 'additionalProperties': False, } } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) def handle(name, cfg, cloud, log, _args): diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 86d0feae..2e844c2c 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -14,8 +14,7 @@ import re import pathlib from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import gpg from cloudinit import log as logging from cloudinit import subp @@ -75,7 +74,8 @@ mirror_property = { } } } -schema = { + +meta = { 'id': 'cc_apt_configure', 'name': 'Apt Configure', 'title': 'Configure apt for the user', @@ -155,6 +155,9 @@ schema = { ------END PGP PUBLIC KEY BLOCK-------""")], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'apt': { @@ -398,7 +401,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) # place where apt stores cached repository data diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 246e4497..06f7a26e 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -12,8 +12,7 @@ import os from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS from cloudinit import temp_utils from cloudinit import subp @@ -29,7 +28,7 @@ frequency = PER_ALWAYS distros = ['all'] -schema = { +meta = { 'id': 'cc_bootcmd', 'name': 'Bootcmd', 'title': 'Run arbitrary commands early in the boot process', @@ -57,6 +56,9 @@ schema = { - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] """)], 'frequency': PER_ALWAYS, +} + +schema = { 'type': 'object', 'properties': { 'bootcmd': { @@ -69,12 +71,11 @@ schema = { 'additionalItems': False, # Reject items of non-string non-list 'additionalProperties': False, 'minItems': 1, - 'required': [], } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 7b20222e..ed734d1c 100644 --- a/cloudinit/config/cc_chef.py +++ 
b/cloudinit/config/cc_chef.py @@ -14,8 +14,7 @@ import os from textwrap import dedent from cloudinit import subp -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import templater from cloudinit import temp_utils from cloudinit import url_helper @@ -89,7 +88,8 @@ CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) frequency = PER_ALWAYS distros = ["all"] -schema = { + +meta = { 'id': 'cc_chef', 'name': 'Chef', 'title': 'module that configures, starts and installs chef', @@ -126,6 +126,9 @@ schema = { ssl_verify_mode: :verify_peer validation_name: yourorg-validator""")], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'chef': { @@ -357,7 +360,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) def post_run_chef(chef_cfg, log): diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py index da98c409..9b4075cc 100644 --- a/cloudinit/config/cc_install_hotplug.py +++ b/cloudinit/config/cc_install_hotplug.py @@ -6,7 +6,7 @@ from textwrap import dedent from cloudinit import util from cloudinit import subp from cloudinit import stages -from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS from cloudinit.event import EventType, EventScope from cloudinit.settings import PER_INSTANCE @@ -15,7 +15,7 @@ from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE distros = [ALL_DISTROS] -schema = { +meta = { "id": "cc_install_hotplug", "name": "Install Hotplug", "title": "Install hotplug if supported and enabled", @@ -49,6 +49,9 @@ schema = { """), ], "frequency": frequency, +} + +schema = { "type": "object", "properties": { "updates": { @@ -81,7 +84,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules" diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index 4f8b7bf6..7fed9abd 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -11,13 +11,13 @@ from textwrap import dedent from cloudinit import util -from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE distros = ['all'] -schema = { +meta = { 'id': 'cc_locale', 'name': 'Locale', 'title': 'Set system locale', @@ -39,6 +39,9 @@ schema = { """), ], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'locale': { @@ -57,7 +60,7 @@ schema = { }, } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, args): diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index c3aee798..9c085a04 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -16,7 +16,7 @@ from cloudinit import templater from cloudinit import type_utils from cloudinit import subp from cloudinit import util -from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = 
logging.getLogger(__name__) @@ -140,7 +140,7 @@ DISTRO_CLIENT_CONFIG = { # configuration options before actually attempting to deploy with said # configuration. -schema = { +meta = { 'id': 'cc_ntp', 'name': 'NTP', 'title': 'enable and configure ntp', @@ -190,6 +190,9 @@ schema = { - ntp.ubuntu.com - 192.168.23.2""")], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'ntp': { @@ -289,12 +292,10 @@ schema = { }, # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override # of builtin client values. - 'required': [], 'minProperties': 1, # If we have config, define something 'additionalProperties': False }, }, - 'required': [], 'additionalProperties': False } } @@ -303,7 +304,7 @@ REQUIRED_NTP_CONFIG_KEYS = frozenset([ 'check_exe', 'confpath', 'packages', 'service_name']) -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def distro_ntp_client_configs(distro): diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 990a6939..00bb7ae7 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -13,8 +13,7 @@ import os import stat from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS from cloudinit import subp from cloudinit import util @@ -24,7 +23,7 @@ NOBLOCK = "noblock" frequency = PER_ALWAYS distros = ['all'] -schema = { +meta = { 'id': 'cc_resizefs', 'name': 'Resizefs', 'title': 'Resize filesystem', @@ -42,6 +41,9 @@ schema = { 'examples': [ 'resize_rootfs: false # disable root filesystem resize operation'], 'frequency': PER_ALWAYS, +} + +schema = { 'type': 'object', 'properties': { 'resize_rootfs': { @@ -52,7 +54,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def _resize_btrfs(mount_point, devpth): diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 15960c7d..2f5e02cb 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -8,8 +8,7 @@ """Runcmd: run arbitrary commands at rc.local with output to the console""" -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE from cloudinit import util @@ -26,7 +25,7 @@ from textwrap import dedent distros = [ALL_DISTROS] -schema = { +meta = { 'id': 'cc_runcmd', 'name': 'Runcmd', 'title': 'Run arbitrary commands', @@ -58,6 +57,9 @@ schema = { - [ wget, "http://example.org", -O, /tmp/index.html ] """)], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'runcmd': { @@ -71,12 +73,11 @@ schema = { 'additionalItems': False, # Reject items of non-string non-list 'additionalProperties': False, 'minItems': 1, - 'required': [], } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index 20ed7d2f..21f30b57 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -8,8 +8,7 @@ import sys from textwrap import dedent from cloudinit import log as logging 
-from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE from cloudinit.subp import prepend_base_command from cloudinit import subp @@ -21,7 +20,7 @@ frequency = PER_INSTANCE LOG = logging.getLogger(__name__) -schema = { +meta = { 'id': 'cc_snap', 'name': 'Snap', 'title': 'Install, configure and manage snapd and snap packages', @@ -103,6 +102,9 @@ schema = { signed_assertion_blob_here """)], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'snap': { @@ -139,13 +141,12 @@ schema = { } }, 'additionalProperties': False, # Reject keys not in schema - 'required': [], 'minProperties': 1 } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() SNAP_CMD = "snap" ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions" diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index d61dc655..831a92a2 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -4,8 +4,7 @@ from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import subp @@ -16,7 +15,7 @@ UA_URL = 'https://ubuntu.com/advantage' distros = ['ubuntu'] -schema = { +meta = { 'id': 'cc_ubuntu_advantage', 'name': 'Ubuntu Advantage', 'title': 'Configure Ubuntu Advantage support services', @@ -61,6 +60,9 @@ schema = { - fips """)], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'ubuntu_advantage': { @@ -82,7 +84,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py index 2d1d2b32..7f617efe 100644 --- a/cloudinit/config/cc_ubuntu_drivers.py +++ b/cloudinit/config/cc_ubuntu_drivers.py @@ -5,8 +5,7 @@ import os from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import subp @@ -18,7 +17,7 @@ LOG = logging.getLogger(__name__) frequency = PER_INSTANCE distros = ['ubuntu'] -schema = { +meta = { 'id': 'cc_ubuntu_drivers', 'name': 'Ubuntu Drivers', 'title': 'Interact with third party drivers in Ubuntu.', @@ -32,6 +31,9 @@ schema = { license-accepted: true """)], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'drivers': { @@ -64,7 +66,7 @@ schema = { OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = ( "ubuntu-drivers: error: argument : invalid choice: 'install'") -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() # Use a debconf template to configure a global debconf variable diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 41c75fa2..55f8c684 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -10,8 +10,7 @@ import base64 import os from textwrap 
import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import util @@ -38,7 +37,7 @@ supported_encoding_types = [ 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64', 'base64'] -schema = { +meta = { 'id': 'cc_write_files', 'name': 'Write Files', 'title': 'write arbitrary files', @@ -111,6 +110,9 @@ schema = { defer: true """)], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'write_files': { @@ -187,7 +189,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, _cloud, log, _args): diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py index 0c75aa22..4fc8659c 100644 --- a/cloudinit/config/cc_write_files_deferred.py +++ b/cloudinit/config/cc_write_files_deferred.py @@ -4,34 +4,31 @@ """Defer writing certain files""" -from textwrap import dedent - from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util from cloudinit.config.cc_write_files import ( schema as write_files_schema, write_files, DEFAULT_DEFER) +# meta is not used in this module, but it remains as code documentation +# +# id: 'cc_write_files_deferred' +# name: 'Write Deferred Files' +# distros: ['all'], +# frequency: PER_INSTANCE, +# title: +# write certain files, whose creation has been deferred, during +# final stage +# description: +# This module is based on `'Write Files' `__, and +# will handle all files from the write_files list that have been +# marked as deferred and thus are not being processed by the +# write-files module. +# +# *Please note that this module is not exposed to the user through +# its own dedicated top-level directive.* + +schema = write_files_schema -schema = util.mergemanydict([ - { - 'id': 'cc_write_files_deferred', - 'name': 'Write Deferred Files', - 'title': dedent("""\ - write certain files, whose creation as been deferred, during - final stage - """), - 'description': dedent("""\ - This module is based on `'Write Files' `__, and - will handle all files from the write_files list, that have been - marked as deferred and thus are not being processed by the - write-files module. 
- - *Please note that his module is not exposed to the user through - its own dedicated top-level directive.* - """) - }, - write_files_schema -]) # Not exposed, because related modules should document this behaviour __doc__ = None diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py index 05855b0c..bf1638fb 100644 --- a/cloudinit/config/cc_zypper_add_repo.py +++ b/cloudinit/config/cc_zypper_add_repo.py @@ -9,14 +9,14 @@ import configobj import os from textwrap import dedent -from cloudinit.config.schema import get_schema_doc +from cloudinit.config.schema import get_meta_doc from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import util distros = ['opensuse', 'sles'] -schema = { +meta = { 'id': 'cc_zypper_add_repo', 'name': 'ZypperAddRepo', 'title': 'Configure zypper behavior and add zypper repositories', @@ -51,6 +51,9 @@ schema = { # any setting in /etc/zypp/zypp.conf """)], 'frequency': PER_ALWAYS, +} + +schema = { 'type': 'object', 'properties': { 'zypper': { @@ -86,14 +89,13 @@ schema = { /etc/zypp/zypp.conf'""") } }, - 'required': [], 'minProperties': 1, # Either config or repo must be provided 'additionalProperties': False, # only repos and config allowed } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 456bab2c..d32b7c01 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -3,19 +3,22 @@ from cloudinit.cmd.devel import read_cfg_paths from cloudinit import importer -from cloudinit.util import find_modules, load_file +from cloudinit.importer import MetaSchema +from cloudinit.util import find_modules, load_file, error import argparse from collections import defaultdict from copy import deepcopy +from functools import partial import logging import os import re import sys import yaml +error = partial(error, sys_exit=True) + _YAML_MAP = {True: 'true', False: 'false', None: 'null'} -SCHEMA_UNDEFINED = b'UNDEFINED' CLOUD_CONFIG_HEADER = b'#cloud-config' SCHEMA_DOC_TMPL = """ {name} @@ -34,7 +37,7 @@ SCHEMA_DOC_TMPL = """ {property_doc} {examples} """ -SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}' +SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}) {description}" SCHEMA_LIST_ITEM_TMPL = ( '{prefix}Each item in **{prop_name}** list supports the following keys:') SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n' @@ -72,45 +75,102 @@ def is_schema_byte_string(checker, instance): isinstance(instance, (bytes,))) -def validate_cloudconfig_schema(config, schema, strict=False): - """Validate provided config meets the schema definition. +def get_jsonschema_validator(): + """Get metaschema validator and format checker - @param config: Dict of cloud configuration settings validated against - schema. - @param schema: jsonschema dict describing the supported schema definition - for the cloud config module (config.cc_*). - @param strict: Boolean, when True raise SchemaValidationErrors instead of - logging warnings. + Older versions of jsonschema require some compatibility changes. - @raises: SchemaValidationError when provided config does not validate - against the provided schema. 
+ @returns: Tuple: (jsonschema.Validator, FormatChecker) + @raises: ImportError when jsonschema is not present """ - try: - from jsonschema import Draft4Validator, FormatChecker - from jsonschema.validators import create, extend - except ImportError: - logging.debug( - 'Ignoring schema validation. python-jsonschema is not present') - return + from jsonschema import Draft4Validator, FormatChecker + from jsonschema.validators import create # Allow for bytes to be presented as an acceptable valid value for string # type jsonschema attributes in cloud-init's schema. # This allows #cloud-config to provide valid yaml "content: !!binary | ..." + + strict_metaschema = deepcopy(Draft4Validator.META_SCHEMA) + strict_metaschema['additionalProperties'] = False if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+ type_checker = Draft4Validator.TYPE_CHECKER.redefine( 'string', is_schema_byte_string) - cloudinitValidator = extend(Draft4Validator, type_checker=type_checker) + cloudinitValidator = create( + meta_schema=strict_metaschema, + validators=Draft4Validator.VALIDATORS, + version="draft4", + type_checker=type_checker) else: # jsonschema 2.6 workaround types = Draft4Validator.DEFAULT_TYPES - # Allow bytes as well as string (and disable a spurious - # unsupported-assignment-operation pylint warning which appears because - # this code path isn't written against the latest jsonschema). + # Allow bytes as well as string (and disable a spurious unsupported + # assignment-operation pylint warning which appears because this + # code path isn't written against the latest jsonschema). types['string'] = (str, bytes) # pylint: disable=E1137 cloudinitValidator = create( - meta_schema=Draft4Validator.META_SCHEMA, + meta_schema=strict_metaschema, validators=Draft4Validator.VALIDATORS, version="draft4", default_types=types) + return (cloudinitValidator, FormatChecker) + + +def validate_cloudconfig_metaschema(validator, schema: dict, throw=True): + """Validate provided schema meets the metaschema definition. Return strict + Validator and FormatChecker for use in validation + @param validator: Draft4Validator instance used to validate the schema + @param schema: schema to validate + @param throw: Sometimes the validator and checker are required, even if + the schema is invalid. Toggle for whether to raise + SchemaValidationError or log warnings. + + @raises: ImportError when jsonschema is not present + @raises: SchemaValidationError when the schema is invalid + """ + + from jsonschema.exceptions import SchemaError + + try: + validator.check_schema(schema) + except SchemaError as err: + # Raise SchemaValidationError to avoid jsonschema imports at call + # sites + if throw: + raise SchemaValidationError( + schema_errors=( + ('.'.join([str(p) for p in err.path]), err.message), + ) + ) from err + logging.warning( + "Meta-schema validation failed, attempting to validate config " + "anyway: %s", err) + + +def validate_cloudconfig_schema( + config: dict, schema: dict, strict=False, strict_metaschema=False +): + """Validate provided config meets the schema definition. + + @param config: Dict of cloud configuration settings validated against + schema. Ignored if strict_metaschema=True + @param schema: jsonschema dict describing the supported schema definition + for the cloud config module (config.cc_*). + @param strict: Boolean, when True raise SchemaValidationErrors instead of + logging warnings. 
+ @param strict_metaschema: Boolean, when True validates schema using strict + metaschema definition at runtime (currently unused) + + @raises: SchemaValidationError when provided config does not validate + against the provided schema. + """ + try: + (cloudinitValidator, FormatChecker) = get_jsonschema_validator() + if strict_metaschema: + validate_cloudconfig_metaschema( + cloudinitValidator, schema, throw=False) + except ImportError: + logging.debug("Ignoring schema validation. jsonschema is not present") + return + validator = cloudinitValidator(schema, format_checker=FormatChecker()) errors = () for error in sorted(validator.iter_errors(config), key=lambda e: e.path): @@ -301,12 +361,15 @@ def _schemapath_for_cloudconfig(config, original_content): return schema_line_numbers -def _get_property_type(property_dict): - """Return a string representing a property type from a given jsonschema.""" - property_type = property_dict.get('type', SCHEMA_UNDEFINED) - if property_type == SCHEMA_UNDEFINED and property_dict.get('enum'): +def _get_property_type(property_dict: dict) -> str: + """Return a string representing a property type from a given + jsonschema. + """ + property_type = property_dict.get("type") + if property_type is None and property_dict.get("enum"): property_type = [ - str(_YAML_MAP.get(k, k)) for k in property_dict['enum']] + str(_YAML_MAP.get(k, k)) for k in property_dict["enum"] + ] if isinstance(property_type, list): property_type = '/'.join(property_type) items = property_dict.get('items', {}) @@ -317,12 +380,12 @@ def _get_property_type(property_dict): sub_property_type += '/' sub_property_type += '(' + _get_property_type(sub_item) + ')' if sub_property_type: - return '{0} of {1}'.format(property_type, sub_property_type) - return property_type + return "{0} of {1}".format(property_type, sub_property_type) + return property_type or "UNDEFINED" -def _parse_description(description, prefix): - """Parse description from the schema in a format that we can better +def _parse_description(description, prefix) -> str: + """Parse description from the meta in a format that we can better display in our docs. This parser does three things: - Guarantee that a paragraph will be in a single line @@ -330,7 +393,7 @@ def _parse_description(description, prefix): the first paragraph - Proper align lists of items - @param description: The original description in the schema. + @param description: The original description in the meta. 
@param prefix: The number of spaces used to align the current description """ list_paragraph = prefix * 3 @@ -343,20 +406,24 @@ def _parse_description(description, prefix): return description -def _get_property_doc(schema, prefix=' '): +def _get_property_doc(schema: dict, prefix=" ") -> str: """Return restructured text describing the supported schema properties.""" new_prefix = prefix + ' ' properties = [] for prop_key, prop_config in schema.get('properties', {}).items(): - # Define prop_name and dscription for SCHEMA_PROPERTY_TMPL + # Define prop_name and description for SCHEMA_PROPERTY_TMPL description = prop_config.get('description', '') - properties.append(SCHEMA_PROPERTY_TMPL.format( - prefix=prefix, - prop_name=prop_key, - type=_get_property_type(prop_config), - description=_parse_description(description, prefix))) - items = prop_config.get('items') + # Define prop_name and description for SCHEMA_PROPERTY_TMPL + properties.append( + SCHEMA_PROPERTY_TMPL.format( + prefix=prefix, + prop_name=prop_key, + description=_parse_description(description, prefix), + prop_type=_get_property_type(prop_config), + ) + ) + items = prop_config.get("items") if items: if isinstance(items, list): for item in items: @@ -373,9 +440,9 @@ def _get_property_doc(schema, prefix=' '): return '\n\n'.join(properties) -def _get_schema_examples(schema, prefix=''): - """Return restructured text describing the schema examples if present.""" - examples = schema.get('examples') +def _get_examples(meta: MetaSchema) -> str: + """Return restructured text describing the meta examples if present.""" + examples = meta.get("examples") if not examples: return '' rst_content = SCHEMA_EXAMPLES_HEADER @@ -390,48 +457,111 @@ def _get_schema_examples(schema, prefix=''): return rst_content -def get_schema_doc(schema): - """Return reStructured text rendering the provided jsonschema. +def get_meta_doc(meta: MetaSchema, schema: dict) -> str: + """Return reStructured text rendering the provided metadata. - @param schema: Dict of jsonschema to render. - @raise KeyError: If schema lacks an expected key. + @param meta: Dict of metadata to render. + @raise KeyError: If metadata lacks an expected key. 
""" - schema_copy = deepcopy(schema) - schema_copy['property_doc'] = _get_property_doc(schema) - schema_copy['examples'] = _get_schema_examples(schema) - schema_copy['distros'] = ', '.join(schema['distros']) + + if not meta or not schema: + raise ValueError("Expected meta and schema") + keys = set(meta.keys()) + expected = set( + { + "id", + "title", + "examples", + "frequency", + "distros", + "description", + "name", + } + ) + error_message = "" + if expected - keys: + error_message = "Missing expected keys in module meta: {}".format( + expected - keys + ) + elif keys - expected: + error_message = ( + "Additional unexpected keys found in module meta: {}".format( + keys - expected + ) + ) + if error_message: + raise KeyError(error_message) + + # cast away type annotation + meta_copy = dict(deepcopy(meta)) + meta_copy["property_doc"] = _get_property_doc(schema) + meta_copy["examples"] = _get_examples(meta) + meta_copy["distros"] = ", ".join(meta["distros"]) # Need an underbar of the same length as the name - schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name']) - return SCHEMA_DOC_TMPL.format(**schema_copy) + meta_copy["title_underbar"] = re.sub(r".", "-", meta["name"]) + template = SCHEMA_DOC_TMPL.format(**meta_copy) + return template + + +def get_modules() -> dict: + configs_dir = os.path.dirname(os.path.abspath(__file__)) + return find_modules(configs_dir) + +def load_doc(requested_modules: list) -> str: + """Load module docstrings -FULL_SCHEMA = None + Docstrings are generated on module load. Reduce, reuse, recycle. + """ + docs = "" + all_modules = list(get_modules().values()) + ["all"] + invalid_docs = set(requested_modules).difference(set(all_modules)) + if invalid_docs: + error( + "Invalid --docs value {}. Must be one of: {}".format( + list(invalid_docs), ", ".join(all_modules), + ) + ) + for mod_name in all_modules: + if "all" in requested_modules or mod_name in requested_modules: + (mod_locs, _) = importer.find_module( + mod_name, ["cloudinit.config"], ["schema"] + ) + if mod_locs: + mod = importer.import_module(mod_locs[0]) + docs += mod.__doc__ or "" + return docs -def get_schema(): +def get_schema() -> dict: """Return jsonschema coalesced from all cc_* cloud-config module.""" - global FULL_SCHEMA - if FULL_SCHEMA: - return FULL_SCHEMA full_schema = { - '$schema': 'http://json-schema.org/draft-04/schema#', - 'id': 'cloud-config-schema', 'allOf': []} - - configs_dir = os.path.dirname(os.path.abspath(__file__)) - potential_handlers = find_modules(configs_dir) - for (_fname, mod_name) in potential_handlers.items(): - mod_locs, _looked_locs = importer.find_module( - mod_name, ['cloudinit.config'], ['schema']) + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "cloud-config-schema", + "allOf": [], + } + + for (_, mod_name) in get_modules().items(): + (mod_locs, _) = importer.find_module( + mod_name, ["cloudinit.config"], ["schema"] + ) if mod_locs: mod = importer.import_module(mod_locs[0]) - full_schema['allOf'].append(mod.schema) - FULL_SCHEMA = full_schema + full_schema["allOf"].append(mod.schema) return full_schema -def error(message): - print(message, file=sys.stderr) - sys.exit(1) +def get_meta() -> dict: + """Return metadata coalesced from all cc_* cloud-config module.""" + full_meta = dict() + for (_, mod_name) in get_modules().items(): + mod_locs, _ = importer.find_module( + mod_name, ["cloudinit.config"], ["meta"] + ) + if mod_locs: + mod = importer.import_module(mod_locs[0]) + full_meta[mod.meta["id"]] = mod.meta + return full_meta def 
get_parser(parser=None): @@ -474,15 +604,7 @@ def handle_schema_args(name, args): cfg_name = args.config_file print("Valid cloud-config:", cfg_name) elif args.docs: - schema_ids = [subschema['id'] for subschema in full_schema['allOf']] - schema_ids += ['all'] - invalid_docs = set(args.docs).difference(set(schema_ids)) - if invalid_docs: - error('Invalid --docs value {0}. Must be one of: {1}'.format( - list(invalid_docs), ', '.join(schema_ids))) - for subschema in full_schema['allOf']: - if 'all' in args.docs or subschema['id'] in args.docs: - print(get_schema_doc(subschema)) + print(load_doc(args.docs)) def main(): diff --git a/cloudinit/importer.py b/cloudinit/importer.py index f1194fbe..4e677af3 100644 --- a/cloudinit/importer.py +++ b/cloudinit/importer.py @@ -9,6 +9,27 @@ # This file is part of cloud-init. See LICENSE file for license information. import sys +import typing + +# annotations add value for development, but don't break old versions +# pyver: 3.5 -> 3.8 +# pylint: disable=E1101 +if sys.version_info >= (3, 8) and hasattr(typing, "TypedDict"): + MetaSchema = typing.TypedDict( + "MetaSchema", + { + "name": str, + "id": str, + "title": str, + "description": str, + "distros": typing.List[str], + "examples": typing.List[str], + "frequency": str, + }, + ) +else: + MetaSchema = dict +# pylint: enable=E1101 def import_module(module_name): @@ -16,7 +37,8 @@ def import_module(module_name): return sys.modules[module_name] -def find_module(base_name, search_paths, required_attrs=None): +def find_module(base_name: str, search_paths, required_attrs=None) -> tuple: + """Finds and imports specified modules""" if not required_attrs: required_attrs = [] # NOTE(harlowja): translate the search paths to include the base name. diff --git a/cloudinit/util.py b/cloudinit/util.py index 2045a6ab..1b462a38 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -347,7 +347,7 @@ def extract_usergroup(ug_pair): return (u, g) -def find_modules(root_dir): +def find_modules(root_dir) -> dict: entries = dict() for fname in glob.glob(os.path.join(root_dir, "*.py")): if not os.path.isfile(fname): @@ -2751,4 +2751,19 @@ def get_proc_ppid(pid): ppid = int(parts[3]) return ppid + +def error(msg, rc=1, fmt='Error:\n{}', sys_exit=False): + """ + Print error to stderr and return or exit + + @param msg: message to print + @param rc: return code (default: 1) + @param fmt: format string for putting message in (default: 'Error:\n{}') + @param sys_exit: exit when called (default: false) + """ + print(fmt.format(msg), file=sys.stderr) + if sys_exit: + sys.exit(rc) + return rc + # vi: ts=4 expandtab diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 684822c2..4316b5d9 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -1,6 +1,8 @@ import os import sys +from cloudinit import version + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -9,8 +11,6 @@ sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) sys.path.insert(0, os.path.abspath('.')) -from cloudinit import version -from cloudinit.config.schema import get_schema_doc # Supress warnings for docs that aren't used yet # unused_docs = [ @@ -66,12 +66,3 @@ html_theme = 'sphinx_rtd_theme' # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
html_logo = 'static/logo.png' - -def generate_docstring_from_schema(app, what, name, obj, options, lines): - """Override module docs from schema when present.""" - if what == 'module' and hasattr(obj, "schema"): - del lines[:] - lines.extend(get_schema_doc(obj.schema).split('\n')) - -def setup(app): - app.connect('autodoc-process-docstring', generate_docstring_from_schema) diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py index 81fc930e..3bb0ee9b 100644 --- a/tests/unittests/cmd/test_clean.py +++ b/tests/unittests/cmd/test_clean.py @@ -137,7 +137,7 @@ class TestClean(CiTestCase): clean.remove_artifacts, remove_logs=False) self.assertEqual(1, retcode) self.assertEqual( - 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir, + 'Error:\nCould not remove %s/dir1: oops\n' % self.artifact_dir, m_stderr.getvalue()) def test_handle_clean_args_reboots(self): diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py index 12fc80e8..9a010402 100644 --- a/tests/unittests/cmd/test_cloud_id.py +++ b/tests/unittests/cmd/test_cloud_id.py @@ -51,7 +51,7 @@ class TestCloudId(CiTestCase): cloud_id.main() self.assertEqual(1, context_manager.exception.code) self.assertIn( - "ERROR: File not found '%s'" % self.instance_data, + "Error:\nFile not found '%s'" % self.instance_data, m_stderr.getvalue()) def test_cloud_id_non_json_instance_data(self): @@ -64,7 +64,7 @@ class TestCloudId(CiTestCase): cloud_id.main() self.assertEqual(1, context_manager.exception.code) self.assertIn( - "ERROR: File '%s' is not valid json." % self.instance_data, + "Error:\nFile '%s' is not valid json." % self.instance_data, m_stderr.getvalue()) def test_cloud_id_from_cloud_name_in_instance_data(self): diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index b01f5eea..f90e0f62 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -1,13 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import cloudinit -from cloudinit.config.schema import ( - CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, - get_schema_doc, get_schema, validate_cloudconfig_file, - validate_cloudconfig_schema, main) -from cloudinit.util import write_file -from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema +import importlib +import sys +import inspect +import logging from copy import copy import itertools import pytest @@ -15,6 +12,63 @@ from pathlib import Path from textwrap import dedent from yaml import safe_load +import cloudinit +from cloudinit.config.schema import ( + CLOUD_CONFIG_HEADER, + SchemaValidationError, + annotated_cloudconfig_file, + get_meta_doc, + get_schema, + get_jsonschema_validator, + validate_cloudconfig_file, + validate_cloudconfig_metaschema, + validate_cloudconfig_schema, + main, + MetaSchema, +) +from cloudinit.util import write_file +from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema + + +def get_schemas() -> dict: + """Return all module schemas + + Assumes that module schemas have the variable name "schema" + """ + return get_module_variable("schema") + + +def get_metas() -> dict: + """Return all module metas + + Assumes that module metas have the variable name "meta" + """ + return get_module_variable("meta") + + +def get_module_variable(var_name) -> dict: + """Inspect modules and get variable from module matching var_name""" + schemas = {} + + files = list(Path("../../cloudinit/config/").glob("cc_*.py")) + modules = [mod.stem for mod in files] + + for module in modules: + importlib.import_module("cloudinit.config.{}".format(module)) + + for k, v in sys.modules.items(): + path = Path(k) + + if "cloudinit.config" == path.stem and path.suffix[1:4] == "cc_": + module_name = path.suffix[1:] + members = inspect.getmembers(v) + schemas[module_name] = None + for name, value in members: + if name == var_name: + schemas[module_name] = value + break + return schemas + class GetSchemaTest(CiTestCase): @@ -34,25 +88,17 @@ class GetSchemaTest(CiTestCase): 'cc_ubuntu_advantage', 'cc_ubuntu_drivers', 'cc_write_files', - 'cc_write_files_deferred', 'cc_zypper_add_repo', 'cc_chef', 'cc_install_hotplug', ], - [subschema['id'] for subschema in schema['allOf']]) - self.assertEqual('cloud-config-schema', schema['id']) + [meta["id"] for meta in get_metas().values() if meta is not None], + ) + self.assertEqual("cloud-config-schema", schema["id"]) self.assertEqual( - 'http://json-schema.org/draft-04/schema#', - schema['$schema']) - # FULL_SCHEMA is updated by the get_schema call - from cloudinit.config.schema import FULL_SCHEMA - self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys()) - - def test_get_schema_returns_global_when_set(self): - """When FULL_SCHEMA global is already set, get_schema returns it.""" - m_schema_path = 'cloudinit.config.schema.FULL_SCHEMA' - with mock.patch(m_schema_path, {'here': 'iam'}): - self.assertEqual({'here': 'iam'}, get_schema()) + "http://json-schema.org/draft-04/schema#", schema["$schema"] + ) + self.assertCountEqual(["id", "$schema", "allOf"], get_schema().keys()) class SchemaValidationErrorTest(CiTestCase): @@ -93,8 +139,9 @@ class ValidateCloudConfigSchemaTest(CiTestCase): with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}): validate_cloudconfig_schema({'p1': -1}, schema, strict=True) self.assertIn( - 'Ignoring schema validation. python-jsonschema is not present', - self.logs.getvalue()) + "Ignoring schema validation. 
jsonschema is not present", + self.logs.getvalue(), + ) @skipUnlessJsonSchema() def test_validateconfig_schema_strict_raises_errors(self): @@ -117,14 +164,48 @@ class ValidateCloudConfigSchemaTest(CiTestCase): "Cloud config schema errors: p1: '-1' is not a 'email'", str(context_mgr.exception)) + @skipUnlessJsonSchema() + def test_validateconfig_schema_honors_formats_strict_metaschema(self): + """With strict True and strict_metascheam True, ensure errors on format + """ + schema = {"properties": {"p1": {"type": "string", "format": "email"}}} + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_schema( + {"p1": "-1"}, schema, strict=True, strict_metaschema=True + ) + self.assertEqual( + "Cloud config schema errors: p1: '-1' is not a 'email'", + str(context_mgr.exception), + ) + + @skipUnlessJsonSchema() + def test_validateconfig_strict_metaschema_do_not_raise_exception(self): + """With strict_metaschema=True, do not raise exceptions. + + This flag is currently unused, but is intended for run-time validation. + This should warn, but not raise. + """ + schema = {"properties": {"p1": {"types": "string", "format": "email"}}} + validate_cloudconfig_schema( + {"p1": "-1"}, schema, strict_metaschema=True + ) + assert ( + "Meta-schema validation failed, attempting to validate config" + in self.logs.getvalue() + ) + class TestCloudConfigExamples: - schema = get_schema() + schema = get_schemas() + metas = get_metas() params = [ - (schema["id"], example) - for schema in schema["allOf"] for example in schema["examples"]] + (meta["id"], example) + for meta in metas.values() + if meta and meta.get("examples") + for example in meta.get("examples") + ] - @pytest.mark.parametrize("schema_id,example", params) + @pytest.mark.parametrize("schema_id, example", params) @skipUnlessJsonSchema() def test_validateconfig_schema_of_example(self, schema_id, example): """ For a given example in a config module we test if it is valid @@ -201,22 +282,42 @@ class ValidateCloudConfigFileTest(CiTestCase): class GetSchemaDocTest(CiTestCase): - """Tests for get_schema_doc.""" + """Tests for get_meta_doc.""" def setUp(self): super(GetSchemaDocTest, self).setUp() self.required_schema = { - 'title': 'title', 'description': 'description', 'id': 'id', - 'name': 'name', 'frequency': 'frequency', - 'distros': ['debian', 'rhel']} + "title": "title", + "description": "description", + "id": "id", + "name": "name", + "frequency": "frequency", + "distros": ["debian", "rhel"], + } + self.meta = MetaSchema( + { + "title": "title", + "description": "description", + "id": "id", + "name": "name", + "frequency": "frequency", + "distros": ["debian", "rhel"], + "examples": [ + 'ex1:\n [don\'t, expand, "this"]', + "ex2: true", + ], + } + ) - def test_get_schema_doc_returns_restructured_text(self): - """get_schema_doc returns restructured text for a cloudinit schema.""" + def test_get_meta_doc_returns_restructured_text(self): + """get_meta_doc returns restructured text for a cloudinit schema.""" full_schema = copy(self.required_schema) full_schema.update( {'properties': { 'prop1': {'type': 'array', 'description': 'prop-description', 'items': {'type': 'integer'}}}}) + + doc = get_meta_doc(self.meta, full_schema) self.assertEqual( dedent(""" name @@ -232,47 +333,51 @@ class GetSchemaDocTest(CiTestCase): **Supported distros:** debian, rhel **Config schema**: - **prop1:** (array of integer) prop-description\n\n"""), - get_schema_doc(full_schema)) + **prop1:** (array of integer) prop-description - def 
test_get_schema_doc_handles_multiple_types(self): - """get_schema_doc delimits multiple property types with a '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': ['string', 'integer'], - 'description': 'prop-description'}}}) + **Examples**:: + + ex1: + [don't, expand, "this"] + # --- Example2 --- + ex2: true + """), + doc, + ) + + def test_get_meta_doc_handles_multiple_types(self): + """get_meta_doc delimits multiple property types with a '/'.""" + schema = {"properties": {"prop1": {"type": ["string", "integer"]}}} self.assertIn( - '**prop1:** (string/integer) prop-description', - get_schema_doc(full_schema)) + "**prop1:** (string/integer)", get_meta_doc(self.meta, schema) + ) - def test_get_schema_doc_handles_enum_types(self): - """get_schema_doc converts enum types to yaml and delimits with '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'enum': [True, False, 'stuff'], - 'description': 'prop-description'}}}) + def test_get_meta_doc_handles_enum_types(self): + """get_meta_doc converts enum types to yaml and delimits with '/'.""" + schema = {"properties": {"prop1": {"enum": [True, False, "stuff"]}}} self.assertIn( - '**prop1:** (true/false/stuff) prop-description', - get_schema_doc(full_schema)) + "**prop1:** (true/false/stuff)", get_meta_doc(self.meta, schema) + ) - def test_get_schema_doc_handles_nested_oneof_property_types(self): - """get_schema_doc describes array items oneOf declarations in type.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': 'array', - 'items': { - 'oneOf': [{'type': 'string'}, - {'type': 'integer'}]}, - 'description': 'prop-description'}}}) + def test_get_meta_doc_handles_nested_oneof_property_types(self): + """get_meta_doc describes array items oneOf declarations in type.""" + schema = { + "properties": { + "prop1": { + "type": "array", + "items": { + "oneOf": [{"type": "string"}, {"type": "integer"}] + }, + } + } + } self.assertIn( - '**prop1:** (array of (string)/(integer)) prop-description', - get_schema_doc(full_schema)) + "**prop1:** (array of (string)/(integer))", + get_meta_doc(self.meta, schema), + ) - def test_get_schema_doc_handles_string_examples(self): - """get_schema_doc properly indented examples as a list of strings.""" + def test_get_meta_doc_handles_string_examples(self): + """get_meta_doc properly indented examples as a list of strings.""" full_schema = copy(self.required_schema) full_schema.update( {'examples': ['ex1:\n [don\'t, expand, "this"]', 'ex2: true'], @@ -291,16 +396,17 @@ class GetSchemaDocTest(CiTestCase): # --- Example2 --- ex2: true """), - get_schema_doc(full_schema)) + get_meta_doc(self.meta, full_schema), + ) - def test_get_schema_doc_properly_parse_description(self): - """get_schema_doc description properly formatted""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'p1': { - 'type': 'string', - 'description': dedent("""\ + def test_get_meta_doc_properly_parse_description(self): + """get_meta_doc description properly formatted""" + schema = { + "properties": { + "p1": { + "type": "string", + "description": dedent( + """\ This item has the following options: @@ -312,8 +418,8 @@ class GetSchemaDocTest(CiTestCase): The default value is option1""") } - }} - ) + } + } self.assertIn( dedent(""" @@ -325,16 +431,28 @@ class GetSchemaDocTest(CiTestCase): - option3 The default value is option1 + """), - 
get_schema_doc(full_schema)) + get_meta_doc(self.meta, schema), + ) - def test_get_schema_doc_raises_key_errors(self): - """get_schema_doc raises KeyErrors on missing keys.""" - for key in self.required_schema: - invalid_schema = copy(self.required_schema) - invalid_schema.pop(key) + def test_get_meta_doc_raises_key_errors(self): + """get_meta_doc raises KeyErrors on missing keys.""" + schema = { + "properties": { + "prop1": { + "type": "array", + "items": { + "oneOf": [{"type": "string"}, {"type": "integer"}] + }, + } + } + } + for key in self.meta: + invalid_meta = copy(self.meta) + invalid_meta.pop(key) with self.assertRaises(KeyError) as context_mgr: - get_schema_doc(invalid_schema) + get_meta_doc(invalid_meta, schema) self.assertIn(key, str(context_mgr.exception)) @@ -418,6 +536,7 @@ class TestMain: _out, err = capsys.readouterr() expected = ( + 'Error:\n' 'Expected one of --config-file, --system or --docs arguments\n' ) assert expected == err @@ -431,6 +550,7 @@ class TestMain: _out, err = capsys.readouterr() expected = ( + 'Error:\n' 'Expected one of --config-file, --system or --docs arguments\n' ) assert expected == err @@ -443,7 +563,7 @@ class TestMain: main() assert 1 == context_manager.value.code _out, err = capsys.readouterr() - assert 'Configfile NOT_A_FILE does not exist\n' == err + assert 'Error:\nConfigfile NOT_A_FILE does not exist\n' == err def test_main_prints_docs(self, capsys): """When --docs parameter is provided, main generates documentation.""" @@ -489,12 +609,13 @@ class TestMain: assert 1 == context_manager.value.code _out, err = capsys.readouterr() expected = ( - 'Unable to read system userdata as non-root user. Try using sudo\n' + 'Error:\nUnable to read system userdata as non-root user. ' + 'Try using sudo\n' ) assert expected == err -def _get_schema_doc_examples(): +def _get_meta_doc_examples(): examples_dir = Path( cloudinit.__file__).parent.parent / 'doc' / 'examples' assert examples_dir.is_dir() @@ -507,9 +628,49 @@ def _get_schema_doc_examples(): class TestSchemaDocExamples: schema = get_schema() - @pytest.mark.parametrize("example_path", _get_schema_doc_examples()) + @pytest.mark.parametrize("example_path", _get_meta_doc_examples()) @skipUnlessJsonSchema() def test_schema_doc_examples(self, example_path): validate_cloudconfig_file(str(example_path), self.schema) + +class TestStrictMetaschema: + """Validate that schemas follow a stricter metaschema definition than + the default. This disallows arbitrary key/value pairs. 
+ """ + + @skipUnlessJsonSchema() + def test_modules(self): + """Validate all modules with a stricter metaschema""" + (validator, _) = get_jsonschema_validator() + for (name, value) in get_schemas().items(): + if value: + validate_cloudconfig_metaschema(validator, value) + else: + logging.warning("module %s has no schema definition", name) + + @skipUnlessJsonSchema() + def test_validate_bad_module(self): + """Throw exception by default, don't throw if throw=False + + item should be 'items' and is therefore interpreted as an additional + property which is invalid with a strict metaschema + """ + (validator, _) = get_jsonschema_validator() + schema = { + "type": "array", + "item": { + "type": "object", + }, + } + with pytest.raises( + SchemaValidationError, + match=(r"Additional properties are not allowed.*") + ): + + validate_cloudconfig_metaschema(validator, schema) + + validate_cloudconfig_metaschema(validator, schema, throw=False) + + # vi: ts=4 expandtab syntax=python diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index fd717f34..d0162673 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import os +import contextlib import io from collections import namedtuple @@ -214,26 +215,106 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self.assertEqual(1, exit_code) # Known whitebox output from schema subcommand self.assertEqual( + 'Error:\n' 'Expected one of --config-file, --system or --docs arguments\n', self.stderr.getvalue()) - def test_wb_devel_schema_subcommand_doc_content(self): - """Validate that doc content is sane from known examples.""" + def test_wb_devel_schema_subcommand_doc_all_spot_check(self): + """Validate that doc content has correct values from known examples. + + Ensure that schema doc is returned + """ + + # Note: patchStdoutAndStderr() is convenient for reducing boilerplate, + # but inspecting the code for debugging is not ideal + # contextlib.redirect_stdout() provides similar behavior as a context + # manager stdout = io.StringIO() - self.patchStdoutAndStderr(stdout=stdout) - self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all']) - expected_doc_sections = [ - '**Supported distros:** all', - ('**Supported distros:** almalinux, alpine, centos, cloudlinux, ' - 'debian, eurolinux, fedora, openEuler, opensuse, photon, rhel, ' - 'rocky, sles, ubuntu, virtuozzo'), - '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', - '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' - ] + with contextlib.redirect_stdout(stdout): + self._call_main(["cloud-init", "devel", "schema", "--docs", "all"]) + expected_doc_sections = [ + "**Supported distros:** all", + ( + "**Supported distros:** almalinux, alpine, centos, " + "cloudlinux, debian, eurolinux, fedora, openEuler, " + "opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo" + ), + "**Config schema**:\n **resize_rootfs:** " + "(true/false/noblock)", + "**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n", + ] + stdout = stdout.getvalue() + for expected in expected_doc_sections: + self.assertIn(expected, stdout) + + def test_wb_devel_schema_subcommand_single_spot_check(self): + """Validate that doc content has correct values from known example. 
+
+        Validate single arg
+        """
+
+        # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+        # but inspecting the code for debugging is not ideal
+        # contextlib.redirect_stdout() provides similar behavior as a context
+        # manager
+        stdout = io.StringIO()
+        with contextlib.redirect_stdout(stdout):
+            self._call_main(
+                ["cloud-init", "devel", "schema", "--docs", "cc_runcmd"]
+            )
+        expected_doc_sections = [
+            "Runcmd\n------\n**Summary:** Run arbitrary commands"
+        ]
         stdout = stdout.getvalue()
         for expected in expected_doc_sections:
             self.assertIn(expected, stdout)

+    def test_wb_devel_schema_subcommand_multiple_spot_check(self):
+        """Validate that doc content has correct values from known examples.
+
+        Validate multiple args
+        """
+
+        stdout = io.StringIO()
+        with contextlib.redirect_stdout(stdout):
+            self._call_main(
+                [
+                    "cloud-init",
+                    "devel",
+                    "schema",
+                    "--docs",
+                    "cc_runcmd",
+                    "cc_resizefs",
+                ]
+            )
+        expected_doc_sections = [
+            "Runcmd\n------\n**Summary:** Run arbitrary commands",
+            "Resizefs\n--------\n**Summary:** Resize filesystem",
+        ]
+        stdout = stdout.getvalue()
+        for expected in expected_doc_sections:
+            self.assertIn(expected, stdout)
+
+    def test_wb_devel_schema_subcommand_bad_arg_fails(self):
+        """Validate that an invalid --docs value fails with a clear error.
+
+        Validate bad arg
+        """
+
+        # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+        # but inspecting the code for debugging is not ideal
+        # contextlib.redirect_stdout() provides similar behavior as a context
+        # manager
+        stderr = io.StringIO()
+        with contextlib.redirect_stderr(stderr):
+            self._call_main(
+                ["cloud-init", "devel", "schema", "--docs", "garbage_value"]
+            )
+        expected_doc_sections = ["Invalid --docs value"]
+        stderr = stderr.getvalue()
+        for expected in expected_doc_sections:
+            self.assertIn(expected, stderr)
+
     @mock.patch('cloudinit.cmd.main.main_single')
     def test_single_subcommand(self, m_main_single):
         """The subcommand 'single' calls main_single with valid args."""
-- cgit v1.2.3


From bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Wed, 15 Dec 2021 20:16:38 -0600
Subject: Adopt Black and isort (SC-700) (#1157)

Applied Black and isort, fixed any linting issues, updated tox.ini and CI.
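Both tools can also be driven programmatically, which shows what the churn in this commit consists of. A rough sketch (assuming the black and isort packages are importable; the sample source string and the 79-column line length are illustrative assumptions, not taken from this commit):

    import black
    import isort

    src = "from cloudinit import util, subp\nd = { 'a':1,'b':2 }\n"
    # isort orders imported names: 'from cloudinit import subp, util'
    src = isort.code(src)
    # black normalizes quoting and spacing: d = {"a": 1, "b": 2}
    src = black.format_str(src, mode=black.Mode(line_length=79))
    print(src)
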
--- .travis.yml | 4 + CONTRIBUTING.rst | 5 + cloudinit/analyze/__main__.py | 269 +- cloudinit/analyze/dump.py | 71 +- cloudinit/analyze/show.py | 192 +- cloudinit/apport.py | 153 +- cloudinit/atomic_helper.py | 25 +- cloudinit/cloud.py | 14 +- cloudinit/cmd/clean.py | 59 +- cloudinit/cmd/cloud_id.py | 68 +- cloudinit/cmd/devel/__init__.py | 3 +- cloudinit/cmd/devel/hotplug_hook.py | 138 +- cloudinit/cmd/devel/logs.py | 120 +- cloudinit/cmd/devel/make_mime.py | 76 +- cloudinit/cmd/devel/net_convert.py | 145 +- cloudinit/cmd/devel/parser.py | 48 +- cloudinit/cmd/devel/render.py | 54 +- cloudinit/cmd/main.py | 595 ++- cloudinit/cmd/query.py | 170 +- cloudinit/cmd/status.py | 101 +- cloudinit/config/__init__.py | 20 +- cloudinit/config/cc_apk_configure.py | 195 +- cloudinit/config/cc_apt_configure.py | 618 +-- cloudinit/config/cc_apt_pipelining.py | 13 +- cloudinit/config/cc_bootcmd.py | 65 +- cloudinit/config/cc_byobu.py | 27 +- cloudinit/config/cc_ca_certs.py | 84 +- cloudinit/config/cc_chef.py | 659 +-- cloudinit/config/cc_debug.py | 21 +- cloudinit/config/cc_disable_ec2_metadata.py | 25 +- cloudinit/config/cc_disk_setup.py | 334 +- cloudinit/config/cc_emit_upstart.py | 24 +- cloudinit/config/cc_fan.py | 34 +- cloudinit/config/cc_final_message.py | 24 +- cloudinit/config/cc_foo.py | 1 + cloudinit/config/cc_growpart.py | 134 +- cloudinit/config/cc_grub_dpkg.py | 54 +- cloudinit/config/cc_install_hotplug.py | 48 +- cloudinit/config/cc_keys_to_console.py | 36 +- cloudinit/config/cc_landscape.py | 24 +- cloudinit/config/cc_locale.py | 51 +- cloudinit/config/cc_lxd.py | 186 +- cloudinit/config/cc_mcollective.py | 50 +- cloudinit/config/cc_migrator.py | 25 +- cloudinit/config/cc_mounts.py | 172 +- cloudinit/config/cc_ntp.py | 546 ++- .../config/cc_package_update_upgrade_install.py | 30 +- cloudinit/config/cc_phone_home.py | 98 +- cloudinit/config/cc_power_state_change.py | 58 +- cloudinit/config/cc_puppet.py | 194 +- cloudinit/config/cc_refresh_rmc_and_interface.py | 51 +- cloudinit/config/cc_reset_rmc.py | 43 +- cloudinit/config/cc_resizefs.py | 183 +- cloudinit/config/cc_resolv_conf.py | 41 +- cloudinit/config/cc_rh_subscription.py | 240 +- cloudinit/config/cc_rightscale_userdata.py | 31 +- cloudinit/config/cc_rsyslog.py | 86 +- cloudinit/config/cc_runcmd.py | 70 +- cloudinit/config/cc_salt_minion.py | 69 +- cloudinit/config/cc_scripts_per_boot.py | 14 +- cloudinit/config/cc_scripts_per_instance.py | 14 +- cloudinit/config/cc_scripts_per_once.py | 14 +- cloudinit/config/cc_scripts_user.py | 12 +- cloudinit/config/cc_scripts_vendor.py | 22 +- cloudinit/config/cc_seed_random.py | 41 +- cloudinit/config/cc_set_hostname.py | 30 +- cloudinit/config/cc_set_passwords.py | 65 +- cloudinit/config/cc_snap.py | 168 +- cloudinit/config/cc_spacewalk.py | 67 +- cloudinit/config/cc_ssh.py | 106 +- cloudinit/config/cc_ssh_authkey_fingerprints.py | 73 +- cloudinit/config/cc_ssh_import_id.py | 23 +- cloudinit/config/cc_timezone.py | 2 +- cloudinit/config/cc_ubuntu_advantage.py | 154 +- cloudinit/config/cc_ubuntu_drivers.py | 133 +- cloudinit/config/cc_update_etc_hosts.py | 42 +- cloudinit/config/cc_update_hostname.py | 25 +- cloudinit/config/cc_users_groups.py | 39 +- cloudinit/config/cc_write_files.py | 242 +- cloudinit/config/cc_write_files_deferred.py | 22 +- cloudinit/config/cc_yum_add_repo.py | 65 +- cloudinit/config/cc_zypper_add_repo.py | 159 +- cloudinit/config/schema.py | 239 +- cloudinit/cs_utils.py | 20 +- cloudinit/dhclient_hook.py | 21 +- cloudinit/distros/__init__.py | 420 +- 
cloudinit/distros/almalinux.py | 1 + cloudinit/distros/alpine.py | 45 +- cloudinit/distros/amazon.py | 1 - cloudinit/distros/arch.py | 147 +- cloudinit/distros/bsd.py | 66 +- cloudinit/distros/bsd_utils.py | 18 +- cloudinit/distros/centos.py | 1 + cloudinit/distros/cloudlinux.py | 1 + cloudinit/distros/debian.py | 168 +- cloudinit/distros/dragonflybsd.py | 2 +- cloudinit/distros/eurolinux.py | 1 + cloudinit/distros/fedora.py | 1 + cloudinit/distros/freebsd.py | 93 +- cloudinit/distros/gentoo.py | 140 +- cloudinit/distros/miraclelinux.py | 2 + cloudinit/distros/net_util.py | 68 +- cloudinit/distros/netbsd.py | 85 +- cloudinit/distros/networking.py | 13 +- cloudinit/distros/openEuler.py | 1 + cloudinit/distros/openbsd.py | 20 +- cloudinit/distros/opensuse.py | 119 +- cloudinit/distros/parsers/__init__.py | 3 +- cloudinit/distros/parsers/hostname.py | 24 +- cloudinit/distros/parsers/hosts.py | 24 +- cloudinit/distros/parsers/networkmanager_conf.py | 6 +- cloudinit/distros/parsers/resolv_conf.py | 73 +- cloudinit/distros/parsers/sys_conf.py | 38 +- cloudinit/distros/photon.py | 86 +- cloudinit/distros/rhel.py | 76 +- cloudinit/distros/rhel_util.py | 4 +- cloudinit/distros/rocky.py | 1 + cloudinit/distros/sles.py | 1 + cloudinit/distros/ubuntu.py | 33 +- cloudinit/distros/ug_util.py | 106 +- cloudinit/distros/virtuozzo.py | 1 + cloudinit/dmi.py | 68 +- cloudinit/ec2_utils.py | 165 +- cloudinit/event.py | 8 +- cloudinit/filters/launch_index.py | 12 +- cloudinit/gpg.py | 48 +- cloudinit/handlers/__init__.py | 152 +- cloudinit/handlers/boot_hook.py | 21 +- cloudinit/handlers/cloud_config.py | 29 +- cloudinit/handlers/jinja_template.py | 87 +- cloudinit/handlers/shell_script.py | 15 +- cloudinit/handlers/upstart_job.py | 22 +- cloudinit/helpers.py | 111 +- cloudinit/importer.py | 3 +- cloudinit/log.py | 21 +- cloudinit/mergers/__init__.py | 43 +- cloudinit/mergers/m_dict.py | 34 +- cloudinit/mergers/m_list.py | 37 +- cloudinit/mergers/m_str.py | 5 +- cloudinit/net/__init__.py | 579 ++- cloudinit/net/activators.py | 87 +- cloudinit/net/bsd.py | 112 +- cloudinit/net/cmdline.py | 97 +- cloudinit/net/dhcp.py | 194 +- cloudinit/net/eni.py | 454 +- cloudinit/net/freebsd.py | 44 +- cloudinit/net/netbsd.py | 27 +- cloudinit/net/netplan.py | 313 +- cloudinit/net/network_state.py | 734 +-- cloudinit/net/networkd.py | 208 +- cloudinit/net/openbsd.py | 33 +- cloudinit/net/renderer.py | 31 +- cloudinit/net/renderers.py | 40 +- cloudinit/net/sysconfig.py | 886 ++-- cloudinit/net/udev.py | 23 +- cloudinit/netinfo.py | 403 +- cloudinit/patcher.py | 9 +- cloudinit/registry.py | 4 +- cloudinit/reporting/__init__.py | 9 +- cloudinit/reporting/events.py | 97 +- cloudinit/reporting/handlers.py | 128 +- cloudinit/safeyaml.py | 25 +- cloudinit/serial.py | 25 +- cloudinit/settings.py | 82 +- cloudinit/signal_handler.py | 12 +- cloudinit/simpletable.py | 26 +- cloudinit/sources/DataSourceAliYun.py | 18 +- cloudinit/sources/DataSourceAltCloud.py | 113 +- cloudinit/sources/DataSourceAzure.py | 1350 +++--- cloudinit/sources/DataSourceBigstep.py | 9 +- cloudinit/sources/DataSourceCloudSigma.py | 39 +- cloudinit/sources/DataSourceCloudStack.py | 135 +- cloudinit/sources/DataSourceConfigDrive.py | 117 +- cloudinit/sources/DataSourceDigitalOcean.py | 65 +- cloudinit/sources/DataSourceEc2.py | 461 +- cloudinit/sources/DataSourceExoscale.py | 171 +- cloudinit/sources/DataSourceGCE.py | 221 +- cloudinit/sources/DataSourceHetzner.py | 74 +- cloudinit/sources/DataSourceIBMCloud.py | 128 +- cloudinit/sources/DataSourceLXD.py | 61 +- 
cloudinit/sources/DataSourceMAAS.py | 180 +- cloudinit/sources/DataSourceNoCloud.py | 154 +- cloudinit/sources/DataSourceNone.py | 15 +- cloudinit/sources/DataSourceOVF.py | 311 +- cloudinit/sources/DataSourceOpenNebula.py | 190 +- cloudinit/sources/DataSourceOpenStack.py | 129 +- cloudinit/sources/DataSourceOracle.py | 125 +- cloudinit/sources/DataSourceRbxCloud.py | 194 +- cloudinit/sources/DataSourceScaleway.py | 131 +- cloudinit/sources/DataSourceSmartOS.py | 555 ++- cloudinit/sources/DataSourceUpCloud.py | 7 +- cloudinit/sources/DataSourceVMware.py | 13 +- cloudinit/sources/DataSourceVultr.py | 86 +- cloudinit/sources/__init__.py | 385 +- cloudinit/sources/helpers/azure.py | 693 +-- cloudinit/sources/helpers/digitalocean.py | 195 +- cloudinit/sources/helpers/hetzner.py | 15 +- cloudinit/sources/helpers/netlink.py | 187 +- cloudinit/sources/helpers/openstack.py | 438 +- cloudinit/sources/helpers/upcloud.py | 12 +- cloudinit/sources/helpers/vmware/imc/boot_proto.py | 5 +- cloudinit/sources/helpers/vmware/imc/config.py | 59 +- .../helpers/vmware/imc/config_custom_script.py | 45 +- .../sources/helpers/vmware/imc/config_file.py | 7 +- .../sources/helpers/vmware/imc/config_namespace.py | 1 + cloudinit/sources/helpers/vmware/imc/config_nic.py | 84 +- .../sources/helpers/vmware/imc/config_passwd.py | 38 +- .../sources/helpers/vmware/imc/config_source.py | 1 + .../sources/helpers/vmware/imc/guestcust_error.py | 1 + .../sources/helpers/vmware/imc/guestcust_event.py | 1 + .../sources/helpers/vmware/imc/guestcust_state.py | 1 + .../sources/helpers/vmware/imc/guestcust_util.py | 46 +- cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 11 +- cloudinit/sources/helpers/vmware/imc/nic.py | 33 +- cloudinit/sources/helpers/vmware/imc/nic_base.py | 29 +- cloudinit/sources/helpers/vultr.py | 172 +- cloudinit/ssh_util.py | 172 +- cloudinit/stages.py | 649 +-- cloudinit/subp.py | 165 +- cloudinit/temp_utils.py | 20 +- cloudinit/templater.py | 96 +- cloudinit/type_utils.py | 4 +- cloudinit/url_helper.py | 273 +- cloudinit/user_data.py | 121 +- cloudinit/util.py | 873 ++-- cloudinit/version.py | 9 +- cloudinit/warnings.py | 21 +- conftest.py | 5 +- doc/rtd/conf.py | 30 +- pyproject.toml | 8 + setup.py | 263 +- tests/integration_tests/__init__.py | 8 +- tests/integration_tests/bugs/test_gh570.py | 13 +- tests/integration_tests/bugs/test_gh626.py | 25 +- tests/integration_tests/bugs/test_gh632.py | 20 +- tests/integration_tests/bugs/test_gh668.py | 15 +- tests/integration_tests/bugs/test_gh671.py | 35 +- tests/integration_tests/bugs/test_gh868.py | 3 +- tests/integration_tests/bugs/test_lp1813396.py | 3 +- tests/integration_tests/bugs/test_lp1835584.py | 19 +- tests/integration_tests/bugs/test_lp1886531.py | 2 - tests/integration_tests/bugs/test_lp1897099.py | 13 +- tests/integration_tests/bugs/test_lp1898997.py | 14 +- tests/integration_tests/bugs/test_lp1900837.py | 2 +- tests/integration_tests/bugs/test_lp1901011.py | 49 +- tests/integration_tests/bugs/test_lp1910835.py | 1 - tests/integration_tests/bugs/test_lp1912844.py | 4 +- tests/integration_tests/clouds.py | 163 +- tests/integration_tests/conftest.py | 130 +- .../datasources/test_lxd_discovery.py | 43 +- .../datasources/test_network_dependency.py | 17 +- tests/integration_tests/instances.py | 83 +- tests/integration_tests/integration_settings.py | 13 +- tests/integration_tests/modules/test_apt.py | 88 +- tests/integration_tests/modules/test_ca_certs.py | 1 - tests/integration_tests/modules/test_cli.py | 9 +- 
tests/integration_tests/modules/test_combined.py | 155 +- .../modules/test_command_output.py | 5 +- tests/integration_tests/modules/test_disk_setup.py | 76 +- tests/integration_tests/modules/test_growpart.py | 38 +- tests/integration_tests/modules/test_hotplug.py | 55 +- .../modules/test_jinja_templating.py | 11 +- .../modules/test_keys_to_console.py | 9 +- tests/integration_tests/modules/test_lxd_bridge.py | 2 - .../integration_tests/modules/test_ntp_servers.py | 30 +- .../modules/test_package_update_upgrade_install.py | 18 +- .../integration_tests/modules/test_persistence.py | 26 +- .../modules/test_power_state_change.py | 48 +- tests/integration_tests/modules/test_puppet.py | 6 +- .../integration_tests/modules/test_set_hostname.py | 10 +- .../integration_tests/modules/test_set_password.py | 15 +- .../modules/test_ssh_auth_key_fingerprints.py | 13 +- .../integration_tests/modules/test_ssh_generate.py | 16 +- .../modules/test_ssh_keys_provided.py | 58 +- .../integration_tests/modules/test_ssh_keysfile.py | 159 +- .../integration_tests/modules/test_user_events.py | 50 +- .../integration_tests/modules/test_users_groups.py | 21 +- .../modules/test_version_change.py | 45 +- .../integration_tests/modules/test_write_files.py | 32 +- tests/integration_tests/test_upgrade.py | 120 +- tests/integration_tests/util.py | 39 +- tests/unittests/__init__.py | 1 + tests/unittests/analyze/test_boot.py | 135 +- tests/unittests/analyze/test_dump.py | 213 +- tests/unittests/cmd/devel/test_hotplug_hook.py | 162 +- tests/unittests/cmd/devel/test_logs.py | 232 +- tests/unittests/cmd/devel/test_render.py | 152 +- tests/unittests/cmd/test_clean.py | 179 +- tests/unittests/cmd/test_cloud_id.py | 99 +- tests/unittests/cmd/test_main.py | 223 +- tests/unittests/cmd/test_query.py | 403 +- tests/unittests/cmd/test_status.py | 561 ++- tests/unittests/config/test_apt_conf_v1.py | 68 +- .../config/test_apt_configure_sources_list_v1.py | 131 +- .../config/test_apt_configure_sources_list_v3.py | 158 +- tests/unittests/config/test_apt_key.py | 117 +- tests/unittests/config/test_apt_source_v1.py | 765 ++-- tests/unittests/config/test_apt_source_v3.py | 1220 +++-- tests/unittests/config/test_cc_apk_configure.py | 148 +- tests/unittests/config/test_cc_apt_pipelining.py | 12 +- tests/unittests/config/test_cc_bootcmd.py | 100 +- tests/unittests/config/test_cc_ca_certs.py | 220 +- tests/unittests/config/test_cc_chef.py | 202 +- tests/unittests/config/test_cc_debug.py | 39 +- .../config/test_cc_disable_ec2_metadata.py | 44 +- tests/unittests/config/test_cc_disk_setup.py | 270 +- tests/unittests/config/test_cc_growpart.py | 232 +- tests/unittests/config/test_cc_grub_dpkg.py | 121 +- tests/unittests/config/test_cc_install_hotplug.py | 58 +- tests/unittests/config/test_cc_keys_to_console.py | 18 +- tests/unittests/config/test_cc_landscape.py | 178 +- tests/unittests/config/test_cc_locale.py | 99 +- tests/unittests/config/test_cc_lxd.py | 250 +- tests/unittests/config/test_cc_mcollective.py | 104 +- tests/unittests/config/test_cc_mounts.py | 449 +- tests/unittests/config/test_cc_ntp.py | 682 +-- .../unittests/config/test_cc_power_state_change.py | 74 +- tests/unittests/config/test_cc_puppet.py | 432 +- .../config/test_cc_refresh_rmc_and_interface.py | 162 +- tests/unittests/config/test_cc_resizefs.py | 436 +- tests/unittests/config/test_cc_resolv_conf.py | 76 +- tests/unittests/config/test_cc_rh_subscription.py | 366 +- tests/unittests/config/test_cc_rsyslog.py | 112 +- tests/unittests/config/test_cc_runcmd.py | 74 +- 
tests/unittests/config/test_cc_seed_random.py | 158 +- tests/unittests/config/test_cc_set_hostname.py | 185 +- tests/unittests/config/test_cc_set_passwords.py | 111 +- tests/unittests/config/test_cc_snap.py | 445 +- tests/unittests/config/test_cc_spacewalk.py | 36 +- tests/unittests/config/test_cc_ssh.py | 356 +- tests/unittests/config/test_cc_timezone.py | 31 +- tests/unittests/config/test_cc_ubuntu_advantage.py | 311 +- tests/unittests/config/test_cc_ubuntu_drivers.py | 213 +- tests/unittests/config/test_cc_update_etc_hosts.py | 63 +- tests/unittests/config/test_cc_users_groups.py | 264 +- tests/unittests/config/test_cc_write_files.py | 148 +- .../config/test_cc_write_files_deferred.py | 62 +- tests/unittests/config/test_cc_yum_add_repo.py | 105 +- tests/unittests/config/test_cc_zypper_add_repo.py | 166 +- tests/unittests/config/test_schema.py | 301 +- tests/unittests/distros/__init__.py | 10 +- tests/unittests/distros/test_arch.py | 50 +- tests/unittests/distros/test_bsd_utils.py | 49 +- tests/unittests/distros/test_create_users.py | 252 +- tests/unittests/distros/test_debian.py | 155 +- tests/unittests/distros/test_freebsd.py | 28 +- tests/unittests/distros/test_generic.py | 300 +- tests/unittests/distros/test_gentoo.py | 11 +- tests/unittests/distros/test_hostname.py | 16 +- tests/unittests/distros/test_hosts.py | 36 +- tests/unittests/distros/test_init.py | 273 +- tests/unittests/distros/test_manage_service.py | 33 +- tests/unittests/distros/test_netbsd.py | 11 +- tests/unittests/distros/test_netconfig.py | 605 ++- tests/unittests/distros/test_networking.py | 30 +- tests/unittests/distros/test_opensuse.py | 3 +- tests/unittests/distros/test_photon.py | 42 +- tests/unittests/distros/test_resolv.py | 55 +- tests/unittests/distros/test_sles.py | 3 +- tests/unittests/distros/test_sysconfig.py | 62 +- .../unittests/distros/test_user_data_normalize.py | 383 +- tests/unittests/filters/test_launch_index.py | 23 +- tests/unittests/helpers.py | 191 +- tests/unittests/net/test_dhcp.py | 678 +-- tests/unittests/net/test_init.py | 1368 +++--- tests/unittests/net/test_network_state.py | 82 +- tests/unittests/net/test_networkd.py | 2 +- tests/unittests/runs/test_merge_run.py | 49 +- tests/unittests/runs/test_simple_run.py | 132 +- tests/unittests/sources/helpers/test_netlink.py | 357 +- tests/unittests/sources/helpers/test_openstack.py | 51 +- tests/unittests/sources/test_aliyun.py | 217 +- tests/unittests/sources/test_altcloud.py | 311 +- tests/unittests/sources/test_azure.py | 3174 +++++++------ tests/unittests/sources/test_azure_helper.py | 1138 +++-- tests/unittests/sources/test_cloudsigma.py | 72 +- tests/unittests/sources/test_cloudstack.py | 121 +- tests/unittests/sources/test_common.py | 86 +- tests/unittests/sources/test_configdrive.py | 1100 +++-- tests/unittests/sources/test_digitalocean.py | 283 +- tests/unittests/sources/test_ec2.py | 851 ++-- tests/unittests/sources/test_exoscale.py | 248 +- tests/unittests/sources/test_gce.py | 304 +- tests/unittests/sources/test_hetzner.py | 85 +- tests/unittests/sources/test_ibmcloud.py | 299 +- tests/unittests/sources/test_init.py | 879 ++-- tests/unittests/sources/test_lxd.py | 134 +- tests/unittests/sources/test_maas.py | 147 +- tests/unittests/sources/test_nocloud.py | 320 +- tests/unittests/sources/test_opennebula.py | 888 ++-- tests/unittests/sources/test_openstack.py | 652 +-- tests/unittests/sources/test_oracle.py | 412 +- tests/unittests/sources/test_ovf.py | 1053 +++-- tests/unittests/sources/test_rbx.py | 215 +- 
tests/unittests/sources/test_scaleway.py | 481 +- tests/unittests/sources/test_smartos.py | 956 ++-- tests/unittests/sources/test_upcloud.py | 161 +- tests/unittests/sources/test_vmware.py | 12 +- tests/unittests/sources/test_vultr.py | 375 +- .../unittests/sources/vmware/test_custom_script.py | 61 +- .../sources/vmware/test_guestcust_util.py | 79 +- .../sources/vmware/test_vmware_config_file.py | 430 +- tests/unittests/test__init__.py | 193 +- tests/unittests/test_atomic_helper.py | 4 +- tests/unittests/test_builtin_handlers.py | 405 +- tests/unittests/test_cli.py | 214 +- tests/unittests/test_conftest.py | 10 +- tests/unittests/test_cs_util.py | 39 +- tests/unittests/test_data.py | 526 ++- tests/unittests/test_dhclient_hook.py | 89 +- tests/unittests/test_dmi.py | 90 +- tests/unittests/test_ds_identify.py | 1609 ++++--- tests/unittests/test_ec2_util.py | 376 +- tests/unittests/test_event.py | 16 +- tests/unittests/test_features.py | 36 +- tests/unittests/test_gpg.py | 103 +- tests/unittests/test_helpers.py | 11 +- tests/unittests/test_log.py | 12 +- tests/unittests/test_merging.py | 123 +- tests/unittests/test_net.py | 4833 ++++++++++++-------- tests/unittests/test_net_activators.py | 154 +- tests/unittests/test_net_freebsd.py | 45 +- tests/unittests/test_netinfo.py | 193 +- tests/unittests/test_pathprefix2dict.py | 28 +- tests/unittests/test_registry.py | 21 +- tests/unittests/test_render_cloudcfg.py | 71 +- tests/unittests/test_reporting.py | 379 +- tests/unittests/test_reporting_hyperv.py | 193 +- tests/unittests/test_simpletable.py | 47 +- tests/unittests/test_sshutil.py | 817 ++-- tests/unittests/test_stages.py | 444 +- tests/unittests/test_subp.py | 289 +- tests/unittests/test_temp_utils.py | 118 +- tests/unittests/test_templating.py | 103 +- tests/unittests/test_url_helper.py | 134 +- tests/unittests/test_util.py | 934 ++-- tests/unittests/test_version.py | 11 +- tests/unittests/util.py | 14 +- tools/mock-meta.py | 301 +- tools/validate-yaml.py | 4 +- tox.ini | 28 +- 441 files changed, 43425 insertions(+), 31496 deletions(-) create mode 100644 pyproject.toml (limited to 'cloudinit/config/cc_write_files_deferred.py') diff --git a/.travis.yml b/.travis.yml index 9470cc31..c458db48 100644 --- a/.travis.yml +++ b/.travis.yml @@ -133,6 +133,10 @@ matrix: env: TOXENV=flake8 - python: 3.6 env: TOXENV=pylint + - python: 3.6 + env: TOXENV=black + - python: 3.6 + env: TOXENV=isort - python: 3.7 env: TOXENV=doc # Test all supported Python versions (but at the end, so we schedule diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 06b31497..aa09c61e 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,6 +19,7 @@ Before any pull request can be accepted, you must do the following: `tools/.github-cla-signers`_ * Add or update any `unit tests`_ accordingly * Add or update any `integration tests`_ (if applicable) +* Format code (using black and isort) with `tox -e format` * Ensure unit tests and linting pass using `tox`_ * Submit a PR against the `main` branch of the `cloud-init` repository @@ -133,6 +134,10 @@ Do these things for each feature or bug git commit +* Apply black and isort formatting rules with `tox`_:: + + tox -e format + * Run unit tests and lint/formatting checks with `tox`_:: tox diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 99e5c203..36a5be78 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -5,62 +5,111 @@ import argparse import re import sys +from datetime import datetime from cloudinit.util import 
json_dumps -from datetime import datetime -from . import dump -from . import show + +from . import dump, show def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-analyze', - description='Devel tool: Analyze cloud-init logs and data') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-analyze", + description="Devel tool: Analyze cloud-init logs and data", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True parser_blame = subparsers.add_parser( - 'blame', help='Print list of executed stages ordered by time to init') + "blame", help="Print list of executed stages ordered by time to init" + ) parser_blame.add_argument( - '-i', '--infile', action='store', dest='infile', - default='/var/log/cloud-init.log', - help='specify where to read input.') + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) parser_blame.add_argument( - '-o', '--outfile', action='store', dest='outfile', default='-', - help='specify where to write output. ') - parser_blame.set_defaults(action=('blame', analyze_blame)) + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. ", + ) + parser_blame.set_defaults(action=("blame", analyze_blame)) parser_show = subparsers.add_parser( - 'show', help='Print list of in-order events during execution') - parser_show.add_argument('-f', '--format', action='store', - dest='print_format', default='%I%D @%Es +%ds', - help='specify formatting of output.') - parser_show.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input.') - parser_show.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_show.set_defaults(action=('show', analyze_show)) + "show", help="Print list of in-order events during execution" + ) + parser_show.add_argument( + "-f", + "--format", + action="store", + dest="print_format", + default="%I%D @%Es +%ds", + help="specify formatting of output.", + ) + parser_show.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) + parser_show.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_show.set_defaults(action=("show", analyze_show)) parser_dump = subparsers.add_parser( - 'dump', help='Dump cloud-init events in JSON format') - parser_dump.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. ') - parser_dump.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output. ') - parser_dump.set_defaults(action=('dump', analyze_dump)) + "dump", help="Dump cloud-init events in JSON format" + ) + parser_dump.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_dump.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. 
", + ) + parser_dump.set_defaults(action=("dump", analyze_dump)) parser_boot = subparsers.add_parser( - 'boot', help='Print list of boot times for kernel and cloud-init') - parser_boot.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. ') - parser_boot.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_boot.set_defaults(action=('boot', analyze_boot)) + "boot", help="Print list of boot times for kernel and cloud-init" + ) + parser_boot.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_boot.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_boot.set_defaults(action=("boot", analyze_boot)) return parser @@ -78,61 +127,68 @@ def analyze_boot(name, args): """ infh, outfh = configure_io(args) kernel_info = show.dist_check_timestamp() - status_code, kernel_start, kernel_end, ci_sysd_start = \ - kernel_info + status_code, kernel_start, kernel_end, ci_sysd_start = kernel_info kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start) kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end) ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start) try: - last_init_local = \ - [e for e in _get_events(infh) if e['name'] == 'init-local' and - 'starting search' in e['description']][-1] - ci_start = datetime.utcfromtimestamp(last_init_local['timestamp']) + last_init_local = [ + e + for e in _get_events(infh) + if e["name"] == "init-local" + and "starting search" in e["description"] + ][-1] + ci_start = datetime.utcfromtimestamp(last_init_local["timestamp"]) except IndexError: - ci_start = 'Could not find init-local log-line in cloud-init.log' + ci_start = "Could not find init-local log-line in cloud-init.log" status_code = show.FAIL_CODE - FAILURE_MSG = 'Your Linux distro or container does not support this ' \ - 'functionality.\n' \ - 'You must be running a Kernel Telemetry supported ' \ - 'distro.\nPlease check ' \ - 'https://cloudinit.readthedocs.io/en/latest' \ - '/topics/analyze.html for more ' \ - 'information on supported distros.\n' - - SUCCESS_MSG = '-- Most Recent Boot Record --\n' \ - ' Kernel Started at: {k_s_t}\n' \ - ' Kernel ended boot at: {k_e_t}\n' \ - ' Kernel time to boot (seconds): {k_r}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Time between Kernel end boot and Cloud-init ' \ - 'activation (seconds): {bt_r}\n' \ - ' Cloud-init start: {ci_start}\n' - - CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \ - ' Container started at: {k_s_t}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Cloud-init start: {ci_start}\n' \ - + FAILURE_MSG = ( + "Your Linux distro or container does not support this " + "functionality.\n" + "You must be running a Kernel Telemetry supported " + "distro.\nPlease check " + "https://cloudinit.readthedocs.io/en/latest" + "/topics/analyze.html for more " + "information on supported distros.\n" + ) + + SUCCESS_MSG = ( + "-- Most Recent Boot Record --\n" + " Kernel Started at: {k_s_t}\n" + " Kernel ended boot at: {k_e_t}\n" + " Kernel time to boot (seconds): {k_r}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Time between Kernel end boot and Cloud-init " + "activation (seconds): {bt_r}\n" + " Cloud-init start: {ci_start}\n" + 
) + + CONTAINER_MSG = ( + "-- Most Recent Container Boot Record --\n" + " Container started at: {k_s_t}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Cloud-init start: {ci_start}\n" + ) status_map = { show.FAIL_CODE: FAILURE_MSG, show.CONTAINER_CODE: CONTAINER_MSG, - show.SUCCESS_CODE: SUCCESS_MSG + show.SUCCESS_CODE: SUCCESS_MSG, } kernel_runtime = kernel_end - kernel_start between_process_runtime = ci_sysd_start - kernel_end kwargs = { - 'k_s_t': kernel_start_timestamp, - 'k_e_t': kernel_end_timestamp, - 'k_r': kernel_runtime, - 'bt_r': between_process_runtime, - 'k_e': kernel_end, - 'k_s': kernel_start, - 'ci_sysd': ci_sysd_start, - 'ci_sysd_t': ci_sysd_start_timestamp, - 'ci_start': ci_start + "k_s_t": kernel_start_timestamp, + "k_e_t": kernel_end_timestamp, + "k_r": kernel_runtime, + "bt_r": between_process_runtime, + "k_e": kernel_end, + "k_s": kernel_start, + "ci_sysd": ci_sysd_start, + "ci_sysd_t": ci_sysd_start_timestamp, + "ci_start": ci_start, } outfh.write(status_map[status_code].format(**kwargs)) @@ -152,15 +208,16 @@ def analyze_blame(name, args): and sorting by record data ('delta') """ (infh, outfh) = configure_io(args) - blame_format = ' %ds (%n)' - r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE) - for idx, record in enumerate(show.show_events(_get_events(infh), - blame_format)): + blame_format = " %ds (%n)" + r = re.compile(r"(^\s+\d+\.\d+)", re.MULTILINE) + for idx, record in enumerate( + show.show_events(_get_events(infh), blame_format) + ): srecs = sorted(filter(r.match, record), reverse=True) - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('\n'.join(srecs) + '\n') - outfh.write('\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write("\n".join(srecs) + "\n") + outfh.write("\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_show(name, args): @@ -184,21 +241,25 @@ def analyze_show(name, args): Finished stage: (modules-final) 0.NNN seconds """ (infh, outfh) = configure_io(args) - for idx, record in enumerate(show.show_events(_get_events(infh), - args.print_format)): - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('The total time elapsed since completing an event is' - ' printed after the "@" character.\n') - outfh.write('The time the event takes is printed after the "+" ' - 'character.\n\n') - outfh.write('\n'.join(record) + '\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + for idx, record in enumerate( + show.show_events(_get_events(infh), args.print_format) + ): + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write( + "The total time elapsed since completing an event is" + ' printed after the "@" character.\n' + ) + outfh.write( + 'The time the event takes is printed after the "+" character.\n\n' + ) + outfh.write("\n".join(record) + "\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_dump(name, args): """Dump cloud-init events in json format""" (infh, outfh) = configure_io(args) - outfh.write(json_dumps(_get_events(infh)) + '\n') + outfh.write(json_dumps(_get_events(infh)) + "\n") def _get_events(infile): @@ -211,28 +272,28 @@ def _get_events(infile): def configure_io(args): """Common parsing and setup of input/output files""" - if args.infile == '-': + if args.infile == "-": infh = sys.stdin else: try: - infh = open(args.infile, 'r') + infh = open(args.infile, "r") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.infile) + sys.stderr.write("Cannot open 
file %s\n" % args.infile) sys.exit(1) - if args.outfile == '-': + if args.outfile == "-": outfh = sys.stdout else: try: - outfh = open(args.outfile, 'w') + outfh = open(args.outfile, "w") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.outfile) + sys.stderr.write("Cannot open file %s\n" % args.outfile) sys.exit(1) return (infh, outfh) -if __name__ == '__main__': +if __name__ == "__main__": parser = get_parser() args = parser.parse_args() (name, action_functor) = args.action diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index 62ad51fe..8e6e3c6a 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -1,21 +1,20 @@ # This file is part of cloud-init. See LICENSE file for license information. import calendar -from datetime import datetime import sys +from datetime import datetime -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util stage_to_description = { - 'finished': 'finished running cloud-init', - 'init-local': 'starting search for local datasources', - 'init-network': 'searching for network datasources', - 'init': 'searching for network datasources', - 'modules-config': 'running config modules', - 'modules-final': 'finalizing modules', - 'modules': 'running modules for', - 'single': 'running single module ', + "finished": "finished running cloud-init", + "init-local": "starting search for local datasources", + "init-network": "searching for network datasources", + "init": "searching for network datasources", + "modules-config": "running config modules", + "modules-final": "finalizing modules", + "modules": "running modules for", + "single": "running single module ", } # logger's asctime format @@ -34,11 +33,11 @@ def parse_timestamp(timestampstr): if timestampstr.split()[0] in months: # Aug 29 22:55:26 FMT = DEFAULT_FMT - if '.' in timestampstr: + if "." in timestampstr: FMT = CLOUD_INIT_JOURNALCTL_FMT - dt = datetime.strptime(timestampstr + " " + - str(datetime.now().year), - FMT) + dt = datetime.strptime( + timestampstr + " " + str(datetime.now().year), FMT + ) timestamp = dt.strftime("%s.%f") elif "," in timestampstr: # 2016-09-12 14:39:20,839 @@ -52,7 +51,7 @@ def parse_timestamp(timestampstr): def parse_timestamp_from_date(timestampstr): - out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr]) + out, _ = subp.subp(["date", "+%s.%3N", "-d", timestampstr]) timestamp = out.strip() return float(timestamp) @@ -79,8 +78,8 @@ def parse_ci_logline(line): # Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \ # init-local/check-cache: attempting to read from cache [check] - amazon_linux_2_sep = ' cloud-init[' - separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep] + amazon_linux_2_sep = " cloud-init[" + separators = [" - ", " [CLOUDINIT] ", amazon_linux_2_sep] found = False for sep in separators: if sep in line: @@ -99,7 +98,7 @@ def parse_ci_logline(line): if "," in timehost: timestampstr, extra = timehost.split(",") timestampstr += ",%s" % extra.split()[0] - if ' ' in extra: + if " " in extra: hostname = extra.split()[-1] else: hostname = timehost.split()[-1] @@ -111,11 +110,11 @@ def parse_ci_logline(line): eventstr = eventstr.split(maxsplit=1)[1] else: timestampstr = timehost.split(hostname)[0].strip() - if 'Cloud-init v.' in eventstr: - event_type = 'start' - if 'running' in eventstr: - stage_and_timestamp = eventstr.split('running')[1].lstrip() - event_name, _ = stage_and_timestamp.split(' at ') + if "Cloud-init v." 
in eventstr: + event_type = "start" + if "running" in eventstr: + stage_and_timestamp = eventstr.split("running")[1].lstrip() + event_name, _ = stage_and_timestamp.split(" at ") event_name = event_name.replace("'", "").replace(":", "-") if event_name == "init": event_name = "init-network" @@ -128,17 +127,17 @@ def parse_ci_logline(line): event_description = eventstr.split(event_name)[1].strip() event = { - 'name': event_name.rstrip(":"), - 'description': event_description, - 'timestamp': parse_timestamp(timestampstr), - 'origin': 'cloudinit', - 'event_type': event_type.rstrip(":"), + "name": event_name.rstrip(":"), + "description": event_description, + "timestamp": parse_timestamp(timestampstr), + "origin": "cloudinit", + "event_type": event_type.rstrip(":"), } - if event['event_type'] == "finish": + if event["event_type"] == "finish": result = event_description.split(":")[0] - desc = event_description.split(result)[1].lstrip(':').strip() - event['result'] = result - event['description'] = desc.strip() + desc = event_description.split(result)[1].lstrip(":").strip() + event["result"] = result + event["description"] = desc.strip() return event @@ -146,10 +145,10 @@ def parse_ci_logline(line): def dump_events(cisource=None, rawdata=None): events = [] event = None - CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.'] + CI_EVENT_MATCHES = ["start:", "finish:", "Cloud-init v."] if not any([cisource, rawdata]): - raise ValueError('Either cisource or rawdata parameters are required') + raise ValueError("Either cisource or rawdata parameters are required") if rawdata: data = rawdata.splitlines() @@ -162,7 +161,7 @@ def dump_events(cisource=None, rawdata=None): try: event = parse_ci_logline(line) except ValueError: - sys.stderr.write('Skipping invalid entry\n') + sys.stderr.write("Skipping invalid entry\n") if event: events.append(event) diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index 01a4d3e5..5fd9cdfd 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -8,11 +8,10 @@ import base64 import datetime import json import os -import time import sys +import time -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.distros import uses_systemd # Example events: @@ -35,24 +34,25 @@ from cloudinit.distros import uses_systemd # } format_key = { - '%d': 'delta', - '%D': 'description', - '%E': 'elapsed', - '%e': 'event_type', - '%I': 'indent', - '%l': 'level', - '%n': 'name', - '%o': 'origin', - '%r': 'result', - '%t': 'timestamp', - '%T': 'total_time', + "%d": "delta", + "%D": "description", + "%E": "elapsed", + "%e": "event_type", + "%I": "indent", + "%l": "level", + "%n": "name", + "%o": "origin", + "%r": "result", + "%t": "timestamp", + "%T": "total_time", } -formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v) - for k, v in format_key.items()]) -SUCCESS_CODE = 'successful' -FAIL_CODE = 'failure' -CONTAINER_CODE = 'container' +formatting_help = " ".join( + ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()] +) +SUCCESS_CODE = "successful" +FAIL_CODE = "failure" +CONTAINER_CODE = "container" TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1) @@ -60,7 +60,7 @@ def format_record(msg, event): for i, j in format_key.items(): if i in msg: # ensure consistent formatting of time values - if j in ['delta', 'elapsed', 'timestamp']: + if j in ["delta", "elapsed", "timestamp"]: msg = msg.replace(i, "{%s:08.5f}" % j) else: msg = msg.replace(i, "{%s}" % j) @@ -68,13 +68,13 @@ def 
format_record(msg, event): def dump_event_files(event): - content = dict((k, v) for k, v in event.items() if k not in ['content']) - files = content['files'] + content = dict((k, v) for k, v in event.items() if k not in ["content"]) + files = content["files"] saved = [] for f in files: - fname = f['path'] + fname = f["path"] fn_local = os.path.basename(fname) - fcontent = base64.b64decode(f['content']).decode('ascii') + fcontent = base64.b64decode(f["content"]).decode("ascii") util.write_file(fn_local, fcontent) saved.append(fn_local) @@ -83,13 +83,13 @@ def dump_event_files(event): def event_name(event): if event: - return event.get('name') + return event.get("name") return None def event_type(event): if event: - return event.get('event_type') + return event.get("event_type") return None @@ -100,7 +100,7 @@ def event_parent(event): def event_timestamp(event): - return float(event.get('timestamp')) + return float(event.get("timestamp")) def event_datetime(event): @@ -117,41 +117,44 @@ def event_duration(start, finish): def event_record(start_time, start, finish): record = finish.copy() - record.update({ - 'delta': event_duration(start, finish), - 'elapsed': delta_seconds(start_time, event_datetime(start)), - 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->', - }) + record.update( + { + "delta": event_duration(start, finish), + "elapsed": delta_seconds(start_time, event_datetime(start)), + "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->", + } + ) return record def total_time_record(total_time): - return 'Total Time: %3.5f seconds\n' % total_time + return "Total Time: %3.5f seconds\n" % total_time class SystemctlReader(object): - ''' + """ Class for dealing with all systemctl subp calls in a consistent manner. - ''' + """ + def __init__(self, property, parameter=None): self.epoch = None - self.args = ['/bin/systemctl', 'show'] + self.args = ["/bin/systemctl", "show"] if parameter: self.args.append(parameter) - self.args.extend(['-p', property]) + self.args.extend(["-p", property]) # Don't want the init of our object to break. Instead of throwing # an exception, set an error code that gets checked when data is # requested from the object self.failure = self.subp() def subp(self): - ''' + """ Make a subp call based on set args and handle errors by setting failure code :return: whether the subp call failed or not - ''' + """ try: value, err = subp.subp(self.args, capture=True) if err: @@ -162,41 +165,41 @@ class SystemctlReader(object): return systemctl_fail def parse_epoch_as_float(self): - ''' + """ If subp call succeeded, return the timestamp from subp as a float. :return: timestamp as a float - ''' + """ # subp has 2 ways to fail: it either fails and throws an exception, # or returns an error code. Raise an exception here in order to make # sure both scenarios throw exceptions if self.failure: - raise RuntimeError('Subprocess call to systemctl has failed, ' - 'returning error code ({})' - .format(self.failure)) + raise RuntimeError( + "Subprocess call to systemctl has failed, " + "returning error code ({})".format(self.failure) + ) # Output from systemctl show has the format Property=Value. # For example, UserspaceMonotonic=1929304 - timestamp = self.epoch.split('=')[1] + timestamp = self.epoch.split("=")[1] # Timestamps reported by systemctl are in microseconds, converting return float(timestamp) / 1000000 def dist_check_timestamp(): - ''' + """ Determine which init system a particular linux distro is using. 
Each init system (systemd, upstart, etc) has a different way of providing timestamps. :return: timestamps of kernelboot, kernelendboot, and cloud-initstart or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved. - ''' + """ if uses_systemd(): return gather_timestamps_using_systemd() # Use dmesg to get timestamps if the distro does not have systemd - if util.is_FreeBSD() or 'gentoo' in \ - util.system_info()['system'].lower(): + if util.is_FreeBSD() or "gentoo" in util.system_info()["system"].lower(): return gather_timestamps_using_dmesg() # this distro doesn't fit anything that is supported by cloud-init. just @@ -205,20 +208,20 @@ def dist_check_timestamp(): def gather_timestamps_using_dmesg(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization using dmesg as opposed to systemctl :return: the two timestamps plus a dummy timestamp to keep consistency with gather_timestamps_using_systemd - ''' + """ try: - data, _ = subp.subp(['dmesg'], capture=True) + data, _ = subp.subp(["dmesg"], capture=True) split_entries = data[0].splitlines() for i in split_entries: - if i.decode('UTF-8').find('user') != -1: - splitup = i.decode('UTF-8').split() - stripped = splitup[1].strip(']') + if i.decode("UTF-8").find("user") != -1: + splitup = i.decode("UTF-8").split() + stripped = splitup[1].strip("]") # kernel timestamp from dmesg is equal to 0, # with the userspace timestamp relative to it. @@ -228,8 +231,7 @@ def gather_timestamps_using_dmesg(): # systemd wont start cloud-init in this case, # so we cannot get that timestamp - return SUCCESS_CODE, kernel_start, kernel_end, \ - kernel_end + return SUCCESS_CODE, kernel_start, kernel_end, kernel_end except Exception: pass @@ -237,18 +239,20 @@ def gather_timestamps_using_dmesg(): def gather_timestamps_using_systemd(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization. and cloud-init systemd unit activation :return: the three timestamps - ''' + """ kernel_start = float(time.time()) - float(util.uptime()) try: - delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\ - .parse_epoch_as_float() - delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic', - 'cloud-init-local').parse_epoch_as_float() + delta_k_end = SystemctlReader( + "UserspaceTimestampMonotonic" + ).parse_epoch_as_float() + delta_ci_s = SystemctlReader( + "InactiveExitTimestampMonotonic", "cloud-init-local" + ).parse_epoch_as_float() base_time = kernel_start status = SUCCESS_CODE # lxc based containers do not set their monotonic zero point to be when @@ -262,12 +266,13 @@ def gather_timestamps_using_systemd(): # in containers when https://github.com/lxc/lxcfs/issues/292 # is fixed, util.uptime() should be used instead of stat on try: - file_stat = os.stat('/proc/1/cmdline') + file_stat = os.stat("/proc/1/cmdline") kernel_start = file_stat.st_atime except OSError as err: - raise RuntimeError('Could not determine container boot ' - 'time from /proc/1/cmdline. ({})' - .format(err)) from err + raise RuntimeError( + "Could not determine container boot " + "time from /proc/1/cmdline. 
({})".format(err) + ) from err status = CONTAINER_CODE else: status = FAIL_CODE @@ -283,10 +288,14 @@ def gather_timestamps_using_systemd(): return status, kernel_start, kernel_end, cloudinit_sysd -def generate_records(events, blame_sort=False, - print_format="(%n) %d seconds in %I%D", - dump_files=False, log_datafiles=False): - ''' +def generate_records( + events, + blame_sort=False, + print_format="(%n) %d seconds in %I%D", + dump_files=False, + log_datafiles=False, +): + """ Take in raw events and create parent-child dependencies between events in order to order events in chronological order. @@ -298,9 +307,9 @@ def generate_records(events, blame_sort=False, :param log_datafiles: whether or not to log events generated :return: boot records ordered chronologically - ''' + """ - sorted_events = sorted(events, key=lambda x: x['timestamp']) + sorted_events = sorted(events, key=lambda x: x["timestamp"]) records = [] start_time = None total_time = 0.0 @@ -316,8 +325,8 @@ def generate_records(events, blame_sort=False, except IndexError: next_evt = None - if event_type(event) == 'start': - if event.get('name') in stages_seen: + if event_type(event) == "start": + if event.get("name") in stages_seen: records.append(total_time_record(total_time)) boot_records.append(records) records = [] @@ -331,25 +340,28 @@ def generate_records(events, blame_sort=False, # see if we have a pair if event_name(event) == event_name(next_evt): - if event_type(next_evt) == 'finish': - records.append(format_record(print_format, - event_record(start_time, - event, - next_evt))) + if event_type(next_evt) == "finish": + records.append( + format_record( + print_format, + event_record(start_time, event, next_evt), + ) + ) else: # This is a parent event - records.append("Starting stage: %s" % event.get('name')) + records.append("Starting stage: %s" % event.get("name")) unprocessed.append(event) - stages_seen.append(event.get('name')) + stages_seen.append(event.get("name")) continue else: prev_evt = unprocessed.pop() if event_name(event) == event_name(prev_evt): record = event_record(start_time, prev_evt, event) - records.append(format_record("Finished stage: " - "(%n) %d seconds", - record) + "\n") - total_time += record.get('delta') + records.append( + format_record("Finished stage: (%n) %d seconds", record) + + "\n" + ) + total_time += record.get("delta") else: # not a match, put it back unprocessed.append(prev_evt) @@ -360,7 +372,7 @@ def generate_records(events, blame_sort=False, def show_events(events, print_format): - ''' + """ A passthrough method that makes it easier to call generate_records() :param events: JSONs from dump that represents events taken from logs @@ -368,18 +380,18 @@ def show_events(events, print_format): and time taken by the event in one line :return: boot records ordered chronologically - ''' + """ return generate_records(events, print_format=print_format) def load_events_infile(infile): - ''' + """ Takes in a log file, read it, and convert to json. :param infile: The Log file to be read :return: json version of logfile, raw file - ''' + """ data = infile.read() try: return json.loads(data), data diff --git a/cloudinit/apport.py b/cloudinit/apport.py index aadc638f..92068aa9 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -2,127 +2,143 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-'''Cloud-init apport interface''' +"""Cloud-init apport interface""" try: from apport.hookutils import ( - attach_file, attach_root_command_outputs, root_command_output) + attach_file, + attach_root_command_outputs, + root_command_output, + ) + has_apport = True except ImportError: has_apport = False KNOWN_CLOUD_NAMES = [ - 'AliYun', - 'AltCloud', - 'Amazon - Ec2', - 'Azure', - 'Bigstep', - 'Brightbox', - 'CloudSigma', - 'CloudStack', - 'DigitalOcean', - 'E24Cloud', - 'GCE - Google Compute Engine', - 'Exoscale', - 'Hetzner Cloud', - 'IBM - (aka SoftLayer or BlueMix)', - 'LXD', - 'MAAS', - 'NoCloud', - 'OpenNebula', - 'OpenStack', - 'Oracle', - 'OVF', - 'RbxCloud - (HyperOne, Rootbox, Rubikon)', - 'OpenTelekomCloud', - 'SAP Converged Cloud', - 'Scaleway', - 'SmartOS', - 'UpCloud', - 'VMware', - 'Vultr', - 'ZStack', - 'Other' + "AliYun", + "AltCloud", + "Amazon - Ec2", + "Azure", + "Bigstep", + "Brightbox", + "CloudSigma", + "CloudStack", + "DigitalOcean", + "E24Cloud", + "GCE - Google Compute Engine", + "Exoscale", + "Hetzner Cloud", + "IBM - (aka SoftLayer or BlueMix)", + "LXD", + "MAAS", + "NoCloud", + "OpenNebula", + "OpenStack", + "Oracle", + "OVF", + "RbxCloud - (HyperOne, Rootbox, Rubikon)", + "OpenTelekomCloud", + "SAP Converged Cloud", + "Scaleway", + "SmartOS", + "UpCloud", + "VMware", + "Vultr", + "ZStack", + "Other", ] # Potentially clear text collected logs -CLOUDINIT_LOG = '/var/log/cloud-init.log' -CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOG = "/var/log/cloud-init.log" +CLOUDINIT_OUTPUT_LOG = "/var/log/cloud-init-output.log" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def attach_cloud_init_logs(report, ui=None): - '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.''' - attach_root_command_outputs(report, { - 'cloud-init-log-warnings': - 'egrep -i "warn|error" /var/log/cloud-init.log', - 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'}) + """Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.""" + attach_root_command_outputs( + report, + { + "cloud-init-log-warnings": ( + 'egrep -i "warn|error" /var/log/cloud-init.log' + ), + "cloud-init-output.log.txt": "cat /var/log/cloud-init-output.log", + }, + ) root_command_output( - ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz']) - attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz') + ["cloud-init", "collect-logs", "-t", "/tmp/cloud-init-logs.tgz"] + ) + attach_file(report, "/tmp/cloud-init-logs.tgz", "logs.tgz") def attach_hwinfo(report, ui=None): - '''Optionally attach hardware info from lshw.''' + """Optionally attach hardware info from lshw.""" prompt = ( - 'Your device details (lshw) may be useful to developers when' - ' addressing this bug, but gathering it requires admin privileges.' - ' Would you like to include this info?') + "Your device details (lshw) may be useful to developers when" + " addressing this bug, but gathering it requires admin privileges." + " Would you like to include this info?" + ) if ui and ui.yesno(prompt): - attach_root_command_outputs(report, {'lshw.txt': 'lshw'}) + attach_root_command_outputs(report, {"lshw.txt": "lshw"}) def attach_cloud_info(report, ui=None): - '''Prompt for cloud details if available.''' + """Prompt for cloud details if available.""" if ui: - prompt = 'Is this machine running in a cloud environment?' + prompt = "Is this machine running in a cloud environment?" 
response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - prompt = ('Please select the cloud vendor or environment in which' - ' this instance is running') + prompt = ( + "Please select the cloud vendor or environment in which" + " this instance is running" + ) response = ui.choice(prompt, KNOWN_CLOUD_NAMES) if response: - report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]] + report["CloudName"] = KNOWN_CLOUD_NAMES[response[0]] else: - report['CloudName'] = 'None' + report["CloudName"] = "None" def attach_user_data(report, ui=None): - '''Optionally provide user-data if desired.''' + """Optionally provide user-data if desired.""" if ui: prompt = ( - 'Your user-data or cloud-config file can optionally be provided' - ' from {0} and could be useful to developers when addressing this' - ' bug. Do you wish to attach user-data to this bug?'.format( - USER_DATA_FILE)) + "Your user-data or cloud-config file can optionally be provided" + " from {0} and could be useful to developers when addressing this" + " bug. Do you wish to attach user-data to this bug?".format( + USER_DATA_FILE + ) + ) response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - attach_file(report, USER_DATA_FILE, 'user_data.txt') + attach_file(report, USER_DATA_FILE, "user_data.txt") def add_bug_tags(report): - '''Add any appropriate tags to the bug.''' - if 'JournalErrors' in report.keys(): - errors = report['JournalErrors'] - if 'Breaking ordering cycle' in errors: - report['Tags'] = 'systemd-ordering' + """Add any appropriate tags to the bug.""" + if "JournalErrors" in report.keys(): + errors = report["JournalErrors"] + if "Breaking ordering cycle" in errors: + report["Tags"] = "systemd-ordering" def add_info(report, ui): - '''This is an entry point to run cloud-init's apport functionality. + """This is an entry point to run cloud-init's apport functionality. Distros which want apport support will have a cloud-init package-hook at /usr/share/apport/package-hooks/cloud-init.py which defines an add_info function and returns the result of cloudinit.apport.add_info(report, ui). - ''' + """ if not has_apport: raise RuntimeError( - 'No apport imports discovered. Apport functionality disabled') + "No apport imports discovered. 
Apport functionality disabled" + ) attach_cloud_init_logs(report, ui) attach_hwinfo(report, ui) attach_cloud_info(report, ui) @@ -130,4 +146,5 @@ def add_info(report, ui): add_bug_tags(report) return True + # vi: ts=4 expandtab diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index 485ff92f..ae117fad 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -10,8 +10,9 @@ _DEF_PERMS = 0o644 LOG = logging.getLogger(__name__) -def write_file(filename, content, mode=_DEF_PERMS, - omode="wb", preserve_mode=False): +def write_file( + filename, content, mode=_DEF_PERMS, omode="wb", preserve_mode=False +): # open filename in mode 'omode', write content, set permissions to 'mode' if preserve_mode: @@ -23,12 +24,18 @@ def write_file(filename, content, mode=_DEF_PERMS, tf = None try: - tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename), - delete=False, mode=omode) + tf = tempfile.NamedTemporaryFile( + dir=os.path.dirname(filename), delete=False, mode=omode + ) LOG.debug( "Atomically writing to file %s (via temporary file %s) - %s: [%o]" " %d bytes/chars", - filename, tf.name, omode, mode, len(content)) + filename, + tf.name, + omode, + mode, + len(content), + ) tf.write(content) tf.close() os.chmod(tf.name, mode) @@ -42,7 +49,11 @@ def write_file(filename, content, mode=_DEF_PERMS, def write_json(filename, data, mode=_DEF_PERMS): # dump json representation of data to file filename. return write_file( - filename, json.dumps(data, indent=1, sort_keys=True) + "\n", - omode="w", mode=mode) + filename, + json.dumps(data, indent=1, sort_keys=True) + "\n", + omode="w", + mode=mode, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 7ae98e1c..91e48103 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -35,7 +35,8 @@ class Cloud(object): reporter = events.ReportEventStack( name="unnamed-cloud-reporter", description="unnamed-cloud-reporter", - reporting_enabled=False) + reporting_enabled=False, + ) self.reporter = reporter # If a 'user' manipulates logging or logging services @@ -56,8 +57,11 @@ class Cloud(object): def get_template_filename(self, name): fn = self.paths.template_tpl % (name) if not os.path.isfile(fn): - LOG.warning("No template found in %s for template named %s", - os.path.dirname(fn), name) + LOG.warning( + "No template found in %s for template named %s", + os.path.dirname(fn), + name, + ) return None return fn @@ -80,7 +84,8 @@ class Cloud(object): def get_hostname(self, fqdn=False, metadata_only=False): return self.datasource.get_hostname( - fqdn=fqdn, metadata_only=metadata_only) + fqdn=fqdn, metadata_only=metadata_only + ) def device_name_to_device(self, name): return self.datasource.device_name_to_device(name) @@ -94,4 +99,5 @@ class Cloud(object): def get_ipath(self, name=None): return self.paths.get_ipath(name) + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 3502dd56..0e1db118 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -10,9 +10,13 @@ import os import sys from cloudinit.stages import Init -from cloudinit.subp import (ProcessExecutionError, subp) +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.util import ( - del_dir, del_file, get_config_logfiles, is_link, error + del_dir, + del_file, + error, + get_config_logfiles, + is_link, ) @@ -27,18 +31,35 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='clean', - description=('Remove logs and artifacts so cloud-init 
re-runs on ' - 'a clean system')) + prog="clean", + description=( + "Remove logs and artifacts so cloud-init re-runs on " + "a clean system" + ), + ) parser.add_argument( - '-l', '--logs', action='store_true', default=False, dest='remove_logs', - help='Remove cloud-init logs.') + "-l", + "--logs", + action="store_true", + default=False, + dest="remove_logs", + help="Remove cloud-init logs.", + ) parser.add_argument( - '-r', '--reboot', action='store_true', default=False, - help='Reboot system after logs are cleaned so cloud-init re-runs.') + "-r", + "--reboot", + action="store_true", + default=False, + help="Reboot system after logs are cleaned so cloud-init re-runs.", + ) parser.add_argument( - '-s', '--seed', action='store_true', default=False, dest='remove_seed', - help='Remove cloud-init seed directory /var/lib/cloud/seed.') + "-s", + "--seed", + action="store_true", + default=False, + dest="remove_seed", + help="Remove cloud-init seed directory /var/lib/cloud/seed.", + ) return parser @@ -59,8 +80,8 @@ def remove_artifacts(remove_logs, remove_seed=False): if not os.path.isdir(init.paths.cloud_dir): return 0 # Artifacts dir already cleaned - seed_path = os.path.join(init.paths.cloud_dir, 'seed') - for path in glob.glob('%s/*' % init.paths.cloud_dir): + seed_path = os.path.join(init.paths.cloud_dir, "seed") + for path in glob.glob("%s/*" % init.paths.cloud_dir): if path == seed_path and not remove_seed: continue try: @@ -69,7 +90,7 @@ def remove_artifacts(remove_logs, remove_seed=False): else: del_file(path) except OSError as e: - error('Could not remove {0}: {1}'.format(path, str(e))) + error("Could not remove {0}: {1}".format(path, str(e))) return 1 return 0 @@ -78,13 +99,15 @@ def handle_clean_args(name, args): """Handle calls to 'cloud-init clean' as a subcommand.""" exit_code = remove_artifacts(args.remove_logs, args.remove_seed) if exit_code == 0 and args.reboot: - cmd = ['shutdown', '-r', 'now'] + cmd = ["shutdown", "-r", "now"] try: subp(cmd, capture=False) except ProcessExecutionError as e: error( 'Could not reboot this system using "{0}": {1}'.format( - cmd, str(e))) + cmd, str(e) + ) + ) exit_code = 1 return exit_code @@ -92,10 +115,10 @@ def handle_clean_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - sys.exit(handle_clean_args('clean', parser.parse_args())) + sys.exit(handle_clean_args("clean", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py index 0cdc9675..b92b03a8 100755 --- a/cloudinit/cmd/cloud_id.py +++ b/cloudinit/cmd/cloud_id.py @@ -6,13 +6,16 @@ import argparse import json import sys -from cloudinit.util import error from cloudinit.sources import ( - INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) + INSTANCE_JSON_FILE, + METADATA_UNKNOWN, + canonical_cloud_id, +) +from cloudinit.util import error -DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE +DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE -NAME = 'cloud-id' +NAME = "cloud-id" def get_parser(parser=None): @@ -27,17 +30,30 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( prog=NAME, - description='Report the canonical cloud-id for this instance') + description="Report the canonical cloud-id for this instance", + ) parser.add_argument( - '-j', '--json', action='store_true', default=False, - help='Report all standardized cloud-id information as json.') + 
"-j", + "--json", + action="store_true", + default=False, + help="Report all standardized cloud-id information as json.", + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help='Report extended cloud-id information as tab-delimited string.') + "-l", + "--long", + action="store_true", + default=False, + help="Report extended cloud-id information as tab-delimited string.", + ) parser.add_argument( - '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON, - help=('Path to instance-data.json file. Default is %s' % - DEFAULT_INSTANCE_JSON)) + "-i", + "--instance-data", + type=str, + default=DEFAULT_INSTANCE_JSON, + help="Path to instance-data.json file. Default is %s" + % DEFAULT_INSTANCE_JSON, + ) return parser @@ -53,24 +69,28 @@ def handle_args(name, args): except IOError: return error( "File not found '%s'. Provide a path to instance data json file" - ' using --instance-data' % args.instance_data) + " using --instance-data" % args.instance_data + ) except ValueError as e: return error( - "File '%s' is not valid json. %s" % (args.instance_data, e)) - v1 = instance_data.get('v1', {}) + "File '%s' is not valid json. %s" % (args.instance_data, e) + ) + v1 = instance_data.get("v1", {}) cloud_id = canonical_cloud_id( - v1.get('cloud_name', METADATA_UNKNOWN), - v1.get('region', METADATA_UNKNOWN), - v1.get('platform', METADATA_UNKNOWN)) + v1.get("cloud_name", METADATA_UNKNOWN), + v1.get("region", METADATA_UNKNOWN), + v1.get("platform", METADATA_UNKNOWN), + ) if args.json: - v1['cloud_id'] = cloud_id - response = json.dumps( # Pretty, sorted json - v1, indent=1, sort_keys=True, separators=(',', ': ')) + v1["cloud_id"] = cloud_id + response = json.dumps( # Pretty, sorted json + v1, indent=1, sort_keys=True, separators=(",", ": ") + ) elif args.long: - response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN)) + response = "%s\t%s" % (cloud_id, v1.get("region", METADATA_UNKNOWN)) else: response = cloud_id - sys.stdout.write('%s\n' % response) + sys.stdout.write("%s\n" % response) return 0 @@ -80,7 +100,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py index 3ae28b69..ead5f7a9 100644 --- a/cloudinit/cmd/devel/__init__.py +++ b/cloudinit/cmd/devel/__init__.py @@ -11,7 +11,7 @@ from cloudinit.stages import Init def addLogHandlerCLI(logger, log_level): """Add a commandline logging handler to emit messages to stderr.""" - formatter = logging.Formatter('%(levelname)s: %(message)s') + formatter = logging.Formatter("%(levelname)s: %(message)s") log.setupBasicLogging(log_level, formatter=formatter) return logger @@ -22,4 +22,5 @@ def read_cfg_paths(): init.read_cfg() return init.paths + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py index f6f36a00..a9be0379 100644 --- a/cloudinit/cmd/devel/hotplug_hook.py +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -6,20 +6,17 @@ import os import sys import time -from cloudinit import log -from cloudinit import reporting -from cloudinit import stages +from cloudinit import log, reporting, stages from cloudinit.event import EventScope, EventType from cloudinit.net import activators, read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data from cloudinit.reporting import events -from cloudinit.stages import Init from cloudinit.sources import DataSource # noqa: F401 from 
cloudinit.sources import DataSourceNotFoundException - +from cloudinit.stages import Init LOG = log.getLogger(__name__) -NAME = 'hotplug-hook' +NAME = "hotplug-hook" def get_parser(parser=None): @@ -35,33 +32,38 @@ def get_parser(parser=None): parser.description = __doc__ parser.add_argument( - "-s", "--subsystem", required=True, + "-s", + "--subsystem", + required=True, help="subsystem to act on", - choices=['net'] + choices=["net"], ) subparsers = parser.add_subparsers( - title='Hotplug Action', - dest='hotplug_action' + title="Hotplug Action", dest="hotplug_action" ) subparsers.required = True subparsers.add_parser( - 'query', - help='query if hotplug is enabled for given subsystem' + "query", help="query if hotplug is enabled for given subsystem" ) parser_handle = subparsers.add_parser( - 'handle', help='handle the hotplug event') + "handle", help="handle the hotplug event" + ) parser_handle.add_argument( - "-d", "--devpath", required=True, + "-d", + "--devpath", + required=True, metavar="PATH", - help="sysfs path to hotplugged device" + help="sysfs path to hotplugged device", ) parser_handle.add_argument( - "-u", "--udevaction", required=True, + "-u", + "--udevaction", + required=True, help="action to take", - choices=['add', 'remove'] + choices=["add", "remove"], ) return parser @@ -90,27 +92,29 @@ class UeventHandler(abc.ABC): def detect_hotplugged_device(self): detect_presence = None - if self.action == 'add': + if self.action == "add": detect_presence = True - elif self.action == 'remove': + elif self.action == "remove": detect_presence = False else: - raise ValueError('Unknown action: %s' % self.action) + raise ValueError("Unknown action: %s" % self.action) if detect_presence != self.device_detected(): raise RuntimeError( - 'Failed to detect %s in updated metadata' % self.id) + "Failed to detect %s in updated metadata" % self.id + ) def success(self): return self.success_fn() def update_metadata(self): - result = self.datasource.update_metadata_if_supported([ - EventType.HOTPLUG]) + result = self.datasource.update_metadata_if_supported( + [EventType.HOTPLUG] + ) if not result: raise RuntimeError( - 'Datasource %s not updated for ' - 'event %s' % (self.datasource, EventType.HOTPLUG) + "Datasource %s not updated for event %s" + % (self.datasource, EventType.HOTPLUG) ) return result @@ -118,7 +122,7 @@ class UeventHandler(abc.ABC): class NetHandler(UeventHandler): def __init__(self, datasource, devpath, action, success_fn): # convert devpath to mac address - id = read_sys_net_safe(os.path.basename(devpath), 'address') + id = read_sys_net_safe(os.path.basename(devpath), "address") super().__init__(id, datasource, devpath, action, success_fn) def apply(self): @@ -128,14 +132,16 @@ class NetHandler(UeventHandler): ) interface_name = os.path.basename(self.devpath) activator = activators.select_activator() - if self.action == 'add': + if self.action == "add": if not activator.bring_up_interface(interface_name): raise RuntimeError( - 'Failed to bring up device: {}'.format(self.devpath)) - elif self.action == 'remove': + "Failed to bring up device: {}".format(self.devpath) + ) + elif self.action == "remove": if not activator.bring_down_interface(interface_name): raise RuntimeError( - 'Failed to bring down device: {}'.format(self.devpath)) + "Failed to bring down device: {}".format(self.devpath) + ) @property def config(self): @@ -144,15 +150,16 @@ class NetHandler(UeventHandler): def device_detected(self) -> bool: netstate = parse_net_config_data(self.config) found = [ - iface for iface in 
netstate.iter_interfaces() - if iface.get('mac_address') == self.id + iface + for iface in netstate.iter_interfaces() + if iface.get("mac_address") == self.id ] - LOG.debug('Ifaces with ID=%s : %s', self.id, found) + LOG.debug("Ifaces with ID=%s : %s", self.id, found) return len(found) > 0 SUBSYSTEM_PROPERTES_MAP = { - 'net': (NetHandler, EventScope.NETWORK), + "net": (NetHandler, EventScope.NETWORK), } @@ -161,66 +168,65 @@ def is_enabled(hotplug_init, subsystem): scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1] except KeyError as e: raise Exception( - 'hotplug-hook: cannot handle events for subsystem: {}'.format( - subsystem) + "hotplug-hook: cannot handle events for subsystem: {}".format( + subsystem + ) ) from e return stages.update_event_enabled( datasource=hotplug_init.datasource, cfg=hotplug_init.cfg, event_source_type=EventType.HOTPLUG, - scope=scope + scope=scope, ) def initialize_datasource(hotplug_init, subsystem): - LOG.debug('Fetching datasource') + LOG.debug("Fetching datasource") datasource = hotplug_init.fetch(existing="trust") if not datasource.get_supported_events([EventType.HOTPLUG]): - LOG.debug('hotplug not supported for event of type %s', subsystem) + LOG.debug("hotplug not supported for event of type %s", subsystem) return if not is_enabled(hotplug_init, subsystem): - LOG.debug('hotplug not enabled for event of type %s', subsystem) + LOG.debug("hotplug not enabled for event of type %s", subsystem) return return datasource -def handle_hotplug( - hotplug_init: Init, devpath, subsystem, udevaction -): +def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction): datasource = initialize_datasource(hotplug_init, subsystem) if not datasource: return handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0] - LOG.debug('Creating %s event handler', subsystem) + LOG.debug("Creating %s event handler", subsystem) event_handler = handler_cls( datasource=datasource, devpath=devpath, action=udevaction, - success_fn=hotplug_init._write_to_cache + success_fn=hotplug_init._write_to_cache, ) # type: UeventHandler wait_times = [1, 3, 5, 10, 30] for attempt, wait in enumerate(wait_times): LOG.debug( - 'subsystem=%s update attempt %s/%s', + "subsystem=%s update attempt %s/%s", subsystem, attempt, - len(wait_times) + len(wait_times), ) try: - LOG.debug('Refreshing metadata') + LOG.debug("Refreshing metadata") event_handler.update_metadata() - LOG.debug('Detecting device in updated metadata') + LOG.debug("Detecting device in updated metadata") event_handler.detect_hotplugged_device() - LOG.debug('Applying config change') + LOG.debug("Applying config change") event_handler.apply() - LOG.debug('Updating cache') + LOG.debug("Updating cache") event_handler.success() break except Exception as e: - LOG.debug('Exception while processing hotplug event. %s', e) + LOG.debug("Exception while processing hotplug event. 
%s", e) time.sleep(wait) last_exception = e else: @@ -238,31 +244,33 @@ def handle_args(name, args): hotplug_init.read_cfg() log.setupLogging(hotplug_init.cfg) - if 'reporting' in hotplug_init.cfg: - reporting.update_configuration(hotplug_init.cfg.get('reporting')) + if "reporting" in hotplug_init.cfg: + reporting.update_configuration(hotplug_init.cfg.get("reporting")) # Logging isn't going to be setup until now LOG.debug( - '%s called with the following arguments: {' - 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}', + "%s called with the following arguments: {" + "hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}", name, args.hotplug_action, args.subsystem, - args.udevaction if 'udevaction' in args else None, - args.devpath if 'devpath' in args else None, + args.udevaction if "udevaction" in args else None, + args.devpath if "devpath" in args else None, ) with hotplug_reporter: try: - if args.hotplug_action == 'query': + if args.hotplug_action == "query": try: datasource = initialize_datasource( - hotplug_init, args.subsystem) + hotplug_init, args.subsystem + ) except DataSourceNotFoundException: print( "Unable to determine hotplug state. No datasource " - "detected") + "detected" + ) sys.exit(1) - print('enabled' if datasource else 'disabled') + print("enabled" if datasource else "disabled") else: handle_hotplug( hotplug_init=hotplug_init, @@ -271,13 +279,13 @@ def handle_args(name, args): udevaction=args.udevaction, ) except Exception: - LOG.exception('Received fatal exception handling hotplug!') + LOG.exception("Received fatal exception handling hotplug!") raise - LOG.debug('Exiting hotplug handler') + LOG.debug("Exiting hotplug handler") reporting.flush_events() -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index 31ade73d..d54b809a 100644 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -5,20 +5,19 @@ """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" import argparse -from datetime import datetime import os import shutil import sys +from datetime import datetime from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.temp_utils import tempdir -from cloudinit.subp import (ProcessExecutionError, subp) -from cloudinit.util import (chdir, copy, ensure_dir, write_file) +from cloudinit.util import chdir, copy, ensure_dir, write_file - -CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] -CLOUDINIT_RUN_DIR = '/run/cloud-init' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"] +CLOUDINIT_RUN_DIR = "/run/cloud-init" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def get_parser(parser=None): @@ -32,26 +31,44 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='collect-logs', - description='Collect and tar all cloud-init debug info') - parser.add_argument('--verbose', '-v', action='count', default=0, - dest='verbosity', help="Be more verbose.") + prog="collect-logs", + description="Collect and tar all cloud-init debug info", + ) + parser.add_argument( + "--verbose", + "-v", + action="count", + default=0, + dest="verbosity", + help="Be more verbose.", + ) parser.add_argument( - "--tarfile", '-t', default='cloud-init.tar.gz', 
- help=('The tarfile to create containing all collected logs.' - ' Default: cloud-init.tar.gz')) + "--tarfile", + "-t", + default="cloud-init.tar.gz", + help=( + "The tarfile to create containing all collected logs." + " Default: cloud-init.tar.gz" + ), + ) parser.add_argument( - "--include-userdata", '-u', default=False, action='store_true', - dest='userdata', help=( - 'Optionally include user-data from {0} which could contain' - ' sensitive information.'.format(USER_DATA_FILE))) + "--include-userdata", + "-u", + default=False, + action="store_true", + dest="userdata", + help=( + "Optionally include user-data from {0} which could contain" + " sensitive information.".format(USER_DATA_FILE) + ), + ) return parser def _copytree_rundir_ignore_files(curdir, files): """Return a list of files to ignore for /run/cloud-init directory""" ignored_files = [ - 'hook-hotplug-cmd', # named pipe for hotplug + "hook-hotplug-cmd", # named pipe for hotplug ] if os.getuid() != 0: # Ignore root-permissioned files @@ -94,52 +111,67 @@ def collect_logs(tarfile, include_userdata, verbosity=0): if include_userdata and os.getuid() != 0: sys.stderr.write( "To include userdata, root user is required." - " Try sudo cloud-init collect-logs\n") + " Try sudo cloud-init collect-logs\n" + ) return 1 tarfile = os.path.abspath(tarfile) - date = datetime.utcnow().date().strftime('%Y-%m-%d') - log_dir = 'cloud-init-logs-{0}'.format(date) - with tempdir(dir='/tmp') as tmp_dir: + date = datetime.utcnow().date().strftime("%Y-%m-%d") + log_dir = "cloud-init-logs-{0}".format(date) + with tempdir(dir="/tmp") as tmp_dir: log_dir = os.path.join(tmp_dir, log_dir) version = _write_command_output_to_file( - ['cloud-init', '--version'], - os.path.join(log_dir, 'version'), - "cloud-init --version", verbosity) + ["cloud-init", "--version"], + os.path.join(log_dir, "version"), + "cloud-init --version", + verbosity, + ) dpkg_ver = _write_command_output_to_file( - ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], - os.path.join(log_dir, 'dpkg-version'), - "dpkg version", verbosity) + ["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], + os.path.join(log_dir, "dpkg-version"), + "dpkg version", + verbosity, + ) if not version: version = dpkg_ver if dpkg_ver else "not-available" _debug("collected cloud-init version: %s\n" % version, 1, verbosity) _write_command_output_to_file( - ['dmesg'], os.path.join(log_dir, 'dmesg.txt'), - "dmesg output", verbosity) + ["dmesg"], + os.path.join(log_dir, "dmesg.txt"), + "dmesg output", + verbosity, + ) _write_command_output_to_file( - ['journalctl', '--boot=0', '-o', 'short-precise'], - os.path.join(log_dir, 'journal.txt'), - "systemd journal of current boot", verbosity) + ["journalctl", "--boot=0", "-o", "short-precise"], + os.path.join(log_dir, "journal.txt"), + "systemd journal of current boot", + verbosity, + ) for log in CLOUDINIT_LOGS: _collect_file(log, log_dir, verbosity) if include_userdata: _collect_file(USER_DATA_FILE, log_dir, verbosity) - run_dir = os.path.join(log_dir, 'run') + run_dir = os.path.join(log_dir, "run") ensure_dir(run_dir) if os.path.exists(CLOUDINIT_RUN_DIR): try: - shutil.copytree(CLOUDINIT_RUN_DIR, - os.path.join(run_dir, 'cloud-init'), - ignore=_copytree_rundir_ignore_files) + shutil.copytree( + CLOUDINIT_RUN_DIR, + os.path.join(run_dir, "cloud-init"), + ignore=_copytree_rundir_ignore_files, + ) except shutil.Error as e: sys.stderr.write("Failed collecting file(s) due to error:\n") - sys.stderr.write(str(e) + '\n') + sys.stderr.write(str(e) + "\n") 
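# Editor's sketch (not part of this patch): shutil.copytree calls the
# ignore= callable once per visited directory with (dirname, names) and
# skips every name it returns, which is how the hotplug named pipe and
# root-only files under /run/cloud-init are filtered out above. A minimal
# standalone equivalent, with a hypothetical skip list:
#
#     import shutil
#
#     def make_ignore(skip_names):
#         def _ignore(curdir, names):
#             return [n for n in names if n in skip_names]
#         return _ignore
#
#     shutil.copytree("/run/cloud-init", "/tmp/cloud-init-copy",
#                     ignore=make_ignore(["hook-hotplug-cmd"]))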
_debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) else: - _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, - verbosity) + _debug( + "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, + 1, + verbosity, + ) with chdir(tmp_dir): - subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) + subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")]) sys.stderr.write("Wrote %s\n" % tarfile) return 0 @@ -152,10 +184,10 @@ def handle_collect_logs_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - return handle_collect_logs_args('collect-logs', parser.parse_args()) + return handle_collect_logs_args("collect-logs", parser.parse_args()) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py index 4e6a5778..a7493c74 100755 --- a/cloudinit/cmd/devel/make_mime.py +++ b/cloudinit/cmd/devel/make_mime.py @@ -9,19 +9,22 @@ from email.mime.text import MIMEText from cloudinit import log from cloudinit.handlers import INCLUSION_TYPES_MAP + from . import addLogHandlerCLI -NAME = 'make-mime' +NAME = "make-mime" LOG = log.getLogger(NAME) -EPILOG = ("Example: make-mime -a config.yaml:cloud-config " - "-a script.sh:x-shellscript > user-data") +EPILOG = ( + "Example: make-mime -a config.yaml:cloud-config " + "-a script.sh:x-shellscript > user-data" +) def file_content_type(text): - """ Return file content type by reading the first line of the input. """ + """Return file content type by reading the first line of the input.""" try: filename, content_type = text.split(":", 1) - return (open(filename, 'r'), filename, content_type.strip()) + return (open(filename, "r"), filename, content_type.strip()) except ValueError as e: raise argparse.ArgumentError( text, "Invalid value for %r" % (text) @@ -41,26 +44,43 @@ def get_parser(parser=None): # update the parser's doc and add an epilog to show an example parser.description = __doc__ parser.epilog = EPILOG - parser.add_argument("-a", "--attach", dest="files", type=file_content_type, - action='append', default=[], - metavar=":", - help=("attach the given file as the specified " - "content-type")) - parser.add_argument('-l', '--list-types', action='store_true', - default=False, - help='List support cloud-init content types.') - parser.add_argument('-f', '--force', action='store_true', - default=False, - help='Ignore unknown content-type warnings') + parser.add_argument( + "-a", + "--attach", + dest="files", + type=file_content_type, + action="append", + default=[], + metavar=":", + help="attach the given file as the specified content-type", + ) + parser.add_argument( + "-l", + "--list-types", + action="store_true", + default=False, + help="List support cloud-init content types.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + help="Ignore unknown content-type warnings", + ) return parser def get_content_types(strip_prefix=False): - """ Return a list of cloud-init supported content types. Optionally - strip out the leading 'text/' of the type if strip_prefix=True. + """Return a list of cloud-init supported content types. Optionally + strip out the leading 'text/' of the type if strip_prefix=True. 
""" - return sorted([ctype.replace("text/", "") if strip_prefix else ctype - for ctype in INCLUSION_TYPES_MAP.values()]) + return sorted( + [ + ctype.replace("text/", "") if strip_prefix else ctype + for ctype in INCLUSION_TYPES_MAP.values() + ] + ) def handle_args(name, args): @@ -82,14 +102,16 @@ def handle_args(name, args): for i, (fh, filename, format_type) in enumerate(args.files): contents = fh.read() sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) - sub_message.add_header('Content-Disposition', - 'attachment; filename="%s"' % (filename)) + sub_message.add_header( + "Content-Disposition", 'attachment; filename="%s"' % (filename) + ) content_type = sub_message.get_content_type().lower() if content_type not in get_content_types(): level = "WARNING" if args.force else "ERROR" - msg = (level + ": content type %r for attachment %s " - "may be incorrect!") % (content_type, i + 1) - sys.stderr.write(msg + '\n') + msg = ( + level + ": content type %r for attachment %s may be incorrect!" + ) % (content_type, i + 1) + sys.stderr.write(msg + "\n") errors.append(msg) sub_messages.append(sub_message) if len(errors) and not args.force: @@ -104,10 +126,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index f4a98e5e..18b1e7ff 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -6,15 +6,13 @@ import json import os import sys -from cloudinit.sources.helpers import openstack +from cloudinit import distros, log, safeyaml +from cloudinit.net import eni, netplan, network_state, networkd, sysconfig from cloudinit.sources import DataSourceAzure as azure from cloudinit.sources import DataSourceOVF as ovf +from cloudinit.sources.helpers import openstack -from cloudinit import distros, safeyaml -from cloudinit.net import eni, netplan, networkd, network_state, sysconfig -from cloudinit import log - -NAME = 'net-convert' +NAME = "net-convert" def get_parser(parser=None): @@ -27,33 +25,59 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) - parser.add_argument("-p", "--network-data", type=open, - metavar="PATH", required=True, - help="The network configuration to read") - parser.add_argument("-k", "--kind", - choices=['eni', 'network_data.json', 'yaml', - 'azure-imds', 'vmware-imc'], - required=True, - help="The format of the given network config") - parser.add_argument("-d", "--directory", - metavar="PATH", - help="directory to place output in", - required=True) - parser.add_argument("-D", "--distro", - choices=[item for sublist in - distros.OSFAMILIES.values() - for item in sublist], - required=True) - parser.add_argument("-m", "--mac", - metavar="name,mac", - action='append', - help="interface name to mac mapping") - parser.add_argument("--debug", action='store_true', - help='enable debug logging to stderr.') - parser.add_argument("-O", "--output-kind", - choices=['eni', 'netplan', 'networkd', 'sysconfig'], - required=True, - help="The network config format to emit") + parser.add_argument( + "-p", + "--network-data", + type=open, + metavar="PATH", + required=True, + help="The network configuration to read", + ) + parser.add_argument( + "-k", + "--kind", + choices=[ + "eni", + "network_data.json", + "yaml", + "azure-imds", + 
"vmware-imc", + ], + required=True, + help="The format of the given network config", + ) + parser.add_argument( + "-d", + "--directory", + metavar="PATH", + help="directory to place output in", + required=True, + ) + parser.add_argument( + "-D", + "--distro", + choices=[ + item for sublist in distros.OSFAMILIES.values() for item in sublist + ], + required=True, + ) + parser.add_argument( + "-m", + "--mac", + metavar="name,mac", + action="append", + help="interface name to mac mapping", + ) + parser.add_argument( + "--debug", action="store_true", help="enable debug logging to stderr." + ) + parser.add_argument( + "-O", + "--output-kind", + choices=["eni", "netplan", "networkd", "sysconfig"], + required=True, + help="The network config format to emit", + ) return parser @@ -81,59 +105,68 @@ def handle_args(name, args): pre_ns = eni.convert_eni_data(net_data) elif args.kind == "yaml": pre_ns = safeyaml.load(net_data) - if 'network' in pre_ns: - pre_ns = pre_ns.get('network') + if "network" in pre_ns: + pre_ns = pre_ns.get("network") if args.debug: - sys.stderr.write('\n'.join( - ["Input YAML", safeyaml.dumps(pre_ns), ""])) - elif args.kind == 'network_data.json': + sys.stderr.write( + "\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""]) + ) + elif args.kind == "network_data.json": pre_ns = openstack.convert_net_json( - json.loads(net_data), known_macs=known_macs) - elif args.kind == 'azure-imds': + json.loads(net_data), known_macs=known_macs + ) + elif args.kind == "azure-imds": pre_ns = azure.parse_network_config(json.loads(net_data)) - elif args.kind == 'vmware-imc': + elif args.kind == "vmware-imc": config = ovf.Config(ovf.ConfigFile(args.network_data.name)) pre_ns = ovf.get_network_config_from_conf(config, False) ns = network_state.parse_net_config_data(pre_ns) if args.debug: - sys.stderr.write('\n'.join( - ["", "Internal State", safeyaml.dumps(ns), ""])) + sys.stderr.write( + "\n".join(["", "Internal State", safeyaml.dumps(ns), ""]) + ) distro_cls = distros.fetch(args.distro) distro = distro_cls(args.distro, {}, None) config = {} if args.output_kind == "eni": r_cls = eni.Renderer - config = distro.renderer_configs.get('eni') + config = distro.renderer_configs.get("eni") elif args.output_kind == "netplan": r_cls = netplan.Renderer - config = distro.renderer_configs.get('netplan') + config = distro.renderer_configs.get("netplan") # don't run netplan generate/apply - config['postcmds'] = False + config["postcmds"] = False # trim leading slash - config['netplan_path'] = config['netplan_path'][1:] + config["netplan_path"] = config["netplan_path"][1:] # enable some netplan features - config['features'] = ['dhcp-use-domains', 'ipv6-mtu'] + config["features"] = ["dhcp-use-domains", "ipv6-mtu"] elif args.output_kind == "networkd": r_cls = networkd.Renderer - config = distro.renderer_configs.get('networkd') + config = distro.renderer_configs.get("networkd") elif args.output_kind == "sysconfig": r_cls = sysconfig.Renderer - config = distro.renderer_configs.get('sysconfig') + config = distro.renderer_configs.get("sysconfig") else: raise RuntimeError("Invalid output_kind") r = r_cls(config=config) - sys.stderr.write(''.join([ - "Read input format '%s' from '%s'.\n" % ( - args.kind, args.network_data.name), - "Wrote output format '%s' to '%s'\n" % ( - args.output_kind, args.directory)]) + "\n") + sys.stderr.write( + "".join( + [ + "Read input format '%s' from '%s'.\n" + % (args.kind, args.network_data.name), + "Wrote output format '%s' to '%s'\n" + % (args.output_kind, args.directory), + ] + ) + + 
"\n" + ) r.render_network_state(network_state=ns, target=args.directory) -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py index be304630..76b16c2e 100644 --- a/cloudinit/cmd/devel/parser.py +++ b/cloudinit/cmd/devel/parser.py @@ -5,33 +5,47 @@ """Define 'devel' subcommand argument parsers to include in cloud-init cmd.""" import argparse + from cloudinit.config import schema -from . import hotplug_hook -from . import net_convert -from . import render -from . import make_mime +from . import hotplug_hook, make_mime, net_convert, render def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-devel', - description='Run development cloud-init tools') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-devel", + description="Run development cloud-init tools", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True subcmds = [ - (hotplug_hook.NAME, hotplug_hook.__doc__, - hotplug_hook.get_parser, hotplug_hook.handle_args), - ('schema', 'Validate cloud-config files for document schema', - schema.get_parser, schema.handle_schema_args), - (net_convert.NAME, net_convert.__doc__, - net_convert.get_parser, net_convert.handle_args), - (render.NAME, render.__doc__, - render.get_parser, render.handle_args), - (make_mime.NAME, make_mime.__doc__, - make_mime.get_parser, make_mime.handle_args), + ( + hotplug_hook.NAME, + hotplug_hook.__doc__, + hotplug_hook.get_parser, + hotplug_hook.handle_args, + ), + ( + "schema", + "Validate cloud-config files for document schema", + schema.get_parser, + schema.handle_schema_args, + ), + ( + net_convert.NAME, + net_convert.__doc__, + net_convert.get_parser, + net_convert.handle_args, + ), + (render.NAME, render.__doc__, render.get_parser, render.handle_args), + ( + make_mime.NAME, + make_mime.__doc__, + make_mime.get_parser, + make_mime.handle_args, + ), ] for (subcmd, helpmsg, get_parser, handler) in subcmds: parser = subparsers.add_parser(subcmd, help=helpmsg) diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py index 1090aa16..2f9a22a8 100755 --- a/cloudinit/cmd/devel/render.py +++ b/cloudinit/cmd/devel/render.py @@ -6,12 +6,13 @@ import argparse import os import sys -from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit import log +from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE + from . import addLogHandlerCLI, read_cfg_paths -NAME = 'render' +NAME = "render" LOG = log.getLogger(NAME) @@ -27,13 +28,24 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument( - 'user_data', type=str, help='Path to the user-data file to render') + "user_data", type=str, help="Path to the user-data file to render" + ) + parser.add_argument( + "-i", + "--instance-data", + type=str, + help=( + "Optional path to instance-data.json file. Defaults to" + " /run/cloud-init/instance-data.json" + ), + ) parser.add_argument( - '-i', '--instance-data', type=str, - help=('Optional path to instance-data.json file. 
Defaults to' - ' /run/cloud-init/instance-data.json')) - parser.add_argument('-d', '--debug', action='store_true', default=False, - help='Add verbose messages during template render') + "-d", + "--debug", + action="store_true", + default=False, + help="Add verbose messages during template render", + ) return parser @@ -54,34 +66,38 @@ def handle_args(name, args): redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: instance_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if not os.path.exists(instance_data_fn): LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - instance_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + instance_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn if not os.path.exists(instance_data_fn): - LOG.error('Missing instance-data.json file: %s', instance_data_fn) + LOG.error("Missing instance-data.json file: %s", instance_data_fn) return 1 try: with open(args.user_data) as stream: user_data = stream.read() except IOError: - LOG.error('Missing user-data file: %s', args.user_data) + LOG.error("Missing user-data file: %s", args.user_data) return 1 try: rendered_payload = render_jinja_payload_from_file( - payload=user_data, payload_fn=args.user_data, + payload=user_data, + payload_fn=args.user_data, instance_data_file=instance_data_fn, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) except RuntimeError as e: - LOG.error('Cannot render from instance data: %s', str(e)) + LOG.error("Cannot render from instance data: %s", str(e)) return 1 if not rendered_payload: - LOG.error('Unable to render user-data file: %s', args.user_data) + LOG.error("Unable to render user-data file: %s", args.user_data) return 1 sys.stdout.write(rendered_payload) return 0 @@ -89,10 +105,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 63186d34..e67edbc3 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -19,6 +19,7 @@ import time import traceback from cloudinit import patcher + patcher.patch_logging() from cloudinit import log as logging @@ -34,8 +35,7 @@ from cloudinit import warnings from cloudinit import reporting from cloudinit.reporting import events -from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, - CLOUD_CONFIG) +from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG from cloudinit import atomic_helper @@ -44,8 +44,10 @@ from cloudinit import dhclient_hook # Welcome message template -WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at " - "{timestamp}. Up {uptime} seconds.") +WELCOME_MSG_TPL = ( + "Cloud-init v. {version} running '{action}' at " + "{timestamp}. Up {uptime} seconds." +) # Module section template MOD_SECTION_TPL = "cloud_%s_modules" @@ -53,9 +55,9 @@ MOD_SECTION_TPL = "cloud_%s_modules" # Frequency shortname to full name # (so users don't have to remember the full name...) 
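# Editor's sketch (not part of this patch): the FREQ_SHORT_NAMES mapping
# below lets the CLI accept short frequency names; resolving one is a plain
# dict lookup, roughly (hypothetical helper, not in main.py):
#
#     def resolve_frequency(name):
#         return FREQ_SHORT_NAMES.get(name, name)
#
#     resolve_frequency("once")   # -> PER_ONCE
#     resolve_frequency("weird")  # unknown names pass through unchanged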
FREQ_SHORT_NAMES = { - 'instance': PER_INSTANCE, - 'always': PER_ALWAYS, - 'once': PER_ONCE, + "instance": PER_INSTANCE, + "always": PER_ALWAYS, + "once": PER_ONCE, } LOG = logging.getLogger() @@ -63,21 +65,20 @@ LOG = logging.getLogger() # Used for when a logger may not be active # and we still want to print exceptions... -def print_exc(msg=''): +def print_exc(msg=""): if msg: sys.stderr.write("%s\n" % (msg)) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") traceback.print_exc(file=sys.stderr) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") def welcome(action, msg=None): if not msg: msg = welcome_format(action) - util.multi_log("%s\n" % (msg), - console=False, stderr=True, log=LOG) + util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG) return msg @@ -86,7 +87,8 @@ def welcome_format(action): version=version.version_string(), uptime=util.uptime(), timestamp=util.time_rfc2822(), - action=action) + action=action, + ) def extract_fns(args): @@ -107,29 +109,31 @@ def run_module_section(mods, action_name, section): (which_ran, failures) = mods.run_section(full_section_name) total_attempted = len(which_ran) + len(failures) if total_attempted == 0: - msg = ("No '%s' modules to run" - " under section '%s'") % (action_name, full_section_name) + msg = "No '%s' modules to run under section '%s'" % ( + action_name, + full_section_name, + ) sys.stderr.write("%s\n" % (msg)) LOG.debug(msg) return [] else: - LOG.debug("Ran %s modules with %s failures", - len(which_ran), len(failures)) + LOG.debug( + "Ran %s modules with %s failures", len(which_ran), len(failures) + ) return failures def apply_reporting_cfg(cfg): - if cfg.get('reporting'): - reporting.update_configuration(cfg.get('reporting')) + if cfg.get("reporting"): + reporting.update_configuration(cfg.get("reporting")) -def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')): +def parse_cmdline_url(cmdline, names=("cloud-config-url", "url")): data = util.keyval_str_to_dict(cmdline) for key in names: if key in data: return key, data[key] - raise KeyError("No keys (%s) found in string '%s'" % - (cmdline, names)) + raise KeyError("No keys (%s) found in string '%s'" % (cmdline, names)) def attempt_cmdline_url(path, network=True, cmdline=None): @@ -163,51 +167,60 @@ def attempt_cmdline_url(path, network=True, cmdline=None): if path_is_local and os.path.exists(path): if network: - m = ("file '%s' existed, possibly from local stage download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from local stage download" + " of command line url '%s'. Not re-writing." % (path, url) + ) level = logging.INFO if path_is_local: level = logging.DEBUG else: - m = ("file '%s' existed, possibly from previous boot download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from previous boot download" + " of command line url '%s'. Not re-writing." 
% (path, url) + ) level = logging.WARN return (level, m) - kwargs = {'url': url, 'timeout': 10, 'retries': 2} + kwargs = {"url": url, "timeout": 10, "retries": 2} if network or path_is_local: level = logging.WARN - kwargs['sec_between'] = 1 + kwargs["sec_between"] = 1 else: level = logging.DEBUG - kwargs['sec_between'] = .1 + kwargs["sec_between"] = 0.1 data = None - header = b'#cloud-config' + header = b"#cloud-config" try: resp = url_helper.read_file_or_url(**kwargs) if resp.ok(): data = resp.contents if not resp.contents.startswith(header): - if cmdline_name == 'cloud-config-url': + if cmdline_name == "cloud-config-url": level = logging.WARN else: level = logging.INFO return ( level, - "contents of '%s' did not start with %s" % (url, header)) + "contents of '%s' did not start with %s" % (url, header), + ) else: - return (level, - "url '%s' returned code %s. Ignoring." % (url, resp.code)) + return ( + level, + "url '%s' returned code %s. Ignoring." % (url, resp.code), + ) except url_helper.UrlError as e: return (level, "retrieving url '%s' failed: %s" % (url, e)) util.write_file(path, data, mode=0o600) - return (logging.INFO, - "wrote cloud-config data from %s='%s' to %s" % - (cmdline_name, url, path)) + return ( + logging.INFO, + "wrote cloud-config data from %s='%s' to %s" + % (cmdline_name, url, path), + ) def purge_cache_on_python_version_change(init): @@ -216,31 +229,32 @@ def purge_cache_on_python_version_change(init): There could be changes not represented in our cache (obj.pkl) after we upgrade to a new version of python, so at that point clear the cache """ - current_python_version = '%d.%d' % ( - sys.version_info.major, sys.version_info.minor + current_python_version = "%d.%d" % ( + sys.version_info.major, + sys.version_info.minor, ) python_version_path = os.path.join( - init.paths.get_cpath('data'), 'python-version' + init.paths.get_cpath("data"), "python-version" ) if os.path.exists(python_version_path): cached_python_version = open(python_version_path).read() # The Python version has changed out from under us, anything that was # pickled previously is likely useless due to API changes. if cached_python_version != current_python_version: - LOG.debug('Python version change detected. Purging cache') + LOG.debug("Python version change detected. Purging cache") init.purge_cache(True) util.write_file(python_version_path, current_python_version) else: - if os.path.exists(init.paths.get_ipath_cur('obj_pkl')): + if os.path.exists(init.paths.get_ipath_cur("obj_pkl")): LOG.info( - 'Writing python-version file. ' - 'Cache compatibility status is currently unknown.' + "Writing python-version file. " + "Cache compatibility status is currently unknown." ) util.write_file(python_version_path, current_python_version) def _should_bring_up_interfaces(init, args): - if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'): + if util.get_cfg_option_bool(init.cfg, "disable_network_activation"): return False return not args.local @@ -250,10 +264,14 @@ def main_init(name, args): if args.local: deps = [sources.DEP_FILESYSTEM] - early_logs = [attempt_cmdline_url( - path=os.path.join("%s.d" % CLOUD_CONFIG, - "91_kernel_cmdline_url.cfg"), - network=not args.local)] + early_logs = [ + attempt_cmdline_url( + path=os.path.join( + "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg" + ), + network=not args.local, + ) + ] # Cloud-init 'init' stage is broken up into the following sub-stages # 1. 
Ensure that the init object fetches its config without errors @@ -289,8 +307,9 @@ def main_init(name, args): early_logs.append((logging.WARN, msg)) if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(init.cfg) apply_reporting_cfg(init.cfg) @@ -317,9 +336,11 @@ def main_init(name, args): if mode == sources.DSMODE_NETWORK: existing = "trust" sys.stderr.write("%s\n" % (netinfo.debug_info())) - LOG.debug(("Checking to see if files that we need already" - " exist from a previous run that would allow us" - " to stop early.")) + LOG.debug( + "Checking to see if files that we need already" + " exist from a previous run that would allow us" + " to stop early." + ) # no-net is written by upstart cloud-init-nonet when network failed # to come up stop_files = [ @@ -331,15 +352,18 @@ def main_init(name, args): existing_files.append(fn) if existing_files: - LOG.debug("[%s] Exiting. stop file %s existed", - mode, existing_files) + LOG.debug( + "[%s] Exiting. stop file %s existed", mode, existing_files + ) return (None, []) else: - LOG.debug("Execution continuing, no previous run detected that" - " would allow us to stop early.") + LOG.debug( + "Execution continuing, no previous run detected that" + " would allow us to stop early." + ) else: existing = "check" - mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False) + mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False) if mcfg: LOG.debug("manual cache clean set from config") existing = "trust" @@ -360,8 +384,11 @@ def main_init(name, args): # if in network mode, and the datasource is local # then work was done at that stage. if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s in local mode", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s in local mode", + mode, + init.datasource, + ) return (None, []) except sources.DataSourceNotFoundException: # In the case of 'cloud-init init' without '--local' it is a bit @@ -371,8 +398,9 @@ def main_init(name, args): if mode == sources.DSMODE_LOCAL: LOG.debug("No local datasource found") else: - util.logexc(LOG, ("No instance datasource found!" - " Likely bad things to come!")) + util.logexc( + LOG, "No instance datasource found! Likely bad things to come!" + ) if not args.force: init.apply_network_config(bring_up=bring_up_interfaces) LOG.debug("[%s] Exiting without datasource", mode) @@ -381,46 +409,60 @@ def main_init(name, args): else: return (None, ["No instance datasource found."]) else: - LOG.debug("[%s] barreling on in force mode without datasource", - mode) + LOG.debug( + "[%s] barreling on in force mode without datasource", mode + ) _maybe_persist_instance_data(init) # Stage 6 iid = init.instancify() - LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", - mode, name, iid, init.is_new_instance()) + LOG.debug( + "[%s] %s will now be targeting instance id: %s. new=%s", + mode, + name, + iid, + init.is_new_instance(), + ) if mode == sources.DSMODE_LOCAL: # Before network comes up, set any configured hostname to allow # dhcp clients to advertize this hostname to any DDNS services # LP: #1746455. 
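# Editor's note: the local-stage hostname call below is best-effort. As the
# _maybe_set_hostname() hunk later in this patch shows, SetHostnameError is
# caught and logged so the named retry stage can try again, roughly:
#
#     try:
#         cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None)
#     except cc_set_hostname.SetHostnameError as e:
#         LOG.debug("Failed setting hostname in %s stage. Will retry in"
#                   " %s stage. Error: %s.", stage, retry_stage, str(e))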
- _maybe_set_hostname(init, stage='local', retry_stage='network') + _maybe_set_hostname(init, stage="local", retry_stage="network") init.apply_network_config(bring_up=bring_up_interfaces) if mode == sources.DSMODE_LOCAL: if init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s not in local mode.", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s not in local mode.", + mode, + init.datasource, + ) return (init.datasource, []) else: - LOG.debug("[%s] %s is in local mode, will apply init modules now.", - mode, init.datasource) + LOG.debug( + "[%s] %s is in local mode, will apply init modules now.", + mode, + init.datasource, + ) # Give the datasource a chance to use network resources. # This is used on Azure to communicate with the fabric over network. init.setup_datasource() # update fully realizes user-data (pulling in #include if necessary) init.update() - _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config') + _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config") # Stage 7 try: # Attempt to consume the data per instance. # This may run user-data handlers and/or perform # url downloads and such as needed. - (ran, _results) = init.cloudify().run('consume_data', - init.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + (ran, _results) = init.cloudify().run( + "consume_data", + init.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) if not ran: # Just consume anything that is set to run per-always # if nothing ran in the per-instance code @@ -442,8 +484,7 @@ def main_init(name, args): errfmt_orig = errfmt (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name) if outfmt_orig != outfmt or errfmt_orig != errfmt: - LOG.warning("Stdout, stderr changing to (%s, %s)", - outfmt, errfmt) + LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt) (outfmt, errfmt) = util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to re-adjust output redirection!") @@ -459,11 +500,11 @@ def main_init(name, args): def di_report_warn(datasource, cfg): - if 'di_report' not in cfg: + if "di_report" not in cfg: LOG.debug("no di_report found in config.") return - dicfg = cfg['di_report'] + dicfg = cfg["di_report"] if dicfg is None: # ds-identify may write 'di_report:\n #comment\n' # which reads as {'di_report': None} @@ -474,7 +515,7 @@ def di_report_warn(datasource, cfg): LOG.warning("di_report config not a dictionary: %s", dicfg) return - dslist = dicfg.get('datasource_list') + dslist = dicfg.get("datasource_list") if dslist is None: LOG.warning("no 'datasource_list' found in di_report.") return @@ -486,18 +527,26 @@ def di_report_warn(datasource, cfg): # where Name is the thing that shows up in datasource_list. 
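# Editor's sketch (not part of this patch) of what the next lines compute:
# reduce a datasource module path to the short name used in
# datasource_list; sources.DS_PREFIX is "DataSource" upstream.
#
#     mod = "cloudinit.sources.DataSourceEc2"   # hypothetical example
#     short = mod.rpartition(".")[2]            # "DataSourceEc2"
#     if short.startswith("DataSource"):
#         short = short[len("DataSource"):]     # "Ec2"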
modname = datasource.__module__.rpartition(".")[2] if modname.startswith(sources.DS_PREFIX): - modname = modname[len(sources.DS_PREFIX):] + modname = modname[len(sources.DS_PREFIX) :] else: - LOG.warning("Datasource '%s' came from unexpected module '%s'.", - datasource, modname) + LOG.warning( + "Datasource '%s' came from unexpected module '%s'.", + datasource, + modname, + ) if modname in dslist: - LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s", - datasource, modname, dslist) + LOG.debug( + "used datasource '%s' from '%s' was in di_report's list: %s", + datasource, + modname, + dslist, + ) return - warnings.show_warning('dsid_missing_source', cfg, - source=modname, dslist=str(dslist)) + warnings.show_warning( + "dsid_missing_source", cfg, source=modname, dslist=str(dslist) + ) def main_modules(action_name, args): @@ -521,8 +570,10 @@ def main_modules(action_name, args): init.fetch(existing="trust") except sources.DataSourceNotFoundException: # There was no datasource found, theres nothing to do - msg = ('Can not apply stage %s, no datasource found! Likely bad ' - 'things to come!' % name) + msg = ( + "Can not apply stage %s, no datasource found! Likely bad " + "things to come!" % name + ) util.logexc(LOG, msg) print_exc(msg) if not args.force: @@ -539,8 +590,9 @@ def main_modules(action_name, args): util.logexc(LOG, "Failed to setup output redirection!") if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -573,10 +625,12 @@ def main_single(name, args): # There was no datasource found, # that might be bad (or ok) depending on # the module being ran (so continue on) - util.logexc(LOG, ("Failed to fetch your datasource," - " likely bad things to come!")) - print_exc(("Failed to fetch your datasource," - " likely bad things to come!")) + util.logexc( + LOG, "Failed to fetch your datasource, likely bad things to come!" + ) + print_exc( + "Failed to fetch your datasource, likely bad things to come!" 
+ ) if not args.force: return 1 _maybe_persist_instance_data(init) @@ -598,8 +652,9 @@ def main_single(name, args): util.logexc(LOG, "Failed to setup output redirection!") if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -608,9 +663,7 @@ def main_single(name, args): welcome(name, msg=w_msg) # Stage 5 - (which_ran, failures) = mods.run_single(mod_name, - mod_args, - mod_freq) + (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq) if failures: LOG.warning("Ran %s but it failed!", mod_name) return 1 @@ -633,7 +686,12 @@ def status_wrapper(name, args, data_d=None, link_d=None): result_path = os.path.join(data_d, "result.json") result_link = os.path.join(link_d, "result.json") - util.ensure_dirs((data_d, link_d,)) + util.ensure_dirs( + ( + data_d, + link_d, + ) + ) (_name, functor) = args.action @@ -647,14 +705,20 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: %s" % name) - modes = ('init', 'init-local', 'modules-init', 'modules-config', - 'modules-final') + modes = ( + "init", + "init-local", + "modules-init", + "modules-config", + "modules-final", + ) if mode not in modes: raise ValueError( - "Invalid cloud init mode specified '{0}'".format(mode)) + "Invalid cloud init mode specified '{0}'".format(mode) + ) status = None - if mode == 'init-local': + if mode == "init-local": for f in (status_link, result_link, status_path, result_path): util.del_file(f) else: @@ -664,45 +728,46 @@ def status_wrapper(name, args, data_d=None, link_d=None): pass nullstatus = { - 'errors': [], - 'start': None, - 'finished': None, + "errors": [], + "start": None, + "finished": None, } if status is None: - status = {'v1': {}} - status['v1']['datasource'] = None + status = {"v1": {}} + status["v1"]["datasource"] = None for m in modes: - if m not in status['v1']: - status['v1'][m] = nullstatus.copy() + if m not in status["v1"]: + status["v1"][m] = nullstatus.copy() - v1 = status['v1'] - v1['stage'] = mode - v1[mode]['start'] = time.time() + v1 = status["v1"] + v1["stage"] = mode + v1[mode]["start"] = time.time() atomic_helper.write_json(status_path, status) - util.sym_link(os.path.relpath(status_path, link_d), status_link, - force=True) + util.sym_link( + os.path.relpath(status_path, link_d), status_link, force=True + ) try: ret = functor(name, args) - if mode in ('init', 'init-local'): + if mode in ("init", "init-local"): (datasource, errors) = ret if datasource is not None: - v1['datasource'] = str(datasource) + v1["datasource"] = str(datasource) else: errors = ret - v1[mode]['errors'] = [str(e) for e in errors] + v1[mode]["errors"] = [str(e) for e in errors] except Exception as e: util.logexc(LOG, "failed stage %s", mode) print_exc("failed run of stage %s" % mode) - v1[mode]['errors'] = [str(e)] + v1[mode]["errors"] = [str(e)] - v1[mode]['finished'] = time.time() - v1['stage'] = None + v1[mode]["finished"] = time.time() + v1["stage"] = None atomic_helper.write_json(status_path, status) @@ -710,23 +775,26 @@ def status_wrapper(name, args, data_d=None, link_d=None): # write the 'finished' file errors = [] for m in modes: - if v1[m]['errors']: - errors.extend(v1[m].get('errors', [])) + if v1[m]["errors"]: + errors.extend(v1[m].get("errors", [])) atomic_helper.write_json( - 
result_path, {'v1': {'datasource': v1['datasource'], - 'errors': errors}}) - util.sym_link(os.path.relpath(result_path, link_d), result_link, - force=True) + result_path, + {"v1": {"datasource": v1["datasource"], "errors": errors}}, + ) + util.sym_link( + os.path.relpath(result_path, link_d), result_link, force=True + ) - return len(v1[mode]['errors']) + return len(v1[mode]["errors"]) def _maybe_persist_instance_data(init): """Write instance-data.json file if absent and datasource is restored.""" if init.ds_restored: instance_data_file = os.path.join( - init.paths.run_dir, sources.INSTANCE_JSON_FILE) + init.paths.run_dir, sources.INSTANCE_JSON_FILE + ) if not os.path.exists(instance_data_file): init.datasource.persist_instance_data() @@ -739,18 +807,23 @@ def _maybe_set_hostname(init, stage, retry_stage): """ cloud = init.cloudify() (hostname, _fqdn) = util.get_hostname_fqdn( - init.cfg, cloud, metadata_only=True) + init.cfg, cloud, metadata_only=True + ) if hostname: # meta-data or user-data hostname content try: - cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) + cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None) except cc_set_hostname.SetHostnameError as e: LOG.debug( - 'Failed setting hostname in %s stage. Will' - ' retry in %s stage. Error: %s.', stage, retry_stage, str(e)) + "Failed setting hostname in %s stage. Will" + " retry in %s stage. Error: %s.", + stage, + retry_stage, + str(e), + ) def main_features(name, args): - sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') + sys.stdout.write("\n".join(sorted(version.FEATURES)) + "\n") def main(sysv_args=None): @@ -760,129 +833,182 @@ def main(sysv_args=None): sysv_args = sysv_args[1:] # Top level args - parser.add_argument('--version', '-v', action='version', - version='%(prog)s ' + (version.version_string())) - parser.add_argument('--file', '-f', action='append', - dest='files', - help=('additional yaml configuration' - ' files to use'), - type=argparse.FileType('rb')) - parser.add_argument('--debug', '-d', action='store_true', - help=('show additional pre-action' - ' logging (default: %(default)s)'), - default=False) - parser.add_argument('--force', action='store_true', - help=('force running even if no datasource is' - ' found (use at your own risk)'), - dest='force', - default=False) + parser.add_argument( + "--version", + "-v", + action="version", + version="%(prog)s " + (version.version_string()), + ) + parser.add_argument( + "--file", + "-f", + action="append", + dest="files", + help="additional yaml configuration files to use", + type=argparse.FileType("rb"), + ) + parser.add_argument( + "--debug", + "-d", + action="store_true", + help="show additional pre-action logging (default: %(default)s)", + default=False, + ) + parser.add_argument( + "--force", + action="store_true", + help=( + "force running even if no datasource is" + " found (use at your own risk)" + ), + dest="force", + default=False, + ) parser.set_defaults(reporter=None) - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True # Each action and its sub-options (if any) - parser_init = subparsers.add_parser('init', - help=('initializes cloud-init and' - ' performs initial modules')) - parser_init.add_argument("--local", '-l', action='store_true', - help="start in local mode (default: %(default)s)", - default=False) + parser_init = subparsers.add_parser( + "init", help="initializes cloud-init and 
performs initial modules" + ) + parser_init.add_argument( + "--local", + "-l", + action="store_true", + help="start in local mode (default: %(default)s)", + default=False, + ) # This is used so that we can know which action is selected + # the functor to use to run this subcommand - parser_init.set_defaults(action=('init', main_init)) + parser_init.set_defaults(action=("init", main_init)) # These settings are used for the 'config' and 'final' stages - parser_mod = subparsers.add_parser('modules', - help=('activates modules using ' - 'a given configuration key')) - parser_mod.add_argument("--mode", '-m', action='store', - help=("module configuration name " - "to use (default: %(default)s)"), - default='config', - choices=('init', 'config', 'final')) - parser_mod.set_defaults(action=('modules', main_modules)) + parser_mod = subparsers.add_parser( + "modules", help="activates modules using a given configuration key" + ) + parser_mod.add_argument( + "--mode", + "-m", + action="store", + help="module configuration name to use (default: %(default)s)", + default="config", + choices=("init", "config", "final"), + ) + parser_mod.set_defaults(action=("modules", main_modules)) # This subcommand allows you to run a single module - parser_single = subparsers.add_parser('single', - help=('run a single module ')) - parser_single.add_argument("--name", '-n', action="store", - help="module name to run", - required=True) - parser_single.add_argument("--frequency", action="store", - help=("frequency of the module"), - required=False, - choices=list(FREQ_SHORT_NAMES.keys())) - parser_single.add_argument("--report", action="store_true", - help="enable reporting", - required=False) - parser_single.add_argument("module_args", nargs="*", - metavar='argument', - help=('any additional arguments to' - ' pass to this module')) - parser_single.set_defaults(action=('single', main_single)) + parser_single = subparsers.add_parser( + "single", help="run a single module " + ) + parser_single.add_argument( + "--name", + "-n", + action="store", + help="module name to run", + required=True, + ) + parser_single.add_argument( + "--frequency", + action="store", + help="frequency of the module", + required=False, + choices=list(FREQ_SHORT_NAMES.keys()), + ) + parser_single.add_argument( + "--report", + action="store_true", + help="enable reporting", + required=False, + ) + parser_single.add_argument( + "module_args", + nargs="*", + metavar="argument", + help="any additional arguments to pass to this module", + ) + parser_single.set_defaults(action=("single", main_single)) parser_query = subparsers.add_parser( - 'query', - help='Query standardized instance metadata from the command line.') + "query", + help="Query standardized instance metadata from the command line.", + ) parser_dhclient = subparsers.add_parser( - dhclient_hook.NAME, help=dhclient_hook.__doc__) + dhclient_hook.NAME, help=dhclient_hook.__doc__ + ) dhclient_hook.get_parser(parser_dhclient) - parser_features = subparsers.add_parser('features', - help=('list defined features')) - parser_features.set_defaults(action=('features', main_features)) + parser_features = subparsers.add_parser( + "features", help="list defined features" + ) + parser_features.set_defaults(action=("features", main_features)) parser_analyze = subparsers.add_parser( - 'analyze', help='Devel tool: Analyze cloud-init logs and data') + "analyze", help="Devel tool: Analyze cloud-init logs and data" + ) - parser_devel = subparsers.add_parser( - 'devel', help='Run development tools') + parser_devel = 
subparsers.add_parser("devel", help="Run development tools") parser_collect_logs = subparsers.add_parser( - 'collect-logs', help='Collect and tar all cloud-init debug info') + "collect-logs", help="Collect and tar all cloud-init debug info" + ) parser_clean = subparsers.add_parser( - 'clean', help='Remove logs and artifacts so cloud-init can re-run.') + "clean", help="Remove logs and artifacts so cloud-init can re-run." + ) parser_status = subparsers.add_parser( - 'status', help='Report cloud-init status or wait on completion.') + "status", help="Report cloud-init status or wait on completion." + ) if sysv_args: # Only load subparsers if subcommand is specified to avoid load cost - if sysv_args[0] == 'analyze': + if sysv_args[0] == "analyze": from cloudinit.analyze.__main__ import get_parser as analyze_parser + # Construct analyze subcommand parser analyze_parser(parser_analyze) - elif sysv_args[0] == 'devel': + elif sysv_args[0] == "devel": from cloudinit.cmd.devel.parser import get_parser as devel_parser + # Construct devel subcommand parser devel_parser(parser_devel) - elif sysv_args[0] == 'collect-logs': + elif sysv_args[0] == "collect-logs": from cloudinit.cmd.devel.logs import ( - get_parser as logs_parser, handle_collect_logs_args) + get_parser as logs_parser, + handle_collect_logs_args, + ) + logs_parser(parser_collect_logs) parser_collect_logs.set_defaults( - action=('collect-logs', handle_collect_logs_args)) - elif sysv_args[0] == 'clean': + action=("collect-logs", handle_collect_logs_args) + ) + elif sysv_args[0] == "clean": from cloudinit.cmd.clean import ( - get_parser as clean_parser, handle_clean_args) + get_parser as clean_parser, + handle_clean_args, + ) + clean_parser(parser_clean) - parser_clean.set_defaults( - action=('clean', handle_clean_args)) - elif sysv_args[0] == 'query': + parser_clean.set_defaults(action=("clean", handle_clean_args)) + elif sysv_args[0] == "query": from cloudinit.cmd.query import ( - get_parser as query_parser, handle_args as handle_query_args) + get_parser as query_parser, + handle_args as handle_query_args, + ) + query_parser(parser_query) - parser_query.set_defaults( - action=('render', handle_query_args)) - elif sysv_args[0] == 'status': + parser_query.set_defaults(action=("render", handle_query_args)) + elif sysv_args[0] == "status": from cloudinit.cmd.status import ( - get_parser as status_parser, handle_status_args) + get_parser as status_parser, + handle_status_args, + ) + status_parser(parser_status) - parser_status.set_defaults( - action=('status', handle_status_args)) + parser_status.set_defaults(action=("status", handle_status_args)) args = parser.parse_args(args=sysv_args) @@ -906,14 +1032,20 @@ def main(sysv_args=None): if args.local: rname, rdesc = ("init-local", "searching for local datasources") else: - rname, rdesc = ("init-network", - "searching for network datasources") + rname, rdesc = ( + "init-network", + "searching for network datasources", + ) elif name == "modules": - rname, rdesc = ("modules-%s" % args.mode, - "running modules for %s" % args.mode) + rname, rdesc = ( + "modules-%s" % args.mode, + "running modules for %s" % args.mode, + ) elif name == "single": - rname, rdesc = ("single/%s" % args.name, - "running single module %s" % args.name) + rname, rdesc = ( + "single/%s" % args.name, + "running single module %s" % args.name, + ) report_on = args.report else: rname = name @@ -921,19 +1053,24 @@ def main(sysv_args=None): report_on = False args.reporter = events.ReportEventStack( - rname, rdesc, 
reporting_enabled=report_on) + rname, rdesc, reporting_enabled=report_on + ) with args.reporter: retval = util.log_time( - logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, - get_uptime=True, func=functor, args=(name, args)) + logfunc=LOG.debug, + msg="cloud-init mode '%s'" % name, + get_uptime=True, + func=functor, + args=(name, args), + ) reporting.flush_events() return retval -if __name__ == '__main__': - if 'TZ' not in os.environ: - os.environ['TZ'] = ":/etc/localtime" +if __name__ == "__main__": + if "TZ" not in os.environ: + os.environ["TZ"] = ":/etc/localtime" return_value = main(sys.argv) if return_value: sys.exit(return_value) diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index e53cd855..46f17699 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -14,22 +14,24 @@ output; if this fails, they are treated as binary. """ import argparse -from errno import EACCES import os import sys +from errno import EACCES +from cloudinit import log, util +from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths from cloudinit.handlers.jinja_template import ( convert_jinja_instance_data, get_jinja_variable_alias, - render_jinja_payload + render_jinja_payload, ) -from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths -from cloudinit import log from cloudinit.sources import ( - INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE) -from cloudinit import util + INSTANCE_JSON_FILE, + INSTANCE_JSON_SENSITIVE_FILE, + REDACT_SENSITIVE_VALUE, +) -NAME = 'query' +NAME = "query" LOG = log.getLogger(NAME) @@ -43,41 +45,79 @@ def get_parser(parser=None): @returns: ArgumentParser with proper argument configuration. """ if not parser: - parser = argparse.ArgumentParser( - prog=NAME, description=__doc__) + parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument( - '-d', '--debug', action='store_true', default=False, - help='Add verbose messages during template render') + "-d", + "--debug", + action="store_true", + default=False, + help="Add verbose messages during template render", + ) parser.add_argument( - '-i', '--instance-data', type=str, - help=('Path to instance-data.json file. Default is /run/cloud-init/%s' - % INSTANCE_JSON_FILE)) + "-i", + "--instance-data", + type=str, + help="Path to instance-data.json file. Default is /run/cloud-init/%s" + % INSTANCE_JSON_FILE, + ) parser.add_argument( - '-l', '--list-keys', action='store_true', default=False, - help=('List query keys available at the provided instance-data' - ' <varname>.')) + "-l", + "--list-keys", + action="store_true", + default=False, + help=( + "List query keys available at the provided instance-data" + " <varname>." + ), + ) parser.add_argument( - '-u', '--user-data', type=str, - help=('Path to user-data file. Default is' - ' /var/lib/cloud/instance/user-data.txt')) + "-u", + "--user-data", + type=str, + help=( + "Path to user-data file. Default is" + " /var/lib/cloud/instance/user-data.txt" + ), + ) parser.add_argument( - '-v', '--vendor-data', type=str, - help=('Path to vendor-data file. Default is' - ' /var/lib/cloud/instance/vendor-data.txt')) + "-v", + "--vendor-data", + type=str, + help=( + "Path to vendor-data file. Default is" + " /var/lib/cloud/instance/vendor-data.txt" + ), + ) parser.add_argument( - 'varname', type=str, nargs='?', - help=('A dot-delimited specific variable to query from' - ' instance-data. For example: v1.local_hostname. If the' - ' value is not JSON serializable, it will be base64-encoded and' - ' will contain the prefix "ci-b64:". 
')) + "varname", + type=str, + nargs="?", + help=( + "A dot-delimited specific variable to query from" + " instance-data. For example: v1.local_hostname. If the" + " value is not JSON serializable, it will be base64-encoded and" + ' will contain the prefix "ci-b64:". ' + ), + ) parser.add_argument( - '-a', '--all', action='store_true', default=False, dest='dump_all', - help='Dump all available instance-data') + "-a", + "--all", + action="store_true", + default=False, + dest="dump_all", + help="Dump all available instance-data", + ) parser.add_argument( - '-f', '--format', type=str, dest='format', - help=('Optionally specify a custom output format string. Any' - ' instance-data variable can be specified between double-curly' - ' braces. For example -f "{{ v2.cloud_name }}"')) + "-f", + "--format", + type=str, + dest="format", + help=( + "Optionally specify a custom output format string. Any" + " instance-data variable can be specified between double-curly" + ' braces. For example -f "{{ v2.cloud_name }}"' + ), + ) return parser @@ -91,7 +131,7 @@ def load_userdata(ud_file_path): """ bdata = util.load_file(ud_file_path, decode=False) try: - return bdata.decode('utf-8') + return bdata.decode("utf-8") except UnicodeDecodeError: return util.decomp_gzip(bdata, quiet=False, decode=True) @@ -118,13 +158,15 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: sensitive_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if os.path.exists(sensitive_data_fn): instance_data_fn = sensitive_data_fn else: LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - sensitive_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + sensitive_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: @@ -132,11 +174,11 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if user_data: user_data_fn = user_data else: - user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') + user_data_fn = os.path.join(paths.instance_link, "user-data.txt") if vendor_data: vendor_data_fn = vendor_data else: - vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') + vendor_data_fn = os.path.join(paths.instance_link, "vendor-data.txt") try: instance_json = util.load_file(instance_data_fn) @@ -144,24 +186,30 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if e.errno == EACCES: LOG.error("No read permission on '%s'. 
Try sudo", instance_data_fn) else: - LOG.error('Missing instance-data file: %s', instance_data_fn) + LOG.error("Missing instance-data file: %s", instance_data_fn) raise instance_data = util.load_json(instance_json) if uid != 0: - instance_data['userdata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn)) - instance_data['vendordata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn)) + instance_data["userdata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + user_data_fn, + ) + instance_data["vendordata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + vendor_data_fn, + ) else: - instance_data['userdata'] = load_userdata(user_data_fn) - instance_data['vendordata'] = load_userdata(vendor_data_fn) + instance_data["userdata"] = load_userdata(user_data_fn) + instance_data["vendordata"] = load_userdata(vendor_data_fn) return instance_data def _find_instance_data_leaf_by_varname_path( - jinja_vars_without_aliases: dict, jinja_vars_with_aliases: dict, - varname: str, list_keys: bool + jinja_vars_without_aliases: dict, + jinja_vars_with_aliases: dict, + varname: str, + list_keys: bool, ): """Return the value of the dot-delimited varname path in instance-data @@ -174,7 +222,7 @@ def _find_instance_data_leaf_by_varname_path( """ walked_key_path = "" response = jinja_vars_without_aliases - for key_path_part in varname.split('.'): + for key_path_part in varname.split("."): try: # Walk key path using complete aliases dict, yet response # should only contain jinja_without_aliases @@ -205,8 +253,9 @@ def handle_args(name, args): addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) if not any([args.list_keys, args.varname, args.format, args.dump_all]): LOG.error( - 'Expected one of the options: --all, --format,' - ' --list-keys or varname') + "Expected one of the options: --all, --format," + " --list-keys or varname" + ) get_parser().print_help() return 1 try: @@ -216,11 +265,13 @@ def handle_args(name, args): except (IOError, OSError): return 1 if args.format: - payload = '## template: jinja\n{fmt}'.format(fmt=args.format) + payload = "## template: jinja\n{fmt}".format(fmt=args.format) rendered_payload = render_jinja_payload( - payload=payload, payload_fn='query commandline', + payload=payload, + payload_fn="query commandline", instance_data=instance_data, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) if rendered_payload: print(rendered_payload) return 0 @@ -240,7 +291,7 @@ def handle_args(name, args): jinja_vars_without_aliases=response, jinja_vars_with_aliases=jinja_vars_with_aliases, varname=args.varname, - list_keys=args.list_keys + list_keys=args.list_keys, ) except (KeyError, ValueError) as e: LOG.error(e) @@ -248,11 +299,10 @@ def handle_args(name, args): if args.list_keys: if not isinstance(response, dict): LOG.error( - "--list-keys provided but '%s' is not a dict", - args.varname + "--list-keys provided but '%s' is not a dict", args.varname ) return 1 - response = '\n'.join(sorted(response.keys())) + response = "\n".join(sorted(response.keys())) if not isinstance(response, str): response = util.json_dumps(response) print(response) @@ -265,7 +315,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index ea79a85b..cff16c34 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -7,20 +7,20 @@ import argparse import os import sys -from time import 
gmtime, strftime, sleep +from time import gmtime, sleep, strftime from cloudinit.distros import uses_systemd from cloudinit.stages import Init from cloudinit.util import get_cmdline, load_file, load_json -CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' +CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled" # customer visible status messages -STATUS_ENABLED_NOT_RUN = 'not run' -STATUS_RUNNING = 'running' -STATUS_DONE = 'done' -STATUS_ERROR = 'error' -STATUS_DISABLED = 'disabled' +STATUS_ENABLED_NOT_RUN = "not run" +STATUS_RUNNING = "running" +STATUS_DONE = "done" +STATUS_ERROR = "error" +STATUS_DISABLED = "disabled" def get_parser(parser=None): @@ -34,15 +34,25 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='status', - description='Report run status of cloud init') + prog="status", description="Report run status of cloud init" + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help=('Report long format of statuses including run stage name and' - ' error messages')) + "-l", + "--long", + action="store_true", + default=False, + help=( + "Report long format of statuses including run stage name and" + " error messages" + ), + ) parser.add_argument( - '-w', '--wait', action='store_true', default=False, - help='Block waiting on cloud-init to complete') + "-w", + "--wait", + action="store_true", + default=False, + help="Block waiting on cloud-init to complete", + ) return parser @@ -55,18 +65,18 @@ def handle_status_args(name, args): status, status_detail, time = _get_status_details(init.paths) if args.wait: while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING): - sys.stdout.write('.') + sys.stdout.write(".") sys.stdout.flush() status, status_detail, time = _get_status_details(init.paths) sleep(0.25) - sys.stdout.write('\n') + sys.stdout.write("\n") if args.long: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) if time: - print('time: {0}'.format(time)) - print('detail:\n{0}'.format(status_detail)) + print("time: {0}".format(time)) + print("detail:\n{0}".format(status_detail)) else: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) return 1 if status == STATUS_ERROR else 0 @@ -81,20 +91,20 @@ def _is_cloudinit_disabled(disable_file, paths): is_disabled = False cmdline_parts = get_cmdline().split() if not uses_systemd(): - reason = 'Cloud-init enabled on sysvinit' - elif 'cloud-init=enabled' in cmdline_parts: - reason = 'Cloud-init enabled by kernel command line cloud-init=enabled' + reason = "Cloud-init enabled on sysvinit" + elif "cloud-init=enabled" in cmdline_parts: + reason = "Cloud-init enabled by kernel command line cloud-init=enabled" elif os.path.exists(disable_file): is_disabled = True - reason = 'Cloud-init disabled by {0}'.format(disable_file) - elif 'cloud-init=disabled' in cmdline_parts: + reason = "Cloud-init disabled by {0}".format(disable_file) + elif "cloud-init=disabled" in cmdline_parts: is_disabled = True - reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled' - elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): + reason = "Cloud-init disabled by kernel parameter cloud-init=disabled" + elif not os.path.exists(os.path.join(paths.run_dir, "enabled")): is_disabled = True - reason = 'Cloud-init disabled by cloud-init-generator' + reason = "Cloud-init disabled by cloud-init-generator" else: - reason = 'Cloud-init enabled by systemd cloud-init-generator' + reason = "Cloud-init enabled by systemd 
cloud-init-generator" return (is_disabled, reason) @@ -106,34 +116,35 @@ def _get_status_details(paths): Values are obtained from parsing paths.run_dir/status.json. """ status = STATUS_ENABLED_NOT_RUN - status_detail = '' + status_detail = "" status_v1 = {} - status_file = os.path.join(paths.run_dir, 'status.json') - result_file = os.path.join(paths.run_dir, 'result.json') + status_file = os.path.join(paths.run_dir, "status.json") + result_file = os.path.join(paths.run_dir, "result.json") (is_disabled, reason) = _is_cloudinit_disabled( - CLOUDINIT_DISABLED_FILE, paths) + CLOUDINIT_DISABLED_FILE, paths + ) if is_disabled: status = STATUS_DISABLED status_detail = reason if os.path.exists(status_file): if not os.path.exists(result_file): status = STATUS_RUNNING - status_v1 = load_json(load_file(status_file)).get('v1', {}) + status_v1 = load_json(load_file(status_file)).get("v1", {}) errors = [] latest_event = 0 for key, value in sorted(status_v1.items()): - if key == 'stage': + if key == "stage": if value: status = STATUS_RUNNING - status_detail = 'Running in stage: {0}'.format(value) - elif key == 'datasource': + status_detail = "Running in stage: {0}".format(value) + elif key == "datasource": status_detail = value elif isinstance(value, dict): - errors.extend(value.get('errors', [])) - start = value.get('start') or 0 - finished = value.get('finished') or 0 + errors.extend(value.get("errors", [])) + start = value.get("start") or 0 + finished = value.get("finished") or 0 if finished == 0 and start != 0: status = STATUS_RUNNING event_time = max(start, finished) @@ -141,23 +152,23 @@ def _get_status_details(paths): latest_event = event_time if errors: status = STATUS_ERROR - status_detail = '\n'.join(errors) + status_detail = "\n".join(errors) elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0: status = STATUS_DONE if latest_event: - time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event)) + time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event)) else: - time = '' + time = "" return status, status_detail, time def main(): """Tool to report status of cloud-init.""" parser = get_parser() - sys.exit(handle_status_args('status', parser.parse_args())) + sys.exit(handle_status_args("status", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py index 0ef9a748..ed124180 100644 --- a/cloudinit/config/__init__.py +++ b/cloudinit/config/__init__.py @@ -6,9 +6,8 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES) - from cloudinit import log as logging +from cloudinit.settings import FREQUENCIES, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -22,26 +21,27 @@ MOD_PREFIX = "cc_" def form_module_name(name): canon_name = name.replace("-", "_") if canon_name.lower().endswith(".py"): - canon_name = canon_name[0:(len(canon_name) - 3)] + canon_name = canon_name[0 : (len(canon_name) - 3)] canon_name = canon_name.strip() if not canon_name: return None if not canon_name.startswith(MOD_PREFIX): - canon_name = '%s%s' % (MOD_PREFIX, canon_name) + canon_name = "%s%s" % (MOD_PREFIX, canon_name) return canon_name def fixup_module(mod, def_freq=PER_INSTANCE): - if not hasattr(mod, 'frequency'): - setattr(mod, 'frequency', def_freq) + if not hasattr(mod, "frequency"): + setattr(mod, "frequency", def_freq) else: freq = mod.frequency if freq and freq not in FREQUENCIES: LOG.warning("Module %s has an unknown frequency %s", mod, freq) - if not hasattr(mod, 'distros'): - setattr(mod, 'distros', []) - if not hasattr(mod, 'osfamilies'): - setattr(mod, 'osfamilies', []) + if not hasattr(mod, "distros"): + setattr(mod, "distros", []) + if not hasattr(mod, "osfamilies"): + setattr(mod, "osfamilies", []) return mod + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py index d227a58d..a615c814 100644 --- a/cloudinit/config/cc_apk_configure.py +++ b/cloudinit/config/cc_apk_configure.py @@ -9,9 +9,7 @@ from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import util +from cloudinit import temp_utils, templater, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE @@ -54,34 +52,41 @@ REPOSITORIES_TEMPLATE = """\ frequency = PER_INSTANCE -distros = ['alpine'] +distros = ["alpine"] meta = { - 'id': 'cc_apk_configure', - 'name': 'APK Configure', - 'title': 'Configure apk repositories file', - 'description': dedent("""\ + "id": "cc_apk_configure", + "name": "APK Configure", + "title": "Configure apk repositories file", + "description": dedent( + """\ This module handles configuration of the /etc/apk/repositories file. .. note:: To ensure that apk configuration is valid yaml, any strings containing special characters, especially ``:`` should be quoted. - """), - 'distros': distros, - 'examples': [ - dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ # Keep the existing /etc/apk/repositories file unaltered. apk_repos: preserve_repositories: true - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine v3.12 main and community # using default mirror site. apk_repos: alpine_repo: community_enabled: true version: 'v3.12' - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine Edge main, community, and # testing using a specified mirror site and also a local repo. 
apk_repos: @@ -91,21 +96,23 @@ meta = { testing_enabled: true version: 'edge' local_repo_base_url: 'https://my-local-server/local-alpine' - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apk_repos': { - 'type': 'object', - 'properties': { - 'preserve_repositories': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apk_repos": { + "type": "object", + "properties": { + "preserve_repositories": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within an apk_repos section of your cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``. The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``. - """) + """ + ), }, - 'alpine_repo': { - 'type': ['object', 'null'], - 'properties': { - 'base_url': { - 'type': 'string', - 'default': DEFAULT_MIRROR, - 'description': dedent("""\ + "alpine_repo": { + "type": ["object", "null"], + "properties": { + "base_url": { + "type": "string", + "default": DEFAULT_MIRROR, + "description": dedent( + """\ The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``{}`` - """.format(DEFAULT_MIRROR)) + """.format( + DEFAULT_MIRROR + ) + ), }, - 'community_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "community_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Community repo to the repositories file. By default the Community repo is not included. - """) + """ + ), }, - 'testing_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "testing_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended @@ -151,32 +166,37 @@ schema = { installed from Testing may have dependencies that conflict with those in non-Edge Main or Community repos." - """) + """ + ), }, - 'version': { - 'type': 'string', - 'description': dedent("""\ + "version": { + "type": "string", + "description": dedent( + """\ The Alpine version to use (e.g. ``v3.12`` or ``edge``) - """) + """ + ), }, }, - 'required': ['version'], - 'minProperties': 1, - 'additionalProperties': False, + "required": ["version"], + "minProperties": 1, + "additionalProperties": False, }, - 'local_repo_base_url': { - 'type': 'string', - 'description': dedent("""\ + "local_repo_base_url": { + "type": "string", + "description": dedent( + """\ The base URL of an Alpine repository containing unofficial packages - """) - } + """ + ), + }, }, - 'minProperties': 1, # Either preserve_repositories or alpine_repo - 'additionalProperties': False, + "minProperties": 1, # Either preserve_repositories or alpine_repo + "additionalProperties": False, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -195,38 +215,44 @@ def handle(name, cfg, cloud, log, _args): # If there is no "apk_repos" section in the configuration # then do nothing. 
- apk_section = cfg.get('apk_repos') + apk_section = cfg.get("apk_repos") if not apk_section: - LOG.debug(("Skipping module named %s," - " no 'apk_repos' section found"), name) + LOG.debug( + "Skipping module named %s, no 'apk_repos' section found", name + ) return validate_cloudconfig_schema(cfg, schema) # If "preserve_repositories" is explicitly set to True in # the configuration do nothing. - if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False): - LOG.debug(("Skipping module named %s," - " 'preserve_repositories' is set"), name) + if util.get_cfg_option_bool(apk_section, "preserve_repositories", False): + LOG.debug( + "Skipping module named %s, 'preserve_repositories' is set", name + ) return # If there is no "alpine_repo" subsection of "apk_repos" present in the # configuration then do nothing, as at least "version" is required to # create valid repositories entries. - alpine_repo = apk_section.get('alpine_repo') + alpine_repo = apk_section.get("alpine_repo") if not alpine_repo: - LOG.debug(("Skipping module named %s," - " no 'alpine_repo' configuration found"), name) + LOG.debug( + "Skipping module named %s, no 'alpine_repo' configuration found", + name, + ) return # If there is no "version" value present in configuration then do nothing. - alpine_version = alpine_repo.get('version') + alpine_version = alpine_repo.get("version") if not alpine_version: - LOG.debug(("Skipping module named %s," - " 'version' not specified in alpine_repo"), name) + LOG.debug( + "Skipping module named %s, 'version' not specified in alpine_repo", + name, + ) return - local_repo = apk_section.get('local_repo_base_url', '') + local_repo = apk_section.get("local_repo_base_url", "") _write_repositories_file(alpine_repo, alpine_version, local_repo) @@ -240,22 +266,23 @@ def _write_repositories_file(alpine_repo, alpine_version, local_repo): @param local_repo: A string containing the base URL of a local repo. 
""" - repo_file = '/etc/apk/repositories' + repo_file = "/etc/apk/repositories" - alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR) + alpine_baseurl = alpine_repo.get("base_url", DEFAULT_MIRROR) - params = {'alpine_baseurl': alpine_baseurl, - 'alpine_version': alpine_version, - 'community_enabled': alpine_repo.get('community_enabled'), - 'testing_enabled': alpine_repo.get('testing_enabled'), - 'local_repo': local_repo} + params = { + "alpine_baseurl": alpine_baseurl, + "alpine_version": alpine_version, + "community_enabled": alpine_repo.get("community_enabled"), + "testing_enabled": alpine_repo.get("testing_enabled"), + "local_repo": local_repo, + } - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # Filepath is second item in tuple util.write_file(template_fn, content=REPOSITORIES_TEMPLATE) - LOG.debug('Generating Alpine repository configuration file: %s', - repo_file) + LOG.debug("Generating Alpine repository configuration file: %s", repo_file) templater.render_to_file(template_fn, repo_file, params) # Clean up temporary template util.del_file(template_fn) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 2e844c2c..b0728517 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -10,16 +10,14 @@ import glob import os -import re import pathlib +import re from textwrap import dedent -from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import gpg from cloudinit import log as logging -from cloudinit import subp -from cloudinit import templater -from cloudinit import util +from cloudinit import subp, templater, util +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -27,59 +25,46 @@ LOG = logging.getLogger(__name__) # this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') ADD_APT_REPO_MATCH = r"^[\w-]+:\w" -APT_LOCAL_KEYS = '/etc/apt/trusted.gpg' -APT_TRUSTED_GPG_DIR = '/etc/apt/trusted.gpg.d/' -CLOUD_INIT_GPG_DIR = '/etc/apt/cloud-init.gpg.d/' +APT_LOCAL_KEYS = "/etc/apt/trusted.gpg" +APT_TRUSTED_GPG_DIR = "/etc/apt/trusted.gpg.d/" +CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/" frequency = PER_INSTANCE distros = ["ubuntu", "debian"] mirror_property = { - 'type': 'array', - 'items': { - 'type': 'object', - 'additionalProperties': False, - 'required': ['arches'], - 'properties': { - 'arches': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'minItems': 1 - }, - 'uri': { - 'type': 'string', - 'format': 'uri' - }, - 'search': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'uri' - }, - 'minItems': 1 - }, - 'search_dns': { - 'type': 'boolean', + "type": "array", + "items": { + "type": "object", + "additionalProperties": False, + "required": ["arches"], + "properties": { + "arches": { + "type": "array", + "items": {"type": "string"}, + "minItems": 1, }, - 'keyid': { - 'type': 'string' + "uri": {"type": "string", "format": "uri"}, + "search": { + "type": "array", + "items": {"type": "string", "format": "uri"}, + "minItems": 1, }, - 'key': { - 'type': 'string' + "search_dns": { + "type": "boolean", }, - 'keyserver': { - 'type': 'string' - } - } - } + "keyid": {"type": "string"}, + "key": {"type": "string"}, + "keyserver": {"type": "string"}, + }, + }, } meta = { - 'id': 'cc_apt_configure', - 'name': 'Apt 
Configure', - 'title': 'Configure apt for the user', - 'description': dedent("""\ + "id": "cc_apt_configure", + "name": "Apt Configure", + "title": "Configure apt for the user", + "description": dedent( + """\ This module handles both configuration of apt options and adding source lists. There are configuration options such as ``apt_get_wrapper`` and ``apt_get_command`` that control how @@ -94,9 +79,12 @@ meta = { .. note:: For more information about apt configuration, see the - ``Additional apt configuration`` example."""), - 'distros': distros, - 'examples': [dedent("""\ + ``Additional apt configuration`` example.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ apt: preserve_sources_list: false disable_suites: @@ -153,21 +141,24 @@ meta = { key: | ------BEGIN PGP PUBLIC KEY BLOCK------- - ------END PGP PUBLIC KEY BLOCK-------""")], - 'frequency': frequency, + ------END PGP PUBLIC KEY BLOCK-------""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apt': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'preserve_sources_list': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apt": { + "type": "object", + "additionalProperties": False, + "properties": { + "preserve_sources_list": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this @@ -179,15 +170,15 @@ schema = { all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added - to ``sources.list.d``.""") + to ``sources.list.d``.""" + ), }, - 'disable_suites': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "disable_suites": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is @@ -206,11 +197,13 @@ schema = { When a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it - is just commented out.""") + is just commented out.""" + ), }, - 'primary': { + "primary": { **mirror_property, - 'description': dedent("""\ + "description": dedent( + """\ The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the @@ -264,27 +257,35 @@ schema = { ``http://archive.ubuntu.com/ubuntu``. - ``security`` => \ ``http://security.ubuntu.com/ubuntu`` - """) + """ + ), }, - 'security': { + "security": { **mirror_property, - 'description': dedent("""\ - Please refer to the primary config documentation""") + "description": dedent( + """\ + Please refer to the primary config documentation""" + ), }, - 'add_apt_repo_match': { - 'type': 'string', - 'default': ADD_APT_REPO_MATCH, - 'description': dedent("""\ + "add_apt_repo_match": { + "type": "string", + "default": ADD_APT_REPO_MATCH, + "description": dedent( + """\ All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. 
If ``add_apt_repo_match`` is not specified, it - defaults to ``{}``""".format(ADD_APT_REPO_MATCH)) + defaults to ``{}``""".format( + ADD_APT_REPO_MATCH + ) + ), }, - 'debconf_selections': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "debconf_selections": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a @@ -308,11 +309,13 @@ schema = { For example: \ ``ippackage ippackage/ip string 127.0.0.1`` - """) + """ + ), }, - 'sources_list': { - 'type': 'string', - 'description': dedent("""\ + "sources_list": { + "type": "string", + "description": dedent( + """\ Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use a sane default. Within @@ -323,45 +326,55 @@ schema = { - ``$RELEASE`` - ``$PRIMARY`` - ``$SECURITY`` - - ``$KEY_FILE``""") + - ``$KEY_FILE``""" + ), }, - 'conf': { - 'type': 'string', - 'description': dedent("""\ + "conf": { + "type": "string", + "description": dedent( + """\ Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline apt configuration, make sure - to follow yaml syntax.""") + to follow yaml syntax.""" + ), }, - 'https_proxy': { - 'type': 'string', - 'description': dedent("""\ + "https_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify https apt proxy. https proxy url is specified in the format - ``https://[[user][:pass]@]host[:port]/``.""") + ``https://[[user][:pass]@]host[:port]/``.""" + ), }, - 'http_proxy': { - 'type': 'string', - 'description': dedent("""\ + "http_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify http apt proxy. http proxy url is specified in the format - ``http://[[user][:pass]@]host[:port]/``.""") + ``http://[[user][:pass]@]host[:port]/``.""" + ), }, - 'proxy': { - 'type': 'string', - 'description': 'Alias for defining a http apt proxy.' + "proxy": { + "type": "string", + "description": "Alias for defining a http apt proxy.", }, - 'ftp_proxy': { - 'type': 'string', - 'description': dedent("""\ + "ftp_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify ftp apt proxy. 
ftp proxy url is specified in the format - ``ftp://[[user][:pass]@]host[:port]/``.""") + ``ftp://[[user][:pass]@]host[:port]/``.""" + ), }, - 'sources': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "sources": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source @@ -394,11 +407,12 @@ schema = { - ``$PRIMARY`` - ``$SECURITY`` - ``$RELEASE`` - - ``$KEY_FILE``""") - } - } + - ``$KEY_FILE``""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -415,18 +429,22 @@ APT_PROXY_FN = "/etc/apt/apt.conf.d/90cloud-init-aptproxy" DEFAULT_KEYSERVER = "keyserver.ubuntu.com" # Default archive mirrors -PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/", - "SECURITY": "http://security.ubuntu.com/ubuntu/"} -PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", - "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"} -PRIMARY_ARCHES = ['amd64', 'i386'] -PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64'] +PRIMARY_ARCH_MIRRORS = { + "PRIMARY": "http://archive.ubuntu.com/ubuntu/", + "SECURITY": "http://security.ubuntu.com/ubuntu/", +} +PORTS_MIRRORS = { + "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", + "SECURITY": "http://ports.ubuntu.com/ubuntu-ports", +} +PRIMARY_ARCHES = ["amd64", "i386"] +PORTS_ARCHES = ["s390x", "arm64", "armhf", "powerpc", "ppc64el", "riscv64"] def get_default_mirrors(arch=None, target=None): """returns the default mirrors for the target. These depend on the - architecture, for more see: - https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" + architecture, for more see: + https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" if arch is None: arch = util.get_dpkg_architecture(target) if arch in PRIMARY_ARCHES: @@ -438,8 +456,8 @@ def get_default_mirrors(arch=None, target=None): def handle(name, ocfg, cloud, log, _): """process the config for apt_config. This can be called from - curthooks if a global apt config was provided or via the "apt" - standalone command.""" + curthooks if a global apt config was provided or via the "apt" + standalone command.""" # keeping code close to curtin codebase via entry handler target = None if log is not None: @@ -447,12 +465,14 @@ def handle(name, ocfg, cloud, log, _): LOG = log # feed back converted config, but only work on the subset under 'apt' ocfg = convert_to_v3_apt_format(ocfg) - cfg = ocfg.get('apt', {}) + cfg = ocfg.get("apt", {}) if not isinstance(cfg, dict): raise ValueError( "Expected dictionary for 'apt' config, found {config_type}".format( - config_type=type(cfg))) + config_type=type(cfg) + ) + ) validate_cloudconfig_schema(cfg, schema) apply_debconf_selections(cfg, target) @@ -463,7 +483,7 @@ def _should_configure_on_empty_apt(): # if no config was provided, should apt configuration be done? if util.system_is_snappy(): return False, "system is snappy." - if not (subp.which('apt-get') or subp.which('apt')): + if not (subp.which("apt-get") or subp.which("apt")): return False, "no apt commands." return True, "Apt is available." 
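[Editorial aside, not part of the patch] The reformatted handle() above keeps the module's existing flow: any legacy apt_* keys are first normalized into the v3 layout nested under "apt" via convert_to_v3_apt_format(), and that subsection is then validated against the module-level `schema` with validate_cloudconfig_schema() before mirrors, proxies, and sources are applied. A minimal sketch of that flow, assuming a hypothetical user config `user_cfg` (the legacy key and proxy URL are illustrative values only; the imported functions are the ones used in this diff):

# Sketch of the normalize-then-validate flow mirrored from handle() above.
# "user_cfg" and its proxy URL are hypothetical, for illustration only.
from cloudinit.config.cc_apt_configure import convert_to_v3_apt_format, schema
from cloudinit.config.schema import validate_cloudconfig_schema

user_cfg = {"apt_proxy": "http://proxy.example:3128/"}  # legacy v2-style key
user_cfg = convert_to_v3_apt_format(user_cfg)  # expected: {"apt": {"proxy": ...}}
apt_cfg = user_cfg.get("apt", {})
if not isinstance(apt_cfg, dict):  # same guard as handle() above
    raise ValueError("Expected dictionary for 'apt' config")
validate_cloudconfig_schema(apt_cfg, schema)  # non-strict: logs on violations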
@@ -478,12 +498,12 @@ def apply_apt(cfg, cloud, target): LOG.debug("handling apt config: %s", cfg) - release = util.lsb_release(target=target)['codename'] + release = util.lsb_release(target=target)["codename"] arch = util.get_dpkg_architecture(target) mirrors = find_apt_mirror_info(cfg, cloud, arch=arch) LOG.debug("Apt Mirror info: %s", mirrors) - if util.is_false(cfg.get('preserve_sources_list', False)): + if util.is_false(cfg.get("preserve_sources_list", False)): add_mirror_keys(cfg, target) generate_sources_list(cfg, release, mirrors, cloud) rename_apt_lists(mirrors, target, arch) @@ -494,25 +514,34 @@ def apply_apt(cfg, cloud, target): LOG.exception("Failed to apply proxy or apt config info:") # Process 'apt_source -> sources {dict}' - if 'sources' in cfg: + if "sources" in cfg: params = mirrors - params['RELEASE'] = release - params['MIRROR'] = mirrors["MIRROR"] + params["RELEASE"] = release + params["MIRROR"] = mirrors["MIRROR"] matcher = None - matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH) + matchcfg = cfg.get("add_apt_repo_match", ADD_APT_REPO_MATCH) if matchcfg: matcher = re.compile(matchcfg).search - add_apt_sources(cfg['sources'], cloud, target=target, - template_params=params, aa_repo_match=matcher) + add_apt_sources( + cfg["sources"], + cloud, + target=target, + template_params=params, + aa_repo_match=matcher, + ) def debconf_set_selections(selections, target=None): - if not selections.endswith(b'\n'): - selections += b'\n' - subp.subp(['debconf-set-selections'], data=selections, target=target, - capture=True) + if not selections.endswith(b"\n"): + selections += b"\n" + subp.subp( + ["debconf-set-selections"], + data=selections, + target=target, + capture=True, + ) def dpkg_reconfigure(packages, target=None): @@ -532,12 +561,20 @@ def dpkg_reconfigure(packages, target=None): unhandled.append(pkg) if len(unhandled): - LOG.warning("The following packages were installed and preseeded, " - "but cannot be unconfigured: %s", unhandled) + LOG.warning( + "The following packages were installed and preseeded, " + "but cannot be unconfigured: %s", + unhandled, + ) if len(to_config): - subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] + - list(to_config), data=None, target=target, capture=True) + subp.subp( + ["dpkg-reconfigure", "--frontend=noninteractive"] + + list(to_config), + data=None, + target=target, + capture=True, + ) def apply_debconf_selections(cfg, target=None): @@ -546,13 +583,12 @@ def apply_debconf_selections(cfg, target=None): # set1: | # cloud-init cloud-init/datasources multiselect MAAS # set2: pkg pkg/value string bar - selsets = cfg.get('debconf_selections') + selsets = cfg.get("debconf_selections") if not selsets: LOG.debug("debconf_selections was not set in config") return - selections = '\n'.join( - [selsets[key] for key in sorted(selsets.keys())]) + selections = "\n".join([selsets[key] for key in sorted(selsets.keys())]) debconf_set_selections(selections.encode(), target=target) # get a complete list of packages listed in input @@ -579,7 +615,8 @@ def apply_debconf_selections(cfg, target=None): def clean_cloud_init(target): """clean out any local cloud-init config""" flist = glob.glob( - subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) + subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*") + ) LOG.debug("cleaning cloud-init config from: %s", flist) for dpkg_cfg in flist: @@ -588,18 +625,18 @@ def clean_cloud_init(target): def mirrorurl_to_apt_fileprefix(mirror): """mirrorurl_to_apt_fileprefix - Convert a mirror url to the 
file prefix used by apt on disk to - store cache information for that mirror. - To do so do: - - take off ???:// - - drop trailing / - - convert in string / to _""" + Convert a mirror url to the file prefix used by apt on disk to + store cache information for that mirror. + To do so do: + - take off ???:// + - drop trailing / + - convert in string / to _""" string = mirror if string.endswith("/"): string = string[0:-1] pos = string.find("://") if pos >= 0: - string = string[pos + 3:] + string = string[pos + 3 :] string = string.replace("/", "_") return string @@ -631,8 +668,8 @@ def rename_apt_lists(new_mirrors, target, arch): def mirror_to_placeholder(tmpl, mirror, placeholder): """mirror_to_placeholder - replace the specified mirror in a template with a placeholder string - Checks for existence of the expected mirror and warns if not found""" + replace the specified mirror in a template with a placeholder string + Checks for existence of the expected mirror and warns if not found""" if mirror not in tmpl: LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl) return tmpl.replace(mirror, placeholder) @@ -640,13 +677,15 @@ def mirror_to_placeholder(tmpl, mirror, placeholder): def map_known_suites(suite): """there are a few default names which will be auto-extended. - This comes at the inability to use those names literally as suites, - but on the other hand increases readability of the cfg quite a lot""" - mapping = {'updates': '$RELEASE-updates', - 'backports': '$RELEASE-backports', - 'security': '$RELEASE-security', - 'proposed': '$RELEASE-proposed', - 'release': '$RELEASE'} + This comes at the inability to use those names literally as suites, + but on the other hand increases readability of the cfg quite a lot""" + mapping = { + "updates": "$RELEASE-updates", + "backports": "$RELEASE-backports", + "security": "$RELEASE-security", + "proposed": "$RELEASE-proposed", + "release": "$RELEASE", + } try: retsuite = mapping[suite] except KeyError: @@ -656,14 +695,14 @@ def map_known_suites(suite): def disable_suites(disabled, src, release): """reads the config for suites to be disabled and removes those - from the template""" + from the template""" if not disabled: return src retsrc = src for suite in disabled: suite = map_known_suites(suite) - releasesuite = templater.render_string(suite, {'RELEASE': release}) + releasesuite = templater.render_string(suite, {"RELEASE": release}) LOG.debug("Disabling suite %s as %s", suite, releasesuite) newsrc = "" @@ -685,7 +724,7 @@ def disable_suites(disabled, src, release): break if cols[pcol] == releasesuite: - line = '# suite disabled by cloud-init: %s' % line + line = "# suite disabled by cloud-init: %s" % line newsrc += line retsrc = newsrc @@ -694,36 +733,38 @@ def disable_suites(disabled, src, release): def add_mirror_keys(cfg, target): """Adds any keys included in the primary/security mirror clauses""" - for key in ('primary', 'security'): + for key in ("primary", "security"): for mirror in cfg.get(key, []): add_apt_key(mirror, target, file_name=key) def generate_sources_list(cfg, release, mirrors, cloud): """generate_sources_list - create a source.list file based on a custom or default template - by replacing mirrors and release in the template""" + create a source.list file based on a custom or default template + by replacing mirrors and release in the template""" aptsrc = "/etc/apt/sources.list" - params = {'RELEASE': release, 'codename': release} + params = {"RELEASE": release, "codename": release} for k in mirrors: params[k] = 
mirrors[k] params[k.lower()] = mirrors[k] - tmpl = cfg.get('sources_list', None) + tmpl = cfg.get("sources_list", None) if tmpl is None: LOG.info("No custom template provided, fall back to builtin") - template_fn = cloud.get_template_filename('sources.list.%s' % - (cloud.distro.name)) + template_fn = cloud.get_template_filename( + "sources.list.%s" % (cloud.distro.name) + ) if not template_fn: - template_fn = cloud.get_template_filename('sources.list') + template_fn = cloud.get_template_filename("sources.list") if not template_fn: - LOG.warning("No template found, " - "not rendering /etc/apt/sources.list") + LOG.warning( + "No template found, not rendering /etc/apt/sources.list" + ) return tmpl = util.load_file(template_fn) rendered = templater.render_string(tmpl, params) - disabled = disable_suites(cfg.get('disable_suites'), rendered, release) + disabled = disable_suites(cfg.get("disable_suites"), rendered, release) util.write_file(aptsrc, disabled, mode=0o644) @@ -735,7 +776,7 @@ def add_apt_key_raw(key, file_name, hardened=False, target=None): LOG.debug("Adding key:\n'%s'", key) try: name = pathlib.Path(file_name).stem - return apt_key('add', output_file=name, data=key, hardened=hardened) + return apt_key("add", output_file=name, data=key, hardened=hardened) except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise @@ -747,26 +788,26 @@ def add_apt_key(ent, target=None, hardened=False, file_name=None): Supports raw keys or keyids; the latter will, as a first step, be fetched to get the raw key """ - if 'keyid' in ent and 'key' not in ent: + if "keyid" in ent and "key" not in ent: keyserver = DEFAULT_KEYSERVER - if 'keyserver' in ent: - keyserver = ent['keyserver'] + if "keyserver" in ent: + keyserver = ent["keyserver"] - ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver) + ent["key"] = gpg.getkeybyid(ent["keyid"], keyserver) - if 'key' in ent: + if "key" in ent: return add_apt_key_raw( - ent['key'], - file_name or ent['filename'], - hardened=hardened) + ent["key"], file_name or ent["filename"], hardened=hardened + ) def update_packages(cloud): cloud.distro.update_package_sources() -def add_apt_sources(srcdict, cloud, target=None, template_params=None, - aa_repo_match=None): +def add_apt_sources( + srcdict, cloud, target=None, template_params=None, aa_repo_match=None +): """ install keys and repo source .list files defined in 'sources' @@ -795,33 +836,34 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, template_params = {} if aa_repo_match is None: - raise ValueError('did not get a valid repo matcher') + raise ValueError("did not get a valid repo matcher") if not isinstance(srcdict, dict): - raise TypeError('unknown apt format: %s' % (srcdict)) + raise TypeError("unknown apt format: %s" % (srcdict)) for filename in srcdict: ent = srcdict[filename] LOG.debug("adding source/key '%s'", ent) - if 'filename' not in ent: - ent['filename'] = filename + if "filename" not in ent: + ent["filename"] = filename - if 'source' in ent and '$KEY_FILE' in ent['source']: + if "source" in ent and "$KEY_FILE" in ent["source"]: key_file = add_apt_key(ent, target, hardened=True) - template_params['KEY_FILE'] = key_file + template_params["KEY_FILE"] = key_file else: key_file = add_apt_key(ent, target) - if 'source' not in ent: + if "source" not in ent: continue - source = ent['source'] + source = ent["source"] source = templater.render_string(source, template_params) - if not ent['filename'].startswith("/"): - ent['filename'] = 
os.path.join("/etc/apt/sources.list.d/", - ent['filename']) - if not ent['filename'].endswith(".list"): - ent['filename'] += ".list" + if not ent["filename"].startswith("/"): + ent["filename"] = os.path.join( + "/etc/apt/sources.list.d/", ent["filename"] + ) + if not ent["filename"].endswith(".list"): + ent["filename"] += ".list" if aa_repo_match(source): try: @@ -831,7 +873,7 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, raise continue - sourcefn = subp.target_path(target, ent['filename']) + sourcefn = subp.target_path(target, ent["filename"]) try: contents = "%s\n" % (source) util.write_file(sourcefn, contents, omode="a") @@ -850,14 +892,14 @@ def convert_v1_to_v2_apt_format(srclist): if isinstance(srclist, list): LOG.debug("apt config: convert V1 to V2 format (source list to dict)") for srcent in srclist: - if 'filename' not in srcent: + if "filename" not in srcent: # file collides for multiple !filename cases for compatibility # yet we need them all processed, so not same dictionary key - srcent['filename'] = "cloud_config_sources.list" + srcent["filename"] = "cloud_config_sources.list" key = util.rand_dict_key(srcdict, "cloud_config_sources.list") else: # all with filename use that as key (matching new format) - key = srcent['filename'] + key = srcent["filename"] srcdict[key] = srcent elif isinstance(srclist, dict): srcdict = srclist @@ -869,7 +911,7 @@ def convert_v1_to_v2_apt_format(srclist): def convert_key(oldcfg, aptcfg, oldkey, newkey): """convert an old key to the new one if the old one exists - returns true if a key was found and converted""" + returns true if a key was found and converted""" if oldcfg.get(oldkey, None) is not None: aptcfg[newkey] = oldcfg.get(oldkey) del oldcfg[oldkey] @@ -879,33 +921,37 @@ def convert_key(oldcfg, aptcfg, oldkey, newkey): def convert_mirror(oldcfg, aptcfg): """convert old apt_mirror keys into the new more advanced mirror spec""" - keymap = [('apt_mirror', 'uri'), - ('apt_mirror_search', 'search'), - ('apt_mirror_search_dns', 'search_dns')] + keymap = [ + ("apt_mirror", "uri"), + ("apt_mirror_search", "search"), + ("apt_mirror_search_dns", "search_dns"), + ] converted = False - newmcfg = {'arches': ['default']} + newmcfg = {"arches": ["default"]} for oldkey, newkey in keymap: if convert_key(oldcfg, newmcfg, oldkey, newkey): converted = True # only insert new style config if anything was converted if converted: - aptcfg['primary'] = [newmcfg] + aptcfg["primary"] = [newmcfg] def convert_v2_to_v3_apt_format(oldcfg): """convert old to new keys and adapt restructured mirror spec""" - mapoldkeys = {'apt_sources': 'sources', - 'apt_mirror': None, - 'apt_mirror_search': None, - 'apt_mirror_search_dns': None, - 'apt_proxy': 'proxy', - 'apt_http_proxy': 'http_proxy', - 'apt_ftp_proxy': 'https_proxy', - 'apt_https_proxy': 'ftp_proxy', - 'apt_preserve_sources_list': 'preserve_sources_list', - 'apt_custom_sources_list': 'sources_list', - 'add_apt_repo_match': 'add_apt_repo_match'} + mapoldkeys = { + "apt_sources": "sources", + "apt_mirror": None, + "apt_mirror_search": None, + "apt_mirror_search_dns": None, + "apt_proxy": "proxy", + "apt_http_proxy": "http_proxy", + "apt_ftp_proxy": "https_proxy", + "apt_https_proxy": "ftp_proxy", + "apt_preserve_sources_list": "preserve_sources_list", + "apt_custom_sources_list": "sources_list", + "add_apt_repo_match": "add_apt_repo_match", + } needtoconvert = [] for oldkey in mapoldkeys: if oldkey in oldcfg: @@ -917,11 +963,13 @@ def convert_v2_to_v3_apt_format(oldcfg): # no old config, so no 
new one to be created if not needtoconvert: return oldcfg - LOG.debug("apt config: convert V2 to V3 format for keys '%s'", - ", ".join(needtoconvert)) + LOG.debug( + "apt config: convert V2 to V3 format for keys '%s'", + ", ".join(needtoconvert), + ) # if old AND new config are provided, prefer the new one (LP #1616831) - newaptcfg = oldcfg.get('apt', None) + newaptcfg = oldcfg.get("apt", None) if newaptcfg is not None: LOG.debug("apt config: V1/2 and V3 format specified, preferring V3") for oldkey in needtoconvert: @@ -932,10 +980,11 @@ def convert_v2_to_v3_apt_format(oldcfg): # no simple mapping or no collision on this particular key continue if verify != newaptcfg[newkey]: - raise ValueError("Old and New apt format defined with unequal " - "values %s vs %s @ %s" % (verify, - newaptcfg[newkey], - oldkey)) + raise ValueError( + "Old and New apt format defined with unequal " + "values %s vs %s @ %s" + % (verify, newaptcfg[newkey], oldkey) + ) # return conf after clearing conflicting V1/2 keys return oldcfg @@ -955,17 +1004,17 @@ def convert_v2_to_v3_apt_format(oldcfg): raise ValueError("old apt key '%s' left after conversion" % oldkey) # insert new format into config and return full cfg with only v3 content - oldcfg['apt'] = aptcfg + oldcfg["apt"] = aptcfg return oldcfg def convert_to_v3_apt_format(cfg): """convert the old list based format to the new dict based one. After that - convert the old dict keys/format to v3 a.k.a 'new apt config'""" + convert the old dict keys/format to v3 a.k.a 'new apt config'""" # V1 -> V2, the apt_sources entry from list to dict - apt_sources = cfg.get('apt_sources', None) + apt_sources = cfg.get("apt_sources", None) if apt_sources is not None: - cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources) + cfg["apt_sources"] = convert_v1_to_v2_apt_format(apt_sources) # V2 -> V3, move all former globals under the "apt" key # Restructure into new key names and mirror hierarchy @@ -997,7 +1046,12 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): if mydom: doms.append(".%s" % mydom) - doms.extend((".localdomain", "",)) + doms.extend( + ( + ".localdomain", + "", + ) + ) mirror_list = [] distro = cloud.distro.name @@ -1012,12 +1066,11 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): def update_mirror_info(pmirror, smirror, arch, cloud): """sets security mirror to primary if not defined. - returns defaults if no mirrors are defined""" + returns defaults if no mirrors are defined""" if pmirror is not None: if smirror is None: smirror = pmirror - return {'PRIMARY': pmirror, - 'SECURITY': smirror} + return {"PRIMARY": pmirror, "SECURITY": smirror} # None specified at all, get default mirrors from cloud mirror_info = cloud.datasource.get_package_mirror_info() @@ -1026,8 +1079,8 @@ def update_mirror_info(pmirror, smirror, arch, cloud): # arbitrary key/value pairs including 'primary' and 'security' keys. # caller expects dict with PRIMARY and SECURITY. 
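
# [Editor's aside] Illustrative sketch of the V2 -> V3 key migration that
# convert_v2_to_v3_apt_format() above performs, reduced to a standalone
# function. The mapping excerpt and sample config are assumptions for the
# example, not module code.
V2_TO_V3 = {
    "apt_proxy": "proxy",
    "apt_http_proxy": "http_proxy",
    "apt_preserve_sources_list": "preserve_sources_list",
    "apt_custom_sources_list": "sources_list",
}


def sketch_convert(old_cfg):
    apt_cfg = {}
    for old_key, new_key in V2_TO_V3.items():
        if old_key in old_cfg:
            # move the value under the new top-level 'apt' namespace
            apt_cfg[new_key] = old_cfg.pop(old_key)
    if apt_cfg:
        old_cfg["apt"] = apt_cfg
    return old_cfg


assert sketch_convert({"apt_proxy": "http://proxy:3128"}) == {
    "apt": {"proxy": "http://proxy:3128"}
}
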
m = mirror_info.copy() - m['PRIMARY'] = m['primary'] - m['SECURITY'] = m['security'] + m["PRIMARY"] = m["primary"] + m["SECURITY"] = m["security"] return m @@ -1037,7 +1090,7 @@ def update_mirror_info(pmirror, smirror, arch, cloud): def get_arch_mirrorconfig(cfg, mirrortype, arch): """out of a list of potential mirror configurations select - and return the one matching the architecture (or default)""" + and return the one matching the architecture (or default)""" # select the mirror specification (if-any) mirror_cfg_list = cfg.get(mirrortype, None) if mirror_cfg_list is None: @@ -1056,8 +1109,8 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch): def get_mirror(cfg, mirrortype, arch, cloud): """pass the three potential stages of mirror specification - returns None is neither of them found anything otherwise the first - hit is returned""" + returns None is neither of them found anything otherwise the first + hit is returned""" mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch) if mcfg is None: return None @@ -1073,18 +1126,19 @@ def get_mirror(cfg, mirrortype, arch, cloud): # fallback to search_dns if specified if mirror is None: # list of mirrors to try to resolve - mirror = search_for_mirror_dns(mcfg.get("search_dns", None), - mirrortype, cfg, cloud) + mirror = search_for_mirror_dns( + mcfg.get("search_dns", None), mirrortype, cfg, cloud + ) return mirror def find_apt_mirror_info(cfg, cloud, arch=None): """find_apt_mirror_info - find an apt_mirror given the cfg provided. - It can check for separate config of primary and security mirrors - If only primary is given security is assumed to be equal to primary - If the generic apt_mirror is given that is defining for both + find an apt_mirror given the cfg provided. + It can check for separate config of primary and security mirrors + If only primary is given security is assumed to be equal to primary + If the generic apt_mirror is given that is defining for both """ if arch is None: @@ -1105,32 +1159,35 @@ def find_apt_mirror_info(cfg, cloud, arch=None): def apply_apt_config(cfg, proxy_fname, config_fname): """apply_apt_config - Applies any apt*proxy config from if specified + Applies any apt*proxy config from if specified """ # Set up any apt proxy - cfgs = (('proxy', 'Acquire::http::Proxy "%s";'), - ('http_proxy', 'Acquire::http::Proxy "%s";'), - ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'), - ('https_proxy', 'Acquire::https::Proxy "%s";')) + cfgs = ( + ("proxy", 'Acquire::http::Proxy "%s";'), + ("http_proxy", 'Acquire::http::Proxy "%s";'), + ("ftp_proxy", 'Acquire::ftp::Proxy "%s";'), + ("https_proxy", 'Acquire::https::Proxy "%s";'), + ) proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)] if len(proxies): LOG.debug("write apt proxy info to %s", proxy_fname) - util.write_file(proxy_fname, '\n'.join(proxies) + '\n') + util.write_file(proxy_fname, "\n".join(proxies) + "\n") elif os.path.isfile(proxy_fname): util.del_file(proxy_fname) LOG.debug("no apt proxy configured, removed %s", proxy_fname) - if cfg.get('conf', None): + if cfg.get("conf", None): LOG.debug("write apt config info to %s", config_fname) - util.write_file(config_fname, cfg.get('conf')) + util.write_file(config_fname, cfg.get("conf")) elif os.path.isfile(config_fname): util.del_file(config_fname) LOG.debug("no apt config configured, removed %s", config_fname) -def apt_key(command, output_file=None, data=None, hardened=False, - human_output=True): +def apt_key( + command, output_file=None, data=None, hardened=False, human_output=True +): """apt-key replacement 
commands implemented: 'add', 'list', 'finger' @@ -1153,32 +1210,36 @@ def apt_key(command, output_file=None, data=None, hardened=False, key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else [] for file in os.listdir(APT_TRUSTED_GPG_DIR): - if file.endswith('.gpg') or file.endswith('.asc'): + if file.endswith(".gpg") or file.endswith(".asc"): key_files.append(APT_TRUSTED_GPG_DIR + file) - return key_files if key_files else '' + return key_files if key_files else "" def apt_key_add(): """apt-key add returns filepath to new keyring, or '/dev/null' when an error occurs """ - file_name = '/dev/null' + file_name = "/dev/null" if not output_file: util.logexc( - LOG, 'Unknown filename, failed to add key: "{}"'.format(data)) + LOG, 'Unknown filename, failed to add key: "{}"'.format(data) + ) else: try: - key_dir = \ + key_dir = ( CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR + ) stdout = gpg.dearmor(data) - file_name = '{}{}.gpg'.format(key_dir, output_file) + file_name = "{}{}.gpg".format(key_dir, output_file) util.write_file(file_name, stdout) except subp.ProcessExecutionError: - util.logexc(LOG, 'Gpg error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Gpg error, failed to add key: {}".format(data) + ) except UnicodeDecodeError: - util.logexc(LOG, 'Decode error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Decode error, failed to add key: {}".format(data) + ) return file_name def apt_key_list(): @@ -1193,19 +1254,20 @@ def apt_key(command, output_file=None, data=None, hardened=False, key_list.append(gpg.list(key_file, human_output=human_output)) except subp.ProcessExecutionError as error: LOG.warning('Failed to list key "%s": %s', key_file, error) - return '\n'.join(key_list) + return "\n".join(key_list) - if command == 'add': + if command == "add": return apt_key_add() - elif command == 'finger' or command == 'list': + elif command == "finger" or command == "list": return apt_key_list() else: raise ValueError( - 'apt_key() commands add, list, and finger are currently supported') + "apt_key() commands add, list, and finger are currently supported" + ) CONFIG_CLEANERS = { - 'cloud-init': clean_cloud_init, + "cloud-init": clean_cloud_init, } # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index aa186ce2..569849d1 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -29,17 +29,19 @@ not recommended. apt_pipelining: """ -from cloudinit.settings import PER_INSTANCE from cloudinit import util +from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining" -APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" - 'Acquire::http::Pipeline-Depth "%s";\n') +APT_PIPE_TPL = ( + "//Written by cloud-init per 'apt_pipelining'\n" + 'Acquire::http::Pipeline-Depth "%s";\n' +) # Acquire::http::Pipeline-Depth can be a value # from 0 to 5 indicating how many outstanding requests APT should send. 
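
[Editor's aside] As a quick illustration of the snippet this module writes, the template above expands to a two-line apt configuration file; the depth value below is an arbitrary example, not a default.

APT_PIPE_TPL = (
    "//Written by cloud-init per 'apt_pipelining'\n"
    'Acquire::http::Pipeline-Depth "%s";\n'
)

# e.g. a depth of 3; valid values run from 0 to 5 as noted above
print(APT_PIPE_TPL % 3, end="")
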
@@ -49,7 +51,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" def handle(_name, cfg, _cloud, log, _args): - apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os') + apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", "os") apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": @@ -69,4 +71,5 @@ def write_apt_snippet(setting, log, f_name): util.write_file(f_name, file_contents) log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 06f7a26e..bff11a24 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -12,11 +12,9 @@ import os from textwrap import dedent +from cloudinit import subp, temp_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS -from cloudinit import temp_utils -from cloudinit import subp -from cloudinit import util frequency = PER_ALWAYS @@ -26,13 +24,14 @@ frequency = PER_ALWAYS # configuration options before actually attempting to deploy with said # configuration. -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_bootcmd', - 'name': 'Bootcmd', - 'title': 'Run arbitrary commands early in the boot process', - 'description': dedent("""\ + "id": "cc_bootcmd", + "name": "Bootcmd", + "title": "Run arbitrary commands early in the boot process", + "description": dedent( + """\ This module runs arbitrary commands very early in the boot process, only slightly after a boothook would run. This is very similar to a boothook, but more user friendly. The environment variable @@ -48,31 +47,37 @@ meta = { when writing files, do not use /tmp dir as it races with systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. 
- """), - 'distros': distros, - 'examples': [dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] - """)], - 'frequency': PER_ALWAYS, + """ + ) + ], + "frequency": PER_ALWAYS, } schema = { - 'type': 'object', - 'properties': { - 'bootcmd': { - 'type': 'array', - 'items': { - 'oneOf': [ - {'type': 'array', 'items': {'type': 'string'}}, - {'type': 'string'}] + "type": "object", + "properties": { + "bootcmd": { + "type": "array", + "items": { + "oneOf": [ + {"type": "array", "items": {"type": "string"}}, + {"type": "string"}, + ] }, - 'additionalItems': False, # Reject items of non-string non-list - 'additionalProperties': False, - 'minItems': 1, + "additionalItems": False, # Reject items of non-string non-list + "additionalProperties": False, + "minItems": 1, } - } + }, } __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -81,8 +86,9 @@ __doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): if "bootcmd" not in cfg: - log.debug(("Skipping module named %s," - " no 'bootcmd' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'bootcmd' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) @@ -99,11 +105,12 @@ def handle(name, cfg, cloud, log, _args): env = os.environ.copy() iid = cloud.get_instance_id() if iid: - env['INSTANCE_ID'] = str(iid) - cmd = ['/bin/sh', tmpf.name] + env["INSTANCE_ID"] = str(iid) + cmd = ["/bin/sh", tmpf.name] subp.subp(cmd, env=env, capture=False) except Exception: util.logexc(log, "Failed to run bootcmd module %s", name) raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index 9fdaeba1..53b6d0c8 100755 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -38,11 +38,10 @@ Valid configuration options for this module are: byobu_by_default: """ +from cloudinit import subp, util from cloudinit.distros import ug_util -from cloudinit import subp -from cloudinit import util -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def handle(name, cfg, cloud, log, args): @@ -58,8 +57,14 @@ def handle(name, cfg, cloud, log, args): if value == "user" or value == "system": value = "enable-%s" % value - valid = ("enable-user", "enable-system", "enable", - "disable-user", "disable-system", "disable") + valid = ( + "enable-user", + "enable-system", + "enable", + "disable-user", + "disable-system", + "disable", + ) if value not in valid: log.warning("Unknown value %s for byobu_by_default", value) @@ -81,13 +86,16 @@ def handle(name, cfg, cloud, log, args): (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) if not user: - log.warning(("No default byobu user provided, " - "can not launch %s for the default user"), bl_inst) + log.warning( + "No default byobu user provided, " + "can not launch %s for the default user", + bl_inst, + ) else: - shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) + shcmd += ' sudo -Hu "%s" byobu-launcher-%s' % (user, bl_inst) shcmd += " || X=$(($X+1)); " if mod_sys: - shcmd += "echo \"%s\" | debconf-set-selections" % dc_val + shcmd += 'echo "%s" | debconf-set-selections' % dc_val shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive" shcmd += " || X=$(($X+1)); " @@ -96,4 +104,5 @@ def handle(name, cfg, cloud, log, args): 
log.debug("Setting byobu to %s", value) subp.subp(cmd, capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index bd7bead9..9de065ab 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -41,28 +41,27 @@ can be removed from the system with the configuration option import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util DEFAULT_CONFIG = { - 'ca_cert_path': '/usr/share/ca-certificates/', - 'ca_cert_filename': 'cloud-init-ca-certs.crt', - 'ca_cert_config': '/etc/ca-certificates.conf', - 'ca_cert_system_path': '/etc/ssl/certs/', - 'ca_cert_update_cmd': ['update-ca-certificates'] + "ca_cert_path": "/usr/share/ca-certificates/", + "ca_cert_filename": "cloud-init-ca-certs.crt", + "ca_cert_config": "/etc/ca-certificates.conf", + "ca_cert_system_path": "/etc/ssl/certs/", + "ca_cert_update_cmd": ["update-ca-certificates"], } DISTRO_OVERRIDES = { - 'rhel': { - 'ca_cert_path': '/usr/share/pki/ca-trust-source/', - 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt', - 'ca_cert_config': None, - 'ca_cert_system_path': '/etc/pki/ca-trust/', - 'ca_cert_update_cmd': ['update-ca-trust'] + "rhel": { + "ca_cert_path": "/usr/share/pki/ca-trust-source/", + "ca_cert_filename": "anchors/cloud-init-ca-certs.crt", + "ca_cert_config": None, + "ca_cert_system_path": "/etc/pki/ca-trust/", + "ca_cert_update_cmd": ["update-ca-trust"], } } -distros = ['alpine', 'debian', 'ubuntu', 'rhel'] +distros = ["alpine", "debian", "ubuntu", "rhel"] def _distro_ca_certs_configs(distro_name): @@ -72,8 +71,9 @@ def _distro_ca_certs_configs(distro_name): @returns: Dict of distro configurations for ca-cert. """ cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG) - cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'], - cfg['ca_cert_filename']) + cfg["ca_cert_full_path"] = os.path.join( + cfg["ca_cert_path"], cfg["ca_cert_filename"] + ) return cfg @@ -83,7 +83,7 @@ def update_ca_certs(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False) + subp.subp(distro_cfg["ca_cert_update_cmd"], capture=False) def add_ca_certs(distro_cfg, certs): @@ -98,9 +98,9 @@ def add_ca_certs(distro_cfg, certs): return # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) - util.write_file(distro_cfg['ca_cert_full_path'], - cert_file_contents, - mode=0o644) + util.write_file( + distro_cfg["ca_cert_full_path"], cert_file_contents, mode=0o644 + ) update_cert_config(distro_cfg) @@ -110,23 +110,27 @@ def update_cert_config(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - if distro_cfg['ca_cert_config'] is None: + if distro_cfg["ca_cert_config"] is None: return - if os.stat(distro_cfg['ca_cert_config']).st_size == 0: + if os.stat(distro_cfg["ca_cert_config"]).st_size == 0: # If the CA_CERT_CONFIG file is empty (i.e. all existing # CA certs have been deleted) then simply output a single # line with the cloud-init cert filename. - out = "%s\n" % distro_cfg['ca_cert_filename'] + out = "%s\n" % distro_cfg["ca_cert_filename"] else: # Append cert filename to CA_CERT_CONFIG file. # We have to strip the content because blank lines in the file # causes subsequent entries to be ignored. 
(LP: #1077020) - orig = util.load_file(distro_cfg['ca_cert_config']) - cr_cont = '\n'.join([line for line in orig.splitlines() - if line != distro_cfg['ca_cert_filename']]) - out = "%s\n%s\n" % (cr_cont.rstrip(), - distro_cfg['ca_cert_filename']) - util.write_file(distro_cfg['ca_cert_config'], out, omode="wb") + orig = util.load_file(distro_cfg["ca_cert_config"]) + cr_cont = "\n".join( + [ + line + for line in orig.splitlines() + if line != distro_cfg["ca_cert_filename"] + ] + ) + out = "%s\n%s\n" % (cr_cont.rstrip(), distro_cfg["ca_cert_filename"]) + util.write_file(distro_cfg["ca_cert_config"], out, omode="wb") def remove_default_ca_certs(distro_name, distro_cfg): @@ -137,14 +141,15 @@ def remove_default_ca_certs(distro_name, distro_cfg): @param distro_name: String providing the distro class name. @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - util.delete_dir_contents(distro_cfg['ca_cert_path']) - util.delete_dir_contents(distro_cfg['ca_cert_system_path']) - util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644) + util.delete_dir_contents(distro_cfg["ca_cert_path"]) + util.delete_dir_contents(distro_cfg["ca_cert_system_path"]) + util.write_file(distro_cfg["ca_cert_config"], "", mode=0o644) - if distro_name in ['debian', 'ubuntu']: + if distro_name in ["debian", "ubuntu"]: debconf_sel = ( - "ca-certificates ca-certificates/trust_new_crts " + "select no") - subp.subp(('debconf-set-selections', '-'), debconf_sel) + "ca-certificates ca-certificates/trust_new_crts " + "select no" + ) + subp.subp(("debconf-set-selections", "-"), debconf_sel) def handle(name, cfg, cloud, log, _args): @@ -159,11 +164,13 @@ def handle(name, cfg, cloud, log, _args): """ # If there isn't a ca-certs section in the configuration don't do anything if "ca-certs" not in cfg: - log.debug(("Skipping module named %s," - " no 'ca-certs' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'ca-certs' key in configuration", + name, + ) return - ca_cert_cfg = cfg['ca-certs'] + ca_cert_cfg = cfg["ca-certs"] distro_cfg = _distro_ca_certs_configs(cloud.distro.name) # If there is a remove-defaults option set to true, remove the system @@ -183,4 +190,5 @@ def handle(name, cfg, cloud, log, _args): log.debug("Updating certificates") update_ca_certs(distro_cfg) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index ed734d1c..67889683 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -13,87 +13,91 @@ import json import os from textwrap import dedent -from cloudinit import subp +from cloudinit import subp, temp_utils, templater, url_helper, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema -from cloudinit import templater -from cloudinit import temp_utils -from cloudinit import url_helper -from cloudinit import util from cloudinit.settings import PER_ALWAYS - RUBY_VERSION_DEFAULT = "1.8" -CHEF_DIRS = tuple([ - '/etc/chef', - '/var/log/chef', - '/var/lib/chef', - '/var/cache/chef', - '/var/backups/chef', - '/var/run/chef', -]) -REQUIRED_CHEF_DIRS = tuple([ - '/etc/chef', -]) +CHEF_DIRS = tuple( + [ + "/etc/chef", + "/var/log/chef", + "/var/lib/chef", + "/var/cache/chef", + "/var/backups/chef", + "/var/run/chef", + ] +) +REQUIRED_CHEF_DIRS = tuple( + [ + "/etc/chef", + ] +) # Used if fetching chef from a omnibus style package OMNIBUS_URL = "https://www.chef.io/chef/install.sh" OMNIBUS_URL_RETRIES = 5 -CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem' 
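
# [Editor's aside] Standalone sketch of the append logic in
# update_cert_config() in the cc_ca_certs diff above: existing entries are
# kept, any prior cloud-init entry is dropped, and the filename is
# re-appended so the config never accumulates blank or duplicate lines.
# The sample file contents are hypothetical.
def append_cert_entry(existing, filename):
    if not existing:
        return "%s\n" % filename
    kept = "\n".join(
        line for line in existing.splitlines() if line != filename
    )
    return "%s\n%s\n" % (kept.rstrip(), filename)


assert append_cert_entry("", "cloud-init-ca-certs.crt") == (
    "cloud-init-ca-certs.crt\n"
)
assert append_cert_entry(
    "mozilla/SomeCA.crt\n", "cloud-init-ca-certs.crt"
) == "mozilla/SomeCA.crt\ncloud-init-ca-certs.crt\n"
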
-CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret' -CHEF_ENVIRONMENT = '_default' -CHEF_FB_PATH = '/etc/chef/firstboot.json' +CHEF_VALIDATION_PEM_PATH = "/etc/chef/validation.pem" +CHEF_ENCRYPTED_DATA_BAG_PATH = "/etc/chef/encrypted_data_bag_secret" +CHEF_ENVIRONMENT = "_default" +CHEF_FB_PATH = "/etc/chef/firstboot.json" CHEF_RB_TPL_DEFAULTS = { # These are ruby symbols... - 'ssl_verify_mode': ':verify_none', - 'log_level': ':info', + "ssl_verify_mode": ":verify_none", + "log_level": ":info", # These are not symbols... - 'log_location': '/var/log/chef/client.log', - 'validation_key': CHEF_VALIDATION_PEM_PATH, - 'validation_cert': None, - 'client_key': '/etc/chef/client.pem', - 'json_attribs': CHEF_FB_PATH, - 'file_cache_path': '/var/cache/chef', - 'file_backup_path': '/var/backups/chef', - 'pid_file': '/var/run/chef/client.pid', - 'show_time': True, - 'encrypted_data_bag_secret': None, + "log_location": "/var/log/chef/client.log", + "validation_key": CHEF_VALIDATION_PEM_PATH, + "validation_cert": None, + "client_key": "/etc/chef/client.pem", + "json_attribs": CHEF_FB_PATH, + "file_cache_path": "/var/cache/chef", + "file_backup_path": "/var/backups/chef", + "pid_file": "/var/run/chef/client.pid", + "show_time": True, + "encrypted_data_bag_secret": None, } -CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) -CHEF_RB_TPL_PATH_KEYS = frozenset([ - 'log_location', - 'validation_key', - 'client_key', - 'file_cache_path', - 'json_attribs', - 'pid_file', - 'encrypted_data_bag_secret', -]) +CHEF_RB_TPL_BOOL_KEYS = frozenset(["show_time"]) +CHEF_RB_TPL_PATH_KEYS = frozenset( + [ + "log_location", + "validation_key", + "client_key", + "file_cache_path", + "json_attribs", + "pid_file", + "encrypted_data_bag_secret", + ] +) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS) -CHEF_RB_TPL_KEYS.extend([ - 'server_url', - 'node_name', - 'environment', - 'validation_name', - 'chef_license', -]) +CHEF_RB_TPL_KEYS.extend( + [ + "server_url", + "node_name", + "environment", + "validation_name", + "chef_license", + ] +) CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS) -CHEF_RB_PATH = '/etc/chef/client.rb' -CHEF_EXEC_PATH = '/usr/bin/chef-client' -CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) +CHEF_RB_PATH = "/etc/chef/client.rb" +CHEF_EXEC_PATH = "/usr/bin/chef-client" +CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"]) frequency = PER_ALWAYS distros = ["all"] meta = { - 'id': 'cc_chef', - 'name': 'Chef', - 'title': 'module that configures, starts and installs chef', - 'description': dedent("""\ + "id": "cc_chef", + "name": "Chef", + "title": "module that configures, starts and installs chef", + "description": dedent( + """\ This module enables chef to be installed (from packages, gems, or from omnibus). Before this occurs, chef configuration is written to disk (validation.pem, client.pem, firstboot.json, @@ -101,9 +105,12 @@ meta = { /var/log/chef and so-on). If configured, chef will be installed and started in either daemon or non-daemon mode. 
If run in non-daemon mode, post run actions are executed to do - finishing activities such as removing validation.pem."""), - 'distros': distros, - 'examples': [dedent(""" + finishing activities such as removing validation.pem.""" + ), + "distros": distros, + "examples": [ + dedent( + """ chef: directories: - /etc/chef @@ -124,180 +131,237 @@ meta = { omnibus_url_retries: 2 server_url: https://chef.yourorg.com:4000 ssl_verify_mode: :verify_peer - validation_name: yourorg-validator""")], - 'frequency': frequency, + validation_name: yourorg-validator""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'chef': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'directories': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "chef": { + "type": "object", + "additionalProperties": False, + "properties": { + "directories": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Create the necessary directories for chef to run. By default, it creates the following directories: - {chef_dirs}""").format( + {chef_dirs}""" + ).format( chef_dirs="\n".join( [" - ``{}``".format(d) for d in CHEF_DIRS] ) - ) + ), }, - 'validation_cert': { - 'type': 'string', - 'description': dedent("""\ + "validation_cert": { + "type": "string", + "description": dedent( + """\ Optional string to be written to file validation_key. Special value ``system`` means set use existing file. - """) + """ + ), }, - 'validation_key': { - 'type': 'string', - 'default': CHEF_VALIDATION_PEM_PATH, - 'description': dedent("""\ + "validation_key": { + "type": "string", + "default": CHEF_VALIDATION_PEM_PATH, + "description": dedent( + """\ Optional path for validation_cert. default to - ``{}``.""".format(CHEF_VALIDATION_PEM_PATH)) + ``{}``.""".format( + CHEF_VALIDATION_PEM_PATH + ) + ), }, - 'firstboot_path': { - 'type': 'string', - 'default': CHEF_FB_PATH, - 'description': dedent("""\ + "firstboot_path": { + "type": "string", + "default": CHEF_FB_PATH, + "description": dedent( + """\ Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults - to ``{}``.""".format(CHEF_FB_PATH)) + to ``{}``.""".format( + CHEF_FB_PATH + ) + ), }, - 'exec': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "exec": { + "type": "boolean", + "default": False, + "description": dedent( + """\ define if we should run or not run chef (defaults to false, unless a gem installed is requested where this - will then default to true).""") + will then default to true).""" + ), }, - 'client_key': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['client_key'], - 'description': dedent("""\ + "client_key": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["client_key"], + "description": dedent( + """\ Optional path for client_cert. default to - ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key'])) + ``{}``.""".format( + CHEF_RB_TPL_DEFAULTS["client_key"] + ) + ), }, - 'encrypted_data_bag_secret': { - 'type': 'string', - 'default': None, - 'description': dedent("""\ + "encrypted_data_bag_secret": { + "type": "string", + "default": None, + "description": dedent( + """\ Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to None, meaning that chef will have to look at the path ``{}`` for it. 
- """.format(CHEF_ENCRYPTED_DATA_BAG_PATH)) + """.format( + CHEF_ENCRYPTED_DATA_BAG_PATH + ) + ), }, - 'environment': { - 'type': 'string', - 'default': CHEF_ENVIRONMENT, - 'description': dedent("""\ + "environment": { + "type": "string", + "default": CHEF_ENVIRONMENT, + "description": dedent( + """\ Specifies which environment chef will use. By default, it will use the ``{}`` configuration. - """.format(CHEF_ENVIRONMENT)) + """.format( + CHEF_ENVIRONMENT + ) + ), }, - 'file_backup_path': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'], - 'description': dedent("""\ + "file_backup_path": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["file_backup_path"], + "description": dedent( + """\ Specifies the location in which backup files are stored. By default, it uses the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['file_backup_path'])) + CHEF_RB_TPL_DEFAULTS["file_backup_path"] + ) + ), }, - 'file_cache_path': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'], - 'description': dedent("""\ + "file_cache_path": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["file_cache_path"], + "description": dedent( + """\ Specifies the location in which chef cache files will be saved. By default, it uses the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['file_cache_path'])) + CHEF_RB_TPL_DEFAULTS["file_cache_path"] + ) + ), }, - 'json_attribs': { - 'type': 'string', - 'default': CHEF_FB_PATH, - 'description': dedent("""\ + "json_attribs": { + "type": "string", + "default": CHEF_FB_PATH, + "description": dedent( + """\ Specifies the location in which some chef json data is stored. By default, it uses the - ``{}`` location.""".format(CHEF_FB_PATH)) + ``{}`` location.""".format( + CHEF_FB_PATH + ) + ), }, - 'log_level': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['log_level'], - 'description': dedent("""\ + "log_level": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["log_level"], + "description": dedent( + """\ Defines the level of logging to be stored in the log file. By default this value is set to ``{}``. - """.format(CHEF_RB_TPL_DEFAULTS['log_level'])) + """.format( + CHEF_RB_TPL_DEFAULTS["log_level"] + ) + ), }, - 'log_location': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['log_location'], - 'description': dedent("""\ + "log_location": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["log_location"], + "description": dedent( + """\ Specifies the location of the chef lof file. By default, the location is specified at ``{}``.""".format( - CHEF_RB_TPL_DEFAULTS['log_location'])) + CHEF_RB_TPL_DEFAULTS["log_location"] + ) + ), }, - 'node_name': { - 'type': 'string', - 'description': dedent("""\ + "node_name": { + "type": "string", + "description": dedent( + """\ The name of the node to run. By default, we will - use th instance id as the node name.""") + use th instance id as the node name.""" + ), }, - 'omnibus_url': { - 'type': 'string', - 'default': OMNIBUS_URL, - 'description': dedent("""\ + "omnibus_url": { + "type": "string", + "default": OMNIBUS_URL, + "description": dedent( + """\ Omnibus URL if chef should be installed through Omnibus. 
By default, it uses the - ``{}``.""".format(OMNIBUS_URL)) + ``{}``.""".format( + OMNIBUS_URL + ) + ), }, - 'omnibus_url_retries': { - 'type': 'integer', - 'default': OMNIBUS_URL_RETRIES, - 'description': dedent("""\ + "omnibus_url_retries": { + "type": "integer", + "default": OMNIBUS_URL_RETRIES, + "description": dedent( + """\ The number of retries that will be attempted to reach - the Omnibus URL""") + the Omnibus URL""" + ), }, - 'omnibus_version': { - 'type': 'string', - 'description': dedent("""\ + "omnibus_version": { + "type": "string", + "description": dedent( + """\ Optional version string to require for omnibus - install.""") + install.""" + ), }, - 'pid_file': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['pid_file'], - 'description': dedent("""\ + "pid_file": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["pid_file"], + "description": dedent( + """\ The location in which a process identification number (pid) is saved. By default, it saves in the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['pid_file'])) + CHEF_RB_TPL_DEFAULTS["pid_file"] + ) + ), }, - 'server_url': { - 'type': 'string', - 'description': 'The URL for the chef server' + "server_url": { + "type": "string", + "description": "The URL for the chef server", }, - 'show_time': { - 'type': 'boolean', - 'default': True, - 'description': 'Show time in chef logs' + "show_time": { + "type": "boolean", + "default": True, + "description": "Show time in chef logs", }, - 'ssl_verify_mode': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'], - 'description': dedent("""\ + "ssl_verify_mode": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"], + "description": dedent( + """\ Set the verify mode for HTTPS requests. We can have two possible values for this parameter: @@ -306,67 +370,76 @@ schema = { - ``:verify_peer``: Validate all SSL certificates. By default, the parameter is set as ``{}``. - """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'])) + """.format( + CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"] + ) + ), }, - 'validation_name': { - 'type': 'string', - 'description': dedent("""\ + "validation_name": { + "type": "string", + "description": dedent( + """\ The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during - the initial Chef Infra Client run.""") + the initial Chef Infra Client run.""" + ), }, - 'force_install': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "force_install": { + "type": "boolean", + "default": False, + "description": dedent( + """\ If set to ``True``, forces chef installation, even - if it is already installed.""") + if it is already installed.""" + ), }, - 'initial_attributes': { - 'type': 'object', - 'items': { - 'type': 'string' - }, - 'description': dedent("""\ + "initial_attributes": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Specify a list of initial attributes used by the - cookbooks.""") + cookbooks.""" + ), }, - 'install_type': { - 'type': 'string', - 'default': 'packages', - 'description': dedent("""\ + "install_type": { + "type": "string", + "default": "packages", + "description": dedent( + """\ The type of installation for chef. It can be one of the following values: - ``packages`` - ``gems`` - - ``omnibus``""") + - ``omnibus``""" + ), }, - 'run_list': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'description': 'A run list for a first boot json.' 
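
# [Editor's aside] The schema fragments in this diff are plain jsonschema,
# so a config excerpt can be checked directly with the third-party
# 'jsonschema' package (cloud-init routes this through
# cloudinit.config.schema instead). This excerpt and sample config are
# illustrative assumptions, not the module's full schema.
import jsonschema

excerpt = {
    "type": "object",
    "properties": {
        "chef": {
            "type": "object",
            "properties": {
                "server_url": {"type": "string"},
                "run_list": {
                    "type": "array",
                    "items": {"type": "string"},
                },
            },
        }
    },
}

jsonschema.validate(
    {
        "chef": {
            "server_url": "https://chef.example.com:4000",
            "run_list": ["recipe[apache2]", "role[db]"],
        }
    },
    excerpt,
)  # raises jsonschema.ValidationError on a non-conforming config
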
+ "run_list": { + "type": "array", + "items": {"type": "string"}, + "description": "A run list for a first boot json.", }, "chef_license": { - 'type': 'string', - 'description': dedent("""\ + "type": "string", + "description": dedent( + """\ string that indicates if user accepts or not license - related to some of chef products""") - } - } + related to some of chef products""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) def post_run_chef(chef_cfg, log): - delete_pem = util.get_cfg_option_bool(chef_cfg, - 'delete_validation_post_exec', - default=False) + delete_pem = util.get_cfg_option_bool( + chef_cfg, "delete_validation_post_exec", default=False + ) if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH): os.unlink(CHEF_VALIDATION_PEM_PATH) @@ -389,16 +462,20 @@ def get_template_params(iid, chef_cfg, log): else: params[k] = util.get_cfg_option_str(chef_cfg, k) # These ones are overwritten to be exact values... - params.update({ - 'generated_by': util.make_header(), - 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', - default=iid), - 'environment': util.get_cfg_option_str(chef_cfg, 'environment', - default='_default'), - # These two are mandatory... - 'server_url': chef_cfg['server_url'], - 'validation_name': chef_cfg['validation_name'], - }) + params.update( + { + "generated_by": util.make_header(), + "node_name": util.get_cfg_option_str( + chef_cfg, "node_name", default=iid + ), + "environment": util.get_cfg_option_str( + chef_cfg, "environment", default="_default" + ), + # These two are mandatory... + "server_url": chef_cfg["server_url"], + "validation_name": chef_cfg["validation_name"], + } + ) return params @@ -406,35 +483,38 @@ def handle(name, cfg, cloud, log, _args): """Handler method activated by cloud-init.""" # If there isn't a chef key in the configuration don't do anything - if 'chef' not in cfg: - log.debug(("Skipping module named %s," - " no 'chef' key in configuration"), name) + if "chef" not in cfg: + log.debug( + "Skipping module named %s, no 'chef' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) - chef_cfg = cfg['chef'] + chef_cfg = cfg["chef"] # Ensure the chef directories we use exist - chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories') + chef_dirs = util.get_cfg_option_list(chef_cfg, "directories") if not chef_dirs: chef_dirs = list(CHEF_DIRS) for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS): util.ensure_dir(d) - vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH) - vcert = chef_cfg.get('validation_cert') + vkey_path = chef_cfg.get("validation_key", CHEF_VALIDATION_PEM_PATH) + vcert = chef_cfg.get("validation_cert") # special value 'system' means do not overwrite the file # but still render the template to contain 'validation_key' if vcert: if vcert != "system": util.write_file(vkey_path, vcert) elif not os.path.isfile(vkey_path): - log.warning("chef validation_cert provided as 'system', but " - "validation_key path '%s' does not exist.", - vkey_path) + log.warning( + "chef validation_cert provided as 'system', but " + "validation_key path '%s' does not exist.", + vkey_path, + ) # Create the chef config from template - template_fn = cloud.get_template_filename('chef_client.rb') + template_fn = cloud.get_template_filename("chef_client.rb") if template_fn: iid = str(cloud.datasource.get_instance_id()) params = get_template_params(iid, chef_cfg, log) @@ -448,32 +528,33 @@ def handle(name, cfg, cloud, log, _args): util.ensure_dirs(param_paths) 
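
# [Editor's aside] Sketch of the firstboot-json assembly that follows a few
# lines below in handle(): run_list plus any initial_attributes are
# flattened into one JSON object. The sample chef_cfg is hypothetical.
import json


def build_firstboot(chef_cfg):
    initial = {}
    if "run_list" in chef_cfg:
        initial["run_list"] = chef_cfg["run_list"]
    initial.update(chef_cfg.get("initial_attributes", {}))
    return json.dumps(initial)


print(build_firstboot({
    "run_list": ["recipe[apache2]"],
    "initial_attributes": {"apache": {"prefork": {"maxclients": 100}}},
}))
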
templater.render_to_file(template_fn, CHEF_RB_PATH, params) else: - log.warning("No template found, not rendering to %s", - CHEF_RB_PATH) + log.warning("No template found, not rendering to %s", CHEF_RB_PATH) # Set the firstboot json - fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path', - default=CHEF_FB_PATH) + fb_filename = util.get_cfg_option_str( + chef_cfg, "firstboot_path", default=CHEF_FB_PATH + ) if not fb_filename: log.info("First boot path empty, not writing first boot json file") else: initial_json = {} - if 'run_list' in chef_cfg: - initial_json['run_list'] = chef_cfg['run_list'] - if 'initial_attributes' in chef_cfg: - initial_attributes = chef_cfg['initial_attributes'] + if "run_list" in chef_cfg: + initial_json["run_list"] = chef_cfg["run_list"] + if "initial_attributes" in chef_cfg: + initial_attributes = chef_cfg["initial_attributes"] for k in list(initial_attributes.keys()): initial_json[k] = initial_attributes[k] util.write_file(fb_filename, json.dumps(initial_json)) # Try to install chef, if its not already installed... - force_install = util.get_cfg_option_bool(chef_cfg, - 'force_install', default=False) + force_install = util.get_cfg_option_bool( + chef_cfg, "force_install", default=False + ) installed = subp.is_exe(CHEF_EXEC_PATH) if not installed or force_install: run = install_chef(cloud, chef_cfg, log) elif installed: - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) else: run = False if run: @@ -482,18 +563,21 @@ def handle(name, cfg, cloud, log, _args): def run_chef(chef_cfg, log): - log.debug('Running chef-client') + log.debug("Running chef-client") cmd = [CHEF_EXEC_PATH] - if 'exec_arguments' in chef_cfg: - cmd_args = chef_cfg['exec_arguments'] + if "exec_arguments" in chef_cfg: + cmd_args = chef_cfg["exec_arguments"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.append(cmd_args) else: - log.warning("Unknown type %s provided for chef" - " 'exec_arguments' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for chef" + " 'exec_arguments' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(CHEF_EXEC_DEF_ARGS) else: cmd.extend(CHEF_EXEC_DEF_ARGS) @@ -507,16 +591,16 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): The 'args' argument to subp will be updated with the full path to the filename as the first argument. 
""" - basename = kwargs.pop('basename', "subp_blob") + basename = kwargs.pop("basename", "subp_blob") - if len(args) == 0 and 'args' not in kwargs: + if len(args) == 0 and "args" not in kwargs: args = [tuple()] # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: tmpf = os.path.join(tmpd, basename) - if 'args' in kwargs: - kwargs['args'] = [tmpf] + list(kwargs['args']) + if "args" in kwargs: + kwargs["args"] = [tmpf] + list(kwargs["args"]) else: args = list(args) args[0] = [tmpf] + args[0] @@ -543,36 +627,39 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None): if omnibus_version is None: args = [] else: - args = ['-v', omnibus_version] + args = ["-v", omnibus_version] content = url_helper.readurl(url=url, retries=retries).contents return subp_blob_in_tempfile( - blob=content, args=args, - basename='chef-omnibus-install', capture=False) + blob=content, args=args, basename="chef-omnibus-install", capture=False + ) def install_chef(cloud, chef_cfg, log): # If chef is not installed, we install chef based on 'install_type' - install_type = util.get_cfg_option_str(chef_cfg, 'install_type', - 'packages') - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + install_type = util.get_cfg_option_str( + chef_cfg, "install_type", "packages" + ) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) if install_type == "gems": # This will install and run the chef-client from gems - chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) - ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', - RUBY_VERSION_DEFAULT) + chef_version = util.get_cfg_option_str(chef_cfg, "version", None) + ruby_version = util.get_cfg_option_str( + chef_cfg, "ruby_version", RUBY_VERSION_DEFAULT + ) install_chef_from_gems(ruby_version, chef_version, cloud.distro) # Retain backwards compat, by preferring True instead of False # when not provided/overriden... 
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True) - elif install_type == 'packages': + run = util.get_cfg_option_bool(chef_cfg, "exec", default=True) + elif install_type == "packages": # This will install and run the chef-client from packages - cloud.distro.install_packages(('chef',)) - elif install_type == 'omnibus': + cloud.distro.install_packages(("chef",)) + elif install_type == "omnibus": omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version") install_chef_from_omnibus( url=util.get_cfg_option_str(chef_cfg, "omnibus_url"), retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"), - omnibus_version=omnibus_version) + omnibus_version=omnibus_version, + ) else: log.warning("Unknown chef install type '%s'", install_type) run = False @@ -581,25 +668,47 @@ def install_chef(cloud, chef_cfg, log): def get_ruby_packages(version): # return a list of packages needed to install ruby at version - pkgs = ['ruby%s' % version, 'ruby%s-dev' % version] + pkgs = ["ruby%s" % version, "ruby%s-dev" % version] if version == "1.8": - pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8')) + pkgs.extend(("libopenssl-ruby1.8", "rubygems1.8")) return pkgs def install_chef_from_gems(ruby_version, chef_version, distro): distro.install_packages(get_ruby_packages(ruby_version)) - if not os.path.exists('/usr/bin/gem'): - util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') - if not os.path.exists('/usr/bin/ruby'): - util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') + if not os.path.exists("/usr/bin/gem"): + util.sym_link("/usr/bin/gem%s" % ruby_version, "/usr/bin/gem") + if not os.path.exists("/usr/bin/ruby"): + util.sym_link("/usr/bin/ruby%s" % ruby_version, "/usr/bin/ruby") if chef_version: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '-v %s' % chef_version, '--no-ri', - '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "-v %s" % chef_version, + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) else: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '--no-ri', '--no-rdoc', '--bindir', - '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 4d5a6aa2..d09fc129 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -30,18 +30,16 @@ location that this cloud-init has been configured with when running. 
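
# [Editor's aside] Preview of what _make_header() in the cc_debug hunk
# below produces, using only string methods; the title text is an
# arbitrary example.
def make_header(text):
    bar = "-" * 80
    return "%s\n%s\n%s\n" % (bar, text.center(80, " "), bar)


print(make_header("Config"), end="")
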
import copy from io import StringIO -from cloudinit import type_utils -from cloudinit import util -from cloudinit import safeyaml +from cloudinit import safeyaml, type_utils, util -SKIP_KEYS = frozenset(['log_cfgs']) +SKIP_KEYS = frozenset(["log_cfgs"]) def _make_header(text): header = StringIO() header.write("-" * 80) header.write("\n") - header.write(text.center(80, ' ')) + header.write(text.center(80, " ")) header.write("\n") header.write("-" * 80) header.write("\n") @@ -56,17 +54,16 @@ def _dumps(obj): def handle(name, cfg, cloud, log, args): """Handler method activated by cloud-init.""" - verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True) + verbose = util.get_cfg_by_path(cfg, ("debug", "verbose"), default=True) if args: # if args are provided (from cmdline) then explicitly set verbose out_file = args[0] verbose = True else: - out_file = util.get_cfg_by_path(cfg, ('debug', 'output')) + out_file = util.get_cfg_by_path(cfg, ("debug", "output")) if not verbose: - log.debug(("Skipping module named %s," - " verbose printing disabled"), name) + log.debug("Skipping module named %s, verbose printing disabled", name) return # Clean out some keys that we just don't care about showing... dump_cfg = copy.deepcopy(cfg) @@ -85,8 +82,9 @@ def handle(name, cfg, cloud, log, args): to_print.write(_dumps(cloud.datasource.metadata)) to_print.write("\n") to_print.write(_make_header("Misc")) - to_print.write("Datasource: %s\n" % - (type_utils.obj_name(cloud.datasource))) + to_print.write( + "Datasource: %s\n" % (type_utils.obj_name(cloud.datasource)) + ) to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro))) to_print.write("Hostname: %s\n" % (cloud.get_hostname(True))) to_print.write("Instance ID: %s\n" % (cloud.get_instance_id())) @@ -102,4 +100,5 @@ def handle(name, cfg, cloud, log, args): else: util.multi_log("".join(content_to_file), console=True, stderr=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index 61c769b3..5e528e81 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -26,32 +26,35 @@ by default. 
disable_ec2_metadata: """ -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, util from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject'] -REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254'] +REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"] +REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"] def handle(name, cfg, _cloud, log, _args): disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) if disabled: reject_cmd = None - if subp.which('ip'): + if subp.which("ip"): reject_cmd = REJECT_CMD_IP - elif subp.which('ifconfig'): + elif subp.which("ifconfig"): reject_cmd = REJECT_CMD_IF else: - log.error(('Neither "route" nor "ip" command found, unable to ' - 'manipulate routing table')) + log.error( + 'Neither "route" nor "ip" command found, unable to ' + "manipulate routing table" + ) return subp.subp(reject_cmd, capture=False) else: - log.debug(("Skipping module named %s," - " disabling the ec2 route not enabled"), name) + log.debug( + "Skipping module named %s, disabling the ec2 route not enabled", + name, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 440f05f1..4d527c7a 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -100,13 +100,13 @@ A label can be specified for the filesystem using replace_fs: """ -from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp import logging import os import shlex +from cloudinit import subp, util +from cloudinit.settings import PER_INSTANCE + frequency = PER_INSTANCE # Define the commands to use @@ -118,7 +118,7 @@ BLKDEV_CMD = subp.which("blockdev") PARTPROBE_CMD = subp.which("partprobe") WIPEFS_CMD = subp.which("wipefs") -LANG_C_ENV = {'LANG': 'C'} +LANG_C_ENV = {"LANG": "C"} LOG = logging.getLogger(__name__) @@ -145,9 +145,12 @@ def handle(_name, cfg, cloud, log, _args): try: log.debug("Creating new partition table/disk") - util.log_time(logfunc=LOG.debug, - msg="Creating partition on %s" % disk, - func=mkpart, args=(disk, definition)) + util.log_time( + logfunc=LOG.debug, + msg="Creating partition on %s" % disk, + func=mkpart, + args=(disk, definition), + ) except Exception as e: util.logexc(LOG, "Failed partitioning operation\n%s" % e) @@ -162,10 +165,13 @@ def handle(_name, cfg, cloud, log, _args): try: log.debug("Creating new filesystem.") - device = definition.get('device') - util.log_time(logfunc=LOG.debug, - msg="Creating fs for %s" % device, - func=mkfs, args=(definition,)) + device = definition.get("device") + util.log_time( + logfunc=LOG.debug, + msg="Creating fs for %s" % device, + func=mkfs, + args=(definition,), + ) except Exception as e: util.logexc(LOG, "Failed during filesystem operation\n%s" % e) @@ -178,16 +184,22 @@ def update_disk_setup_devices(disk_setup, tformer): if transformed is None or transformed == origname: continue if transformed in disk_setup: - LOG.info("Replacing %s in disk_setup for translation of %s", - origname, transformed) + LOG.info( + "Replacing %s in disk_setup for translation of %s", + origname, + transformed, + ) del disk_setup[transformed] disk_setup[transformed] = disk_setup[origname] if isinstance(disk_setup[transformed], dict): - disk_setup[transformed]['_origname'] = origname + disk_setup[transformed]["_origname"] = origname del disk_setup[origname] - LOG.debug("updated 
disk_setup device entry '%s' to '%s'", - origname, transformed) + LOG.debug( + "updated disk_setup device entry '%s' to '%s'", + origname, + transformed, + ) def update_fs_setup_devices(disk_setup, tformer): @@ -198,7 +210,7 @@ def update_fs_setup_devices(disk_setup, tformer): LOG.warning("entry in disk_setup not a dict: %s", definition) continue - origname = definition.get('device') + origname = definition.get("device") if origname is None: continue @@ -208,19 +220,24 @@ def update_fs_setup_devices(disk_setup, tformer): tformed = tformer(dev) if tformed is not None: dev = tformed - LOG.debug("%s is mapped to disk=%s part=%s", - origname, tformed, part) - definition['_origname'] = origname - definition['device'] = tformed + LOG.debug( + "%s is mapped to disk=%s part=%s", origname, tformed, part + ) + definition["_origname"] = origname + definition["device"] = tformed if part: # In origname with .N, N overrides 'partition' key. - if 'partition' in definition: - LOG.warning("Partition '%s' from dotted device name '%s' " - "overrides 'partition' key in %s", part, origname, - definition) - definition['_partition'] = definition['partition'] - definition['partition'] = part + if "partition" in definition: + LOG.warning( + "Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", + part, + origname, + definition, + ) + definition["_partition"] = definition["partition"] + definition["partition"] = part def value_splitter(values, start=None): @@ -232,7 +249,7 @@ def value_splitter(values, start=None): if start: _values = _values[start:] - for key, value in [x.split('=') for x in _values]: + for key, value in [x.split("=") for x in _values]: yield key, value @@ -251,11 +268,16 @@ def enumerate_disk(device, nodeps=False): name: the device name, i.e. 
sda """ - lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL', - device] + lsblk_cmd = [ + LSBLK_CMD, + "--pairs", + "--output", + "NAME,TYPE,FSTYPE,LABEL", + device, + ] if nodeps: - lsblk_cmd.append('--nodeps') + lsblk_cmd.append("--nodeps") info = None try: @@ -269,10 +291,10 @@ def enumerate_disk(device, nodeps=False): for part in parts: d = { - 'name': None, - 'type': None, - 'fstype': None, - 'label': None, + "name": None, + "type": None, + "fstype": None, + "label": None, } for key, value in value_splitter(part): @@ -303,9 +325,9 @@ def is_device_valid(name, partition=False): LOG.warning("Query against device %s failed", name) return False - if partition and d_type == 'part': + if partition and d_type == "part": return True - elif not partition and d_type == 'disk': + elif not partition and d_type == "disk": return True return False @@ -321,7 +343,7 @@ def check_fs(device): """ out, label, fs_type, uuid = None, None, None, None - blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device] + blkid_cmd = [BLKID_CMD, "-c", "/dev/null", device] try: out, _err = subp.subp(blkid_cmd, rcs=[0, 2]) except Exception as e: @@ -332,11 +354,11 @@ def check_fs(device): if out: if len(out.splitlines()) == 1: for key, value in value_splitter(out, start=1): - if key.lower() == 'label': + if key.lower() == "label": label = value - elif key.lower() == 'type': + elif key.lower() == "type": fs_type = value - elif key.lower() == 'uuid': + elif key.lower() == "uuid": uuid = value return label, fs_type, uuid @@ -350,8 +372,14 @@ def is_filesystem(device): return fs_type -def find_device_node(device, fs_type=None, label=None, valid_targets=None, - label_match=True, replace_fs=None): +def find_device_node( + device, + fs_type=None, + label=None, + valid_targets=None, + label_match=True, + replace_fs=None, +): """ Find a device that is either matches the spec, or the first @@ -366,31 +394,32 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None, label = "" if not valid_targets: - valid_targets = ['disk', 'part'] + valid_targets = ["disk", "part"] raw_device_used = False for d in enumerate_disk(device): - if d['fstype'] == replace_fs and label_match is False: + if d["fstype"] == replace_fs and label_match is False: # We found a device where we want to replace the FS - return ('/dev/%s' % d['name'], False) + return ("/dev/%s" % d["name"], False) - if (d['fstype'] == fs_type and - ((label_match and d['label'] == label) or not label_match)): + if d["fstype"] == fs_type and ( + (label_match and d["label"] == label) or not label_match + ): # If we find a matching device, we return that - return ('/dev/%s' % d['name'], True) + return ("/dev/%s" % d["name"], True) - if d['type'] in valid_targets: + if d["type"] in valid_targets: - if d['type'] != 'disk' or d['fstype']: + if d["type"] != "disk" or d["fstype"]: raw_device_used = True - if d['type'] == 'disk': + if d["type"] == "disk": # Skip the raw disk, its the default pass - elif not d['fstype']: - return ('/dev/%s' % d['name'], False) + elif not d["fstype"]: + return ("/dev/%s" % d["name"], False) if not raw_device_used: return (device, False) @@ -433,7 +462,7 @@ def get_dyn_func(*args): if len(args) < 2: raise Exception("Unable to determine dynamic funcation name") - func_name = (args[0] % args[1]) + func_name = args[0] % args[1] func_args = args[2:] try: @@ -448,8 +477,8 @@ def get_dyn_func(*args): def get_hdd_size(device): try: - size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device]) - sector_size, _ = 
subp.subp([BLKDEV_CMD, '--getss', device]) + size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device]) + sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device]) except Exception as e: raise Exception("Failed to get %s size\n%s" % (device, e)) from e @@ -481,13 +510,13 @@ def check_partition_mbr_layout(device, layout): if device in _line[0]: # We don't understand extended partitions yet - if _line[-1].lower() in ['extended', 'empty']: + if _line[-1].lower() in ["extended", "empty"]: continue # Find the partition types type_label = None for x in sorted(range(1, len(_line)), reverse=True): - if _line[x].isdigit() and _line[x] != '/': + if _line[x].isdigit() and _line[x] != "/": type_label = _line[x] break @@ -496,7 +525,7 @@ def check_partition_mbr_layout(device, layout): def check_partition_gpt_layout(device, layout): - prt_cmd = [SGDISK_CMD, '-p', device] + prt_cmd = [SGDISK_CMD, "-p", device] try: out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV) except Exception as e: @@ -522,7 +551,7 @@ def check_partition_gpt_layout(device, layout): # Number Start (sector) End (sector) Size Code Name # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: - if line.strip().startswith('Number'): + if line.strip().startswith("Number"): break codes = [line.strip().split()[5] for line in out_lines] @@ -545,10 +574,16 @@ def check_partition_layout(table_type, device, layout): function called check_partition_%s_layout """ found_layout = get_dyn_func( - "check_partition_%s_layout", table_type, device, layout) - - LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", - table_type, device, layout, found_layout) + "check_partition_%s_layout", table_type, device, layout + ) + + LOG.debug( + "called check_partition_%s_layout(%s, %s), returned: %s", + table_type, + device, + layout, + found_layout, + ) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. @@ -559,10 +594,12 @@ def check_partition_layout(table_type, device, layout): elif len(found_layout) == len(layout): # This just makes sure that the number of requested # partitions and the type labels are right - layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None - for x in layout] - LOG.debug("Layout types=%s. Found types=%s", - layout_types, found_layout) + layout_types = [ + str(x[1]) if isinstance(x, (tuple, list)) else None for x in layout + ] + LOG.debug( + "Layout types=%s. 
Found types=%s", layout_types, found_layout + ) for itype, ftype in zip(layout_types, found_layout): if itype is not None and str(ftype) != str(itype): return False @@ -588,8 +625,9 @@ def get_partition_mbr_layout(size, layout): # Create a single partition return "0," - if ((len(layout) == 0 and isinstance(layout, list)) or - not isinstance(layout, list)): + if (len(layout) == 0 and isinstance(layout, list)) or not isinstance( + layout, list + ): raise Exception("Partition layout is invalid") last_part_num = len(layout) @@ -617,8 +655,10 @@ def get_partition_mbr_layout(size, layout): sfdisk_definition = "\n".join(part_definition) if len(part_definition) > 4: - raise Exception("Calculated partition definition is too big\n%s" % - sfdisk_definition) + raise Exception( + "Calculated partition definition is too big\n%s" + % sfdisk_definition + ) return sfdisk_definition @@ -632,14 +672,15 @@ def get_partition_gpt_layout(size, layout): if isinstance(partition, list): if len(partition) != 2: raise Exception( - "Partition was incorrectly defined: %s" % partition) + "Partition was incorrectly defined: %s" % partition + ) percent, partition_type = partition else: percent = partition partition_type = None part_size = int(float(size) * (float(percent) / 100)) - partition_specs.append((partition_type, [0, '+{}'.format(part_size)])) + partition_specs.append((partition_type, [0, "+{}".format(part_size)])) # The last partition should use up all remaining space partition_specs[-1][-1][-1] = 0 @@ -649,7 +690,7 @@ def get_partition_gpt_layout(size, layout): def purge_disk_ptable(device): # wipe the first and last megabyte of a disk (or file) # gpt stores partition table both at front and at end. - null = '\0' + null = "\0" start_len = 1024 * 1024 end_len = 1024 * 1024 with open(device, "rb+") as fp: @@ -668,14 +709,14 @@ def purge_disk(device): # wipe any file systems first for d in enumerate_disk(device): - if d['type'] not in ["disk", "crypt"]: - wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']] + if d["type"] not in ["disk", "crypt"]: + wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d["name"]] try: - LOG.info("Purging filesystem on /dev/%s", d['name']) + LOG.info("Purging filesystem on /dev/%s", d["name"]) subp.subp(wipefs_cmd) except Exception as e: raise Exception( - "Failed FS purge of /dev/%s" % d['name'] + "Failed FS purge of /dev/%s" % d["name"] ) from e purge_disk_ptable(device) @@ -701,7 +742,7 @@ def read_parttbl(device): if PARTPROBE_CMD is not None: probe_cmd = [PARTPROBE_CMD, device] else: - probe_cmd = [BLKDEV_CMD, '--rereadpt', device] + probe_cmd = [BLKDEV_CMD, "--rereadpt", device] util.udevadm_settle() try: subp.subp(probe_cmd) @@ -730,17 +771,24 @@ def exec_mkpart_mbr(device, layout): def exec_mkpart_gpt(device, layout): try: - subp.subp([SGDISK_CMD, '-Z', device]) + subp.subp([SGDISK_CMD, "-Z", device]) for index, (partition_type, (start, end)) in enumerate(layout): index += 1 - subp.subp([SGDISK_CMD, - '-n', '{}:{}:{}'.format(index, start, end), device]) + subp.subp( + [ + SGDISK_CMD, + "-n", + "{}:{}:{}".format(index, start, end), + device, + ] + ) if partition_type is not None: # convert to a 4 char (or more) string right padded with 0 # 82 -> 8200. 
'Linux' -> 'Linux' pinput = str(partition_type).ljust(4, "0") subp.subp( - [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) + [SGDISK_CMD, "-t", "{}:{}".format(index, pinput), device] + ) except Exception: LOG.warning("Failed to partition device %s", device) raise @@ -766,8 +814,10 @@ def assert_and_settle_device(device): if not os.path.exists(device): util.udevadm_settle() if not os.path.exists(device): - raise RuntimeError("Device %s did not exist and was not created " - "with a udevadm settle." % device) + raise RuntimeError( + "Device %s did not exist and was not created " + "with a udevadm settle." % device + ) # Whether or not the device existed above, it is possible that udev # events that would populate udev database (for reading by lsdname) have @@ -794,9 +844,9 @@ def mkpart(device, definition): device = os.path.realpath(device) LOG.debug("Checking values for %s definition", device) - overwrite = definition.get('overwrite', False) - layout = definition.get('layout', False) - table_type = definition.get('table_type', 'mbr') + overwrite = definition.get("overwrite", False) + layout = definition.get("layout", False) + table_type = definition.get("table_type", "mbr") # Check if the default device is a partition or not LOG.debug("Checking against default devices") @@ -809,7 +859,8 @@ def mkpart(device, definition): LOG.debug("Checking if device %s is a valid device", device) if not is_device_valid(device): raise Exception( - 'Device {device} is not a disk device!'.format(device=device)) + "Device {device} is not a disk device!".format(device=device) + ) # Remove the partition table entries if isinstance(layout, str) and layout.lower() == "remove": @@ -845,21 +896,21 @@ def lookup_force_flag(fs): A force flag might be -F or -f; this looks it up """ flags = { - 'ext': '-F', - 'btrfs': '-f', - 'xfs': '-f', - 'reiserfs': '-f', - 'swap': '-f', + "ext": "-F", + "btrfs": "-f", + "xfs": "-f", + "reiserfs": "-f", + "swap": "-f", } - if 'ext' in fs.lower(): - fs = 'ext' + if "ext" in fs.lower(): + fs = "ext" if fs.lower() in flags: return flags[fs] LOG.warning("Force flag for %s is unknown.", fs) - return '' + return "" def mkfs(fs_cfg): @@ -883,14 +934,14 @@ def mkfs(fs_cfg): When 'cmd' is provided, no other parameter is required.
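    For illustration only, a hypothetical fs_setup entry using the keys
    described above (values are made up, not taken from a real config)
    might look like:

        fs_cfg = {"label": "data", "filesystem": "ext4",
                  "device": "/dev/xvdb", "partition": "auto",
                  "overwrite": False}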
""" - label = fs_cfg.get('label') - device = fs_cfg.get('device') - partition = str(fs_cfg.get('partition', 'any')) - fs_type = fs_cfg.get('filesystem') - fs_cmd = fs_cfg.get('cmd', []) - fs_opts = fs_cfg.get('extra_opts', []) - fs_replace = fs_cfg.get('replace_fs', False) - overwrite = fs_cfg.get('overwrite', False) + label = fs_cfg.get("label") + device = fs_cfg.get("device") + partition = str(fs_cfg.get("partition", "any")) + fs_type = fs_cfg.get("filesystem") + fs_cmd = fs_cfg.get("cmd", []) + fs_opts = fs_cfg.get("extra_opts", []) + fs_replace = fs_cfg.get("replace_fs", False) + overwrite = fs_cfg.get("overwrite", False) # ensure that we get a real device rather than a symbolic link assert_and_settle_device(device) @@ -903,14 +954,19 @@ def mkfs(fs_cfg): # Handle manual definition of partition if partition.isdigit(): device = "%s%s" % (device, partition) - LOG.debug("Manual request of partition %s for %s", - partition, device) + LOG.debug( + "Manual request of partition %s for %s", partition, device + ) # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", - device, check_label, check_fstype) + LOG.debug( + "Device '%s' has check_label='%s' check_fstype=%s", + device, + check_label, + check_fstype, + ) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) @@ -924,19 +980,23 @@ def mkfs(fs_cfg): else: LOG.debug("Device %s is cleared for formating", device) - elif partition and str(partition).lower() in ('auto', 'any'): + elif partition and str(partition).lower() in ("auto", "any"): # For auto devices, we match if the filesystem does exist odevice = device LOG.debug("Identifying device to create %s filesytem on", label) # any mean pick the first match on the device with matching fs_type label_match = True - if partition.lower() == 'any': + if partition.lower() == "any": label_match = False - device, reuse = find_device_node(device, fs_type=fs_type, label=label, - label_match=label_match, - replace_fs=fs_replace) + device, reuse = find_device_node( + device, + fs_type=fs_type, + label=label, + label_match=label_match, + replace_fs=fs_replace, + ) LOG.debug("Automatic device for %s identified as %s", odevice, device) if reuse: @@ -947,18 +1007,25 @@ def mkfs(fs_cfg): LOG.debug("Replacing file system on %s as instructed.", device) if not device: - LOG.debug("No device aviable that matches request. " - "Skipping fs creation for %s", fs_cfg) + LOG.debug( + "No device aviable that matches request. " + "Skipping fs creation for %s", + fs_cfg, + ) return - elif not partition or str(partition).lower() == 'none': + elif not partition or str(partition).lower() == "none": LOG.debug("Using the raw device to place filesystem %s on", label) else: LOG.debug("Error in device identification handling.") return - LOG.debug("File system type '%s' with label '%s' will be created on %s", - fs_type, label, device) + LOG.debug( + "File system type '%s' with label '%s' will be created on %s", + fs_type, + label, + device, + ) # Make sure the device is defined if not device: @@ -969,26 +1036,29 @@ def mkfs(fs_cfg): if not (fs_type or fs_cmd): raise Exception( "No way to create filesystem '{label}'. 
fs_type or fs_cmd " - "must be set.".format(label=label)) + "must be set.".format(label=label) + ) # Create the commands shell = False if fs_cmd: - fs_cmd = fs_cfg['cmd'] % { - 'label': label, - 'filesystem': fs_type, - 'device': device, + fs_cmd = fs_cfg["cmd"] % { + "label": label, + "filesystem": fs_type, + "device": device, } shell = True if overwrite: LOG.warning( "fs_setup:overwrite ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) if fs_opts: LOG.warning( "fs_setup:extra_opts ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) else: # Find the mkfs command mkfs_cmd = subp.which("mkfs.%s" % fs_type) @@ -996,8 +1066,11 @@ def mkfs(fs_cfg): mkfs_cmd = subp.which("mk%s" % fs_type) if not mkfs_cmd: - LOG.warning("Cannot create fstype '%s'. No mkfs.%s command", - fs_type, fs_type) + LOG.warning( + "Cannot create fstype '%s'. No mkfs.%s command", + fs_type, + fs_type, + ) return fs_cmd = [mkfs_cmd, device] @@ -1022,4 +1095,5 @@ def mkfs(fs_cfg): except Exception as e: raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 40eee052..a928082b 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -24,12 +24,12 @@ user configuration should be required. import os from cloudinit import log as logging -from cloudinit.settings import PER_ALWAYS from cloudinit import subp +from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] LOG = logging.getLogger(__name__) @@ -39,15 +39,18 @@ def is_upstart_system(): return False myenv = os.environ.copy() - if 'UPSTART_SESSION' in myenv: - del myenv['UPSTART_SESSION'] - check_cmd = ['initctl', 'version'] + if "UPSTART_SESSION" in myenv: + del myenv["UPSTART_SESSION"] + check_cmd = ["initctl", "version"] try: (out, _err) = subp.subp(check_cmd, env=myenv) - return 'upstart' in out + return "upstart" in out except subp.ProcessExecutionError as e: - LOG.debug("'%s' returned '%s', not using upstart", - ' '.join(check_cmd), e.exit_code) + LOG.debug( + "'%s' returned '%s', not using upstart", + " ".join(check_cmd), + e.exit_code, + ) return False @@ -56,7 +59,7 @@ def handle(name, _cfg, cloud, log, args): if not event_names: # Default to the 'cloud-config' # event for backwards compat. - event_names = ['cloud-config'] + event_names = ["cloud-config"] if not is_upstart_system(): log.debug("not upstart system, '%s' disabled", name) @@ -64,11 +67,12 @@ def handle(name, _cfg, cloud, log, args): cfgpath = cloud.paths.get_ipath_cur("cloud_config") for n in event_names: - cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath] + cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath] try: subp.subp(cmd) except Exception as e: # TODO(harlowja), use log exception from utils?? 
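            # Emission failures are deliberately non-fatal here: a missing
            # or broken initctl only produces a warning rather than
            # aborting the rest of boot.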
log.warning("Emission of upstart event %s failed due to: %s", n, e) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py index 91f50e22..50a81744 100644 --- a/cloudinit/config/cc_fan.py +++ b/cloudinit/config/cc_fan.py @@ -38,60 +38,62 @@ If cloud-init sees a ``fan`` entry in cloud-config it will: """ from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util LOG = logging.getLogger(__name__) frequency = PER_INSTANCE BUILTIN_CFG = { - 'config': None, - 'config_path': '/etc/network/fan', + "config": None, + "config_path": "/etc/network/fan", } def stop_update_start(distro, service, config_file, content): try: - distro.manage_service('stop', service) + distro.manage_service("stop", service) stop_failed = False except subp.ProcessExecutionError as e: stop_failed = True LOG.warning("failed to stop %s: %s", service, e) - if not content.endswith('\n'): - content += '\n' + if not content.endswith("\n"): + content += "\n" util.write_file(config_file, content, omode="w") try: - distro.manage_service('start', service) + distro.manage_service("start", service) if stop_failed: LOG.warning("success: %s started", service) except subp.ProcessExecutionError as e: LOG.warning("failed to start %s: %s", service, e) - distro.manage_service('enable', service) + distro.manage_service("enable", service) def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('fan') + cfgin = cfg.get("fan") if not cfgin: cfgin = {} mycfg = util.mergemanydict([cfgin, BUILTIN_CFG]) - if not mycfg.get('config'): + if not mycfg.get("config"): LOG.debug("%s: no 'fan' config entry. disabling", name) return - util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w") + util.write_file(mycfg.get("config_path"), mycfg.get("config"), omode="w") distro = cloud.distro - if not subp.which('fanctl'): - distro.install_packages(['ubuntu-fan']) + if not subp.which("fanctl"): + distro.install_packages(["ubuntu-fan"]) stop_update_start( distro, - service='ubuntu-fan', config_file=mycfg.get('config_path'), - content=mycfg.get('config')) + service="ubuntu-fan", + config_file=mycfg.get("config_path"), + content=mycfg.get("config"), + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index 4fa5297e..f443ccd8 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -31,10 +31,7 @@ specified as a jinja template with the following variables set: """ -from cloudinit import templater -from cloudinit import util -from cloudinit import version - +from cloudinit import templater, util, version from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS @@ -49,7 +46,7 @@ FINAL_MESSAGE_DEF = ( def handle(_name, cfg, cloud, log, args): - msg_in = '' + msg_in = "" if len(args) != 0: msg_in = str(args[0]) else: @@ -64,14 +61,18 @@ def handle(_name, cfg, cloud, log, args): cver = version.version_string() try: subs = { - 'uptime': uptime, - 'timestamp': ts, - 'version': cver, - 'datasource': str(cloud.datasource), + "uptime": uptime, + "timestamp": ts, + "version": cver, + "datasource": str(cloud.datasource), } subs.update(dict([(k.upper(), v) for k, v in subs.items()])) - util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), - console=False, stderr=True, log=log) + util.multi_log( + "%s\n" % (templater.render_string(msg_in, subs)), + console=False, + stderr=True, + log=log, + ) 
except Exception: util.logexc(log, "Failed to render final message template") @@ -85,4 +86,5 @@ def handle(_name, cfg, cloud, log, args): if cloud.datasource.is_disconnected: log.warning("Used fallback datasource") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py index 924b967c..3c307153 100644 --- a/cloudinit/config/cc_foo.py +++ b/cloudinit/config/cc_foo.py @@ -53,4 +53,5 @@ frequency = PER_INSTANCE def handle(name, _cfg, _cloud, log, _args): log.debug("Hi from module %s", name) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 1ddc9dc7..43334caa 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -70,17 +70,15 @@ import re import stat from cloudinit import log as logging +from cloudinit import subp, temp_utils, util from cloudinit.settings import PER_ALWAYS -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util frequency = PER_ALWAYS DEFAULT_CONFIG = { - 'mode': 'auto', - 'devices': ['/'], - 'ignore_growroot_disabled': False, + "mode": "auto", + "devices": ["/"], + "ignore_growroot_disabled": False, } @@ -131,7 +129,7 @@ class ResizeFailedException(Exception): class ResizeGrowPart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (out, _err) = subp.subp(["growpart", "--help"], env=myenv) @@ -144,7 +142,7 @@ class ResizeGrowPart(object): def resize(self, diskdev, partnum, partdev): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" before = get_size(partdev) # growpart uses tmp dir to store intermediate states @@ -153,14 +151,19 @@ class ResizeGrowPart(object): growpart_tmp = os.path.join(tmpd, "growpart") if not os.path.exists(growpart_tmp): os.mkdir(growpart_tmp, 0o700) - myenv['TMPDIR'] = growpart_tmp + myenv["TMPDIR"] = growpart_tmp try: - subp.subp(["growpart", '--dry-run', diskdev, partnum], - env=myenv) + subp.subp( + ["growpart", "--dry-run", diskdev, partnum], env=myenv + ) except subp.ProcessExecutionError as e: if e.exit_code != 1: - util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", - diskdev, partnum) + util.logexc( + LOG, + "Failed growpart --dry-run for (%s, %s)", + diskdev, + partnum, + ) raise ResizeFailedException(e) from e return (before, before) @@ -176,7 +179,7 @@ class ResizeGrowPart(object): class ResizeGpart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1]) @@ -234,11 +237,11 @@ def device_part_info(devpath): # the device, like /dev/vtbd0p2. if util.is_FreeBSD(): freebsd_part = "/dev/" + util.find_freebsd_part(devpath) - m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) + m = re.search("^(/dev/.+)p([0-9])$", freebsd_part) return (m.group(1), m.group(2)) elif util.is_DragonFlyBSD(): dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath) - m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part) + m = re.search("^(/dev/.+)s([0-9])$", dragonflybsd_part) return (m.group(1), m.group(2)) if not os.path.exists(syspath): @@ -275,7 +278,7 @@ def devent2dev(devent): container = util.is_container() # Ensure the path is a block device. 
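    # "/dev/root" typically has no real device node outside of a
    # container, so it is resolved to the actual boot device recorded on
    # the kernel command line before being stat'ed.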
- if (dev == "/dev/root" and not container): + if dev == "/dev/root" and not container: dev = util.rootdev_from_cmdline(util.get_cmdline()) if dev is None: if os.path.exists(dev): @@ -293,65 +296,102 @@ def resize_devices(resizer, devices): try: blockdev = devent2dev(devent) except ValueError as e: - info.append((devent, RESIZE.SKIPPED, - "unable to convert to device: %s" % e,)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "unable to convert to device: %s" % e, + ) + ) continue try: statret = os.stat(blockdev) except OSError as e: - info.append((devent, RESIZE.SKIPPED, - "stat of '%s' failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "stat of '%s' failed: %s" % (blockdev, e), + ) + ) continue - if (not stat.S_ISBLK(statret.st_mode) and - not stat.S_ISCHR(statret.st_mode)): - info.append((devent, RESIZE.SKIPPED, - "device '%s' not a block device" % blockdev,)) + if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR( + statret.st_mode + ): + info.append( + ( + devent, + RESIZE.SKIPPED, + "device '%s' not a block device" % blockdev, + ) + ) continue try: (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: - info.append((devent, RESIZE.SKIPPED, - "device_part_info(%s) failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "device_part_info(%s) failed: %s" % (blockdev, e), + ) + ) continue try: (old, new) = resizer.resize(disk, ptnum, blockdev) if old == new: - info.append((devent, RESIZE.NOCHANGE, - "no change necessary (%s, %s)" % (disk, ptnum),)) + info.append( + ( + devent, + RESIZE.NOCHANGE, + "no change necessary (%s, %s)" % (disk, ptnum), + ) + ) else: - info.append((devent, RESIZE.CHANGED, - "changed (%s, %s) from %s to %s" % - (disk, ptnum, old, new),)) + info.append( + ( + devent, + RESIZE.CHANGED, + "changed (%s, %s) from %s to %s" + % (disk, ptnum, old, new), + ) + ) except ResizeFailedException as e: - info.append((devent, RESIZE.FAILED, - "failed to resize: disk=%s, ptnum=%s: %s" % - (disk, ptnum, e),)) + info.append( + ( + devent, + RESIZE.FAILED, + "failed to resize: disk=%s, ptnum=%s: %s" + % (disk, ptnum, e), + ) + ) return info def handle(_name, cfg, _cloud, log, _args): - if 'growpart' not in cfg: - log.debug("No 'growpart' entry in cfg. Using default: %s" % - DEFAULT_CONFIG) - cfg['growpart'] = DEFAULT_CONFIG + if "growpart" not in cfg: + log.debug( + "No 'growpart' entry in cfg. 
Using default: %s" % DEFAULT_CONFIG + ) + cfg["growpart"] = DEFAULT_CONFIG - mycfg = cfg.get('growpart') + mycfg = cfg.get("growpart") if not isinstance(mycfg, dict): log.warning("'growpart' in config was not a dict") return - mode = mycfg.get('mode', "auto") + mode = mycfg.get("mode", "auto") if util.is_false(mode): log.debug("growpart disabled: mode=%s" % mode) return - if util.is_false(mycfg.get('ignore_growroot_disabled', False)): + if util.is_false(mycfg.get("ignore_growroot_disabled", False)): if os.path.isfile("/etc/growroot-disabled"): log.debug("growpart disabled: /etc/growroot-disabled exists") log.debug("use ignore_growroot_disabled to ignore") @@ -370,8 +410,12 @@ def handle(_name, cfg, _cloud, log, _args): raise e return - resized = util.log_time(logfunc=log.debug, msg="resize_devices", - func=resize_devices, args=(resizer, devices)) + resized = util.log_time( + logfunc=log.debug, + msg="resize_devices", + func=resize_devices, + args=(resizer, devices), + ) for (entry, action, msg) in resized: if action == RESIZE.CHANGED: log.info("'%s' resized: %s" % (entry, msg)) @@ -379,6 +423,6 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("'%s' %s: %s" % (entry, action, msg)) -RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart)) +RESIZERS = (("growpart", ResizeGrowPart), ("gpart", ResizeGpart)) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index eb03c664..ad7243d9 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -43,11 +43,10 @@ seeded with empty values, and install_devices_empty is set to true. import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.subp import ProcessExecutionError -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def fetch_idevs(log): @@ -60,8 +59,9 @@ def fetch_idevs(log): try: # get the root disk where the /boot directory resides. 
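        # e.g. "grub-probe -t disk /boot" might print "/dev/sda"
        # (illustrative output only; the actual disk varies per system)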
- disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'], - capture=True)[0].strip() + disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[ + 0 + ].strip() except ProcessExecutionError as e: # grub-common may not be installed, especially on containers # FileNotFoundError is a nested exception of ProcessExecutionError @@ -81,26 +81,30 @@ def fetch_idevs(log): if not disk or not os.path.exists(disk): # If we failed to detect a disk, we can return early - return '' + return "" try: # check if disk exists and use udevadm to fetch symlinks - devices = subp.subp( - ['udevadm', 'info', '--root', '--query=symlink', disk], - capture=True - )[0].strip().split() + devices = ( + subp.subp( + ["udevadm", "info", "--root", "--query=symlink", disk], + capture=True, + )[0] + .strip() + .split() + ) except Exception: util.logexc( log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk ) - log.debug('considering these device symlinks: %s', ','.join(devices)) + log.debug("considering these device symlinks: %s", ",".join(devices)) # filter symlinks for /dev/disk/by-id entries - devices = [dev for dev in devices if 'disk/by-id' in dev] - log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices)) + devices = [dev for dev in devices if "disk/by-id" in dev] + log.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices)) # select first device if there is one, else fall back to plain name idevs = sorted(devices)[0] if devices else disk - log.debug('selected %s', idevs) + log.debug("selected %s", idevs) return idevs @@ -111,14 +115,15 @@ def handle(name, cfg, _cloud, log, _args): if not mycfg: mycfg = {} - enabled = mycfg.get('enabled', True) + enabled = mycfg.get("enabled", True) if util.is_false(enabled): log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled) return idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None) idevs_empty = util.get_cfg_option_str( - mycfg, "grub-pc/install_devices_empty", None) + mycfg, "grub-pc/install_devices_empty", None + ) if idevs is None: idevs = fetch_idevs(log) @@ -128,16 +133,21 @@ def handle(name, cfg, _cloud, log, _args): # now idevs and idevs_empty are set to determined values # or, those set by user - dconf_sel = (("grub-pc grub-pc/install_devices string %s\n" - "grub-pc grub-pc/install_devices_empty boolean %s\n") % - (idevs, idevs_empty)) + dconf_sel = ( + "grub-pc grub-pc/install_devices string %s\n" + "grub-pc grub-pc/install_devices_empty boolean %s\n" + % (idevs, idevs_empty) + ) - log.debug("Setting grub debconf-set-selections with '%s','%s'" % - (idevs, idevs_empty)) + log.debug( + "Setting grub debconf-set-selections with '%s','%s'" + % (idevs, idevs_empty) + ) try: - subp.subp(['debconf-set-selections'], dconf_sel) + subp.subp(["debconf-set-selections"], dconf_sel) except Exception: util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py index 9b4075cc..952d9f13 100644 --- a/cloudinit/config/cc_install_hotplug.py +++ b/cloudinit/config/cc_install_hotplug.py @@ -3,15 +3,12 @@ import os from textwrap import dedent -from cloudinit import util -from cloudinit import subp -from cloudinit import stages +from cloudinit import stages, subp, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS -from cloudinit.event import EventType, EventScope +from cloudinit.event import EventScope, 
EventType from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE distros = [ALL_DISTROS] @@ -19,7 +16,8 @@ meta = { "id": "cc_install_hotplug", "name": "Install Hotplug", "title": "Install hotplug if supported and enabled", - "description": dedent("""\ + "description": dedent( + """\ This module will install the udev rules to enable hotplug if supported by the datasource and enabled in the userdata. The udev rules will be installed as @@ -32,21 +30,26 @@ meta = { network configuration. Currently supported datasources: Openstack, EC2 - """), + """ + ), "distros": distros, "examples": [ - dedent("""\ + dedent( + """\ # Enable hotplug of network devices updates: network: when: ["hotplug"] - """), - dedent("""\ + """ + ), + dedent( + """\ # Enable network hotplug alongside boot event updates: network: when: ["boot", "hotplug"] - """), + """ + ), ], "frequency": frequency, } @@ -74,14 +77,14 @@ schema = { "boot-legacy", "boot", "hotplug", - ] - } + ], + }, } - } + }, } - } + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -100,14 +103,15 @@ LABEL="cloudinit_end" def handle(_name, cfg, cloud, log, _args): validate_cloudconfig_schema(cfg, schema) network_hotplug_enabled = ( - 'updates' in cfg and - 'network' in cfg['updates'] and - 'when' in cfg['updates']['network'] and - 'hotplug' in cfg['updates']['network']['when'] + "updates" in cfg + and "network" in cfg["updates"] + and "when" in cfg["updates"]["network"] + and "hotplug" in cfg["updates"]["network"]["when"] ) hotplug_supported = EventType.HOTPLUG in ( - cloud.datasource.get_supported_events( - [EventType.HOTPLUG]).get(EventScope.NETWORK, set()) + cloud.datasource.get_supported_events([EventType.HOTPLUG]).get( + EventScope.NETWORK, set() + ) ) hotplug_enabled = stages.update_event_enabled( datasource=cloud.datasource, diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index d72b5244..ab35e136 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -38,49 +38,53 @@ host keys are not written to console. 
import os +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE # This is a tool that cloud init provides -HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints' +HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints" def _get_helper_tool_path(distro): try: base_lib = distro.usr_lib_exec except AttributeError: - base_lib = '/usr/lib' + base_lib = "/usr/lib" return HELPER_TOOL_TPL % base_lib def handle(name, cfg, cloud, log, _args): if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)): - log.debug(("Skipping module named %s, " - "logging of SSH host keys disabled"), name) + log.debug( + "Skipping module named %s, logging of SSH host keys disabled", name + ) return helper_path = _get_helper_tool_path(cloud.distro) if not os.path.exists(helper_path): - log.warning(("Unable to activate module %s," - " helper tool not found at %s"), name, helper_path) + log.warning( + "Unable to activate module %s, helper tool not found at %s", + name, + helper_path, + ) return - fp_blacklist = util.get_cfg_option_list(cfg, - "ssh_fp_console_blacklist", []) - key_blacklist = util.get_cfg_option_list(cfg, - "ssh_key_console_blacklist", - ["ssh-dss"]) + fp_blacklist = util.get_cfg_option_list( + cfg, "ssh_fp_console_blacklist", [] + ) + key_blacklist = util.get_cfg_option_list( + cfg, "ssh_key_console_blacklist", ["ssh-dss"] + ) try: - cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)] + cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)] (stdout, _stderr) = subp.subp(cmd) - util.multi_log("%s\n" % (stdout.strip()), - stderr=False, console=True) + util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) except Exception: log.warning("Writing keys to the system console failed!") raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 299c4d01..03ebf411 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -60,10 +60,7 @@ from io import BytesIO from configobj import ConfigObj -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, type_utils, util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE @@ -71,15 +68,15 @@ frequency = PER_INSTANCE LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf" LS_DEFAULT_FILE = "/etc/default/landscape-client" -distros = ['ubuntu'] +distros = ["ubuntu"] # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 LSC_BUILTIN_CFG = { - 'client': { - 'log_level': "info", - 'url': "https://landscape.canonical.com/message-system", - 'ping_url': "http://landscape.canonical.com/ping", - 'data_path': "/var/lib/landscape/client", + "client": { + "log_level": "info", + "url": "https://landscape.canonical.com/message-system", + "ping_url": "http://landscape.canonical.com/ping", + "data_path": "/var/lib/landscape/client", } } @@ -97,11 +94,13 @@ def handle(_name, cfg, cloud, log, _args): raise RuntimeError( "'landscape' key existed in config, but not a dictionary type," " is a {_type} instead".format( - _type=type_utils.obj_name(ls_cloudcfg))) + _type=type_utils.obj_name(ls_cloudcfg) + ) + ) if not ls_cloudcfg: return - cloud.distro.install_packages(('landscape-client',)) + cloud.distro.install_packages(("landscape-client",)) merge_data = [ LSC_BUILTIN_CFG, @@ -135,4 +134,5 @@ def merge_together(objs): 
cfg.merge(ConfigObj(obj)) return cfg + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index 7fed9abd..487f58f7 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -14,45 +14,48 @@ from cloudinit import util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_locale', - 'name': 'Locale', - 'title': 'Set system locale', - 'description': dedent( + "id": "cc_locale", + "name": "Locale", + "title": "Set system locale", + "description": dedent( """\ Configure the system locale and apply it system wide. By default use the locale specified by the datasource.""" ), - 'distros': distros, - 'examples': [ - dedent("""\ + "distros": distros, + "examples": [ + dedent( + """\ # Set the locale to ar_AE locale: ar_AE - """), - dedent("""\ + """ + ), + dedent( + """\ # Set the locale to fr_CA in /etc/alternate_path/locale locale: fr_CA locale_configfile: /etc/alternate_path/locale - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'locale': { - 'type': 'string', - 'description': ( + "type": "object", + "properties": { + "locale": { + "type": "string", + "description": ( "The locale to set as the system's locale (e.g. ar_PS)" ), }, - 'locale_configfile': { - 'type': 'string', - 'description': ( + "locale_configfile": { + "type": "string", + "description": ( "The file in which to write the locale configuration (defaults" " to the distro's default location)" ), @@ -70,8 +73,9 @@ def handle(name, cfg, cloud, log, args): locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale()) if util.is_false(locale): - log.debug("Skipping module named %s, disabled by config: %s", - name, locale) + log.debug( + "Skipping module named %s, disabled by config: %s", name, locale + ) return validate_cloudconfig_schema(cfg, schema) @@ -80,4 +84,5 @@ def handle(name, cfg, cloud, log, args): locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile") cloud.distro.apply_locale(locale, locale_cfgfile) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 486037d9..13ddcbe9 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -47,12 +47,12 @@ lxd-bridge will be configured accordingly. domain: """ -from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util import os -distros = ['ubuntu'] +from cloudinit import log as logging +from cloudinit import subp, util + +distros = ["ubuntu"] LOG = logging.getLogger(__name__) @@ -61,36 +61,42 @@ _DEFAULT_NETWORK_NAME = "lxdbr0" def handle(name, cfg, cloud, log, args): # Get config - lxd_cfg = cfg.get('lxd') + lxd_cfg = cfg.get("lxd") if not lxd_cfg: - log.debug("Skipping module named %s, not present or disabled by cfg", - name) + log.debug( + "Skipping module named %s, not present or disabled by cfg", name + ) return if not isinstance(lxd_cfg, dict): - log.warning("lxd config must be a dictionary. found a '%s'", - type(lxd_cfg)) + log.warning( + "lxd config must be a dictionary. found a '%s'", type(lxd_cfg) + ) return # Grab the configuration - init_cfg = lxd_cfg.get('init') + init_cfg = lxd_cfg.get("init") if not isinstance(init_cfg, dict): - log.warning("lxd/init config must be a dictionary. 
found a '%s'", - type(init_cfg)) + log.warning( + "lxd/init config must be a dictionary. found a '%s'", + type(init_cfg), + ) init_cfg = {} - bridge_cfg = lxd_cfg.get('bridge', {}) + bridge_cfg = lxd_cfg.get("bridge", {}) if not isinstance(bridge_cfg, dict): - log.warning("lxd/bridge config must be a dictionary. found a '%s'", - type(bridge_cfg)) + log.warning( + "lxd/bridge config must be a dictionary. found a '%s'", + type(bridge_cfg), + ) bridge_cfg = {} # Install the needed packages packages = [] if not subp.which("lxd"): - packages.append('lxd') + packages.append("lxd") - if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'): - packages.append('zfsutils-linux') + if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"): + packages.append("zfsutils-linux") if len(packages): try: @@ -102,23 +108,30 @@ def handle(name, cfg, cloud, log, args): # Set up lxd if init config is given if init_cfg: init_keys = ( - 'network_address', 'network_port', 'storage_backend', - 'storage_create_device', 'storage_create_loop', - 'storage_pool', 'trust_password') - subp.subp(['lxd', 'waitready', '--timeout=300']) - cmd = ['lxd', 'init', '--auto'] + "network_address", + "network_port", + "storage_backend", + "storage_create_device", + "storage_create_loop", + "storage_pool", + "trust_password", + ) + subp.subp(["lxd", "waitready", "--timeout=300"]) + cmd = ["lxd", "init", "--auto"] for k in init_keys: if init_cfg.get(k): - cmd.extend(["--%s=%s" % - (k.replace('_', '-'), str(init_cfg[k]))]) + cmd.extend( + ["--%s=%s" % (k.replace("_", "-"), str(init_cfg[k]))] + ) subp.subp(cmd) # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" if bridge_cfg: net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) - if os.path.exists("/etc/default/lxd-bridge") \ - and subp.which(dconf_comm): + if os.path.exists("/etc/default/lxd-bridge") and subp.which( + dconf_comm + ): # Bridge configured through packaging debconf = bridge_to_debconf(bridge_cfg) @@ -126,39 +139,47 @@ def handle(name, cfg, cloud, log, args): # Update debconf database try: log.debug("Setting lxd debconf via " + dconf_comm) - data = "\n".join(["set %s %s" % (k, v) - for k, v in debconf.items()]) + "\n" - subp.subp(['debconf-communicate'], data) + data = ( + "\n".join( + ["set %s %s" % (k, v) for k, v in debconf.items()] + ) + + "\n" + ) + subp.subp(["debconf-communicate"], data) except Exception: - util.logexc(log, "Failed to run '%s' for lxd with" % - dconf_comm) + util.logexc( + log, "Failed to run '%s' for lxd with" % dconf_comm + ) # Remove the existing configuration file (forces re-generation) util.del_file("/etc/default/lxd-bridge") # Run reconfigure log.debug("Running dpkg-reconfigure for lxd") - subp.subp(['dpkg-reconfigure', 'lxd', - '--frontend=noninteractive']) + subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"]) else: # Built-in LXD bridge support cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) maybe_cleanup_default( - net_name=net_name, did_init=bool(init_cfg), - create=bool(cmd_create), attach=bool(cmd_attach)) + net_name=net_name, + did_init=bool(init_cfg), + create=bool(cmd_create), + attach=bool(cmd_attach), + ) if cmd_create: - log.debug("Creating lxd bridge: %s" % - " ".join(cmd_create)) + log.debug("Creating lxd bridge: %s" % " ".join(cmd_create)) _lxc(cmd_create) if cmd_attach: - log.debug("Setting up default lxd bridge: %s" % - " ".join(cmd_attach)) + log.debug( + "Setting up default lxd bridge: %s" % " ".join(cmd_attach) + ) _lxc(cmd_attach) elif bridge_cfg: 
raise RuntimeError( - "Unable to configure lxd bridge without %s." + dconf_comm) + "Unable to configure lxd bridge without %s." + dconf_comm + ) def bridge_to_debconf(bridge_cfg): @@ -180,33 +201,32 @@ def bridge_to_debconf(bridge_cfg): if bridge_cfg.get("ipv4_address"): debconf["lxd/bridge-ipv4"] = "true" - debconf["lxd/bridge-ipv4-address"] = \ - bridge_cfg.get("ipv4_address") - debconf["lxd/bridge-ipv4-netmask"] = \ - bridge_cfg.get("ipv4_netmask") - debconf["lxd/bridge-ipv4-dhcp-first"] = \ - bridge_cfg.get("ipv4_dhcp_first") - debconf["lxd/bridge-ipv4-dhcp-last"] = \ - bridge_cfg.get("ipv4_dhcp_last") - debconf["lxd/bridge-ipv4-dhcp-leases"] = \ - bridge_cfg.get("ipv4_dhcp_leases") - debconf["lxd/bridge-ipv4-nat"] = \ - bridge_cfg.get("ipv4_nat", "true") + debconf["lxd/bridge-ipv4-address"] = bridge_cfg.get("ipv4_address") + debconf["lxd/bridge-ipv4-netmask"] = bridge_cfg.get("ipv4_netmask") + debconf["lxd/bridge-ipv4-dhcp-first"] = bridge_cfg.get( + "ipv4_dhcp_first" + ) + debconf["lxd/bridge-ipv4-dhcp-last"] = bridge_cfg.get( + "ipv4_dhcp_last" + ) + debconf["lxd/bridge-ipv4-dhcp-leases"] = bridge_cfg.get( + "ipv4_dhcp_leases" + ) + debconf["lxd/bridge-ipv4-nat"] = bridge_cfg.get("ipv4_nat", "true") if bridge_cfg.get("ipv6_address"): debconf["lxd/bridge-ipv6"] = "true" - debconf["lxd/bridge-ipv6-address"] = \ - bridge_cfg.get("ipv6_address") - debconf["lxd/bridge-ipv6-netmask"] = \ - bridge_cfg.get("ipv6_netmask") - debconf["lxd/bridge-ipv6-nat"] = \ - bridge_cfg.get("ipv6_nat", "false") + debconf["lxd/bridge-ipv6-address"] = bridge_cfg.get("ipv6_address") + debconf["lxd/bridge-ipv6-netmask"] = bridge_cfg.get("ipv6_netmask") + debconf["lxd/bridge-ipv6-nat"] = bridge_cfg.get( + "ipv6_nat", "false" + ) if bridge_cfg.get("domain"): debconf["lxd/bridge-domain"] = bridge_cfg.get("domain") else: - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) return debconf @@ -217,37 +237,41 @@ def bridge_to_cmd(bridge_cfg): bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) cmd_create = [] - cmd_attach = ["network", "attach-profile", bridge_name, - "default", "eth0"] + cmd_attach = ["network", "attach-profile", bridge_name, "default", "eth0"] if bridge_cfg.get("mode") == "existing": return None, cmd_attach if bridge_cfg.get("mode") != "new": - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) cmd_create = ["network", "create", bridge_name] if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): - cmd_create.append("ipv4.address=%s/%s" % - (bridge_cfg.get("ipv4_address"), - bridge_cfg.get("ipv4_netmask"))) + cmd_create.append( + "ipv4.address=%s/%s" + % (bridge_cfg.get("ipv4_address"), bridge_cfg.get("ipv4_netmask")) + ) if bridge_cfg.get("ipv4_nat", "true") == "true": cmd_create.append("ipv4.nat=true") - if bridge_cfg.get("ipv4_dhcp_first") and \ - bridge_cfg.get("ipv4_dhcp_last"): - dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"), - bridge_cfg.get("ipv4_dhcp_last")) + if bridge_cfg.get("ipv4_dhcp_first") and bridge_cfg.get( + "ipv4_dhcp_last" + ): + dhcp_range = "%s-%s" % ( + bridge_cfg.get("ipv4_dhcp_first"), + bridge_cfg.get("ipv4_dhcp_last"), + ) cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range) else: cmd_create.append("ipv4.address=none") if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"): - cmd_create.append("ipv6.address=%s/%s" % - 
(bridge_cfg.get("ipv6_address"), - bridge_cfg.get("ipv6_netmask"))) + cmd_create.append( + "ipv6.address=%s/%s" + % (bridge_cfg.get("ipv6_address"), bridge_cfg.get("ipv6_netmask")) + ) if bridge_cfg.get("ipv6_nat", "false") == "true": cmd_create.append("ipv6.nat=true") @@ -262,14 +286,17 @@ def bridge_to_cmd(bridge_cfg): def _lxc(cmd): - env = {'LC_ALL': 'C', - 'HOME': os.environ.get('HOME', '/root'), - 'USER': os.environ.get('USER', 'root')} - subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) + env = { + "LC_ALL": "C", + "HOME": os.environ.get("HOME", "/root"), + "USER": os.environ.get("USER", "root"), + } + subp.subp(["lxc"] + list(cmd) + ["--force-local"], update_env=env) -def maybe_cleanup_default(net_name, did_init, create, attach, - profile="default", nic_name="eth0"): +def maybe_cleanup_default( + net_name, did_init, create, attach, profile="default", nic_name="eth0" +): """Newer versions of lxc (3.0.1+) create a lxdbr0 network when 'lxd init --auto' is run. Older versions did not. @@ -306,4 +333,5 @@ def maybe_cleanup_default(net_name, did_init, create, attach, raise e LOG.debug(msg, nic_name, profile, fail_assume_enoent) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index 41ea4fc9..1b0158ec 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -56,18 +56,21 @@ import io from configobj import ConfigObj from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" -SERVER_CFG = '/etc/mcollective/server.cfg' +SERVER_CFG = "/etc/mcollective/server.cfg" LOG = logging.getLogger(__name__) -def configure(config, server_cfg=SERVER_CFG, - pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE): +def configure( + config, + server_cfg=SERVER_CFG, + pubcert_file=PUBCERT_FILE, + pricert_file=PRICERT_FILE, +): # Read server.cfg (if it exists) values from the # original file in order to be able to mix the rest up. 
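    # ConfigObj gives a dict-like view of the key=value server.cfg, so
    # settings assigned below replace matching keys while unrelated
    # existing options are carried over when the file is written back.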
try: @@ -77,20 +80,20 @@ def configure(config, server_cfg=SERVER_CFG, if e.errno != errno.ENOENT: raise else: - LOG.debug("Did not find file %s (starting with an empty" - " config)", server_cfg) + LOG.debug( + "Did not find file %s (starting with an empty config)", + server_cfg, + ) mcollective_config = ConfigObj() for (cfg_name, cfg) in config.items(): - if cfg_name == 'public-cert': + if cfg_name == "public-cert": util.write_file(pubcert_file, cfg, mode=0o644) - mcollective_config[ - 'plugin.ssl_server_public'] = pubcert_file - mcollective_config['securityprovider'] = 'ssl' - elif cfg_name == 'private-cert': + mcollective_config["plugin.ssl_server_public"] = pubcert_file + mcollective_config["securityprovider"] = "ssl" + elif cfg_name == "private-cert": util.write_file(pricert_file, cfg, mode=0o600) - mcollective_config[ - 'plugin.ssl_server_private'] = pricert_file - mcollective_config['securityprovider'] = 'ssl' + mcollective_config["plugin.ssl_server_private"] = pricert_file + mcollective_config["securityprovider"] = "ssl" else: if isinstance(cfg, str): # Just set it in the 'main' section @@ -126,21 +129,24 @@ def configure(config, server_cfg=SERVER_CFG, def handle(name, cfg, cloud, log, _args): # If there isn't a mcollective key in the configuration don't do anything - if 'mcollective' not in cfg: - log.debug(("Skipping module named %s, " - "no 'mcollective' key in configuration"), name) + if "mcollective" not in cfg: + log.debug( + "Skipping module named %s, no 'mcollective' key in configuration", + name, + ) return - mcollective_cfg = cfg['mcollective'] + mcollective_cfg = cfg["mcollective"] # Start by installing the mcollective package ... cloud.distro.install_packages(("mcollective",)) # ... and then update the mcollective configuration - if 'conf' in mcollective_cfg: - configure(config=mcollective_cfg['conf']) + if "conf" in mcollective_cfg: + configure(config=mcollective_cfg["conf"]) # restart mcollective to handle updated config - subp.subp(['service', 'mcollective', 'restart'], capture=False) + subp.subp(["service", "mcollective", "restart"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 79bcc27d..4fafb4af 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -29,16 +29,14 @@ false`` in config. 
import os import shutil -from cloudinit import helpers -from cloudinit import util - +from cloudinit import helpers, util from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS def _migrate_canon_sems(cloud): - paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) + paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) am_adjusted = 0 for sem_path in paths: if not sem_path or not os.path.exists(sem_path): @@ -57,12 +55,12 @@ def _migrate_canon_sems(cloud): def _migrate_legacy_sems(cloud, log): legacy_adjust = { - 'apt-update-upgrade': [ - 'apt-configure', - 'package-update-upgrade-install', + "apt-update-upgrade": [ + "apt-configure", + "package-update-upgrade-install", ], } - paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) + paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) for sem_path in paths: if not sem_path or not os.path.exists(sem_path): continue @@ -78,8 +76,9 @@ def _migrate_legacy_sems(cloud, log): util.del_file(os.path.join(sem_path, p)) (_name, freq) = os.path.splitext(p) for m in migrate_to: - log.debug("Migrating %s => %s with the same frequency", - p, m) + log.debug( + "Migrating %s => %s with the same frequency", p, m + ) with sem_helper.lock(m, freq): pass @@ -90,8 +89,10 @@ def handle(name, cfg, cloud, log, _args): log.debug("Skipping module named %s, migration disabled", name) return sems_moved = _migrate_canon_sems(cloud) - log.debug("Migrated %s semaphore files to their canonicalized names", - sems_moved) + log.debug( + "Migrated %s semaphore files to their canonicalized names", sems_moved + ) _migrate_legacy_sems(cloud, log) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index eeb008d2..ec2e46ff 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -62,15 +62,12 @@ swap file is created.
maxsize: """ -from string import whitespace - import logging import os import re +from string import whitespace -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, type_utils, util # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0 DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" @@ -105,21 +102,25 @@ def is_network_device(name): def _get_nth_partition_for_device(device_path, partition_number): - potential_suffixes = [str(partition_number), 'p%s' % (partition_number,), - '-part%s' % (partition_number,)] + potential_suffixes = [ + str(partition_number), + "p%s" % (partition_number,), + "-part%s" % (partition_number,), + ] for suffix in potential_suffixes: - potential_partition_device = '%s%s' % (device_path, suffix) + potential_partition_device = "%s%s" % (device_path, suffix) if os.path.exists(potential_partition_device): return potential_partition_device return None def _is_block_device(device_path, partition_path=None): - device_name = os.path.realpath(device_path).split('/')[-1] - sys_path = os.path.join('/sys/block/', device_name) + device_name = os.path.realpath(device_path).split("/")[-1] + sys_path = os.path.join("/sys/block/", device_name) if partition_path is not None: sys_path = os.path.join( - sys_path, os.path.realpath(partition_path).split('/')[-1]) + sys_path, os.path.realpath(partition_path).split("/")[-1] + ) return os.path.exists(sys_path) @@ -159,8 +160,9 @@ def sanitize_devname(startname, transformer, log, aliases=None): if partition_number is None: partition_path = _get_nth_partition_for_device(device_path, 1) else: - partition_path = _get_nth_partition_for_device(device_path, - partition_number) + partition_path = _get_nth_partition_for_device( + device_path, partition_number + ) if partition_path is None: return None @@ -174,12 +176,12 @@ def sanitize_devname(startname, transformer, log, aliases=None): def suggested_swapsize(memsize=None, maxsize=None, fsys=None): # make a suggestion on the size of swap for this system. 
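    # Illustrative outcome (hypothetical numbers): with 512 MiB of memory,
    # the "< 1G: swap = double memory" rule below suggests 1 GiB of swap;
    # suggestions are further capped by maxsize and the 8 GiB default
    # ceiling (sugg_max).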
if memsize is None: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] GB = 2 ** 30 sugg_max = 8 * GB - info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize} + info = {"avail": "na", "max_in": maxsize, "mem": memsize} if fsys is None and maxsize is None: # set max to 8GB default if no filesystem given @@ -187,18 +189,18 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): elif fsys: statvfs = os.statvfs(fsys) avail = statvfs.f_frsize * statvfs.f_bfree - info['avail'] = avail + info["avail"] = avail if maxsize is None: # set to 25% of filesystem space maxsize = min(int(avail / 4), sugg_max) - elif maxsize > ((avail * .9)): + elif maxsize > ((avail * 0.9)): # set to 90% of available disk space - maxsize = int(avail * .9) + maxsize = int(avail * 0.9) elif maxsize is None: maxsize = sugg_max - info['max'] = maxsize + info["max"] = maxsize formulas = [ # < 1G: swap = double memory @@ -226,7 +228,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): if size is not None: size = maxsize - info['size'] = size + info["size"] = size MB = 2 ** 20 pinfo = {} @@ -236,9 +238,14 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): else: pinfo[k] = v - LOG.debug("suggest %s swap for %s memory with '%s'" - " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'], - pinfo['avail'], pinfo['max_in'], pinfo['max']) + LOG.debug( + "suggest %s swap for %s memory with '%s' disk given max=%s [max=%s]'", + pinfo["size"], + pinfo["mem"], + pinfo["avail"], + pinfo["max_in"], + pinfo["max"], + ) return size @@ -248,14 +255,23 @@ def create_swapfile(fname: str, size: str) -> None: errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s" def create_swap(fname, size, method): - LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'", - fname, fstype, method) + LOG.debug( + "Creating swapfile in '%s' on fstype '%s' using '%s'", + fname, + fstype, + method, + ) if method == "fallocate": - cmd = ['fallocate', '-l', '%sM' % size, fname] + cmd = ["fallocate", "-l", "%sM" % size, fname] elif method == "dd": - cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M', - 'count=%s' % size] + cmd = [ + "dd", + "if=/dev/zero", + "of=%s" % fname, + "bs=1M", + "count=%s" % size, + ] try: subp.subp(cmd, capture=True) @@ -269,8 +285,9 @@ def create_swapfile(fname: str, size: str) -> None: fstype = util.get_mount_info(swap_dir)[1] - if (fstype == "xfs" and - util.kernel_version() < (4, 18)) or fstype == "btrfs": + if ( + fstype == "xfs" and util.kernel_version() < (4, 18) + ) or fstype == "btrfs": create_swap(fname, size, "dd") else: try: @@ -282,7 +299,7 @@ def create_swapfile(fname: str, size: str) -> None: if os.path.exists(fname): util.chmod(fname, 0o600) try: - subp.subp(['mkswap', fname]) + subp.subp(["mkswap", fname]) except subp.ProcessExecutionError: util.del_file(fname) raise @@ -297,37 +314,42 @@ def setup_swapfile(fname, size=None, maxsize=None): swap_dir = os.path.dirname(fname) if str(size).lower() == "auto": try: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] except IOError: LOG.debug("Not creating swap: failed to read meminfo") return util.ensure_dir(swap_dir) - size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize, - memsize=memsize) + size = suggested_swapsize( + fsys=swap_dir, maxsize=maxsize, memsize=memsize + ) mibsize = str(int(size / (2 ** 20))) if not size: LOG.debug("Not creating swap: suggested size was 0") return - util.log_time(LOG.debug, msg="Setting up swap file", 
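# Sketch of the method selection inside create_swapfile above: fallocate
# is preferred, but dd is used where fallocate-created swapfiles are
# unsafe (btrfs, or xfs on kernels older than 4.18). The fstype and
# kernel version are passed in here rather than probed, to keep the
# sketch pure; the real code reads them via util.get_mount_info and
# util.kernel_version.
def pick_swapfile_method(fstype: str, kernel: tuple) -> str:
    if fstype == "btrfs" or (fstype == "xfs" and kernel < (4, 18)):
        return "dd"
    return "fallocate"

assert pick_swapfile_method("xfs", (4, 15)) == "dd"
assert pick_swapfile_method("ext4", (5, 4)) == "fallocate"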
func=create_swapfile, - args=[fname, mibsize]) + util.log_time( + LOG.debug, + msg="Setting up swap file", + func=create_swapfile, + args=[fname, mibsize], + ) return fname def handle_swapcfg(swapcfg): """handle the swap config, calling setup_swap if necessary. - return None or (filename, size) + return None or (filename, size) """ if not isinstance(swapcfg, dict): LOG.warning("input for swap config was not a dict.") return None - fname = swapcfg.get('filename', '/swap.img') - size = swapcfg.get('size', 0) - maxsize = swapcfg.get('maxsize', None) + fname = swapcfg.get("filename", "/swap.img") + size = swapcfg.get("size", 0) + maxsize = swapcfg.get("maxsize", None) if not (size and fname): LOG.debug("no need to setup swap") @@ -335,8 +357,10 @@ def handle_swapcfg(swapcfg): if os.path.exists(fname): if not os.path.exists("/proc/swaps"): - LOG.debug("swap file %s exists, but no /proc/swaps exists, " - "being safe", fname) + LOG.debug( + "swap file %s exists, but no /proc/swaps exists, being safe", + fname, + ) return fname try: for line in util.load_file("/proc/swaps").splitlines(): @@ -345,8 +369,9 @@ def handle_swapcfg(swapcfg): return fname LOG.debug("swap file %s exists, but not in /proc/swaps", fname) except Exception: - LOG.warning("swap file %s exists. Error reading /proc/swaps", - fname) + LOG.warning( + "swap file %s exists. Error reading /proc/swaps", fname + ) return fname try: @@ -373,8 +398,10 @@ def handle(_name, cfg, cloud, log, _args): defvals = cfg.get("mount_default_fields", defvals) # these are our default set of mounts - defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], - ["swap", "none", "swap", "sw", "0", "0"]] + defmnts = [ + ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], + ["swap", "none", "swap", "sw", "0", "0"], + ] cfgmnt = [] if "mounts" in cfg: @@ -404,13 +431,17 @@ def handle(_name, cfg, cloud, log, _args): for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): - log.warning("Mount option %s not a list, got a %s instead", - (i + 1), type_utils.obj_name(cfgmnt[i])) + log.warning( + "Mount option %s not a list, got a %s instead", + (i + 1), + type_utils.obj_name(cfgmnt[i]), + ) continue start = str(cfgmnt[i][0]) - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed %s => %s" % (start, sanitized)) @@ -418,8 +449,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent named mount %s", start) continue elif sanitized in fstab_devs: - log.info("Device %s already defined in fstab: %s", - sanitized, fstab_devs[sanitized]) + log.info( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue cfgmnt[i][0] = sanitized @@ -452,8 +486,9 @@ def handle(_name, cfg, cloud, log, _args): # entry has the same device name for defmnt in defmnts: start = defmnt[0] - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed default device %s => %s" % (start, sanitized)) @@ -461,8 +496,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent default named mount %s", start) continue elif sanitized in fstab_devs: - log.debug("Device %s already defined in fstab: %s", - 
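# Sketch of the /proc/swaps probe in handle_swapcfg above: an existing
# swapfile is reused only if it is already listed as an active swap
# device. The exact match condition is elided in the hunk above; a
# "filename followed by a space" test is assumed here.
def swapfile_is_active(fname: str, proc_swaps_text: str) -> bool:
    for line in proc_swaps_text.splitlines():
        if fname + " " in line:
            return True
    return False

sample = "Filename\tType\tSize\n/swap.img file 524284\n"
assert swapfile_is_active("/swap.img ".strip(), sample)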
sanitized, fstab_devs[sanitized]) + log.debug( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue defmnt[0] = sanitized @@ -474,8 +512,7 @@ def handle(_name, cfg, cloud, log, _args): break if cfgmnt_has: - log.debug(("Not including %s, already" - " previously included"), start) + log.debug("Not including %s, already previously included", start) continue cfgmnt.append(defmnt) @@ -488,7 +525,7 @@ def handle(_name, cfg, cloud, log, _args): else: actlist.append(x) - swapret = handle_swapcfg(cfg.get('swap', {})) + swapret = handle_swapcfg(cfg.get("swap", {})) if swapret: actlist.append([swapret, "none", "swap", "sw", "0", "0"]) @@ -507,10 +544,11 @@ def handle(_name, cfg, cloud, log, _args): needswap = True if line[1].startswith("/"): dirs.append(line[1]) - cc_lines.append('\t'.join(line)) + cc_lines.append("\t".join(line)) - mount_points = [v['mountpoint'] for k, v in util.mounts().items() - if 'mountpoint' in v] + mount_points = [ + v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v + ] for d in dirs: try: util.ensure_dir(d) @@ -525,11 +563,12 @@ def handle(_name, cfg, cloud, log, _args): sadds = [WS.sub(" ", n) for n in cc_lines] sdrops = [WS.sub(" ", n) for n in fstab_removed] - sops = (["- " + drop for drop in sdrops if drop not in sadds] + - ["+ " + add for add in sadds if add not in sdrops]) + sops = ["- " + drop for drop in sdrops if drop not in sadds] + [ + "+ " + add for add in sadds if add not in sdrops + ] fstab_lines.extend(cc_lines) - contents = "%s\n" % ('\n'.join(fstab_lines)) + contents = "%s\n" % "\n".join(fstab_lines) util.write_file(FSTAB_PATH, contents) activate_cmds = [] @@ -549,7 +588,7 @@ def handle(_name, cfg, cloud, log, _args): fmt = "Activating swap and mounts with: %s" for cmd in activate_cmds: - fmt = "Activate mounts: %s:" + ' '.join(cmd) + fmt = "Activate mounts: %s:" + " ".join(cmd) try: subp.subp(cmd) log.debug(fmt, "PASS") @@ -557,4 +596,5 @@ def handle(_name, cfg, cloud, log, _args): log.warning(fmt, "FAIL") util.logexc(log, fmt, "FAIL") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index c55d5d86..a31da9bb 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -11,124 +11,132 @@ import os from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, temp_utils, templater, type_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) frequency = PER_INSTANCE -NTP_CONF = '/etc/ntp.conf' +NTP_CONF = "/etc/ntp.conf" NR_POOL_SERVERS = 4 -distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian', - 'eurolinux', 'fedora', 'miraclelinux', 'openEuler', 'opensuse', - 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] +distros = [ + "almalinux", + "alpine", + "centos", + "cloudlinux", + "debian", + "eurolinux", + "fedora", + "miraclelinux", + "openEuler", + "opensuse", + "photon", + "rhel", + "rocky", + "sles", + "ubuntu", + "virtuozzo", +] NTP_CLIENT_CONFIG = { - 'chrony': { - 'check_exe': 'chronyd', - 'confpath': '/etc/chrony.conf', - 'packages': ['chrony'], - 'service_name': 'chrony', - 'template_name': 'chrony.conf.{distro}', - 'template': None, + "chrony": { + "check_exe": "chronyd", + "confpath": "/etc/chrony.conf", + 
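# Sketch of the fstab change summary (sadds/sdrops/sops) built above:
# lines are whitespace-normalised before comparison so spacing-only
# differences do not count as changes; '-' marks dropped entries and '+'
# marks added ones. The WS pattern mirrors the one cc_mounts builds from
# string.whitespace (an assumption, since its definition is outside the
# hunk).
import re
from string import whitespace

WS = re.compile("[%s]+" % whitespace)

def fstab_changes(old_lines, new_lines):
    sdrops = [WS.sub(" ", n) for n in old_lines]
    sadds = [WS.sub(" ", n) for n in new_lines]
    return ["- " + d for d in sdrops if d not in sadds] + [
        "+ " + a for a in sadds if a not in sdrops
    ]

old = ["/dev/sdb1\t/mnt\tauto\tdefaults,nofail\t0\t2"]
new = ["/dev/sdb1 /mnt auto defaults,nofail 0 2", "/swap.img none swap sw 0 0"]
assert fstab_changes(old, new) == ["+ /swap.img none swap sw 0 0"]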
"packages": ["chrony"], + "service_name": "chrony", + "template_name": "chrony.conf.{distro}", + "template": None, }, - 'ntp': { - 'check_exe': 'ntpd', - 'confpath': NTP_CONF, - 'packages': ['ntp'], - 'service_name': 'ntp', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntp": { + "check_exe": "ntpd", + "confpath": NTP_CONF, + "packages": ["ntp"], + "service_name": "ntp", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'ntpdate': { - 'check_exe': 'ntpdate', - 'confpath': NTP_CONF, - 'packages': ['ntpdate'], - 'service_name': 'ntpdate', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntpdate": { + "check_exe": "ntpdate", + "confpath": NTP_CONF, + "packages": ["ntpdate"], + "service_name": "ntpdate", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'systemd-timesyncd': { - 'check_exe': '/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf', - 'packages': [], - 'service_name': 'systemd-timesyncd', - 'template_name': 'timesyncd.conf', - 'template': None, + "systemd-timesyncd": { + "check_exe": "/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf.d/cloud-init.conf", + "packages": [], + "service_name": "systemd-timesyncd", + "template_name": "timesyncd.conf", + "template": None, }, } # This is Distro-specific configuration overrides of the base config DISTRO_CLIENT_CONFIG = { - 'alpine': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', - 'service_name': 'chronyd', + "alpine": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'packages': [], - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "packages": [], + "service_name": "ntpd", }, }, - 'debian': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "debian": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", }, }, - 'opensuse': { - 'chrony': { - 'service_name': 'chronyd', + "opensuse": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'photon': { - 'chrony': { - 'service_name': 'chronyd', + "photon": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'service_name': 'ntpd', - 'confpath': '/etc/ntp.conf' - }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf', + "ntp": {"service_name": "ntpd", "confpath": "/etc/ntp.conf"}, + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf", }, }, - 'rhel': { - 'ntp': { - 'service_name': 'ntpd', + "rhel": { + "ntp": { + "service_name": "ntpd", }, - 'chrony': { - 'service_name': 'chronyd', + "chrony": { + "service_name": "chronyd", }, }, - 'sles': { - 'chrony': { - 'service_name': 'chronyd', + "sles": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'ubuntu': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "ubuntu": 
{ + "chrony": { + "confpath": "/etc/chrony/chrony.conf", }, }, } @@ -141,10 +149,11 @@ DISTRO_CLIENT_CONFIG = { # configuration. meta = { - 'id': 'cc_ntp', - 'name': 'NTP', - 'title': 'enable and configure ntp', - 'description': dedent("""\ + "id": "cc_ntp", + "name": "NTP", + "title": "enable and configure ntp", + "description": dedent( + """\ Handle ntp configuration. If ntp is not installed on the system and ntp configuration is specified, ntp will be installed. If there is a default ntp config file in the image or one is present in the @@ -152,16 +161,20 @@ meta = { appended to the filename before any changes are made. A list of ntp pools and ntp servers can be provided under the ``ntp`` config key. If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used - in the format ``{0-3}.{distro}.pool.ntp.org``."""), - 'distros': distros, - 'examples': [ - dedent("""\ + in the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ # Override ntp with chrony configuration on Ubuntu ntp: enabled: true ntp_client: chrony # Uses cloud-init default chrony configuration - """), - dedent("""\ + """ + ), + dedent( + """\ # Provide a custom ntp client configuration ntp: enabled: true @@ -188,120 +201,137 @@ meta = { servers: - ntp.server.local - ntp.ubuntu.com - - 192.168.23.2""")], - 'frequency': PER_INSTANCE, + - 192.168.23.2""" + ), + ], + "frequency": PER_INSTANCE, } schema = { - 'type': 'object', - 'properties': { - 'ntp': { - 'type': ['object', 'null'], - 'properties': { - 'pools': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "ntp": { + "type": ["object", "null"], + "properties": { + "pools": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp pools. If both pools and servers are empty, 4 default pool servers will be provided of the format ``{0-3}.{distro}.pool.ntp.org``. NOTE: for Alpine Linux when using the Busybox NTP client this setting will be ignored due to the limited - functionality of Busybox's ntpd.""") + functionality of Busybox's ntpd.""" + ), }, - 'servers': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "servers": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp servers. If both pools and servers are empty, 4 default pool servers will be provided with - the format ``{0-3}.{distro}.pool.ntp.org``.""") + the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), }, - 'ntp_client': { - 'type': 'string', - 'default': 'auto', - 'description': dedent("""\ + "ntp_client": { + "type": "string", + "default": "auto", + "description": dedent( + """\ Name of an NTP client to use to configure system NTP. When unprovided or 'auto' the default client preferred by the distribution will be used. The following built-in client names can be used to override existing configuration defaults: chrony, ntp, ntpdate, - systemd-timesyncd."""), + systemd-timesyncd.""" + ), }, - 'enabled': { - 'type': 'boolean', - 'default': True, - 'description': dedent("""\ + "enabled": { + "type": "boolean", + "default": True, + "description": dedent( + """\ Attempt to enable ntp clients if set to True. 
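# A reduced version of the ntp schema above, exercised directly with the
# jsonschema library to show the kind of check validate_cloudconfig_schema
# performs; the full schema also covers ntp_client, enabled and config,
# and enforces uniqueItems and hostname formats.
import jsonschema

ntp_schema = {
    "type": "object",
    "properties": {
        "ntp": {
            "type": ["object", "null"],
            "properties": {
                "pools": {"type": "array", "items": {"type": "string"}},
                "servers": {"type": "array", "items": {"type": "string"}},
            },
        }
    },
}
jsonschema.validate({"ntp": {"pools": ["0.pool.ntp.org"]}}, ntp_schema)  # ok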
If set to False, ntp client will not be configured or - installed"""), + installed""" + ), }, - 'config': { - 'description': dedent("""\ + "config": { + "description": dedent( + """\ Configuration settings or overrides for the - ``ntp_client`` specified."""), - 'type': ['object'], - 'properties': { - 'confpath': { - 'type': 'string', - 'description': dedent("""\ + ``ntp_client`` specified.""" + ), + "type": ["object"], + "properties": { + "confpath": { + "type": "string", + "description": dedent( + """\ The path to where the ``ntp_client`` - configuration is written."""), + configuration is written.""" + ), }, - 'check_exe': { - 'type': 'string', - 'description': dedent("""\ + "check_exe": { + "type": "string", + "description": dedent( + """\ The executable name for the ``ntp_client``. For example, ntp service ``check_exe`` is - 'ntpd' because it runs the ntpd binary."""), + 'ntpd' because it runs the ntpd binary.""" + ), }, - 'packages': { - 'type': 'array', - 'items': { - 'type': 'string', + "packages": { + "type": "array", + "items": { + "type": "string", }, - 'uniqueItems': True, - 'description': dedent("""\ + "uniqueItems": True, + "description": dedent( + """\ List of packages needed to be installed for the - selected ``ntp_client``."""), + selected ``ntp_client``.""" + ), }, - 'service_name': { - 'type': 'string', - 'description': dedent("""\ + "service_name": { + "type": "string", + "description": dedent( + """\ The systemd or sysvinit service name used to start and stop the ``ntp_client`` - service."""), + service.""" + ), }, - 'template': { - 'type': 'string', - 'description': dedent("""\ + "template": { + "type": "string", + "description": dedent( + """\ Inline template allowing users to define their own ``ntp_client`` configuration template. The value must start with '## template:jinja' to enable use of templating support. - """), + """ + ), }, }, # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override # of builtin client values. 
- 'minProperties': 1, # If we have config, define something - 'additionalProperties': False + "minProperties": 1, # If we have config, define something + "additionalProperties": False, }, }, - 'additionalProperties': False + "additionalProperties": False, } - } + }, } -REQUIRED_NTP_CONFIG_KEYS = frozenset([ - 'check_exe', 'confpath', 'packages', 'service_name']) +REQUIRED_NTP_CONFIG_KEYS = frozenset( + ["check_exe", "confpath", "packages", "service_name"] +) __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -334,21 +364,23 @@ def select_ntp_client(ntp_client, distro): distro_cfg = distro_ntp_client_configs(distro.name) # user specified client, return its config - if ntp_client and ntp_client != 'auto': - LOG.debug('Selected NTP client "%s" via user-data configuration', - ntp_client) + if ntp_client and ntp_client != "auto": + LOG.debug( + 'Selected NTP client "%s" via user-data configuration', ntp_client + ) return distro_cfg.get(ntp_client, {}) # default to auto if unset in distro - distro_ntp_client = distro.get_option('ntp_client', 'auto') + distro_ntp_client = distro.get_option("ntp_client", "auto") clientcfg = {} if distro_ntp_client == "auto": for client in distro.preferred_ntp_clients: cfg = distro_cfg.get(client) - if subp.which(cfg.get('check_exe')): - LOG.debug('Selected NTP client "%s", already installed', - client) + if subp.which(cfg.get("check_exe")): + LOG.debug( + 'Selected NTP client "%s", already installed', client + ) clientcfg = cfg break @@ -356,11 +388,14 @@ def select_ntp_client(ntp_client, distro): client = distro.preferred_ntp_clients[0] LOG.debug( 'Selected distro preferred NTP client "%s", not yet installed', - client) + client, + ) clientcfg = distro_cfg.get(client) else: - LOG.debug('Selected NTP client "%s" via distro system config', - distro_ntp_client) + LOG.debug( + 'Selected NTP client "%s" via distro system config', + distro_ntp_client, + ) clientcfg = distro_cfg.get(distro_ntp_client, {}) return clientcfg @@ -378,7 +413,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"): if subp.which(check_exe): return if packages is None: - packages = ['ntp'] + packages = ["ntp"] install_func(packages) @@ -403,25 +438,34 @@ def generate_server_names(distro): names = [] pool_distro = distro - if distro == 'sles': + if distro == "sles": # For legal reasons x.pool.sles.ntp.org does not exist, # use the opensuse pool - pool_distro = 'opensuse' - elif distro == 'alpine' or distro == 'eurolinux': + pool_distro = "opensuse" + elif distro == "alpine" or distro == "eurolinux": # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist # so use general x.pool.ntp.org instead. The same applies to EuroLinux - pool_distro = '' + pool_distro = "" for x in range(0, NR_POOL_SERVERS): - names.append(".".join( - [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n])) + names.append( + ".".join( + [n for n in [str(x)] + [pool_distro] + ["pool.ntp.org"] if n] + ) + ) return names -def write_ntp_config_template(distro_name, service_name=None, servers=None, - pools=None, path=None, template_fn=None, - template=None): +def write_ntp_config_template( + distro_name, + service_name=None, + servers=None, + pools=None, + path=None, + template_fn=None, + template=None, +): """Render a ntp client configuration for the specified client. @param distro_name: string. The distro class name. 
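# Worked example of generate_server_names above: for most distros the
# distro name is embedded in the pool hostname, while the empty
# pool_distro used for alpine/eurolinux (and the opensuse substitution
# for sles) collapses to the generic pool.
def pool_names(pool_distro, count=4):
    return [
        ".".join(n for n in (str(x), pool_distro, "pool.ntp.org") if n)
        for x in range(count)
    ]

assert pool_names("debian")[0] == "0.debian.pool.ntp.org"
assert pool_names("")[0] == "0.pool.ntp.org"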
@@ -444,27 +488,30 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None, if not pools: pools = [] - if (len(servers) == 0 and distro_name == 'alpine' and - service_name == 'ntpd'): + if ( + len(servers) == 0 + and distro_name == "alpine" + and service_name == "ntpd" + ): # Alpine's Busybox ntpd only understands "servers" configuration # and not "pool" configuration. servers = generate_server_names(distro_name) - LOG.debug( - 'Adding distro default ntp servers: %s', ','.join(servers)) + LOG.debug("Adding distro default ntp servers: %s", ",".join(servers)) elif len(servers) == 0 and len(pools) == 0: pools = generate_server_names(distro_name) LOG.debug( - 'Adding distro default ntp pool servers: %s', ','.join(pools)) + "Adding distro default ntp pool servers: %s", ",".join(pools) + ) if not path: - raise ValueError('Invalid value for path parameter') + raise ValueError("Invalid value for path parameter") if not template_fn and not template: - raise ValueError('Not template_fn or template provided') + raise ValueError("Not template_fn or template provided") - params = {'servers': servers, 'pools': pools} + params = {"servers": servers, "pools": pools} if template: - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # filepath is second item in tuple util.write_file(template_fn, content=template) @@ -487,50 +534,62 @@ def supplemental_schema_validation(ntp_config): errors = [] missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys())) if missing: - keys = ', '.join(sorted(missing)) + keys = ", ".join(sorted(missing)) errors.append( - 'Missing required ntp:config keys: {keys}'.format(keys=keys)) - elif not any([ntp_config.get('template'), - ntp_config.get('template_name')]): + "Missing required ntp:config keys: {keys}".format(keys=keys) + ) + elif not any( + [ntp_config.get("template"), ntp_config.get("template_name")] + ): errors.append( - 'Either ntp:config:template or ntp:config:template_name values' - ' are required') + "Either ntp:config:template or ntp:config:template_name values" + " are required" + ) for key, value in sorted(ntp_config.items()): - keypath = 'ntp:config:' + key - if key == 'confpath': + keypath = "ntp:config:" + key + if key == "confpath": if not all([value, isinstance(value, str)]): errors.append( - 'Expected a config file path {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key == 'packages': + "Expected a config file path {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key == "packages": if not isinstance(value, list): errors.append( - 'Expected a list of required package names for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key in ('template', 'template_name'): + "Expected a list of required package names for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key in ("template", "template_name"): if value is None: # Either template or template_name can be none continue if not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) elif not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' 
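# Sketch of the first check in supplemental_schema_validation above:
# required keys are diffed as a set so that a single message can name
# everything missing at once.
REQUIRED = frozenset(["check_exe", "confpath", "packages", "service_name"])

def missing_keys_error(ntp_config: dict):
    missing = REQUIRED.difference(ntp_config)
    if missing:
        return "Missing required ntp:config keys: " + ", ".join(sorted(missing))
    return None

assert "confpath" in missing_keys_error({"check_exe": "ntpd"})
assert missing_keys_error(dict.fromkeys(REQUIRED, "x")) is None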
- ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}. Found ({value})".format( + keypath=keypath, value=value + ) + ) if errors: - raise ValueError(r'Invalid ntp configuration:\n{errors}'.format( - errors='\n'.join(errors))) + raise ValueError( + r"Invalid ntp configuration:\n{errors}".format( + errors="\n".join(errors) + ) + ) def handle(name, cfg, cloud, log, _args): """Enable and configure ntp.""" - if 'ntp' not in cfg: + if "ntp" not in cfg: LOG.debug( - "Skipping module named %s, not present or disabled by cfg", name) + "Skipping module named %s, not present or disabled by cfg", name + ) return - ntp_cfg = cfg['ntp'] + ntp_cfg = cfg["ntp"] if ntp_cfg is None: ntp_cfg = {} # Allow empty config which will install the package @@ -538,52 +597,61 @@ def handle(name, cfg, cloud, log, _args): if not isinstance(ntp_cfg, (dict)): raise RuntimeError( "'ntp' key existed in config, but not a dictionary type," - " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)) + ) validate_cloudconfig_schema(cfg, schema) # Allow users to explicitly enable/disable - enabled = ntp_cfg.get('enabled', True) + enabled = ntp_cfg.get("enabled", True) if util.is_false(enabled): LOG.debug("Skipping module named %s, disabled by cfg", name) return # Select which client is going to be used and get the configuration - ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), - cloud.distro) + ntp_client_config = select_ntp_client( + ntp_cfg.get("ntp_client"), cloud.distro + ) # Allow user ntp config to override distro configurations ntp_client_config = util.mergemanydict( - [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) + [ntp_client_config, ntp_cfg.get("config", {})], reverse=True + ) supplemental_schema_validation(ntp_client_config) - rename_ntp_conf(confpath=ntp_client_config.get('confpath')) + rename_ntp_conf(confpath=ntp_client_config.get("confpath")) template_fn = None - if not ntp_client_config.get('template'): - template_name = ( - ntp_client_config.get('template_name').replace('{distro}', - cloud.distro.name)) + if not ntp_client_config.get("template"): + template_name = ntp_client_config.get("template_name").replace( + "{distro}", cloud.distro.name + ) template_fn = cloud.get_template_filename(template_name) if not template_fn: - msg = ('No template found, not rendering %s' % - ntp_client_config.get('template_name')) + msg = ( + "No template found, not rendering %s" + % ntp_client_config.get("template_name") + ) raise RuntimeError(msg) - write_ntp_config_template(cloud.distro.name, - service_name=ntp_client_config.get( - 'service_name'), - servers=ntp_cfg.get('servers', []), - pools=ntp_cfg.get('pools', []), - path=ntp_client_config.get('confpath'), - template_fn=template_fn, - template=ntp_client_config.get('template')) - - install_ntp_client(cloud.distro.install_packages, - packages=ntp_client_config['packages'], - check_exe=ntp_client_config['check_exe']) + write_ntp_config_template( + cloud.distro.name, + service_name=ntp_client_config.get("service_name"), + servers=ntp_cfg.get("servers", []), + pools=ntp_cfg.get("pools", []), + path=ntp_client_config.get("confpath"), + template_fn=template_fn, + template=ntp_client_config.get("template"), + ) + + install_ntp_client( + cloud.distro.install_packages, + packages=ntp_client_config["packages"], + check_exe=ntp_client_config["check_exe"], + ) try: - cloud.distro.manage_service('reload', - 
ntp_client_config.get('service_name')) + cloud.distro.manage_service( + "reload", ntp_client_config.get("service_name") + ) except subp.ProcessExecutionError as e: LOG.exception("Failed to reload/start ntp service: %s", e) raise diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index 036baf85..14cdfab8 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -43,8 +43,7 @@ import os import time from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util REBOOT_FILE = "/var/run/reboot-required" REBOOT_CMD = ["/sbin/reboot"] @@ -68,17 +67,19 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): log.debug("Rebooted, but still running after %s seconds", int(elapsed)) # If we got here, not good elapsed = time.time() - start - raise RuntimeError(("Reboot did not happen" - " after %s seconds!") % (int(elapsed))) + raise RuntimeError( + "Reboot did not happen after %s seconds!" % (int(elapsed)) + ) def handle(_name, cfg, cloud, log, _args): # Handle the old style + new config names - update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update') - upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade') - reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required', - 'package_reboot_if_required') - pkglist = util.get_cfg_option_list(cfg, 'packages', []) + update = _multi_cfg_bool_get(cfg, "apt_update", "package_update") + upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade") + reboot_if_required = _multi_cfg_bool_get( + cfg, "apt_reboot_if_required", "package_reboot_if_required" + ) + pkglist = util.get_cfg_option_list(cfg, "packages", []) errors = [] if update or len(pkglist) or upgrade: @@ -109,8 +110,9 @@ def handle(_name, cfg, cloud, log, _args): reboot_fn_exists = os.path.isfile(REBOOT_FILE) if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: try: - log.warning("Rebooting after upgrade or install per " - "%s", REBOOT_FILE) + log.warning( + "Rebooting after upgrade or install per %s", REBOOT_FILE + ) # Flush the above warning + anything else out... logging.flushLoggers(log) _fire_reboot(log) @@ -119,8 +121,10 @@ def handle(_name, cfg, cloud, log, _args): errors.append(e) if len(errors): - log.warning("%s failed with exceptions, re-raising the last one", - len(errors)) + log.warning( + "%s failed with exceptions, re-raising the last one", len(errors) + ) raise errors[-1] + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 733c3910..cc1fe53e 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -41,22 +41,19 @@ keys to post. 
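# Sketch of the wait loop in _fire_reboot above: after issuing the reboot
# command the process sleeps in geometrically growing intervals and
# raises if it is still running at the end. Only the sleep schedule is
# modelled here, so the sketch runs instantly.
def backoff_sleeps(wait_attempts=6, initial_sleep=1, backoff=2):
    sleep = initial_sleep
    for _ in range(wait_attempts):
        yield sleep
        sleep *= backoff

assert list(backoff_sleeps()) == [1, 2, 4, 8, 16, 32]  # ~63s total wait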
Available keys are: tries: 10 """ -from cloudinit import templater -from cloudinit import url_helper -from cloudinit import util - +from cloudinit import templater, url_helper, util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE POST_LIST_ALL = [ - 'pub_key_dsa', - 'pub_key_rsa', - 'pub_key_ecdsa', - 'pub_key_ed25519', - 'instance_id', - 'hostname', - 'fqdn' + "pub_key_dsa", + "pub_key_rsa", + "pub_key_ecdsa", + "pub_key_ed25519", + "instance_id", + "hostname", + "fqdn", ] @@ -74,48 +71,58 @@ def handle(name, cfg, cloud, log, args): if len(args) != 0: ph_cfg = util.read_conf(args[0]) else: - if 'phone_home' not in cfg: - log.debug(("Skipping module named %s, " - "no 'phone_home' configuration found"), name) + if "phone_home" not in cfg: + log.debug( + "Skipping module named %s, " + "no 'phone_home' configuration found", + name, + ) return - ph_cfg = cfg['phone_home'] - - if 'url' not in ph_cfg: - log.warning(("Skipping module named %s, " - "no 'url' found in 'phone_home' configuration"), name) + ph_cfg = cfg["phone_home"] + + if "url" not in ph_cfg: + log.warning( + "Skipping module named %s, " + "no 'url' found in 'phone_home' configuration", + name, + ) return - url = ph_cfg['url'] - post_list = ph_cfg.get('post', 'all') - tries = ph_cfg.get('tries') + url = ph_cfg["url"] + post_list = ph_cfg.get("post", "all") + tries = ph_cfg.get("tries") try: tries = int(tries) except Exception: tries = 10 - util.logexc(log, "Configuration entry 'tries' is not an integer, " - "using %s instead", tries) + util.logexc( + log, + "Configuration entry 'tries' is not an integer, using %s instead", + tries, + ) if post_list == "all": post_list = POST_LIST_ALL all_keys = {} - all_keys['instance_id'] = cloud.get_instance_id() - all_keys['hostname'] = cloud.get_hostname() - all_keys['fqdn'] = cloud.get_hostname(fqdn=True) + all_keys["instance_id"] = cloud.get_instance_id() + all_keys["hostname"] = cloud.get_hostname() + all_keys["fqdn"] = cloud.get_hostname(fqdn=True) pubkeys = { - 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub', - 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub', - 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub', - 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub', + "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub", + "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub", + "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub", + "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub", } for (n, path) in pubkeys.items(): try: all_keys[n] = util.load_file(path) except Exception: - util.logexc(log, "%s: failed to open, can not phone home that " - "data!", path) + util.logexc( + log, "%s: failed to open, can not phone home that data!", path + ) submit_keys = {} for k in post_list: @@ -123,28 +130,37 @@ def handle(name, cfg, cloud, log, args): submit_keys[k] = all_keys[k] else: submit_keys[k] = None - log.warning(("Requested key %s from 'post'" - " configuration list not available"), k) + log.warning( + "Requested key %s from 'post'" + " configuration list not available", + k, + ) # Get them read to be posted real_submit_keys = {} for (k, v) in submit_keys.items(): if v is None: - real_submit_keys[k] = 'N/A' + real_submit_keys[k] = "N/A" else: real_submit_keys[k] = str(v) # Incase the url is parameterized url_params = { - 'INSTANCE_ID': all_keys['instance_id'], + "INSTANCE_ID": all_keys["instance_id"], } url = templater.render_string(url, url_params) try: url_helper.read_file_or_url( - url, data=real_submit_keys, retries=tries, sec_between=3, - 
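# Sketch of the parameterised phone_home URL handling above, with
# string.Template standing in for cloudinit.templater.render_string (an
# assumption; the real renderer supports more than plain $-substitution).
# The instance id is illustrative.
from string import Template

url = "http://example.com/$INSTANCE_ID/"
rendered = Template(url).safe_substitute(INSTANCE_ID="i-0123456789abcdef")
assert rendered == "http://example.com/i-0123456789abcdef/"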
ssl_details=util.fetch_ssl_details(cloud.paths)) + url, + data=real_submit_keys, + retries=tries, + sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths), + ) except Exception: - util.logexc(log, "Failed to post phone home data to %s in %s tries", - url, tries) + util.logexc( + log, "Failed to post phone home data to %s in %s tries", url, tries + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 5780a7e9..d4eb68c0 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -58,9 +58,8 @@ import re import subprocess import time +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE @@ -75,9 +74,9 @@ def givecmdline(pid): # PID COMM ARGS # 1 init /bin/init -- if util.is_FreeBSD(): - (output, _err) = subp.subp(['procstat', '-c', str(pid)]) + (output, _err) = subp.subp(["procstat", "-c", str(pid)]) line = output.splitlines()[1] - m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) + m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line) return m.group(2) else: return util.load_file("/proc/%s/cmdline" % pid) @@ -106,8 +105,9 @@ def check_condition(cond, log=None): return False else: if log: - log.warning(pre + "unexpected exit %s. " % ret + - "do not apply change.") + log.warning( + pre + "unexpected exit %s. " % ret + "do not apply change." + ) return False except Exception as e: if log: @@ -138,16 +138,24 @@ def handle(_name, cfg, cloud, log, _args): devnull_fp = open(os.devnull, "w") - log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) + log.debug("After pid %s ends, will execute: %s" % (mypid, " ".join(args))) - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, - condition, execmd, [args, devnull_fp]) + util.fork_cb( + run_after_pid_gone, + mypid, + cmdline, + timeout, + log, + condition, + execmd, + [args, devnull_fp], + ) def load_power_state(cfg, distro): # returns a tuple of shutdown_command, timeout # shutdown_command is None if no config found - pstate = cfg.get('power_state') + pstate = cfg.get("power_state") if pstate is None: return (None, None, None) @@ -155,22 +163,25 @@ def load_power_state(cfg, distro): if not isinstance(pstate, dict): raise TypeError("power_state is not a dict.") - modes_ok = ['halt', 'poweroff', 'reboot'] + modes_ok = ["halt", "poweroff", "reboot"] mode = pstate.get("mode") if mode not in distro.shutdown_options_map: raise TypeError( - "power_state[mode] required, must be one of: %s. found: '%s'." % - (','.join(modes_ok), mode)) + "power_state[mode] required, must be one of: %s. found: '%s'." + % (",".join(modes_ok), mode) + ) - args = distro.shutdown_command(mode=mode, - delay=pstate.get("delay", "now"), - message=pstate.get("message")) + args = distro.shutdown_command( + mode=mode, + delay=pstate.get("delay", "now"), + message=pstate.get("message"), + ) try: - timeout = float(pstate.get('timeout', 30.0)) + timeout = float(pstate.get("timeout", 30.0)) except ValueError as e: raise ValueError( - "failed to convert timeout '%s' to float." % pstate['timeout'] + "failed to convert timeout '%s' to float." 
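# Worked example of the FreeBSD branch of givecmdline above: the second
# line of `procstat -c <pid>` output is matched to pull out the command
# path. The sample line is illustrative, not captured from a real system.
import re

line = "  721 sshd             /usr/sbin/sshd -D"
m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line)
assert m and m.group(2) == "/usr/sbin/sshd -D"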
% pstate["timeout"] ) from e condition = pstate.get("condition", True) @@ -186,8 +197,12 @@ def doexit(sysexit): def execmd(exe_args, output=None, data_in=None): ret = 1 try: - proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, - stdout=output, stderr=subprocess.STDOUT) + proc = subprocess.Popen( + exe_args, + stdin=subprocess.PIPE, + stdout=output, + stderr=subprocess.STDOUT, + ) proc.communicate(data_in) ret = proc.returncode except Exception: @@ -230,7 +245,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): except Exception as e: fatal("Unexpected Exception: %s" % e) - time.sleep(.25) + time.sleep(0.25) if not msg: fatal("Unexpected error in run_after_pid_gone") @@ -246,4 +261,5 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): func(*args) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index dc20fc44..f51f49bc 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -108,23 +108,20 @@ key (by default the agent will execute with the ``--test`` flag). import os import socket -import yaml from io import StringIO -from cloudinit import helpers -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util -from cloudinit import url_helper +import yaml -AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501 -PUPPET_AGENT_DEFAULT_ARGS = ['--test'] +from cloudinit import helpers, subp, temp_utils, url_helper, util +AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501 +PUPPET_AGENT_DEFAULT_ARGS = ["--test"] -class PuppetConstants(object): - def __init__(self, puppet_conf_file, puppet_ssl_dir, - csr_attributes_path, log): +class PuppetConstants(object): + def __init__( + self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log + ): self.conf_path = puppet_conf_file self.ssl_dir = puppet_ssl_dir self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") @@ -134,18 +131,27 @@ class PuppetConstants(object): def _autostart_puppet(log): # Set puppet to automatically start - if os.path.exists('/etc/default/puppet'): - subp.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) - elif os.path.exists('/bin/systemctl'): - subp.subp(['/bin/systemctl', 'enable', 'puppet.service'], - capture=False) - elif os.path.exists('/sbin/chkconfig'): - subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) + if os.path.exists("/etc/default/puppet"): + subp.subp( + [ + "sed", + "-i", + "-e", + "s/^START=.*/START=yes/", + "/etc/default/puppet", + ], + capture=False, + ) + elif os.path.exists("/bin/systemctl"): + subp.subp( + ["/bin/systemctl", "enable", "puppet.service"], capture=False + ) + elif os.path.exists("/sbin/chkconfig"): + subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False) else: - log.warning(("Sorry we do not know how to enable" - " puppet services on this system")) + log.warning( + "Sorry we do not know how to enable puppet services on this system" + ) def get_config_value(puppet_bin, setting): @@ -153,12 +159,13 @@ def get_config_value(puppet_bin, setting): :param puppet_bin: path to puppet binary :param setting: setting to query """ - out, _ = subp.subp([puppet_bin, 'config', 'print', setting]) + out, _ = subp.subp([puppet_bin, "config", "print", setting]) return out.rstrip() -def install_puppet_aio(url=AIO_INSTALL_URL, version=None, - collection=None, cleanup=True): 
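# Sketch of the polling idea behind run_after_pid_gone above: the forked
# helper re-reads the target pid's cmdline until it disappears (or
# changes, meaning the pid was recycled), then runs the callback. This
# relies on Linux /proc, like the non-FreeBSD path of givecmdline.
import time

def wait_pid_gone(pid: int, cmdline: str, poll=0.25, timeout=30.0) -> bool:
    deadline = time.time() + timeout
    path = "/proc/%s/cmdline" % pid
    while time.time() < deadline:
        try:
            with open(path, "rb") as f:
                if f.read().decode(errors="replace") != cmdline:
                    return True  # pid was recycled by another process
        except FileNotFoundError:
            return True  # process is gone
        time.sleep(poll)
    return False  # caller treats this as "timeout reached"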
+def install_puppet_aio( + url=AIO_INSTALL_URL, version=None, collection=None, cleanup=True +): """Install puppet-agent from the puppetlabs repositories using the one-shot shell script @@ -169,62 +176,70 @@ def install_puppet_aio(url=AIO_INSTALL_URL, version=None, """ args = [] if version is not None: - args = ['-v', version] + args = ["-v", version] if collection is not None: - args += ['-c', collection] + args += ["-c", collection] # Purge puppetlabs repos after installation if cleanup: - args += ['--cleanup'] + args += ["--cleanup"] content = url_helper.readurl(url=url, retries=5).contents # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: - tmpf = os.path.join(tmpd, 'puppet-install') + tmpf = os.path.join(tmpd, "puppet-install") util.write_file(tmpf, content, mode=0o700) return subp.subp([tmpf] + args, capture=False) def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything - if 'puppet' not in cfg: - log.debug(("Skipping module named %s," - " no 'puppet' configuration found"), name) + if "puppet" not in cfg: + log.debug( + "Skipping module named %s, no 'puppet' configuration found", name + ) return - puppet_cfg = cfg['puppet'] + puppet_cfg = cfg["puppet"] # Start by installing the puppet package if necessary... - install = util.get_cfg_option_bool(puppet_cfg, 'install', True) - version = util.get_cfg_option_str(puppet_cfg, 'version', None) - collection = util.get_cfg_option_str(puppet_cfg, 'collection', None) + install = util.get_cfg_option_bool(puppet_cfg, "install", True) + version = util.get_cfg_option_str(puppet_cfg, "version", None) + collection = util.get_cfg_option_str(puppet_cfg, "collection", None) install_type = util.get_cfg_option_str( - puppet_cfg, 'install_type', 'packages') - cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True) - run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False) - start_puppetd = util.get_cfg_option_bool(puppet_cfg, - 'start_service', - default=True) + puppet_cfg, "install_type", "packages" + ) + cleanup = util.get_cfg_option_bool(puppet_cfg, "cleanup", True) + run = util.get_cfg_option_bool(puppet_cfg, "exec", default=False) + start_puppetd = util.get_cfg_option_bool( + puppet_cfg, "start_service", default=True + ) aio_install_url = util.get_cfg_option_str( - puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL) + puppet_cfg, "aio_install_url", default=AIO_INSTALL_URL + ) # AIO and distro packages use different paths - if install_type == 'aio': - puppet_user = 'root' - puppet_bin = '/opt/puppetlabs/bin/puppet' - puppet_package = 'puppet-agent' + if install_type == "aio": + puppet_user = "root" + puppet_bin = "/opt/puppetlabs/bin/puppet" + puppet_package = "puppet-agent" else: # default to 'packages' - puppet_user = 'puppet' - puppet_bin = 'puppet' - puppet_package = 'puppet' + puppet_user = "puppet" + puppet_bin = "puppet" + puppet_package = "puppet" package_name = util.get_cfg_option_str( - puppet_cfg, 'package_name', puppet_package) + puppet_cfg, "package_name", puppet_package + ) if not install and version: - log.warning(("Puppet install set to false but version supplied," - " doing nothing.")) + log.warning( + "Puppet install set to false but version supplied, doing nothing." 
+ ) elif install: - log.debug(("Attempting to install puppet %s from %s"), - version if version else 'latest', install_type) + log.debug( + "Attempting to install puppet %s from %s", + version if version else "latest", + install_type, + ) if install_type == "packages": cloud.distro.install_packages((package_name, version)) @@ -235,17 +250,21 @@ def handle(name, cfg, cloud, log, _args): run = False conf_file = util.get_cfg_option_str( - puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config')) + puppet_cfg, "conf_file", get_config_value(puppet_bin, "config") + ) ssl_dir = util.get_cfg_option_str( - puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir')) + puppet_cfg, "ssl_dir", get_config_value(puppet_bin, "ssldir") + ) csr_attributes_path = util.get_cfg_option_str( - puppet_cfg, 'csr_attributes_path', - get_config_value(puppet_bin, 'csr_attributes')) + puppet_cfg, + "csr_attributes_path", + get_config_value(puppet_bin, "csr_attributes"), + ) p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) # ... and then update the puppet configuration - if 'conf' in puppet_cfg: + if "conf" in puppet_cfg: # Add all sections from the conf object to puppet.conf contents = util.load_file(p_constants.conf_path) # Create object for reading puppet.conf values @@ -254,30 +273,31 @@ def handle(name, cfg, cloud, log, _args): # mix the rest up. First clean them up # (TODO(harlowja) is this really needed??) cleaned_lines = [i.lstrip() for i in contents.splitlines()] - cleaned_contents = '\n'.join(cleaned_lines) + cleaned_contents = "\n".join(cleaned_lines) # Move to puppet_config.read_file when dropping py2.7 puppet_config.read_file( - StringIO(cleaned_contents), - source=p_constants.conf_path) - for (cfg_name, cfg) in puppet_cfg['conf'].items(): + StringIO(cleaned_contents), source=p_constants.conf_path + ) + for (cfg_name, cfg) in puppet_cfg["conf"].items(): # Cert configuration is a special case # Dump the puppetserver ca certificate in the correct place - if cfg_name == 'ca_cert': + if cfg_name == "ca_cert": # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership util.ensure_dir(p_constants.ssl_dir, 0o771) - util.chownbyname(p_constants.ssl_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_dir, puppet_user, "root") util.ensure_dir(p_constants.ssl_cert_dir) - util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_cert_dir, puppet_user, "root") util.write_file(p_constants.ssl_cert_path, cfg) - util.chownbyname(p_constants.ssl_cert_path, - puppet_user, 'root') + util.chownbyname( + p_constants.ssl_cert_path, puppet_user, "root" + ) else: # Iterate through the config items, we'll use ConfigParser.set # to overwrite or create new items as needed for (o, v) in cfg.items(): - if o == 'certname': + if o == "certname": # Expand %f as the fqdn # TODO(harlowja) should this use the cloud fqdn?? 
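# Sketch of the puppet.conf merge above: the existing config is loaded
# into a ConfigParser, user-supplied sections/options are overlaid, and
# certname's %f placeholder is expanded to the host fqdn before being
# set. Stock configparser stands in for cloud-init's DefaultingConfigParser;
# the sample section and values are illustrative.
import socket
from configparser import ConfigParser
from io import StringIO

existing = "[agent]\nserver = puppet\n"
overlay = {"agent": {"certname": "%f", "server": "puppet.example.com"}}

cp = ConfigParser()
cp.read_file(StringIO(existing), source="puppet.conf")
for section, opts in overlay.items():
    if not cp.has_section(section):
        cp.add_section(section)
    for opt, val in opts.items():
        if opt == "certname":
            val = val.replace("%f", socket.getfqdn())
        cp.set(section, opt, val)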
v = v.replace("%f", socket.getfqdn()) @@ -288,14 +308,16 @@ def handle(name, cfg, cloud, log, _args): puppet_config.set(cfg_name, o, v) # We got all our config as wanted we'll rename # the previous puppet.conf and create our new one - util.rename(p_constants.conf_path, "%s.old" - % (p_constants.conf_path)) + util.rename( + p_constants.conf_path, "%s.old" % (p_constants.conf_path) + ) util.write_file(p_constants.conf_path, puppet_config.stringify()) - if 'csr_attributes' in puppet_cfg: - util.write_file(p_constants.csr_attributes_path, - yaml.dump(puppet_cfg['csr_attributes'], - default_flow_style=False)) + if "csr_attributes" in puppet_cfg: + util.write_file( + p_constants.csr_attributes_path, + yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False), + ) # Set it up so it autostarts if start_puppetd: @@ -303,18 +325,21 @@ def handle(name, cfg, cloud, log, _args): # Run the agent if needed if run: - log.debug('Running puppet-agent') - cmd = [puppet_bin, 'agent'] - if 'exec_args' in puppet_cfg: - cmd_args = puppet_cfg['exec_args'] + log.debug("Running puppet-agent") + cmd = [puppet_bin, "agent"] + if "exec_args" in puppet_cfg: + cmd_args = puppet_cfg["exec_args"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.extend(cmd_args.split()) else: - log.warning("Unknown type %s provided for puppet" - " 'exec_args' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for puppet" + " 'exec_args' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) else: cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) @@ -322,6 +347,7 @@ def handle(name, cfg, cloud, log, _args): if start_puppetd: # Start puppetd - subp.subp(['service', 'puppet', 'start'], capture=False) + subp.subp(["service", "puppet", "start"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py index d5e0ecb2..87be5348 100644 --- a/cloudinit/config/cc_refresh_rmc_and_interface.py +++ b/cloudinit/config/cc_refresh_rmc_and_interface.py @@ -34,20 +34,18 @@ This module handles """ +import errno + from cloudinit import log as logging +from cloudinit import netinfo, subp, util from cloudinit.settings import PER_ALWAYS -from cloudinit import util -from cloudinit import subp -from cloudinit import netinfo - -import errno frequency = PER_ALWAYS LOG = logging.getLogger(__name__) # Ensure that /opt/rsct/bin has been added to standard PATH of the # distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl . -RMCCTRL = 'rmcctrl' +RMCCTRL = "rmcctrl" def handle(name, _cfg, _cloud, _log, _args): @@ -56,10 +54,11 @@ def handle(name, _cfg, _cloud, _log, _args): return LOG.debug( - 'Making the IPv6 up explicitly. ' - 'Ensuring IPv6 interface is not being handled by NetworkManager ' - 'and it is restarted to re-establish the communication with ' - 'the hypervisor') + "Making the IPv6 up explicitly. 
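# Sketch of the 'exec_args' normalisation above: a list or tuple is used
# as-is, a string is whitespace-split, and anything else (including a
# missing value) falls back to the default agent arguments.
DEFAULT_ARGS = ["--test"]

def agent_cmd(puppet_bin, exec_args=None):
    cmd = [puppet_bin, "agent"]
    if isinstance(exec_args, (list, tuple)):
        cmd.extend(exec_args)
    elif isinstance(exec_args, str):
        cmd.extend(exec_args.split())
    else:
        cmd.extend(DEFAULT_ARGS)
    return cmd

assert agent_cmd("puppet", "--onetime --no-daemonize") == [
    "puppet", "agent", "--onetime", "--no-daemonize"]
assert agent_cmd("puppet") == ["puppet", "agent", "--test"]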
" + "Ensuring IPv6 interface is not being handled by NetworkManager " + "and it is restarted to re-establish the communication with " + "the hypervisor" + ) ifaces = find_ipv6_ifaces() @@ -80,7 +79,7 @@ def find_ipv6_ifaces(): ifaces = [] for iface, data in info.items(): if iface == "lo": - LOG.debug('Skipping localhost interface') + LOG.debug("Skipping localhost interface") if len(data.get("ipv4", [])) != 0: # skip this interface, as it has ipv4 addrs continue @@ -92,16 +91,16 @@ def refresh_ipv6(interface): # IPv6 interface is explicitly brought up, subsequent to which the # RMC services are restarted to re-establish the communication with # the hypervisor. - subp.subp(['ip', 'link', 'set', interface, 'down']) - subp.subp(['ip', 'link', 'set', interface, 'up']) + subp.subp(["ip", "link", "set", interface, "down"]) + subp.subp(["ip", "link", "set", interface, "up"]) def sysconfig_path(iface): - return '/etc/sysconfig/network-scripts/ifcfg-' + iface + return "/etc/sysconfig/network-scripts/ifcfg-" + iface def restart_network_manager(): - subp.subp(['systemctl', 'restart', 'NetworkManager']) + subp.subp(["systemctl", "restart", "NetworkManager"]) def disable_ipv6(iface_file): @@ -113,12 +112,11 @@ def disable_ipv6(iface_file): contents = util.load_file(iface_file) except IOError as e: if e.errno == errno.ENOENT: - LOG.debug("IPv6 interface file %s does not exist\n", - iface_file) + LOG.debug("IPv6 interface file %s does not exist\n", iface_file) else: raise e - if 'IPV6INIT' not in contents: + if "IPV6INIT" not in contents: LOG.debug("Interface file %s did not have IPV6INIT", iface_file) return @@ -135,11 +133,12 @@ def disable_ipv6(iface_file): def search(contents): # Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file. - return( - contents.startswith("IPV6ADDR") or - contents.startswith("IPADDR6") or - contents.startswith("IPV6INIT") or - contents.startswith("NM_CONTROLLED")) + return ( + contents.startswith("IPV6ADDR") + or contents.startswith("IPADDR6") + or contents.startswith("IPV6INIT") + or contents.startswith("NM_CONTROLLED") + ) def refresh_rmc(): @@ -152,8 +151,8 @@ def refresh_rmc(): # until the subsystem and all resource managers are stopped. # -s : start Resource Monitoring & Control subsystem. try: - subp.subp([RMCCTRL, '-z']) - subp.subp([RMCCTRL, '-s']) + subp.subp([RMCCTRL, "-z"]) + subp.subp([RMCCTRL, "-s"]) except Exception: - util.logexc(LOG, 'Failed to refresh the RMC subsystem.') + util.logexc(LOG, "Failed to refresh the RMC subsystem.") raise diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py index 1cd72774..3b929903 100644 --- a/cloudinit/config/cc_reset_rmc.py +++ b/cloudinit/config/cc_reset_rmc.py @@ -39,9 +39,8 @@ Prerequisite of using this module is to install RSCT packages. import os from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp frequency = PER_INSTANCE @@ -49,34 +48,34 @@ frequency = PER_INSTANCE # The symlink for RMCCTRL and RECFGCT are # /usr/sbin/rsct/bin/rmcctrl and # /usr/sbin/rsct/install/bin/recfgct respectively. 
-RSCT_PATH = '/opt/rsct/install/bin' -RMCCTRL = 'rmcctrl' -RECFGCT = 'recfgct' +RSCT_PATH = "/opt/rsct/install/bin" +RMCCTRL = "rmcctrl" +RECFGCT = "recfgct" LOG = logging.getLogger(__name__) -NODE_ID_FILE = '/etc/ct_node_id' +NODE_ID_FILE = "/etc/ct_node_id" def handle(name, _cfg, cloud, _log, _args): # Ensuring node id has to be generated only once during first boot - if cloud.datasource.platform_type == 'none': - LOG.debug('Skipping creation of new ct_node_id node') + if cloud.datasource.platform_type == "none": + LOG.debug("Skipping creation of new ct_node_id node") return if not os.path.isdir(RSCT_PATH): LOG.debug("module disabled, RSCT_PATH not present") return - orig_path = os.environ.get('PATH') + orig_path = os.environ.get("PATH") try: add_path(orig_path) reset_rmc() finally: if orig_path: - os.environ['PATH'] = orig_path + os.environ["PATH"] = orig_path else: - del os.environ['PATH'] + del os.environ["PATH"] def reconfigure_rsct_subsystems(): @@ -88,17 +87,17 @@ def reconfigure_rsct_subsystems(): LOG.debug(out.strip()) return out except subp.ProcessExecutionError: - util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.') + util.logexc(LOG, "Failed to reconfigure the RSCT subsystems.") raise def get_node_id(): try: fp = util.load_file(NODE_ID_FILE) - node_id = fp.split('\n')[0] + node_id = fp.split("\n")[0] return node_id except Exception: - util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE) + util.logexc(LOG, "Failed to get node ID from file %s." % NODE_ID_FILE) raise @@ -107,25 +106,25 @@ def add_path(orig_path): # So thet cloud init automatically find and # run RECFGCT to create new node_id. suff = ":" + orig_path if orig_path else "" - os.environ['PATH'] = RSCT_PATH + suff - return os.environ['PATH'] + os.environ["PATH"] = RSCT_PATH + suff + return os.environ["PATH"] def rmcctrl(): # Stop the RMC subsystem and all resource managers so that we can make # some changes to it try: - return subp.subp([RMCCTRL, '-z']) + return subp.subp([RMCCTRL, "-z"]) except Exception: - util.logexc(LOG, 'Failed to stop the RMC subsystem.') + util.logexc(LOG, "Failed to stop the RMC subsystem.") raise def reset_rmc(): - LOG.debug('Attempting to reset RMC.') + LOG.debug("Attempting to reset RMC.") node_id_before = get_node_id() - LOG.debug('Node ID at beginning of module: %s', node_id_before) + LOG.debug("Node ID at beginning of module: %s", node_id_before) # Stop the RMC subsystem and all resource managers so that we can make # some changes to it @@ -133,11 +132,11 @@ def reset_rmc(): reconfigure_rsct_subsystems() node_id_after = get_node_id() - LOG.debug('Node ID at end of module: %s', node_id_after) + LOG.debug("Node ID at end of module: %s", node_id_after) # Check if new node ID is generated or not # by comparing old and new node ID if node_id_after == node_id_before: - msg = 'New node ID did not get generated.' + msg = "New node ID did not get generated." 
LOG.error(msg)
         raise Exception(msg)
 
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 00bb7ae7..b009c392 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -13,21 +13,21 @@ import os
 import stat
 from textwrap import dedent
 
+from cloudinit import subp, util
 from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
 from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util
 
 NOBLOCK = "noblock"
 
 frequency = PER_ALWAYS
-distros = ['all']
+distros = ["all"]
 
 meta = {
-    'id': 'cc_resizefs',
-    'name': 'Resizefs',
-    'title': 'Resize filesystem',
-    'description': dedent("""\
+    "id": "cc_resizefs",
+    "name": "Resizefs",
+    "title": "Resize filesystem",
+    "description": dedent(
+        """\
        Resize a filesystem to use all available space on the partition. This
        module is useful along with ``cc_growpart`` and will ensure that if
        the root partition has been resized the root filesystem will be
        resized along with it. By default, ``cc_resizefs`` will resize the
        root partition and will block the boot process while the resize
        command is running. Optionally, the resize operation can be performed
        in the background while cloud-init continues running modules. This
        can be enabled by setting ``resize_rootfs`` to ``noblock``. This
        module can be
-        disabled altogether by setting ``resize_rootfs`` to ``false``."""),
-    'distros': distros,
-    'examples': [
-        'resize_rootfs: false # disable root filesystem resize operation'],
-    'frequency': PER_ALWAYS,
+        disabled altogether by setting ``resize_rootfs`` to ``false``."""
+    ),
+    "distros": distros,
+    "examples": [
+        "resize_rootfs: false # disable root filesystem resize operation"
+    ],
+    "frequency": PER_ALWAYS,
 }
 
 schema = {
-    'type': 'object',
-    'properties': {
-        'resize_rootfs': {
-            'enum': [True, False, NOBLOCK],
-            'description': dedent("""\
-                Whether to resize the root partition. Default: 'true'""")
+    "type": "object",
+    "properties": {
+        "resize_rootfs": {
+            "enum": [True, False, NOBLOCK],
+            "description": dedent(
+                """\
+                Whether to resize the root partition. Default: 'true'"""
+            ),
         }
-    }
+    },
 }
 
 __doc__ = get_meta_doc(meta, schema)  # Supplement python help()
@@ -63,32 +67,38 @@ def _resize_btrfs(mount_point, devpth):
     # Use a subvolume that is not ro to trick the resize operation into doing
     # the "right" thing. The use of ".snapshots" is specific to "snapper"; a
     # generic solution would be to walk the subvolumes and find a rw mounted
     # subvolume.
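# A rough sketch of that generic approach; the helper below is hypothetical
# and assumes util.mounts() reports "fstype" and "mountpoint" per device,
# as cloudinit.util does elsewhere.
def _first_rw_btrfs_mount_sketch():
    for _dev, info in util.mounts().items():
        mp = info.get("mountpoint")
        if info.get("fstype") == "btrfs" and util.mount_is_read_write(mp):
            return mp  # any rw-mounted btrfs subvolume would work
    return None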
- if (not util.mount_is_read_write(mount_point) and - os.path.isdir("%s/.snapshots" % mount_point)): - return ('btrfs', 'filesystem', 'resize', 'max', - '%s/.snapshots' % mount_point) + if not util.mount_is_read_write(mount_point) and os.path.isdir( + "%s/.snapshots" % mount_point + ): + return ( + "btrfs", + "filesystem", + "resize", + "max", + "%s/.snapshots" % mount_point, + ) else: - return ('btrfs', 'filesystem', 'resize', 'max', mount_point) + return ("btrfs", "filesystem", "resize", "max", mount_point) def _resize_ext(mount_point, devpth): - return ('resize2fs', devpth) + return ("resize2fs", devpth) def _resize_xfs(mount_point, devpth): - return ('xfs_growfs', mount_point) + return ("xfs_growfs", mount_point) def _resize_ufs(mount_point, devpth): - return ('growfs', '-y', mount_point) + return ("growfs", "-y", mount_point) def _resize_zfs(mount_point, devpth): - return ('zpool', 'online', '-e', mount_point, devpth) + return ("zpool", "online", "-e", mount_point, devpth) def _resize_hammer2(mount_point, devpth): - return ('hammer2', 'growfs', mount_point) + return ("hammer2", "growfs", mount_point) def _can_skip_resize_ufs(mount_point, devpth): @@ -100,7 +110,7 @@ def _can_skip_resize_ufs(mount_point, devpth): # growfs exits with 1 for almost all cases up to this one. # This means we can't just use rcs=[0, 1] as subp parameter: try: - subp.subp(['growfs', '-N', devpth]) + subp.subp(["growfs", "-N", devpth]) except subp.ProcessExecutionError as e: if e.stderr.startswith(skip_start) and skip_contain in e.stderr: # This FS is already at the desired size @@ -114,17 +124,15 @@ def _can_skip_resize_ufs(mount_point, devpth): # for multiple filesystem types if possible, e.g. one command for # ext2, ext3 and ext4. RESIZE_FS_PREFIXES_CMDS = [ - ('btrfs', _resize_btrfs), - ('ext', _resize_ext), - ('xfs', _resize_xfs), - ('ufs', _resize_ufs), - ('zfs', _resize_zfs), - ('hammer2', _resize_hammer2), + ("btrfs", _resize_btrfs), + ("ext", _resize_ext), + ("xfs", _resize_xfs), + ("ufs", _resize_ufs), + ("zfs", _resize_zfs), + ("hammer2", _resize_hammer2), ] -RESIZE_FS_PRECHECK_CMDS = { - 'ufs': _can_skip_resize_ufs -} +RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs} def can_skip_resize(fs_type, resize_what, devpth): @@ -148,52 +156,66 @@ def maybe_get_writable_device_path(devpath, info, log): container = util.is_container() # Ensure the path is a block device. - if (devpath == "/dev/root" and not os.path.exists(devpath) and - not container): + if ( + devpath == "/dev/root" + and not os.path.exists(devpath) + and not container + ): devpath = util.rootdev_from_cmdline(util.get_cmdline()) if devpath is None: log.warning("Unable to find device '/dev/root'") return None log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) - if devpath == 'overlayroot': + if devpath == "overlayroot": log.debug("Not attempting to resize devpath '%s': %s", devpath, info) return None # FreeBSD zpool can also just use gpt/