From 3c551f6ebc12f7729a2755c89b19b9000e27cc88 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 8 Jun 2020 12:49:12 -0400 Subject: Move subp into its own module. (#416) This was painful, but it finishes a TODO from cloudinit/subp.py. It moves the following from util to subp: ProcessExecutionError subp which target_path I moved subp_blob_in_tempfile into cc_chef, which is its only caller. That saved us from having to deal with it using write_file and temp_utils from subp (which does not import any cloudinit things now). It is arguable that 'target_path' could be moved to a 'path_utils' or something, but in order to use it from subp and also from utils, we had to get it out of utils. --- cloudinit/config/cc_power_state_change.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'cloudinit/config/cc_power_state_change.py') diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 3e81a3c7..41ffb46c 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -56,6 +56,7 @@ import subprocess import time from cloudinit.settings import PER_INSTANCE +from cloudinit import subp from cloudinit import util frequency = PER_INSTANCE @@ -71,7 +72,7 @@ def givecmdline(pid): # PID COMM ARGS # 1 init /bin/init -- if util.is_FreeBSD(): - (output, _err) = util.subp(['procstat', '-c', str(pid)]) + (output, _err) = subp.subp(['procstat', '-c', str(pid)]) line = output.splitlines()[1] m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) return m.group(2) -- cgit v1.2.3 From 79a8ce7e714ae1686c10bff77612eab0f6eccc95 Mon Sep 17 00:00:00 2001 From: dermotbradley Date: Thu, 20 Aug 2020 00:18:25 +0100 Subject: Add Alpine Linux support. (#535) Add new module cc_apk_configure for creating Alpine /etc/apk/repositories file. Modify cc_ca_certs, cc_ntp, cc_power_state_change, and cc_resolv_conf for Alpine. Add Alpine template files for Chrony and Busybox NTP support. Add Alpine template file for /etc/hosts. 
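As a rough sketch of the behaviour change in cc_power_state_change (an illustration only, not part of the patch): the reworked load_power_state() now takes the distro name and, for Alpine, builds a halt/poweroff/reboot command with a delay in seconds rather than shutdown's "+minutes" form. The '-P' flag shown for the non-Alpine case is assumed from the module's existing opt_map entry for poweroff.

    from cloudinit.config import cc_power_state_change as psc

    cfg = {'power_state': {'mode': 'poweroff', 'delay': '5'}}

    # Non-Alpine distros keep the existing behaviour: shutdown with a
    # "+minutes" delay and the flag taken from opt_map (poweroff -> '-P').
    args, timeout, condition = psc.load_power_state(cfg, 'ubuntu')
    # args == ['shutdown', '-P', '+5'], timeout == 30.0, condition is True

    # Alpine's poweroff takes its delay in seconds via '-d' and supports no
    # broadcast message, so the delay is scaled by 60 and any message dropped.
    args, timeout, condition = psc.load_power_state(cfg, 'alpine')
    # args == ['poweroff', '-d', '300']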
--- README.md | 2 +- cloudinit/config/cc_apk_configure.py | 263 ++++++++++++++++++ cloudinit/config/cc_ca_certs.py | 22 +- cloudinit/config/cc_ntp.py | 61 ++++- cloudinit/config/cc_power_state_change.py | 57 +++- cloudinit/config/cc_resolv_conf.py | 4 +- cloudinit/distros/__init__.py | 7 +- cloudinit/distros/alpine.py | 165 ++++++++++++ cloudinit/util.py | 3 +- config/cloud.cfg.tmpl | 21 +- doc/rtd/topics/availability.rst | 13 +- doc/rtd/topics/instancedata.rst | 1 + doc/rtd/topics/modules.rst | 1 + doc/rtd/topics/network-config.rst | 2 +- templates/chrony.conf.alpine.tmpl | 38 +++ templates/hosts.alpine.tmpl | 28 ++ templates/ntp.conf.alpine.tmpl | 10 + tests/unittests/test_cli.py | 2 +- .../test_handler/test_handler_apk_configure.py | 299 +++++++++++++++++++++ .../test_handler/test_handler_ca_certs.py | 11 +- tests/unittests/test_handler/test_handler_ntp.py | 129 ++++++--- .../test_handler/test_handler_power_state.py | 29 +- tests/unittests/test_handler/test_schema.py | 1 + tools/render-cloudcfg | 5 +- 24 files changed, 1068 insertions(+), 106 deletions(-) create mode 100644 cloudinit/config/cc_apk_configure.py create mode 100644 cloudinit/distros/alpine.py create mode 100644 templates/chrony.conf.alpine.tmpl create mode 100644 templates/hosts.alpine.tmpl create mode 100644 templates/ntp.conf.alpine.tmpl create mode 100644 tests/unittests/test_handler/test_handler_apk_configure.py (limited to 'cloudinit/config/cc_power_state_change.py') diff --git a/README.md b/README.md index a3455135..435405da 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Ubuntu
SLES/openSUSE<br />RHEL/CentOS<br />Fedora<br />Gentoo Linux<br />Debian<br />ArchLinux<br />FreeBSD<br />NetBSD<br />OpenBSD<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />
| ## To start developing cloud-init diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py new file mode 100644 index 00000000..84d7a0b6 --- /dev/null +++ b/cloudinit/config/cc_apk_configure.py @@ -0,0 +1,263 @@ +# Copyright (c) 2020 Dermot Bradley +# +# Author: Dermot Bradley +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Apk Configure: Configures apk repositories file.""" + +from textwrap import dedent + +from cloudinit import log as logging +from cloudinit import temp_utils +from cloudinit import templater +from cloudinit import util +from cloudinit.config.schema import ( + get_schema_doc, validate_cloudconfig_schema) +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + + +# If no mirror is specified then use this one +DEFAULT_MIRROR = "https://alpine.global.ssl.fastly.net/alpine" + + +REPOSITORIES_TEMPLATE = """\ +## template:jinja +# +# Created by cloud-init +# +# This file is written on first boot of an instance +# + +{{ alpine_baseurl }}/{{ alpine_version }}/main +{% if community_enabled -%} +{{ alpine_baseurl }}/{{ alpine_version }}/community +{% endif -%} +{% if testing_enabled -%} +{% if alpine_version != 'edge' %} +# +# Testing - using with non-Edge installation may cause problems! +# +{% endif %} +{{ alpine_baseurl }}/edge/testing +{% endif %} +{% if local_repo != '' %} + +# +# Local repo +# +{{ local_repo }}/{{ alpine_version }} +{% endif %} + +""" + + +frequency = PER_INSTANCE +distros = ['alpine'] +schema = { + 'id': 'cc_apk_configure', + 'name': 'APK Configure', + 'title': 'Configure apk repositories file', + 'description': dedent("""\ + This module handles configuration of the /etc/apk/repositories file. + + .. note:: + To ensure that apk configuration is valid yaml, any strings + containing special characters, especially ``:`` should be quoted. + """), + 'distros': distros, + 'examples': [ + dedent("""\ + # Keep the existing /etc/apk/repositories file unaltered. + apk_repos: + preserve_repositories: true + """), + dedent("""\ + # Create repositories file for Alpine v3.12 main and community + # using default mirror site. + apk_repos: + alpine_repo: + community_enabled: true + version: 'v3.12' + """), + dedent("""\ + # Create repositories file for Alpine Edge main, community, and + # testing using a specified mirror site and also a local repo. + apk_repos: + alpine_repo: + base_url: 'https://some-alpine-mirror/alpine' + community_enabled: true + testing_enabled: true + version: 'edge' + local_repo_base_url: 'https://my-local-server/local-alpine' + """), + ], + 'frequency': frequency, + 'type': 'object', + 'properties': { + 'apk_repos': { + 'type': 'object', + 'properties': { + 'preserve_repositories': { + 'type': 'boolean', + 'default': False, + 'description': dedent("""\ + By default, cloud-init will generate a new repositories + file ``/etc/apk/repositories`` based on any valid + configuration settings specified within a apk_repos + section of cloud config. To disable this behavior and + preserve the repositories file from the pristine image, + set ``preserve_repositories`` to ``true``. + + The ``preserve_repositories`` option overrides + all other config keys that would alter + ``/etc/apk/repositories``. + """) + }, + 'alpine_repo': { + 'type': ['object', 'null'], + 'properties': { + 'base_url': { + 'type': 'string', + 'default': DEFAULT_MIRROR, + 'description': dedent("""\ + The base URL of an Alpine repository, or + mirror, to download official packages from. 
+ If not specified then it defaults to ``{}`` + """.format(DEFAULT_MIRROR)) + }, + 'community_enabled': { + 'type': 'boolean', + 'default': False, + 'description': dedent("""\ + Whether to add the Community repo to the + repositories file. By default the Community + repo is not included. + """) + }, + 'testing_enabled': { + 'type': 'boolean', + 'default': False, + 'description': dedent("""\ + Whether to add the Testing repo to the + repositories file. By default the Testing + repo is not included. It is only recommended + to use the Testing repo on a machine running + the ``Edge`` version of Alpine as packages + installed from Testing may have dependancies + that conflict with those in non-Edge Main or + Community repos." + """) + }, + 'version': { + 'type': 'string', + 'description': dedent("""\ + The Alpine version to use (e.g. ``v3.12`` or + ``edge``) + """) + }, + }, + 'required': ['version'], + 'minProperties': 1, + 'additionalProperties': False, + }, + 'local_repo_base_url': { + 'type': 'string', + 'description': dedent("""\ + The base URL of an Alpine repository containing + unofficial packages + """) + } + }, + 'required': [], + 'minProperties': 1, # Either preserve_repositories or alpine_repo + 'additionalProperties': False, + } + } +} + +__doc__ = get_schema_doc(schema) + + +def handle(name, cfg, cloud, log, _args): + """ + Call to handle apk_repos sections in cloud-config file. + + @param name: The module name "apk-configure" from cloud.cfg + @param cfg: A nested dict containing the entire cloud config contents. + @param cloud: The CloudInit object in use. + @param log: Pre-initialized Python logger object to use for logging. + @param _args: Any module arguments from cloud.cfg + """ + + # If there is no "apk_repos" section in the configuration + # then do nothing. + apk_section = cfg.get('apk_repos') + if not apk_section: + LOG.debug(("Skipping module named %s," + " no 'apk_repos' section found"), name) + return + + validate_cloudconfig_schema(cfg, schema) + + # If "preserve_repositories" is explicitly set to True in + # the configuration do nothing. + if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False): + LOG.debug(("Skipping module named %s," + " 'preserve_repositories' is set"), name) + return + + # If there is no "alpine_repo" subsection of "apk_repos" present in the + # configuration then do nothing, as at least "version" is required to + # create valid repositories entries. + alpine_repo = apk_section.get('alpine_repo') + if not alpine_repo: + LOG.debug(("Skipping module named %s," + " no 'alpine_repo' configuration found"), name) + return + + # If there is no "version" value present in configuration then do nothing. + alpine_version = alpine_repo.get('version') + if not alpine_version: + LOG.debug(("Skipping module named %s," + " 'version' not specified in alpine_repo"), name) + return + + local_repo = apk_section.get('local_repo_base_url', '') + + _write_repositories_file(alpine_repo, alpine_version, local_repo) + + +def _write_repositories_file(alpine_repo, alpine_version, local_repo): + """ + Write the /etc/apk/repositories file with the specified entries. + + @param alpine_repo: A nested dict of the alpine_repo configuration. + @param alpine_version: A string of the Alpine version to use. + @param local_repo: A string containing the base URL of a local repo. 
+ """ + + repo_file = '/etc/apk/repositories' + + alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR) + + params = {'alpine_baseurl': alpine_baseurl, + 'alpine_version': alpine_version, + 'community_enabled': alpine_repo.get('community_enabled'), + 'testing_enabled': alpine_repo.get('testing_enabled'), + 'local_repo': local_repo} + + tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + template_fn = tfile[1] # Filepath is second item in tuple + util.write_file(template_fn, content=REPOSITORIES_TEMPLATE) + + LOG.debug('Generating Alpine repository configuration file: %s', + repo_file) + templater.render_to_file(template_fn, repo_file, params) + # Clean up temporary template + util.del_file(template_fn) + + +# vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 910b78de..3c453d91 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -16,11 +16,16 @@ can be removed from the system with the configuration option certificates must be specified using valid yaml. in order to specify a multiline certificate, the yaml multiline list syntax must be used +.. note:: + For Alpine Linux the "remove-defaults" functionality works if the + ca-certificates package is installed but not if the + ca-certificates-bundle package is installed. + **Internal name:** ``cc_ca_certs`` **Module frequency:** per instance -**Supported distros:** ubuntu, debian +**Supported distros:** alpine, debian, ubuntu **Config keys**:: @@ -45,7 +50,7 @@ CA_CERT_CONFIG = "/etc/ca-certificates.conf" CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) -distros = ['ubuntu', 'debian'] +distros = ['alpine', 'debian', 'ubuntu'] def update_ca_certs(): @@ -83,7 +88,7 @@ def add_ca_certs(certs): util.write_file(CA_CERT_CONFIG, out, omode="wb") -def remove_default_ca_certs(): +def remove_default_ca_certs(distro_name): """ Removes all default trusted CA certificates from the system. To actually apply the change you must also call L{update_ca_certs}. @@ -91,11 +96,14 @@ def remove_default_ca_certs(): util.delete_dir_contents(CA_CERT_PATH) util.delete_dir_contents(CA_CERT_SYSTEM_PATH) util.write_file(CA_CERT_CONFIG, "", mode=0o644) - debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" - subp.subp(('debconf-set-selections', '-'), debconf_sel) + + if distro_name != 'alpine': + debconf_sel = ( + "ca-certificates ca-certificates/trust_new_crts " + "select no") + subp.subp(('debconf-set-selections', '-'), debconf_sel) -def handle(name, cfg, _cloud, log, _args): +def handle(name, cfg, cloud, log, _args): """ Call to handle ca-cert sections in cloud-config file. @@ -117,7 +125,7 @@ def handle(name, cfg, _cloud, log, _args): # default trusted CA certs first. if ca_cert_cfg.get("remove-defaults", False): log.debug("Removing default certificates") - remove_default_ca_certs() + remove_default_ca_certs(cloud.distro.name) # If we are given any new trusted CA certs to add, add them. 
if "trusted" in ca_cert_cfg: diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 7d3f73ff..3d7279d6 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -24,7 +24,8 @@ LOG = logging.getLogger(__name__) frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 -distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu'] +distros = ['alpine', 'centos', 'debian', 'fedora', 'opensuse', 'rhel', + 'sles', 'ubuntu'] NTP_CLIENT_CONFIG = { 'chrony': { @@ -63,6 +64,17 @@ NTP_CLIENT_CONFIG = { # This is Distro-specific configuration overrides of the base config DISTRO_CLIENT_CONFIG = { + 'alpine': { + 'chrony': { + 'confpath': '/etc/chrony/chrony.conf', + 'service_name': 'chronyd', + }, + 'ntp': { + 'confpath': '/etc/ntp.conf', + 'packages': [], + 'service_name': 'ntpd', + }, + }, 'debian': { 'chrony': { 'confpath': '/etc/chrony/chrony.conf', @@ -114,11 +126,11 @@ schema = { Handle ntp configuration. If ntp is not installed on the system and ntp configuration is specified, ntp will be installed. If there is a default ntp config file in the image or one is present in the - distro's ntp package, it will be copied to ``/etc/ntp.conf.dist`` - before any changes are made. A list of ntp pools and ntp servers can - be provided under the ``ntp`` config key. If no ntp ``servers`` or - ``pools`` are provided, 4 pools will be used in the format - ``{0-3}.{distro}.pool.ntp.org``."""), + distro's ntp package, it will be copied to a file with ``.dist`` + appended to the filename before any changes are made. A list of ntp + pools and ntp servers can be provided under the ``ntp`` config key. + If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used + in the format ``{0-3}.{distro}.pool.ntp.org``."""), 'distros': distros, 'examples': [ dedent("""\ @@ -171,7 +183,10 @@ schema = { 'description': dedent("""\ List of ntp pools. If both pools and servers are empty, 4 default pool servers will be provided of - the format ``{0-3}.{distro}.pool.ntp.org``.""") + the format ``{0-3}.{distro}.pool.ntp.org``. NOTE: + for Alpine Linux when using the Busybox NTP client + this setting will be ignored due to the limited + functionality of Busybox's ntpd.""") }, 'servers': { 'type': 'array', @@ -364,21 +379,30 @@ def generate_server_names(distro): """ names = [] pool_distro = distro - # For legal reasons x.pool.sles.ntp.org does not exist, - # use the opensuse pool + if distro == 'sles': + # For legal reasons x.pool.sles.ntp.org does not exist, + # use the opensuse pool pool_distro = 'opensuse' + elif distro == 'alpine': + # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist + # so use general x.pool.ntp.org instead. + pool_distro = '' + for x in range(0, NR_POOL_SERVERS): - name = "%d.%s.pool.ntp.org" % (x, pool_distro) - names.append(name) + names.append(".".join( + [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n])) + return names -def write_ntp_config_template(distro_name, servers=None, pools=None, - path=None, template_fn=None, template=None): +def write_ntp_config_template(distro_name, service_name=None, servers=None, + pools=None, path=None, template_fn=None, + template=None): """Render a ntp client configuration for the specified client. @param distro_name: string. The distro class name. + @param service_name: string. The name of the NTP client service. @param servers: A list of strings specifying ntp servers. Defaults to empty list. @param pools: A list of strings specifying ntp pools. 
Defaults to empty @@ -397,7 +421,14 @@ def write_ntp_config_template(distro_name, servers=None, pools=None, if not pools: pools = [] - if len(servers) == 0 and len(pools) == 0: + if (len(servers) == 0 and distro_name == 'alpine' and + service_name == 'ntpd'): + # Alpine's Busybox ntpd only understands "servers" configuration + # and not "pool" configuration. + servers = generate_server_names(distro_name) + LOG.debug( + 'Adding distro default ntp servers: %s', ','.join(servers)) + elif len(servers) == 0 and len(pools) == 0: pools = generate_server_names(distro_name) LOG.debug( 'Adding distro default ntp pool servers: %s', ','.join(pools)) @@ -532,6 +563,8 @@ def handle(name, cfg, cloud, log, _args): raise RuntimeError(msg) write_ntp_config_template(cloud.distro.name, + service_name=ntp_client_config.get( + 'service_name'), servers=ntp_cfg.get('servers', []), pools=ntp_cfg.get('pools', []), path=ntp_client_config.get('confpath'), diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 41ffb46c..ab953a0d 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -22,9 +22,8 @@ The ``delay`` key specifies a duration to be added onto any shutdown command used. Therefore, if a 5 minute delay and a 120 second shutdown are specified, the maximum amount of time between cloud-init starting and the system shutting down is 7 minutes, and the minimum amount of time is 5 minutes. The ``delay`` -key must have an argument in a form that the ``shutdown`` utility recognizes. -The most common format is the form ``+5`` for 5 minutes. See ``man shutdown`` -for more options. +key must have an argument in either the form ``+5`` for 5 minutes or ``now`` +for immediate shutdown. Optionally, a command can be run to determine whether or not the system should shut down. The command to be run should be specified in the @@ -33,6 +32,10 @@ the system should shut down. The command to be run should be specified in the ``condition`` key is omitted or the command specified by the ``condition`` key returns 0. +.. note:: + With Alpine Linux any message value specified is ignored as Alpine's halt, + poweroff, and reboot commands do not support broadcasting a message. + **Internal name:** ``cc_power_state_change`` **Module frequency:** per instance @@ -112,9 +115,9 @@ def check_condition(cond, log=None): return False -def handle(_name, cfg, _cloud, log, _args): +def handle(_name, cfg, cloud, log, _args): try: - (args, timeout, condition) = load_power_state(cfg) + (args, timeout, condition) = load_power_state(cfg, cloud.distro.name) if args is None: log.debug("no power_state provided. 
doing nothing") return @@ -141,7 +144,19 @@ def handle(_name, cfg, _cloud, log, _args): condition, execmd, [args, devnull_fp]) -def load_power_state(cfg): +def convert_delay(delay, fmt=None, scale=None): + if not fmt: + fmt = "+%s" + if not scale: + scale = 1 + + if delay != "now": + delay = fmt % int(int(delay) * int(scale)) + + return delay + + +def load_power_state(cfg, distro_name): # returns a tuple of shutdown_command, timeout # shutdown_command is None if no config found pstate = cfg.get('power_state') @@ -161,20 +176,34 @@ def load_power_state(cfg): (','.join(opt_map.keys()), mode)) delay = pstate.get("delay", "now") - # convert integer 30 or string '30' to '+30' + message = pstate.get("message") + scale = 1 + fmt = "+%s" + command = ["shutdown", opt_map[mode]] + + if distro_name == 'alpine': + # Convert integer 30 or string '30' to '1800' (seconds) as Alpine's + # halt/poweroff/reboot commands take seconds rather than minutes. + scale = 60 + # No "+" in front of delay value as not supported by Alpine's commands. + fmt = "%s" + if delay == "now": + # Alpine's commands do not understand "now". + delay = "0" + command = [mode, "-d"] + # Alpine's commands don't support a message. + message = None + try: - delay = "+%s" % int(delay) + delay = convert_delay(delay, fmt=fmt, scale=scale) except ValueError: - pass - - if delay != "now" and not re.match(r"\+[0-9]+", delay): raise TypeError( "power_state[delay] must be 'now' or '+m' (minutes)." " found '%s'." % delay) - args = ["shutdown", opt_map[mode], delay] - if pstate.get("message"): - args.append(pstate.get("message")) + args = command + [delay] + if message: + args.append(message) try: timeout = float(pstate.get('timeout', 30.0)) diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 69f4768a..519e66eb 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -30,7 +30,7 @@ are configured correctly. **Module frequency:** per instance -**Supported distros:** fedora, rhel, sles +**Supported distros:** alpine, fedora, rhel, sles **Config keys**:: @@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__) frequency = PER_INSTANCE -distros = ['fedora', 'opensuse', 'rhel', 'sles'] +distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles'] def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index c7163e1c..effb4276 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -40,12 +40,13 @@ from .networking import LinuxNetworking ALL_DISTROS = 'all' OSFAMILIES = { + 'alpine': ['alpine'], + 'arch': ['arch'], 'debian': ['debian', 'ubuntu'], - 'redhat': ['amazon', 'centos', 'fedora', 'rhel'], - 'gentoo': ['gentoo'], 'freebsd': ['freebsd'], + 'gentoo': ['gentoo'], + 'redhat': ['amazon', 'centos', 'fedora', 'rhel'], 'suse': ['opensuse', 'sles'], - 'arch': ['arch'], } LOG = logging.getLogger(__name__) diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py new file mode 100644 index 00000000..e42443fc --- /dev/null +++ b/cloudinit/distros/alpine.py @@ -0,0 +1,165 @@ +# Copyright (C) 2016 Matt Dainty +# Copyright (C) 2020 Dermot Bradley +# +# Author: Matt Dainty +# Author: Dermot Bradley +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import distros +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import subp +from cloudinit import util + +from cloudinit.distros.parsers.hostname import HostnameConf + +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + +NETWORK_FILE_HEADER = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} + +""" + + +class Distro(distros.Distro): + init_cmd = ['rc-service'] # init scripts + locale_conf_fn = "/etc/profile.d/locale.sh" + network_conf_fn = "/etc/network/interfaces" + renderer_configs = { + "eni": {"eni_path": network_conf_fn, + "eni_header": NETWORK_FILE_HEADER} + } + + def __init__(self, name, cfg, paths): + distros.Distro.__init__(self, name, cfg, paths) + # This will be used to restrict certain + # calls from repeatly happening (when they + # should only happen say once per instance...) + self._runner = helpers.Runners(paths) + self.default_locale = 'C.UTF-8' + self.osfamily = 'alpine' + cfg['ssh_svcname'] = 'sshd' + + def get_locale(self): + """The default locale for Alpine Linux is different than + cloud-init's DataSource default. + """ + return self.default_locale + + def apply_locale(self, locale, out_fn=None): + # Alpine has limited locale support due to musl library limitations + + if not locale: + locale = self.default_locale + if not out_fn: + out_fn = self.locale_conf_fn + + lines = [ + "#", + "# This file is created by cloud-init once per new instance boot", + "#", + "export CHARSET=UTF-8", + "export LANG=%s" % locale, + "export LC_COLLATE=C", + "", + ] + util.write_file(out_fn, "\n".join(lines), 0o644) + + def install_packages(self, pkglist): + self.update_package_sources() + self.package_command('add', pkgs=pkglist) + + def _write_network_config(self, netconfig): + return self._supported_write_network_config(netconfig) + + def _bring_up_interfaces(self, device_names): + use_all = False + for d in device_names: + if d == 'all': + use_all = True + if use_all: + return distros.Distro._bring_up_interface(self, '-a') + else: + return distros.Distro._bring_up_interfaces(self, device_names) + + def _write_hostname(self, your_hostname, out_fn): + conf = None + try: + # Try to update the previous one + # so lets see if we can read it first. 
+ conf = self._read_hostname_conf(out_fn) + except IOError: + pass + if not conf: + conf = HostnameConf('') + conf.set_hostname(your_hostname) + util.write_file(out_fn, str(conf), 0o644) + + def _read_system_hostname(self): + sys_hostname = self._read_hostname(self.hostname_conf_fn) + return (self.hostname_conf_fn, sys_hostname) + + def _read_hostname_conf(self, filename): + conf = HostnameConf(util.load_file(filename)) + conf.parse() + return conf + + def _read_hostname(self, filename, default=None): + hostname = None + try: + conf = self._read_hostname_conf(filename) + hostname = conf.hostname + except IOError: + pass + if not hostname: + return default + return hostname + + def _get_localhost_ip(self): + return "127.0.1.1" + + def set_timezone(self, tz): + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + cmd = ['apk'] + # Redirect output + cmd.append("--quiet") + + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + if command: + cmd.append(command) + + pkglist = util.expand_package_list('%s-%s', pkgs) + cmd.extend(pkglist) + + # Allow the output of this to flow outwards (ie not be captured) + subp.subp(cmd, capture=False) + + def update_package_sources(self): + self._runner.run("update-sources", self.package_command, + ["update"], freq=PER_INSTANCE) + + @property + def preferred_ntp_clients(self): + """Allow distro to determine the preferred ntp client list""" + if not self._preferred_ntp_clients: + self._preferred_ntp_clients = ['chrony', 'ntp'] + + return self._preferred_ntp_clients + +# vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index edd37039..dd263803 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -548,7 +548,8 @@ def system_info(): if system == "linux": linux_dist = info['dist'][0].lower() if linux_dist in ( - 'arch', 'centos', 'debian', 'fedora', 'rhel', 'suse'): + 'alpine', 'arch', 'centos', 'debian', 'fedora', 'rhel', + 'suse'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index b44cbce7..2beb9b0c 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -21,7 +21,7 @@ disable_root: false disable_root: true {% endif %} -{% if variant in ["amazon", "centos", "fedora", "rhel"] %} +{% if variant in ["alpine", "amazon", "centos", "fedora", "rhel"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} resize_rootfs: noblock @@ -71,6 +71,9 @@ cloud_init_modules: - set_hostname - update_hostname - update_etc_hosts +{% if variant in ["alpine"] %} + - resolv_conf +{% endif %} {% if not variant.endswith("bsd") %} - ca-certs - rsyslog @@ -104,6 +107,9 @@ cloud_config_modules: {% if variant in ["suse"] %} - zypper-add-repo {% endif %} +{% if variant in ["alpine"] %} + - apk-configure +{% endif %} {% if variant not in ["freebsd", "netbsd"] %} - ntp {% endif %} @@ -145,7 +151,9 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu"] %} +{% if variant in ["alpine", "amazon", "arch", "centos", "debian", + "fedora", "freebsd", "netbsd", "openbsd", "rhel", + "suse", "ubuntu"] %} distro: {{ variant }} {% else %} # Unknown/fallback distro. 
@@ -196,7 +204,8 @@ system_info: primary: http://ports.ubuntu.com/ubuntu-ports security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh -{% elif variant in ["amazon", "arch", "centos", "fedora", "rhel", "suse"] %} +{% elif variant in ["alpine", "amazon", "arch", "centos", "fedora", + "rhel", "suse"] %} # Default user name + that default users groups (if added/used) default_user: {% if variant == "amazon" %} @@ -210,13 +219,19 @@ system_info: {% endif %} {% if variant == "suse" %} groups: [cdrom, users] +{% elif variant == "alpine" %} + groups: [adm, sudo] {% elif variant == "arch" %} groups: [wheel, users] {% else %} groups: [wheel, adm, systemd-journal] {% endif %} sudo: ["ALL=(ALL) NOPASSWD:ALL"] +{% if variant == "alpine" %} + shell: /bin/ash +{% else %} shell: /bin/bash +{% endif %} # Other config here will be given to the distro class and/or path classes paths: cloud_dir: /var/lib/cloud/ diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index 84490460..8f56a7d2 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -17,16 +17,17 @@ Distributions Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD and OpenBSD: -- Ubuntu -- SLES/openSUSE -- RHEL/CentOS -- Fedora -- Gentoo Linux -- Debian +- Alpine Linux - ArchLinux +- Debian +- Fedora - FreeBSD +- Gentoo Linux - NetBSD - OpenBSD +- RHEL/CentOS +- SLES/openSUSE +- Ubuntu Clouds ====== diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 845098bb..255245a4 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -132,6 +132,7 @@ This shall be the distro name, version and release as determined by Example output: +- alpine, 3.12.0, '' - centos, 7.5, core - debian, 9, stretch - freebsd, 12.0-release-p10, diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst index 9c9be804..e30fe0fe 100644 --- a/doc/rtd/topics/modules.rst +++ b/doc/rtd/topics/modules.rst @@ -6,6 +6,7 @@ Modules ******* .. contents:: Table of Contents +.. automodule:: cloudinit.config.cc_apk_configure .. automodule:: cloudinit.config.cc_apt_configure .. automodule:: cloudinit.config.cc_apt_pipelining .. automodule:: cloudinit.config.cc_bootcmd diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 8eeadebf..08db04d8 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -165,7 +165,7 @@ supported formats. The following ``renderers`` are supported in cloud-init: - **ENI** /etc/network/interfaces or ``ENI`` is supported by the ``ifupdown`` package -found in Ubuntu and Debian. +found in Alpine Linux, Debian and Ubuntu. - **Netplan** diff --git a/templates/chrony.conf.alpine.tmpl b/templates/chrony.conf.alpine.tmpl new file mode 100644 index 00000000..45efc18c --- /dev/null +++ b/templates/chrony.conf.alpine.tmpl @@ -0,0 +1,38 @@ +## template:jinja +# Welcome to the chrony configuration file. See chrony.conf(5) for more +# information about usable directives. +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# This directive specifies the location of the file containing ID/key pairs for +# NTP authentication. +keyfile /etc/chrony/chrony.keys + +# This directive specifies the file into which chronyd will store the rate +# information. 
+driftfile /var/lib/chrony/chrony.drift + +# Uncomment the following line to turn logging on. +#log tracking measurements statistics + +# Log files location. +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. +maxupdateskew 100.0 + +# This directive enables kernel synchronisation (every 11 minutes) of the +# real-time clock. Note that it can’t be used along with the 'rtcfile' directive. +rtcsync + +# Step the system clock instead of slewing it if the adjustment is larger than +# one second, but only in the first three clock updates. +makestep 1 3 diff --git a/templates/hosts.alpine.tmpl b/templates/hosts.alpine.tmpl new file mode 100644 index 00000000..33c1a941 --- /dev/null +++ b/templates/hosts.alpine.tmpl @@ -0,0 +1,28 @@ +## template:jinja +{# +This file /etc/cloud/templates/hosts.alpine.tmpl is only utilized +if enabled in cloud-config. Specifically, in order to enable it +you need to add the following to config: + manage_etc_hosts: True +-#} +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.alpine.tmpl +# b.) change or remove the value of 'manage_etc_hosts' in +# /etc/cloud/cloud.cfg or cloud-config from user-data +# +# The following lines are desirable for IPv4 capable hosts +127.0.1.1 {{fqdn}} {{hostname}} +127.0.0.1 localhost.localdomain localhost +127.0.0.1 localhost4.localdomain4 localhost4 + +# The following lines are desirable for IPv6 capable hosts +::1 {{fqdn}} {{hostname}} +::1 localhost6.localdomain6 localhost6 + +fe00::0 ip6-localnet +ff00::0 ip6-mcastprefix +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters +ff02::3 ip6-allhosts diff --git a/templates/ntp.conf.alpine.tmpl b/templates/ntp.conf.alpine.tmpl new file mode 100644 index 00000000..59ca8fc1 --- /dev/null +++ b/templates/ntp.conf.alpine.tmpl @@ -0,0 +1,10 @@ +## template:jinja +# /etc/ntp.conf +# +# Configuration for Busybox ntpd - it only supports "server" lines. + +{% if servers %}# Servers +{% endif %} +{% for server in servers -%} +server {{server}} +{% endfor %} diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 43d996b9..dcf0fe5a 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -224,7 +224,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all']) expected_doc_sections = [ '**Supported distros:** all', - '**Supported distros:** centos, debian, fedora', + '**Supported distros:** alpine, centos, debian, fedora', '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' ] diff --git a/tests/unittests/test_handler/test_handler_apk_configure.py b/tests/unittests/test_handler/test_handler_apk_configure.py new file mode 100644 index 00000000..8acc0b33 --- /dev/null +++ b/tests/unittests/test_handler/test_handler_apk_configure.py @@ -0,0 +1,299 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +""" test_apk_configure +Test creation of repositories file +""" + +import logging +import os +import textwrap + +from cloudinit import (cloud, helpers, util) + +from cloudinit.config import cc_apk_configure +from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock) + +REPO_FILE = "/etc/apk/repositories" +DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine" +CC_APK = 'cloudinit.config.cc_apk_configure' + + +class TestNoConfig(FilesystemMockingTestCase): + def setUp(self): + super(TestNoConfig, self).setUp() + self.add_patch(CC_APK + '._write_repositories_file', 'm_write_repos') + self.name = "apk-configure" + self.cloud_init = None + self.log = logging.getLogger("TestNoConfig") + self.args = [] + + def test_no_config(self): + """ + Test that nothing is done if no apk-configure + configuration is provided. + """ + config = util.get_builtin_cfg() + + cc_apk_configure.handle(self.name, config, self.cloud_init, + self.log, self.args) + + self.assertEqual(0, self.m_write_repos.call_count) + + +class TestConfig(FilesystemMockingTestCase): + def setUp(self): + super(TestConfig, self).setUp() + self.new_root = self.tmp_dir() + self.new_root = self.reRoot(root=self.new_root) + for dirname in ['tmp', 'etc/apk']: + util.ensure_dir(os.path.join(self.new_root, dirname)) + self.paths = helpers.Paths({'templates_dir': self.new_root}) + self.name = "apk-configure" + self.cloud = cloud.Cloud(None, self.paths, None, None, None) + self.log = logging.getLogger("TestNoConfig") + self.args = [] + + @mock.patch(CC_APK + '._write_repositories_file') + def test_no_repo_settings(self, m_write_repos): + """ + Test that nothing is written if the 'alpine-repo' key + is not present. + """ + config = {"apk_repos": {}} + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + self.assertEqual(0, m_write_repos.call_count) + + @mock.patch(CC_APK + '._write_repositories_file') + def test_empty_repo_settings(self, m_write_repos): + """ + Test that nothing is written if 'alpine_repo' list is empty. + """ + config = {"apk_repos": {"alpine_repo": []}} + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + self.assertEqual(0, m_write_repos.call_count) + + def test_only_main_repo(self): + """ + Test when only details of main repo is written to file. + """ + alpine_version = 'v3.12' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version + } + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + + """.format(DEFAULT_MIRROR_URL, alpine_version)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_main_and_community_repos(self): + """ + Test when only details of main and community repos are + written to file. 
+ """ + alpine_version = 'edge' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True + } + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + + """.format(DEFAULT_MIRROR_URL, alpine_version)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_main_community_testing_repos(self): + """ + Test when details of main, community and testing repos + are written to file. + """ + alpine_version = 'v3.12' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True, + "testing_enabled": True + } + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + # + # Testing - using with non-Edge installation may cause problems! + # + {0}/edge/testing + + """.format(DEFAULT_MIRROR_URL, alpine_version)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_edge_main_community_testing_repos(self): + """ + Test when details of main, community and testing repos + for Edge version of Alpine are written to file. + """ + alpine_version = 'edge' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True, + "testing_enabled": True + } + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + {0}/{1}/testing + + """.format(DEFAULT_MIRROR_URL, alpine_version)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_main_community_testing_local_repos(self): + """ + Test when details of main, community, testing and + local repos are written to file. + """ + alpine_version = 'v3.12' + local_repo_url = 'http://some.mirror/whereever' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True, + "testing_enabled": True + }, + "local_repo_base_url": local_repo_url + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + # + # Testing - using with non-Edge installation may cause problems! + # + {0}/edge/testing + + # + # Local repo + # + {2}/{1} + + """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_edge_main_community_testing_local_repos(self): + """ + Test when details of main, community, testing and local repos + for Edge version of Alpine are written to file. 
+ """ + alpine_version = 'edge' + local_repo_url = 'http://some.mirror/whereever' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True, + "testing_enabled": True + }, + "local_repo_base_url": local_repo_url + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + {0}/edge/testing + + # + # Local repo + # + {2}/{1} + + """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index c1aff181..e74a0a08 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import cloud +from cloudinit import distros from cloudinit.config import cc_ca_certs from cloudinit import helpers from cloudinit import subp @@ -46,8 +47,9 @@ class TestConfig(TestCase): def setUp(self): super(TestConfig, self).setUp() self.name = "ca-certs" + distro = self._fetch_distro('ubuntu') self.paths = None - self.cloud = cloud.Cloud(None, self.paths, None, None, None) + self.cloud = cloud.Cloud(None, self.paths, None, distro, None) self.log = logging.getLogger("TestNoConfig") self.args = [] @@ -62,6 +64,11 @@ class TestConfig(TestCase): self.mock_remove = self.mocks.enter_context( mock.patch.object(cc_ca_certs, 'remove_default_ca_certs')) + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + def test_no_trusted_list(self): """ Test that no certificates are written if the 'trusted' key is not @@ -275,7 +282,7 @@ class TestRemoveDefaultCaCerts(TestCase): mock.patch.object(util, 'write_file')) mock_subp = mocks.enter_context(mock.patch.object(subp, 'subp')) - cc_ca_certs.remove_default_ca_certs() + cc_ca_certs.remove_default_ca_certs('ubuntu') mock_delete.assert_has_calls([ mock.call("/usr/share/ca-certificates/"), diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 92a33ec1..6b9c8377 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -239,6 +239,35 @@ class TestNtp(FilesystemMockingTestCase): self.assertEqual(delta[distro][client][key], result[client][key]) + def _get_expected_pools(self, pools, distro, client): + if client in ['ntp', 'chrony']: + if client == 'ntp' and distro == 'alpine': + # NTP for Alpine Linux is Busybox's ntp which does not + # support 'pool' lines in its configuration file. + expected_pools = [] + else: + expected_pools = [ + 'pool {0} iburst'.format(pool) for pool in pools] + elif client == 'systemd-timesyncd': + expected_pools = " ".join(pools) + + return expected_pools + + def _get_expected_servers(self, servers, distro, client): + if client in ['ntp', 'chrony']: + if client == 'ntp' and distro == 'alpine': + # NTP for Alpine Linux is Busybox's ntp which only supports + # 'server' lines without iburst option. 
+ expected_servers = [ + 'server {0}'.format(srv) for srv in servers] + else: + expected_servers = [ + 'server {0} iburst'.format(srv) for srv in servers] + elif client == 'systemd-timesyncd': + expected_servers = " ".join(servers) + + return expected_servers + def test_ntp_handler_real_distro_ntp_templates(self): """Test ntp handler renders the shipped distro ntp client templates.""" pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] @@ -269,27 +298,35 @@ class TestNtp(FilesystemMockingTestCase): content = util.load_file(confpath) if client in ['ntp', 'chrony']: content_lines = content.splitlines() - expected_servers = [ - 'server {0} iburst'.format(srv) for srv in servers] + expected_servers = self._get_expected_servers(servers, + distro, + client) print('distro=%s client=%s' % (distro, client)) for sline in expected_servers: self.assertIn(sline, content_lines, ('failed to render {0} conf' ' for distro:{1}'.format(client, distro))) - expected_pools = [ - 'pool {0} iburst'.format(pool) for pool in pools] - for pline in expected_pools: - self.assertIn(pline, content_lines, - ('failed to render {0} conf' - ' for distro:{1}'.format(client, - distro))) + expected_pools = self._get_expected_pools(pools, distro, + client) + if expected_pools != []: + for pline in expected_pools: + self.assertIn(pline, content_lines, + ('failed to render {0} conf' + ' for distro:{1}'.format(client, + distro))) elif client == 'systemd-timesyncd': + expected_servers = self._get_expected_servers(servers, + distro, + client) + expected_pools = self._get_expected_pools(pools, + distro, + client) expected_content = ( "# cloud-init generated file\n" + "# See timesyncd.conf(5) for details.\n\n" + - "[Time]\nNTP=%s %s \n" % (" ".join(servers), - " ".join(pools))) + "[Time]\nNTP=%s %s \n" % (expected_servers, + expected_pools)) self.assertEqual(expected_content, content) def test_no_ntpcfg_does_nothing(self): @@ -312,10 +349,20 @@ class TestNtp(FilesystemMockingTestCase): confpath = ntpconfig['confpath'] m_select.return_value = ntpconfig cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, []) - pools = cc_ntp.generate_server_names(mycloud.distro.name) - self.assertEqual( - "servers []\npools {0}\n".format(pools), - util.load_file(confpath)) + if distro == 'alpine': + # _mock_ntp_client_config call above did not specify a + # client value and so it defaults to "ntp" which on + # Alpine Linux only supports servers and not pools. 
+ + servers = cc_ntp.generate_server_names(mycloud.distro.name) + self.assertEqual( + "servers {0}\npools []\n".format(servers), + util.load_file(confpath)) + else: + pools = cc_ntp.generate_server_names(mycloud.distro.name) + self.assertEqual( + "servers []\npools {0}\n".format(pools), + util.load_file(confpath)) self.assertNotIn('Invalid config:', self.logs.getvalue()) @skipUnlessJsonSchema() @@ -374,18 +421,19 @@ class TestNtp(FilesystemMockingTestCase): invalid_config = { 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}} for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] - m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) - self.assertIn( - "Invalid config:\nntp: Additional properties are not allowed " - "('invalidkey' was unexpected)", - self.logs.getvalue()) - self.assertEqual( - "servers []\npools ['0.mycompany.pool.ntp.org']\n", - util.load_file(confpath)) + if distro != 'alpine': + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp: Additional properties are not " + "allowed ('invalidkey' was unexpected)", + self.logs.getvalue()) + self.assertEqual( + "servers []\npools ['0.mycompany.pool.ntp.org']\n", + util.load_file(confpath)) @skipUnlessJsonSchema() @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') @@ -452,9 +500,22 @@ class TestNtp(FilesystemMockingTestCase): confpath = ntpconfig['confpath'] service_name = ntpconfig['service_name'] m_select.return_value = ntpconfig - pools = cc_ntp.generate_server_names(mycloud.distro.name) - # force uses systemd path - m_sysd.return_value = True + + hosts = cc_ntp.generate_server_names(mycloud.distro.name) + uses_systemd = True + expected_service_call = ['systemctl', 'reload-or-restart', + service_name] + expected_content = "servers []\npools {0}\n".format(hosts) + + if distro == 'alpine': + uses_systemd = False + expected_service_call = ['service', service_name, 'restart'] + # _mock_ntp_client_config call above did not specify a client + # value and so it defaults to "ntp" which on Alpine Linux only + # supports servers and not pools. 
+ expected_content = "servers {0}\npools []\n".format(hosts) + + m_sysd.return_value = uses_systemd with mock.patch('cloudinit.config.cc_ntp.util') as m_util: # allow use of util.mergemanydict m_util.mergemanydict.side_effect = util.mergemanydict @@ -465,11 +526,9 @@ class TestNtp(FilesystemMockingTestCase): cfg['ntp']['enabled']) cc_ntp.handle('notimportant', cfg, mycloud, None, None) m_subp.subp.assert_called_with( - ['systemctl', 'reload-or-restart', - service_name], capture=True) - self.assertEqual( - "servers []\npools {0}\n".format(pools), - util.load_file(confpath)) + expected_service_call, capture=True) + + self.assertEqual(expected_content, util.load_file(confpath)) def test_opensuse_picks_chrony(self): """Test opensuse picks chrony or ntp on certain distro versions""" diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 0d8d17b9..93b24fdc 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -11,62 +11,63 @@ from cloudinit.tests.helpers import mock class TestLoadPowerState(t_help.TestCase): def test_no_config(self): # completely empty config should mean do nothing - (cmd, _timeout, _condition) = psc.load_power_state({}) + (cmd, _timeout, _condition) = psc.load_power_state({}, 'ubuntu') self.assertIsNone(cmd) def test_irrelevant_config(self): # no power_state field in config should return None for cmd - (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'}) + (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'}, + 'ubuntu') self.assertIsNone(cmd) def test_invalid_mode(self): cfg = {'power_state': {'mode': 'gibberish'}} - self.assertRaises(TypeError, psc.load_power_state, cfg) + self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu') cfg = {'power_state': {'mode': ''}} - self.assertRaises(TypeError, psc.load_power_state, cfg) + self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu') def test_empty_mode(self): cfg = {'power_state': {'message': 'goodbye'}} - self.assertRaises(TypeError, psc.load_power_state, cfg) + self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu') def test_valid_modes(self): cfg = {'power_state': {}} for mode in ('halt', 'poweroff', 'reboot'): cfg['power_state']['mode'] = mode - check_lps_ret(psc.load_power_state(cfg), mode=mode) + check_lps_ret(psc.load_power_state(cfg, 'ubuntu'), mode=mode) def test_invalid_delay(self): cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}} - self.assertRaises(TypeError, psc.load_power_state, cfg) + self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu') def test_valid_delay(self): cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}} for delay in ("now", "+1", "+30"): cfg['power_state']['delay'] = delay - check_lps_ret(psc.load_power_state(cfg)) + check_lps_ret(psc.load_power_state(cfg, 'ubuntu')) def test_message_present(self): cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}} - ret = psc.load_power_state(cfg) - check_lps_ret(psc.load_power_state(cfg)) + ret = psc.load_power_state(cfg, 'ubuntu') + check_lps_ret(psc.load_power_state(cfg, 'ubuntu')) self.assertIn(cfg['power_state']['message'], ret[0]) def test_no_message(self): # if message is not present, then no argument should be passed for it cfg = {'power_state': {'mode': 'poweroff'}} - (cmd, _timeout, _condition) = psc.load_power_state(cfg) + (cmd, _timeout, _condition) = psc.load_power_state(cfg, 'ubuntu') 
self.assertNotIn("", cmd) - check_lps_ret(psc.load_power_state(cfg)) + check_lps_ret(psc.load_power_state(cfg, 'ubuntu')) self.assertTrue(len(cmd) == 3) def test_condition_null_raises(self): cfg = {'power_state': {'mode': 'poweroff', 'condition': None}} - self.assertRaises(TypeError, psc.load_power_state, cfg) + self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu') def test_condition_default_is_true(self): cfg = {'power_state': {'mode': 'poweroff'}} - _cmd, _timeout, cond = psc.load_power_state(cfg) + _cmd, _timeout, cond = psc.load_power_state(cfg, 'ubuntu') self.assertEqual(cond, True) diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index 99f0b06c..44292571 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -24,6 +24,7 @@ class GetSchemaTest(CiTestCase): schema = get_schema() self.assertCountEqual( [ + 'cc_apk_configure', 'cc_apt_configure', 'cc_bootcmd', 'cc_locale', diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 9322b2c3..ed454840 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -4,8 +4,9 @@ import argparse import os import sys -VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", - "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"] +VARIANTS = ["alpine", "amazon", "arch", "centos", "debian", "fedora", + "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu", + "unknown"] if "avoid-pep8-E402-import-not-top-of-file": -- cgit v1.2.3 From 07104504ab5b30efd2d1f7a8c36effe18b8e5fe0 Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Tue, 25 Aug 2020 17:21:18 +0200 Subject: tox: bump the pylint version to 2.6.0 in the default run (#544) Changes: tox: bump the pylint version to 2.6.0 in the default run Fix pylint 2.6.0 W0707 warnings (raise-missing-from) --- cloudinit/analyze/show.py | 2 +- cloudinit/cmd/devel/make_mime.py | 6 ++-- cloudinit/config/cc_disk_setup.py | 36 ++++++++++++++-------- cloudinit/config/cc_growpart.py | 8 ++--- cloudinit/config/cc_power_state_change.py | 12 +++++--- cloudinit/config/cc_rsyslog.py | 6 ++-- cloudinit/config/cc_set_hostname.py | 2 +- cloudinit/config/cc_ubuntu_advantage.py | 2 +- cloudinit/config/schema.py | 2 +- cloudinit/distros/__init__.py | 5 +-- cloudinit/distros/arch.py | 4 +-- cloudinit/distros/parsers/resolv_conf.py | 7 +++-- cloudinit/gpg.py | 5 +-- cloudinit/handlers/jinja_template.py | 3 +- cloudinit/net/__init__.py | 7 +++-- cloudinit/net/cmdline.py | 4 +-- cloudinit/net/dhcp.py | 4 +-- cloudinit/net/network_state.py | 19 +++++++----- cloudinit/sources/DataSourceAzure.py | 2 +- cloudinit/sources/DataSourceEc2.py | 8 +++-- cloudinit/sources/DataSourceIBMCloud.py | 3 +- cloudinit/sources/DataSourceMAAS.py | 3 +- cloudinit/sources/DataSourceOpenNebula.py | 13 +++++--- cloudinit/sources/DataSourceOpenStack.py | 4 +-- cloudinit/sources/DataSourceSmartOS.py | 4 ++- cloudinit/sources/helpers/azure.py | 34 +++++++++++--------- cloudinit/sources/helpers/netlink.py | 2 +- cloudinit/sources/helpers/openstack.py | 34 +++++++++++--------- cloudinit/subp.py | 3 +- cloudinit/url_helper.py | 6 ++-- cloudinit/util.py | 8 ++--- tests/cloud_tests/platforms/azurecloud/instance.py | 7 +++-- tests/cloud_tests/platforms/azurecloud/platform.py | 23 +++++++++----- tests/cloud_tests/platforms/ec2/instance.py | 4 +-- tests/cloud_tests/platforms/ec2/platform.py | 18 ++++++----- tests/cloud_tests/platforms/lxd/instance.py | 3 +- tests/cloud_tests/platforms/platforms.py | 6 ++-- 
tests/cloud_tests/testcases/__init__.py | 6 ++-- tools/mock-meta.py | 14 +++++---- tox.ini | 2 +- 40 files changed, 205 insertions(+), 136 deletions(-) (limited to 'cloudinit/config/cc_power_state_change.py') diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index 0c825b23..01a4d3e5 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -267,7 +267,7 @@ def gather_timestamps_using_systemd(): except OSError as err: raise RuntimeError('Could not determine container boot ' 'time from /proc/1/cmdline. ({})' - .format(err)) + .format(err)) from err status = CONTAINER_CODE else: status = FAIL_CODE diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py index 77e10540..4e6a5778 100755 --- a/cloudinit/cmd/devel/make_mime.py +++ b/cloudinit/cmd/devel/make_mime.py @@ -22,8 +22,10 @@ def file_content_type(text): try: filename, content_type = text.split(":", 1) return (open(filename, 'r'), filename, content_type.strip()) - except ValueError: - raise argparse.ArgumentError(text, "Invalid value for %r" % (text)) + except ValueError as e: + raise argparse.ArgumentError( + text, "Invalid value for %r" % (text) + ) from e def get_parser(parser=None): diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index d957cfe3..a7bdc703 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -251,7 +251,9 @@ def enumerate_disk(device, nodeps=False): try: info, _err = subp.subp(lsblk_cmd) except Exception as e: - raise Exception("Failed during disk check for %s\n%s" % (device, e)) + raise Exception( + "Failed during disk check for %s\n%s" % (device, e) + ) from e parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0] @@ -313,7 +315,9 @@ def check_fs(device): try: out, _err = subp.subp(blkid_cmd, rcs=[0, 2]) except Exception as e: - raise Exception("Failed during disk check for %s\n%s" % (device, e)) + raise Exception( + "Failed during disk check for %s\n%s" % (device, e) + ) from e if out: if len(out.splitlines()) == 1: @@ -428,8 +432,8 @@ def get_dyn_func(*args): else: return globals()[func_name] - except KeyError: - raise Exception("No such function %s to call!" % func_name) + except KeyError as e: + raise Exception("No such function %s to call!" % func_name) from e def get_hdd_size(device): @@ -437,7 +441,7 @@ def get_hdd_size(device): size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device]) sector_size, _ = subp.subp([BLKDEV_CMD, '--getss', device]) except Exception as e: - raise Exception("Failed to get %s size\n%s" % (device, e)) + raise Exception("Failed to get %s size\n%s" % (device, e)) from e return int(size_in_bytes) / int(sector_size) @@ -455,8 +459,9 @@ def check_partition_mbr_layout(device, layout): try: out, _err = subp.subp(prt_cmd, data="%s\n" % layout) except Exception as e: - raise Exception("Error running partition command on %s\n%s" % ( - device, e)) + raise Exception( + "Error running partition command on %s\n%s" % (device, e) + ) from e found_layout = [] for line in out.splitlines(): @@ -485,8 +490,9 @@ def check_partition_gpt_layout(device, layout): try: out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV) except Exception as e: - raise Exception("Error running partition command on %s\n%s" % ( - device, e)) + raise Exception( + "Error running partition command on %s\n%s" % (device, e) + ) from e out_lines = iter(out.splitlines()) # Skip header. 
Output looks like: @@ -657,8 +663,10 @@ def purge_disk(device): try: LOG.info("Purging filesystem on /dev/%s", d['name']) subp.subp(wipefs_cmd) - except Exception: - raise Exception("Failed FS purge of /dev/%s" % d['name']) + except Exception as e: + raise Exception( + "Failed FS purge of /dev/%s" % d['name'] + ) from e purge_disk_ptable(device) @@ -700,7 +708,9 @@ def exec_mkpart_mbr(device, layout): try: subp.subp(prt_cmd, data="%s\n" % layout) except Exception as e: - raise Exception("Failed to partition device %s\n%s" % (device, e)) + raise Exception( + "Failed to partition device %s\n%s" % (device, e) + ) from e read_parttbl(device) @@ -997,6 +1007,6 @@ def mkfs(fs_cfg): try: subp.subp(fs_cmd, shell=shell) except Exception as e: - raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) + raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index c5d93f81..237c3d02 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -148,14 +148,14 @@ class ResizeGrowPart(object): if e.exit_code != 1: util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", diskdev, partnum) - raise ResizeFailedException(e) + raise ResizeFailedException(e) from e return (before, before) try: subp.subp(["growpart", diskdev, partnum]) except subp.ProcessExecutionError as e: util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum) - raise ResizeFailedException(e) + raise ResizeFailedException(e) from e return (before, get_size(partdev)) @@ -187,14 +187,14 @@ class ResizeGpart(object): except subp.ProcessExecutionError as e: if e.exit_code != 0: util.logexc(LOG, "Failed: gpart recover %s", diskdev) - raise ResizeFailedException(e) + raise ResizeFailedException(e) from e before = get_size(partdev) try: subp.subp(["gpart", "resize", "-i", partnum, diskdev]) except subp.ProcessExecutionError as e: util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev) - raise ResizeFailedException(e) + raise ResizeFailedException(e) from e # Since growing the FS requires a reboot, make sure we reboot # first when this module has finished. diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index ab953a0d..6fcb8a7d 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -196,10 +196,11 @@ def load_power_state(cfg, distro_name): try: delay = convert_delay(delay, fmt=fmt, scale=scale) - except ValueError: + except ValueError as e: raise TypeError( "power_state[delay] must be 'now' or '+m' (minutes)." - " found '%s'." % delay) + " found '%s'." % delay + ) from e args = command + [delay] if message: @@ -207,9 +208,10 @@ def load_power_state(cfg, distro_name): try: timeout = float(pstate.get('timeout', 30.0)) - except ValueError: - raise ValueError("failed to convert timeout '%s' to float." % - pstate['timeout']) + except ValueError as e: + raise ValueError( + "failed to convert timeout '%s' to float." 
% pstate['timeout'] + ) from e condition = pstate.get("condition", True) if not isinstance(condition, (str, list, bool)): diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 1354885a..2a2bc931 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -347,8 +347,10 @@ class SyslogRemotesLine(object): if self.port: try: int(self.port) - except ValueError: - raise ValueError("port '%s' is not an integer" % self.port) + except ValueError as e: + raise ValueError( + "port '%s' is not an integer" % self.port + ) from e if not self.addr: raise ValueError("address is required") diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index c13020d8..1d23d80d 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -85,7 +85,7 @@ def handle(name, cfg, cloud, log, _args): except Exception as e: msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname) util.logexc(log, msg) - raise SetHostnameError("%s: %s" % (msg, e)) + raise SetHostnameError("%s: %s" % (msg, e)) from e write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn}) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index 35ded5db..d61dc655 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -115,7 +115,7 @@ def configure_ua(token=None, enable=None): msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format( error=str(e)) util.logexc(LOG, msg) - raise RuntimeError(msg) + raise RuntimeError(msg) from e enable_errors = [] for service in enable: try: diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 2d8c7577..8a966aee 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -210,7 +210,7 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): error = SchemaValidationError(errors) if annotate: print(annotated_cloudconfig_file({}, content, error.schema_errors)) - raise error + raise error from e try: validate_cloudconfig_schema( cloudconfig, schema, strict=True) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index effb4276..2537608f 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -607,10 +607,11 @@ class Distro(metaclass=abc.ABCMeta): lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name]) try: cmd = next(tool for tool in lock_tools if subp.which(tool[0])) - except StopIteration: + except StopIteration as e: raise RuntimeError(( "Unable to lock user account '%s'. No tools available. 
" - " Tried: %s.") % (name, [c[0] for c in lock_tools])) + " Tried: %s.") % (name, [c[0] for c in lock_tools]) + ) from e try: subp.subp(cmd) except Exception as e: diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 038aa9ac..967be168 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -61,9 +61,9 @@ class Distro(distros.Distro): def _write_network_config(self, netconfig): try: return self._supported_write_network_config(netconfig) - except RendererNotFoundError: + except RendererNotFoundError as e: # Fall back to old _write_network - raise NotImplementedError + raise NotImplementedError from e def _write_network(self, settings): entries = net_util.translate_network(settings) diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 299d54b5..62929d03 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -150,9 +150,10 @@ class ResolvConf(object): tail = '' try: (cfg_opt, cfg_values) = head.split(None, 1) - except (IndexError, ValueError): - raise IOError("Incorrectly formatted resolv.conf line %s" - % (i + 1)) + except (IndexError, ValueError) as e: + raise IOError( + "Incorrectly formatted resolv.conf line %s" % (i + 1) + ) from e if cfg_opt not in ['nameserver', 'domain', 'search', 'sortlist', 'options']: raise IOError("Unexpected resolv.conf option %s" % (cfg_opt)) diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py index 72b5ac59..be0ca0ea 100644 --- a/cloudinit/gpg.py +++ b/cloudinit/gpg.py @@ -63,10 +63,11 @@ def recv_key(key, keyserver, retries=(1, 1)): "Import failed with exit code %d, will try again in %ss", error.exit_code, naplen) time.sleep(naplen) - except StopIteration: + except StopIteration as e: raise ValueError( ("Failed to import key '%s' from keyserver '%s' " - "after %d tries: %s") % (key, keyserver, trynum, error)) + "after %d tries: %s") % (key, keyserver, trynum, error) + ) from e def delete_key(key): diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py index ce3accf6..aadfbf86 100644 --- a/cloudinit/handlers/jinja_template.py +++ b/cloudinit/handlers/jinja_template.py @@ -83,7 +83,8 @@ def render_jinja_payload_from_file( if e.errno == EACCES: raise RuntimeError( 'Cannot render jinja template vars. No read permission on' - " '%s'. Try sudo" % instance_data_file) + " '%s'. 
Try sudo" % instance_data_file + ) from e rendered_payload = render_jinja_payload( payload, payload_fn, instance_data, debug) diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 322af77b..e233149a 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -506,7 +506,9 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): try: _rename_interfaces(extract_physdevs(netcfg)) except RuntimeError as e: - raise RuntimeError('Failed to apply network config names: %s' % e) + raise RuntimeError( + 'Failed to apply network config names: %s' % e + ) from e def interface_has_own_mac(ifname, strict=False): @@ -965,7 +967,8 @@ class EphemeralIPv4Network(object): self.prefix = mask_to_net_prefix(prefix_or_mask) except ValueError as e: raise ValueError( - 'Cannot setup network: {0}'.format(e)) + 'Cannot setup network: {0}'.format(e) + ) from e self.connectivity_url = connectivity_url self.interface = interface diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 7ca7262b..cc8dc17b 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -112,8 +112,8 @@ def _klibc_to_config_entry(content, mac_addrs=None): data = util.load_shell_content(content) try: name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6'] - except KeyError: - raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data") + except KeyError as e: + raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data") from e # ipconfig on precise does not write PROTO # IPv6 config gives us IPV6PROTO, not PROTO. diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 9230cf7a..4394c68b 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -82,8 +82,8 @@ class EphemeralDHCPv4(object): try: leases = maybe_perform_dhcp_discovery( self.iface, self.dhcp_log_func) - except InvalidDHCPLeaseFileError: - raise NoDHCPLeaseError() + except InvalidDHCPLeaseFileError as e: + raise NoDHCPLeaseError() from e if not leases: raise NoDHCPLeaseError() self.lease = leases[-1] diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 7bfe8be0..b2f7d31e 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -297,9 +297,10 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): command_type = command['type'] try: handler = self.command_handlers[command_type] - except KeyError: - raise RuntimeError("No handler found for" - " command '%s'" % command_type) + except KeyError as e: + raise RuntimeError( + "No handler found for command '%s'" % command_type + ) from e try: handler(self, command) except InvalidCommand: @@ -316,9 +317,10 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): continue try: handler = self.command_handlers[command_type] - except KeyError: - raise RuntimeError("No handler found for" - " command '%s'" % command_type) + except KeyError as e: + raise RuntimeError( + "No handler found for command '%s'" % command_type + ) from e try: handler(self, command) self._v2_common(command) @@ -914,9 +916,10 @@ def _normalize_route(route): if metric: try: normal_route['metric'] = int(metric) - except ValueError: + except ValueError as e: raise TypeError( - 'Route config metric {} is not an integer'.format(metric)) + 'Route config metric {} is not an integer'.format(metric) + ) from e return normal_route diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 86cc7c28..f3c6452b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ 
b/cloudinit/sources/DataSourceAzure.py @@ -1147,7 +1147,7 @@ def read_azure_ovf(contents): except Exception as e: error_str = "Invalid ovf-env.xml: %s" % e report_diagnostic_event(error_str) - raise BrokenAzureDataSource(error_str) + raise BrokenAzureDataSource(error_str) from e results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 355b4e2f..1d09c12a 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -617,9 +617,11 @@ def parse_strict_mode(cfgval): if sleep: try: sleep = int(sleep) - except ValueError: - raise ValueError("Invalid sleep '%s' in strict_id setting '%s': " - "not an integer" % (sleep, cfgval)) + except ValueError as e: + raise ValueError( + "Invalid sleep '%s' in strict_id setting '%s': not an integer" + % (sleep, cfgval) + ) from e else: sleep = None diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index d2aa9a58..8d196185 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -303,7 +303,8 @@ def read_md(): except sources.BrokenMetadata as e: raise RuntimeError( "Failed reading IBM config disk (platform=%s path=%s): %s" % - (platform, path, e)) + (platform, path, e) + ) from e ret.update(results) return ret diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index c80f70c2..9156925f 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -226,7 +226,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, except url_helper.UrlError as e: if e.code == 404 and not optional: raise MAASSeedDirMalformed( - "Missing required %s: %s" % (path, e)) + "Missing required %s: %s" % (path, e) + ) from e elif e.code != 404: raise e diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 12b1f94f..45481938 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -399,18 +399,23 @@ def read_context_disk_dir(source_dir, distro, asuser=None): if asuser is not None: try: pwd.getpwnam(asuser) - except KeyError: + except KeyError as e: raise BrokenContextDiskDir( "configured user '{user}' does not exist".format( - user=asuser)) + user=asuser) + ) from e try: path = os.path.join(source_dir, 'context.sh') content = util.load_file(path) context = parse_shell_config(content, asuser=asuser) except subp.ProcessExecutionError as e: - raise BrokenContextDiskDir("Error processing context.sh: %s" % (e)) + raise BrokenContextDiskDir( + "Error processing context.sh: %s" % (e) + ) from e except IOError as e: - raise NonContextDiskDir("Error reading context.sh: %s" % (e)) + raise NonContextDiskDir( + "Error reading context.sh: %s" % (e) + ) from e else: raise NonContextDiskDir("Missing context.sh") diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index bf539091..d4b43f44 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -194,10 +194,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): 'timeout': url_params.timeout_seconds}) except openstack.NonReadable as e: raise sources.InvalidMetaDataException(str(e)) - except (openstack.BrokenMetadata, IOError): + except (openstack.BrokenMetadata, IOError) as e: msg = 'Broken metadata address {addr}'.format( 
addr=self.metadata_address) - raise sources.InvalidMetaDataException(msg) + raise sources.InvalidMetaDataException(msg) from e return result diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 843b3a2a..f1f903bc 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -413,7 +413,9 @@ class JoyentMetadataClient(object): response.append(byte) except OSError as exc: if exc.errno == errno.EAGAIN: - raise JoyentMetadataTimeoutException(msg % as_ascii()) + raise JoyentMetadataTimeoutException( + msg % as_ascii() + ) from exc raise def _write(self, msg): diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 6156c75b..b968a96f 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -98,8 +98,10 @@ def get_boot_telemetry(): LOG.debug("Collecting boot telemetry") try: kernel_start = float(time.time()) - float(util.uptime()) - except ValueError: - raise RuntimeError("Failed to determine kernel start timestamp") + except ValueError as e: + raise RuntimeError( + "Failed to determine kernel start timestamp" + ) from e try: out, _ = subp.subp(['/bin/systemctl', @@ -116,12 +118,13 @@ def get_boot_telemetry(): user_start = kernel_start + (float(tsm) / 1000000) except subp.ProcessExecutionError as e: - raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s" - % e) + raise RuntimeError( + "Failed to get UserspaceTimestampMonotonic: %s" % e + ) from e except ValueError as e: - raise RuntimeError("Failed to parse " - "UserspaceTimestampMonotonic from systemd: %s" - % e) + raise RuntimeError( + "Failed to parse UserspaceTimestampMonotonic from systemd: %s" % e + ) from e try: out, _ = subp.subp(['/bin/systemctl', 'show', @@ -137,12 +140,14 @@ def get_boot_telemetry(): cloudinit_activation = kernel_start + (float(tsm) / 1000000) except subp.ProcessExecutionError as e: - raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s" - % e) + raise RuntimeError( + "Failed to get InactiveExitTimestampMonotonic: %s" % e + ) from e except ValueError as e: - raise RuntimeError("Failed to parse " - "InactiveExitTimestampMonotonic from systemd: %s" - % e) + raise RuntimeError( + "Failed to parse InactiveExitTimestampMonotonic from systemd: %s" + % e + ) from e evt = events.ReportingEvent( BOOT_EVENT_TYPE, 'boot-telemetry', @@ -642,9 +647,10 @@ class WALinuxAgentShim: try: name = os.path.basename(hook_file).replace('.json', '') dhcp_options[name] = json.loads(util.load_file((hook_file))) - except ValueError: + except ValueError as e: raise ValueError( - '{_file} is not valid JSON data'.format(_file=hook_file)) + '{_file} is not valid JSON data'.format(_file=hook_file) + ) from e return dhcp_options @staticmethod diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py index a74a3588..c2ad587b 100644 --- a/cloudinit/sources/helpers/netlink.py +++ b/cloudinit/sources/helpers/netlink.py @@ -74,7 +74,7 @@ def create_bound_netlink_socket(): netlink_socket.setblocking(0) except socket.error as e: msg = "Exception during netlink socket create: %s" % e - raise NetlinkCreateSocketError(msg) + raise NetlinkCreateSocketError(msg) from e LOG.debug("Created netlink socket") return netlink_socket diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 1050efb0..65e020c5 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -280,8 +280,9 
@@ class BaseReader(metaclass=abc.ABCMeta): try: data = translator(data) except Exception as e: - raise BrokenMetadata("Failed to process " - "path %s: %s" % (path, e)) + raise BrokenMetadata( + "Failed to process path %s: %s" % (path, e) + ) from e if found: results[name] = data @@ -291,8 +292,9 @@ class BaseReader(metaclass=abc.ABCMeta): try: metadata['random_seed'] = base64.b64decode(random_seed) except (ValueError, TypeError) as e: - raise BrokenMetadata("Badly formatted metadata" - " random_seed entry: %s" % e) + raise BrokenMetadata( + "Badly formatted metadata random_seed entry: %s" % e + ) from e # load any files that were provided files = {} @@ -304,8 +306,9 @@ class BaseReader(metaclass=abc.ABCMeta): try: files[path] = self._read_content_path(item) except Exception as e: - raise BrokenMetadata("Failed to read provided " - "file %s: %s" % (path, e)) + raise BrokenMetadata( + "Failed to read provided file %s: %s" % (path, e) + ) from e results['files'] = files # The 'network_config' item in metadata is a content pointer @@ -317,8 +320,9 @@ class BaseReader(metaclass=abc.ABCMeta): content = self._read_content_path(net_item, decode=True) results['network_config'] = content except IOError as e: - raise BrokenMetadata("Failed to read network" - " configuration: %s" % (e)) + raise BrokenMetadata( + "Failed to read network configuration: %s" % (e) + ) from e # To openstack, user can specify meta ('nova boot --meta=key=value') # and those will appear under metadata['meta']. @@ -370,8 +374,9 @@ class ConfigDriveReader(BaseReader): try: return util.load_json(self._path_read(path)) except Exception as e: - raise BrokenMetadata("Failed to process " - "path %s: %s" % (path, e)) + raise BrokenMetadata( + "Failed to process path %s: %s" % (path, e) + ) from e def read_v1(self): """Reads a version 1 formatted location. 
@@ -395,16 +400,17 @@ class ConfigDriveReader(BaseReader): path = found[name] try: contents = self._path_read(path) - except IOError: - raise BrokenMetadata("Failed to read: %s" % path) + except IOError as e: + raise BrokenMetadata("Failed to read: %s" % path) from e try: # Disable not-callable pylint check; pylint isn't able to # determine that every member of FILES_V1 has a callable in # the appropriate position md[key] = translator(contents) # pylint: disable=E1102 except Exception as e: - raise BrokenMetadata("Failed to process " - "path %s: %s" % (path, e)) + raise BrokenMetadata( + "Failed to process path %s: %s" % (path, e) + ) from e else: md[key] = copy.deepcopy(default) diff --git a/cloudinit/subp.py b/cloudinit/subp.py index 804ef3ca..3e4efa42 100644 --- a/cloudinit/subp.py +++ b/cloudinit/subp.py @@ -262,7 +262,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, raise ProcessExecutionError( cmd=args, reason=e, errno=e.errno, stdout="-" if decode else b"-", - stderr="-" if decode else b"-") + stderr="-" if decode else b"-" + ) from e finally: if devnull_fp: devnull_fp.close() diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index f3c0cf9c..caa88435 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -95,7 +95,7 @@ def read_file_or_url(url, **kwargs): code = e.errno if e.errno == ENOENT: code = NOT_FOUND - raise UrlError(cause=e, code=code, headers=None, url=url) + raise UrlError(cause=e, code=code, headers=None, url=url) from e return FileResponse(file_path, contents=contents) else: return readurl(url, **kwargs) @@ -575,8 +575,8 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, timestamp=None): try: import oauthlib.oauth1 as oauth1 - except ImportError: - raise NotImplementedError('oauth support is not available') + except ImportError as e: + raise NotImplementedError('oauth support is not available') from e if timestamp: timestamp = str(timestamp) diff --git a/cloudinit/util.py b/cloudinit/util.py index dd263803..cf9e349f 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -359,7 +359,7 @@ def decomp_gzip(data, quiet=True, decode=True): if quiet: return data else: - raise DecompressionError(str(e)) + raise DecompressionError(str(e)) from e def extract_usergroup(ug_pair): @@ -1363,7 +1363,7 @@ def chownbyname(fname, user=None, group=None): if group: gid = grp.getgrnam(group).gr_gid except KeyError as e: - raise OSError("Unknown user or group: %s" % (e)) + raise OSError("Unknown user or group: %s" % (e)) from e chownbyid(fname, uid, gid) @@ -2387,8 +2387,8 @@ def human2bytes(size): try: num = float(num) - except ValueError: - raise ValueError("'%s' is not valid input." % size_in) + except ValueError as e: + raise ValueError("'%s' is not valid input." 
% size_in) from e if num < 0: raise ValueError("'%s': cannot be negative" % size_in) diff --git a/tests/cloud_tests/platforms/azurecloud/instance.py b/tests/cloud_tests/platforms/azurecloud/instance.py index a136cf0d..eedbaae8 100644 --- a/tests/cloud_tests/platforms/azurecloud/instance.py +++ b/tests/cloud_tests/platforms/azurecloud/instance.py @@ -134,9 +134,10 @@ class AzureCloudInstance(Instance): self.vm_name, vm_params) LOG.debug('creating instance %s from image_id=%s', self.vm_name, self.image_id) - except CloudError: - raise RuntimeError('failed creating instance:\n{}'.format( - traceback.format_exc())) + except CloudError as e: + raise RuntimeError( + 'failed creating instance:\n{}'.format(traceback.format_exc()) + ) from e if wait: self.instance.wait() diff --git a/tests/cloud_tests/platforms/azurecloud/platform.py b/tests/cloud_tests/platforms/azurecloud/platform.py index cb62a74b..a664f612 100644 --- a/tests/cloud_tests/platforms/azurecloud/platform.py +++ b/tests/cloud_tests/platforms/azurecloud/platform.py @@ -59,9 +59,12 @@ class AzureCloudPlatform(Platform): self.vnet = self._create_vnet() self.subnet = self._create_subnet() self.nic = self._create_nic() - except CloudError: - raise RuntimeError('failed creating a resource:\n{}'.format( - traceback.format_exc())) + except CloudError as e: + raise RuntimeError( + 'failed creating a resource:\n{}'.format( + traceback.format_exc() + ) + ) from e def create_instance(self, properties, config, features, image_id, user_data=None): @@ -105,8 +108,10 @@ class AzureCloudPlatform(Platform): if image_id.find('__') > 0: image_id = image_id.split('__')[1] LOG.debug('image_id shortened to %s', image_id) - except KeyError: - raise RuntimeError('no images found for %s' % img_conf['release']) + except KeyError as e: + raise RuntimeError( + 'no images found for %s' % img_conf['release'] + ) from e return AzureCloudImage(self, img_conf, image_id) @@ -140,9 +145,11 @@ class AzureCloudPlatform(Platform): secret=azure_creds['clientSecret'], tenant=azure_creds['tenantId']) return credentials, subscription_id - except KeyError: - raise RuntimeError('Please configure Azure service principal' - ' credentials in %s' % cred_file) + except KeyError as e: + raise RuntimeError( + 'Please configure Azure service principal' + ' credentials in %s' % cred_file + ) from e def _create_resource_group(self): """Create resource group""" diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py index ab6037b1..d2e84047 100644 --- a/tests/cloud_tests/platforms/ec2/instance.py +++ b/tests/cloud_tests/platforms/ec2/instance.py @@ -49,11 +49,11 @@ class EC2Instance(Instance): # OutputBytes comes from platform._decode_console_output_as_bytes response = self.instance.console_output() return response['OutputBytes'] - except KeyError: + except KeyError as e: if 'Output' in response: msg = ("'OutputBytes' did not exist in console_output() but " "'Output' did: %s..." 
% response['Output'][0:128]) - raise util.PlatformError('console_log', msg) + raise util.PlatformError('console_log', msg) from e return ('No Console Output [%s]' % self.instance).encode() def destroy(self): diff --git a/tests/cloud_tests/platforms/ec2/platform.py b/tests/cloud_tests/platforms/ec2/platform.py index 7a3d0fe0..b61a2ffb 100644 --- a/tests/cloud_tests/platforms/ec2/platform.py +++ b/tests/cloud_tests/platforms/ec2/platform.py @@ -35,12 +35,14 @@ class EC2Platform(Platform): self.ec2_resource = b3session.resource('ec2') self.ec2_region = b3session.region_name self.key_name = self._upload_public_key(config) - except botocore.exceptions.NoRegionError: + except botocore.exceptions.NoRegionError as e: raise RuntimeError( - 'Please configure default region in $HOME/.aws/config') - except botocore.exceptions.NoCredentialsError: + 'Please configure default region in $HOME/.aws/config' + ) from e + except botocore.exceptions.NoCredentialsError as e: raise RuntimeError( - 'Please configure ec2 credentials in $HOME/.aws/credentials') + 'Please configure ec2 credentials in $HOME/.aws/credentials' + ) from e self.vpc = self._create_vpc() self.internet_gateway = self._create_internet_gateway() @@ -125,8 +127,10 @@ class EC2Platform(Platform): try: image_ami = image['id'] - except KeyError: - raise RuntimeError('No images found for %s!' % img_conf['release']) + except KeyError as e: + raise RuntimeError( + 'No images found for %s!' % img_conf['release'] + ) from e LOG.debug('found image: %s', image_ami) image = EC2Image(self, img_conf, image_ami) @@ -195,7 +199,7 @@ class EC2Platform(Platform): CidrBlock=self.ipv4_cidr, AmazonProvidedIpv6CidrBlock=True) except botocore.exceptions.ClientError as e: - raise RuntimeError(e) + raise RuntimeError(e) from e vpc.wait_until_available() self._tag_resource(vpc) diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index b27b9848..2b973a08 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -175,7 +175,8 @@ class LXDInstance(Instance): raise PlatformError( "console log", "Console log failed [%d]: stdout=%s stderr=%s" % ( - e.exit_code, e.stdout, e.stderr)) + e.exit_code, e.stdout, e.stderr) + ) from e def reboot(self, wait=True): """Reboot instance.""" diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py index 58f65e52..ac3b6563 100644 --- a/tests/cloud_tests/platforms/platforms.py +++ b/tests/cloud_tests/platforms/platforms.py @@ -74,8 +74,10 @@ class Platform(object): try: return tmirror.json_entries[0] - except IndexError: - raise RuntimeError('no images found with filter: %s' % img_filter) + except IndexError as e: + raise RuntimeError( + 'no images found with filter: %s' % img_filter + ) from e class FilterMirror(mirrors.BasicMirrorWriter): diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py index e8c371ca..bb9785d3 100644 --- a/tests/cloud_tests/testcases/__init__.py +++ b/tests/cloud_tests/testcases/__init__.py @@ -21,8 +21,10 @@ def discover_test(test_name): config.name_sanitize(test_name)) try: testmod = importlib.import_module(testmod_name) - except NameError: - raise ValueError('no test verifier found at: {}'.format(testmod_name)) + except NameError as e: + raise ValueError( + 'no test verifier found at: {}'.format(testmod_name) + ) from e found = [mod for name, mod in inspect.getmembers(testmod) if (inspect.isclass(mod) diff --git 
a/tools/mock-meta.py b/tools/mock-meta.py index a58e0260..9dd067b9 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -258,12 +258,14 @@ class MetaDataHandler(object): try: key_id = int(mybe_key) key_name = key_ids[key_id] - except ValueError: - raise WebException(hclient.BAD_REQUEST, - "%s: not an integer" % mybe_key) - except IndexError: - raise WebException(hclient.NOT_FOUND, - "Unknown key id %r" % mybe_key) + except ValueError as e: + raise WebException( + hclient.BAD_REQUEST, "%s: not an integer" % mybe_key + ) from e + except IndexError as e: + raise WebException( + hclient.NOT_FOUND, "Unknown key id %r" % mybe_key + ) from e # Extract the possible sub-params result = traverse(nparams[1:], { "openssh-key": "\n".join(avail_keys[key_name]), diff --git a/tox.ini b/tox.ini index f619dbf5..a92c63e0 100644 --- a/tox.ini +++ b/tox.ini @@ -23,7 +23,7 @@ setenv = basepython = python3 deps = # requirements - pylint==2.5.3 + pylint==2.6.0 # test-requirements because unit tests are now present in cloudinit tree -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt -- cgit v1.2.3
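
The W0707 (raise-missing-from) cleanups in the pylint 2.6.0 commit all apply one idiom: when an except block re-raises a different exception, chain it to the caught one with "raise ... from". A minimal standalone sketch of that pattern, using a hypothetical parse_timeout() helper shaped like the cc_power_state_change.py timeout handling above (the helper name and default are illustrative, not part of cloud-init):

    def parse_timeout(value, default=30.0):
        """Convert a user-supplied timeout to float, keeping the original error."""
        if value is None:
            return default
        try:
            return float(value)
        except ValueError as e:
            # 'from e' records the caught ValueError as __cause__, so the full
            # exception chain shows up in the traceback; omitting it is what
            # pylint 2.6.0 reports as W0707 (raise-missing-from).
            raise ValueError(
                "failed to convert timeout '%s' to float." % value
            ) from e

Calling parse_timeout('soon') raises a ValueError whose __cause__ is the original float() error, so the root cause stays visible in logs without changing the exception type that callers handle.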
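
The updated test_handler_power_state.py tests also reflect the new load_power_state() signature from the Alpine support commit: the distro name is now a second required argument. A usage sketch based only on what those tests exercise (the power_state values are illustrative):

    from cloudinit.config import cc_power_state_change as psc

    # Values taken from the unit tests above; any real cloud-config would do.
    cfg = {'power_state': {'mode': 'poweroff', 'delay': '+30',
                           'message': 'GOODBYE'}}

    # The distro name is passed alongside the config; the tests use 'ubuntu'.
    cmd, timeout, condition = psc.load_power_state(cfg, 'ubuntu')

    # cmd is the shutdown command as an argv list (the message is appended
    # when present), timeout defaults to 30.0, and condition defaults to True
    # when the config does not set them.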